Update ia64 patch to 2.5.69-030521, throwing away the parts included
[linux-flexiantxendom0-3.2.10.git] / arch / ia64 / sn / io / sn2 / pcibr / pcibr_intr.c
1 /*
2  *
3  * This file is subject to the terms and conditions of the GNU General Public
4  * License.  See the file "COPYING" in the main directory of this archive
5  * for more details.
6  *
7  * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
8  */
9
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <asm/sn/sgi.h>
14 #include <asm/sn/sn_cpuid.h>
15 #include <asm/sn/addrs.h>
16 #include <asm/sn/arch.h>
17 #include <asm/sn/iograph.h>
18 #include <asm/sn/invent.h>
19 #include <asm/sn/hcl.h>
20 #include <asm/sn/labelcl.h>
21 #include <asm/sn/xtalk/xwidget.h>
22 #include <asm/sn/pci/bridge.h>
23 #include <asm/sn/pci/pciio.h>
24 #include <asm/sn/pci/pcibr.h>
25 #include <asm/sn/pci/pcibr_private.h>
26 #include <asm/sn/pci/pci_defs.h>
27 #include <asm/sn/prio.h>
28 #include <asm/sn/xtalk/xbow.h>
29 #include <asm/sn/ioc3.h>
30 #include <asm/sn/io.h>
31 #include <asm/sn/sn_private.h>
32
#ifdef __ia64
#define rmallocmap atemapalloc
#define rmfreemap atemapfree
#define rmfree atefree
#define rmalloc atealloc

/*
 * Swap *location from old_ptr to new_ptr and return 1, or return 0 if
 * *location no longer holds old_ptr.  As the FIXME records, this is a
 * plain read-modify-write, NOT an atomic compare-and-swap.
 */
inline int
compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
{
	int swapped = 0;

	FIXME("compare_and_swap_ptr : NOT ATOMIC");
	if (*location == old_ptr) {
		*location = new_ptr;
		swapped = 1;
	}
	return swapped;
}
#endif
51
52 unsigned                pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
53 pcibr_intr_t            pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
54 void                    pcibr_intr_free(pcibr_intr_t);
55 void              pcibr_setpciint(xtalk_intr_t);
56 int                     pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
57 void                    pcibr_intr_disconnect(pcibr_intr_t);
58
59 vertex_hdl_t            pcibr_intr_cpu_get(pcibr_intr_t);
60 void                    pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
61 void                    pcibr_intr_func(intr_arg_t);
62
63 extern pcibr_info_t      pcibr_info_get(vertex_hdl_t);
64
65 /* =====================================================================
66  *    INTERRUPT MANAGEMENT
67  */
68
69 unsigned
70 pcibr_intr_bits(pciio_info_t info,
71                 pciio_intr_line_t lines, int nslots)
72 {
73     pciio_slot_t            slot = PCIBR_INFO_SLOT_GET_INT(info);
74     unsigned                bbits = 0;
75
76     /*
77      * Currently favored mapping from PCI
78      * slot number and INTA/B/C/D to Bridge
79      * PCI Interrupt Bit Number:
80      *
81      *     SLOT     A B C D
82      *      0       0 4 0 4
83      *      1       1 5 1 5
84      *      2       2 6 2 6
85      *      3       3 7 3 7
86      *      4       4 0 4 0
87      *      5       5 1 5 1
88      *      6       6 2 6 2
89      *      7       7 3 7 3
90      */
91
92     if (slot < nslots) {
93         if (lines & (PCIIO_INTR_LINE_A| PCIIO_INTR_LINE_C))
94             bbits |= 1 << slot;
95         if (lines & (PCIIO_INTR_LINE_B| PCIIO_INTR_LINE_D))
96             bbits |= 1 << (slot ^ 4);
97     }
98     return bbits;
99 }
100
101
102 /*
103  *      Get the next wrapper pointer queued in the interrupt circular buffer.
104  */
105 pcibr_intr_wrap_t
106 pcibr_wrap_get(pcibr_intr_cbuf_t cbuf)
107 {
108     pcibr_intr_wrap_t   wrap;
109
110         if (cbuf->ib_in == cbuf->ib_out)
111             PRINT_PANIC( "pcibr intr circular buffer empty, cbuf=0x%p, ib_in=ib_out=%d\n",
112                 (void *)cbuf, cbuf->ib_out);
113
114         wrap = cbuf->ib_cbuf[cbuf->ib_out++];
115         cbuf->ib_out = cbuf->ib_out % IBUFSIZE;
116         return(wrap);
117 }
118
119 /* 
120  *      Queue a wrapper pointer in the interrupt circular buffer.
121  */
122 void
123 pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
124 {
125         int     in;
126         int     s;
127
128         /*
129          * Multiple CPUs could be executing this code simultaneously
130          * if a handler has registered multiple interrupt lines and
131          * the interrupts are directed to different CPUs.
132          */
133         s = mutex_spinlock(&cbuf->ib_lock);
134         in = (cbuf->ib_in + 1) % IBUFSIZE;
135         if (in == cbuf->ib_out) 
136             PRINT_PANIC( "pcibr intr circular buffer full, cbuf=0x%p, ib_in=%d\n",
137                 (void *)cbuf, cbuf->ib_in);
138
139         cbuf->ib_cbuf[cbuf->ib_in] = wrap;
140         cbuf->ib_in = in;
141         mutex_spinunlock(&cbuf->ib_lock, s);
142         return;
143 }
144
145 /*
146  *      On SN systems there is a race condition between a PIO read response
147  *      and DMA's.  In rare cases, the read response may beat the DMA, causing
148  *      the driver to think that data in memory is complete and meaningful.
149  *      This code eliminates that race.
150  *      This routine is called by the PIO read routines after doing the read.
151  *      This routine then forces a fake interrupt on another line, which
152  *      is logically associated with the slot that the PIO is addressed to.
153  *      (see sn_dma_flush_init() )
154  *      It then spins while watching the memory location that the interrupt
155  *      is targetted to.  When the interrupt response arrives, we are sure
156  *      that the DMA has landed in memory and it is safe for the driver
157  *      to proceed.
158  */
159
160 extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
161
162 void
163 sn_dma_flush(unsigned long addr) {
164         nasid_t nasid;
165         int wid_num;
166         volatile struct sn_flush_device_list *p;
167         int i,j;
168         int bwin;
169         unsigned long flags;
170
171         nasid = NASID_GET(addr);
172         wid_num = SWIN_WIDGETNUM(addr);
173         bwin = BWIN_WINDOWNUM(addr);
174
175         if (flush_nasid_list[nasid].widget_p == NULL) return;
176         if (bwin > 0) {
177                 bwin--;
178                 switch (bwin) {
179                         case 0:
180                                 wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
181                                 break;
182                         case 1:
183                                 wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
184                                 break;
185                         case 2: 
186                                 wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
187                                 break;
188                         case 3: 
189                                 wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
190                                 break;
191                         case 4: 
192                                 wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
193                                 break;
194                         case 5: 
195                                 wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
196                                 break;
197                         case 6: 
198                                 wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
199                                 break;
200                 }
201         }
202         if (flush_nasid_list[nasid].widget_p == NULL) return;
203         if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) return;
204         p = &flush_nasid_list[nasid].widget_p[wid_num][0];
205
206         // find a matching BAR
207
208         for (i=0; i<DEV_PER_WIDGET;i++) {
209                 for (j=0; j<PCI_ROM_RESOURCE;j++) {
210                         if (p->bar_list[j].start == 0) break;
211                         if (addr >= p->bar_list[j].start && addr <= p->bar_list[j].end) break;
212                 }
213                 if (j < PCI_ROM_RESOURCE && p->bar_list[j].start != 0) break;
214                 p++;
215         }
216
217         // if no matching BAR, return without doing anything.
218
219         if (i == DEV_PER_WIDGET) return;
220
221         spin_lock_irqsave(&p->flush_lock, flags);
222
223         p->flush_addr = 0;
224
225         // force an interrupt.
226
227         *(bridgereg_t *)(p->force_int_addr) = 1;
228
229         // wait for the interrupt to come back.
230
231         while (p->flush_addr != 0x10f);
232
233         // okay, everything is synched up.
234         spin_unlock_irqrestore(&p->flush_lock, flags);
235
236         return;
237 }
238
239 EXPORT_SYMBOL(sn_dma_flush);
240
241 /*
242  *      There are end cases where a deadlock can occur if interrupt 
243  *      processing completes and the Bridge b_int_status bit is still set.
244  *
245  *      One scenerio is if a second PCI interrupt occurs within 60ns of
246  *      the previous interrupt being cleared. In this case the Bridge
247  *      does not detect the transition, the Bridge b_int_status bit
248  *      remains set, and because no transition was detected no interrupt
249  *      packet is sent to the Hub/Heart.
250  *
251  *      A second scenerio is possible when a b_int_status bit is being
252  *      shared by multiple devices:
253  *                                              Device #1 generates interrupt
254  *                                              Bridge b_int_status bit set
255  *                                              Device #2 generates interrupt
256  *              interrupt processing begins
257  *                ISR for device #1 runs and
258  *                      clears interrupt
259  *                                              Device #1 generates interrupt
260  *                ISR for device #2 runs and
261  *                      clears interrupt
262  *                                              (b_int_status bit still set)
263  *              interrupt processing completes
264  *                
265  *      Interrupt processing is now complete, but an interrupt is still
266  *      outstanding for Device #1. But because there was no transition of
267  *      the b_int_status bit, no interrupt packet will be generated and
268  *      a deadlock will occur.
269  *
270  *      To avoid these deadlock situations, this function is used
271  *      to check if a specific Bridge b_int_status bit is set, and if so,
272  *      cause the setting of the corresponding interrupt bit.
273  *
274  *      On a XBridge (SN1) and PIC (SN2), we do this by writing the appropriate Bridge Force 
275  *      Interrupt register.
276  */
277 void
278 pcibr_force_interrupt(pcibr_intr_t intr)
279 {
280         unsigned        bit;
281         unsigned        bits;
282         pcibr_soft_t    pcibr_soft = intr->bi_soft;
283         bridge_t       *bridge = pcibr_soft->bs_base;
284
285         bits = intr->bi_ibits;
286         for (bit = 0; bit < 8; bit++) {
287                 if (bits & (1 << bit)) {
288
289                         PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
290                                 "pcibr_force_interrupt: bit=0x%x\n", bit));
291
292                         if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
293                                 bridge->b_force_pin[bit].intr = 1;
294                         }
295                 }
296         }
297 }
298
/*ARGSUSED */
/*
 * Allocate a pcibr interrupt handle for the PCI device at pconn_vhdl,
 * covering every INTA..INTD line set in 'lines'.  For each Bridge
 * interrupt bit implied by the lines, ensure xtalk interrupt resources
 * exist (allocated on first use) and link the new handle into the
 * per-bit wrapper list via lock-free compare_and_swap insertion so
 * several devices can share one Bridge bit.
 *
 * Returns the new handle, or NULL/0 when allocation fails.
 */
pcibr_intr_t
pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
                 device_desc_t dev_desc,
                 pciio_intr_line_t lines,
                 vertex_hdl_t owner_dev)
{
    pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
    pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
    vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
    bridge_t               *bridge = pcibr_soft->bs_base;
    int                     is_threaded = 0;   /* pcibr layer owns threading; see below */

    xtalk_intr_t           *xtalk_intr_p;
    pcibr_intr_t           *pcibr_intr_p;
    pcibr_intr_list_t      *intr_list_p;

    unsigned                pcibr_int_bits;
    unsigned                pcibr_int_bit;
    xtalk_intr_t            xtalk_intr = (xtalk_intr_t)0;
    hub_intr_t              hub_intr;
    pcibr_intr_t            pcibr_intr;
    pcibr_intr_list_t       intr_entry;
    pcibr_intr_list_t       intr_list;
    bridgereg_t             int_dev;


    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                "pcibr_intr_alloc: %s%s%s%s%s\n",
                !(lines & 15) ? " No INTs?" : "",
                lines & 1 ? " INTA" : "",
                lines & 2 ? " INTB" : "",
                lines & 4 ? " INTC" : "",
                lines & 8 ? " INTD" : ""));

    NEW(pcibr_intr);
    if (!pcibr_intr)
        return NULL;

    /* Fill in the handle; func/arg stay unset until pcibr_intr_connect(). */
    pcibr_intr->bi_dev = pconn_vhdl;
    pcibr_intr->bi_lines = lines;
    pcibr_intr->bi_soft = pcibr_soft;
    pcibr_intr->bi_ibits = 0;           /* bits will be added below */
    pcibr_intr->bi_func = 0;            /* unset until connect */
    pcibr_intr->bi_arg = 0;             /* unset until connect */
    pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
    pcibr_intr->bi_mustruncpu = CPU_NONE;
    pcibr_intr->bi_ibuf.ib_in = 0;
    pcibr_intr->bi_ibuf.ib_out = 0;
    mutex_spinlock_init(&pcibr_intr->bi_ibuf.ib_lock);
    /* Translate INTA..INTD lines into a Bridge interrupt bit mask. */
    pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines, 
                PCIBR_NUM_SLOTS(pcibr_soft));


    /*
     * For each PCI interrupt line requested, figure
     * out which Bridge PCI Interrupt Line it maps
     * to, and make sure there are xtalk resources
     * allocated for it.
     */
    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                "pcibr_intr_alloc: pcibr_int_bits: 0x%x\n", pcibr_int_bits));
    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
        if (pcibr_int_bits & (1 << pcibr_int_bit)) {
            xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;

            xtalk_intr = *xtalk_intr_p;

            if (xtalk_intr == NULL) {
                /*
                 * This xtalk_intr_alloc is constrained for two reasons:
                 * 1) Normal interrupts and error interrupts need to be delivered
                 *    through a single xtalk target widget so that there aren't any
                 *    ordering problems with DMA, completion interrupts, and error
                 *    interrupts. (Use of xconn_vhdl forces this.)
                 *
                 * 2) On SN1, addressing constraints on SN1 and Bridge force
                 *    us to use a single PI number for all interrupts from a
                 *    single Bridge. (SN1-specific code forces this).
                 */

                /*
                 * All code dealing with threaded PCI interrupt handlers
                 * is located at the pcibr level. Because of this,
                 * we always want the lower layers (hub/heart_intr_alloc, 
                 * intr_level_connect) to treat us as non-threaded so we
                 * don't set up a duplicate threaded environment. We make
                 * this happen by calling a special xtalk interface.
                 */
                xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc, 
                        owner_dev);

                PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                            "pcibr_intr_alloc: xtalk_intr=0x%x\n", xtalk_intr));

                /* both an assert and a runtime check on this:
                 * we need to check in non-DEBUG kernels, and
                 * the ASSERT gets us more information when
                 * we use DEBUG kernels.
                 */
                ASSERT(xtalk_intr != NULL);
                if (xtalk_intr == NULL) {
                    /* it is quite possible that our
                     * xtalk_intr_alloc failed because
                     * someone else got there first,
                     * and we can find their results
                     * in xtalk_intr_p.
                     */
                    if (!*xtalk_intr_p) {
#ifdef SUPPORT_PRINTING_V_FORMAT
                        printk(KERN_ALERT  
                                "pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
                                xconn_vhdl);
#else
                        printk(KERN_ALERT  
                                "pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
                                (void *)xconn_vhdl);
#endif
                        /* yes, we leak resources here. */
                        return 0;
                    }
                } else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
                    /*
                     * We won the race to install xtalk_intr; now tell
                     * the bridge which slot is using this interrupt line.
                     */
                    int_dev = bridge->b_int_device;
                    int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
                    int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
                    bridge->b_int_device = int_dev;     /* XXXMP */

                    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                                "bridge intr bit %d clears my wrb\n",
                                pcibr_int_bit));
                } else {
                    /* someone else got one allocated first;
                     * free the one we just created, and
                     * retrieve the one they allocated.
                     */
                    xtalk_intr_free(xtalk_intr);
                    xtalk_intr = *xtalk_intr_p;
#if PARANOID
                    /* once xtalk_intr is set, we never clear it,
                     * so if the CAS fails above, this condition
                     * can "never happen" ...
                     */
                    if (!xtalk_intr) {
                        printk(KERN_ALERT  
                                "pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
                                xconn_vhdl);
                        /* yes, we leak resources here. */
                        return 0;
                    }
#endif
                }
            }

            pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;

            /*
             * Insert this handle into the per-bit wrapper list using a
             * sequence of CAS attempts: first-entry, erased-first-slot,
             * second-entry, then walk the chain.  The order of these
             * attempts is load-bearing; do not reorder.
             */
            NEW(intr_entry);
            intr_entry->il_next = NULL;
            intr_entry->il_intr = pcibr_intr;
            intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
            intr_list_p = 
                &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;

            PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                        "Bridge bit 0x%x wrap=0x%x\n", pcibr_int_bit,
                        pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap));

            if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
                /* we are the first interrupt on this bridge bit.
                 */
                PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                            "INT 0x%x (bridge bit %d) allocated [FIRST]\n",
                            pcibr_int_bits, pcibr_int_bit));
                continue;
            }
            intr_list = *intr_list_p;
            pcibr_intr_p = &intr_list->il_intr;
            if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
                /* first entry on list was erased,
                 * and we replaced it, so we
                 * don't need our intr_entry.
                 */
                DEL(intr_entry);
                PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                            "INT 0x%x (bridge bit %d) replaces erased first\n",
                            pcibr_int_bits, pcibr_int_bit));
                continue;
            }
            intr_list_p = &intr_list->il_next;
            if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
                /* we are the new second interrupt on this bit.
                 * The bit is now shared between devices.
                 */
                pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
                PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                            "INT 0x%x (bridge bit %d) is new SECOND\n",
                            pcibr_int_bits, pcibr_int_bit));
                continue;
            }
            while (1) {
                pcibr_intr_p = &intr_list->il_intr;
                if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
                    /* an entry on list was erased,
                     * and we replaced it, so we
                     * don't need our intr_entry.
                     */
                    DEL(intr_entry);

                    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                                "INT 0x%x (bridge bit %d) replaces erase Nth\n",
                                pcibr_int_bits, pcibr_int_bit));
                    break;
                }
                intr_list_p = &intr_list->il_next;
                if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
                    /* entry appended to share list
                     */
                    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
                                "INT 0x%x (bridge bit %d) is new Nth\n",
                                pcibr_int_bits, pcibr_int_bit));
                    break;
                }
                /* step to next record in chain
                 */
                intr_list = *intr_list_p;
            }
        }
    }

#if DEBUG && INTR_DEBUG
    printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
#endif
    /*
     * NOTE(review): xtalk_intr still holds whatever the last iteration
     * left in it; if 'lines' implied no bridge bits it is still 0 and
     * the dereference below would fault.  Presumably callers always
     * request at least one valid line - confirm.
     */
    hub_intr = (hub_intr_t)xtalk_intr;
    pcibr_intr->bi_irq = hub_intr->i_bit;
    pcibr_intr->bi_cpu = hub_intr->i_cpuid;
    return pcibr_intr;
}
539
/*ARGSUSED */
/*
 * Release a handle allocated by pcibr_intr_alloc(): erase this handle
 * from each per-bit wrapper list (lock-free CAS erase to NULL, leaving
 * the list node in place for reuse), and for bits that were never
 * shared, free the underlying xtalk interrupt and clear the bridge's
 * device->pin mapping for that bit.  Finally frees the handle itself.
 */
void
pcibr_intr_free(pcibr_intr_t pcibr_intr)
{
    unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
    pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
    unsigned                pcibr_int_bit;
    pcibr_intr_list_t       intr_list;
    int                     intr_shared;
    xtalk_intr_t            *xtalk_intrp;

    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
        if (pcibr_int_bits & (1 << pcibr_int_bit)) {
            /* Walk the wrapper list and CAS our handle out of it. */
            for (intr_list = 
                     pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
                 intr_list != NULL;
                 intr_list = intr_list->il_next)
                if (compare_and_swap_ptr((void **) &intr_list->il_intr, 
                                         pcibr_intr, 
                                         NULL)) {

                    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, 
                                pcibr_intr->bi_dev,
                                "pcibr_intr_free: cleared hdlr from bit 0x%x\n",
                                pcibr_int_bit));
                }
            /* If this interrupt line is not being shared between multiple
             * devices release the xtalk interrupt resources.
             */
            intr_shared = 
                pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
            xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;

            if ((!intr_shared) && (*xtalk_intrp)) {

                bridge_t        *bridge = pcibr_soft->bs_base;
                bridgereg_t     int_dev;

                xtalk_intr_free(*xtalk_intrp);
                *xtalk_intrp = 0;

                /* Clear the PCI device interrupt to bridge interrupt pin
                 * mapping.
                 */
                int_dev = bridge->b_int_device;
                int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
                bridge->b_int_device = int_dev;

            }
        }
    }
    DEL(pcibr_intr);
}
593
594 void
595 pcibr_setpciint(xtalk_intr_t xtalk_intr)
596 {
597     iopaddr_t            addr;
598     xtalk_intr_vector_t  vect;
599     vertex_hdl_t         vhdl;
600     bridge_t            *bridge;
601     picreg_t    *int_addr;
602
603     addr = xtalk_intr_addr_get(xtalk_intr);
604     vect = xtalk_intr_vector_get(xtalk_intr);
605     vhdl = xtalk_intr_dev_get(xtalk_intr);
606     bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
607
608     int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
609     *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
610                      (PIC_INT_ADDR_HOST & addr));
611 }
612
613 /*ARGSUSED */
614 int
615 pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
616 {
617     pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
618     bridge_t               *bridge = pcibr_soft->bs_base;
619     unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
620     unsigned                pcibr_int_bit;
621     uint64_t                int_enable;
622     unsigned long           s;
623
624     if (pcibr_intr == NULL)
625         return -1;
626
627     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
628                 "pcibr_intr_connect: intr_func=0x%x\n",
629                 pcibr_intr));
630
631     pcibr_intr->bi_func = intr_func;
632     pcibr_intr->bi_arg = intr_arg;
633     *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
634
635     /*
636      * For each PCI interrupt line requested, figure
637      * out which Bridge PCI Interrupt Line it maps
638      * to, and make sure there are xtalk resources
639      * allocated for it.
640      */
641     for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
642         if (pcibr_int_bits & (1 << pcibr_int_bit)) {
643             pcibr_intr_wrap_t       intr_wrap;
644             xtalk_intr_t            xtalk_intr;
645             void                   *int_addr;
646
647             xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
648             intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
649
650             /*
651              * If this interrupt line is being shared and the connect has
652              * already been done, no need to do it again.
653              */
654             if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
655                 continue;
656
657
658             /*
659              * Use the pcibr wrapper function to handle all Bridge interrupts
660              * regardless of whether the interrupt line is shared or not.
661              */
662             if (IS_PIC_SOFT(pcibr_soft)) 
663                 int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
664             else
665                 int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
666
667             xtalk_intr_connect(xtalk_intr, pcibr_intr_func, (intr_arg_t) intr_wrap,
668                                         (xtalk_intr_setfunc_t) pcibr_setpciint,
669                                                 (void *)int_addr);
670
671             pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
672
673             PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
674                         "pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
675                         "pcibr_int_bit=0x%x\n", int_addr,
676                          *(picreg_t *)int_addr,
677                         pcibr_int_bit));
678         }
679
680         /* PIC WAR. PV# 854697
681          * On PIC we must write 64-bit MMRs with 64-bit stores
682          */
683         s = pcibr_lock(pcibr_soft);
684         if (IS_PIC_SOFT(pcibr_soft) &&
685                         PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
686             int_enable = bridge->p_int_enable_64;
687             int_enable |= pcibr_int_bits;
688             bridge->p_int_enable_64 = int_enable;
689         } else {
690             bridgereg_t int_enable;
691
692             int_enable = bridge->b_int_enable;
693             int_enable |= pcibr_int_bits;
694             bridge->b_int_enable = int_enable;
695         }
696         bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
697         pcibr_unlock(pcibr_soft, s);
698
699     return 0;
700 }
701
702 /*ARGSUSED */
703 void
704 pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
705 {
706     pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
707     bridge_t               *bridge = pcibr_soft->bs_base;
708     unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
709     unsigned                pcibr_int_bit;
710     pcibr_intr_wrap_t       intr_wrap;
711     uint64_t                int_enable;
712     unsigned long           s;
713
714     /* Stop calling the function. Now.
715      */
716     *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
717     pcibr_intr->bi_func = 0;
718     pcibr_intr->bi_arg = 0;
719     /*
720      * For each PCI interrupt line requested, figure
721      * out which Bridge PCI Interrupt Line it maps
722      * to, and disconnect the interrupt.
723      */
724
725     /* don't disable interrupts for lines that
726      * are shared between devices.
727      */
728     for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
729         if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
730             (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
731             pcibr_int_bits &= ~(1 << pcibr_int_bit);
732     if (!pcibr_int_bits)
733         return;
734
735     /* PIC WAR. PV# 854697
736      * On PIC we must write 64-bit MMRs with 64-bit stores
737      */
738     s = pcibr_lock(pcibr_soft);
739     if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
740         int_enable = bridge->p_int_enable_64;
741         int_enable &= ~pcibr_int_bits;
742         bridge->p_int_enable_64 = int_enable;
743     } else {
744         int_enable = (uint64_t)bridge->b_int_enable;
745         int_enable &= ~pcibr_int_bits;
746         bridge->b_int_enable = (bridgereg_t)int_enable;
747     }
748     bridge->b_wid_tflush;               /* wait until Bridge PIO complete */
749     pcibr_unlock(pcibr_soft, s);
750
751     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
752                 "pcibr_intr_disconnect: disabled int_bits=0x%x\n", 
753                 pcibr_int_bits));
754
755     for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
756         if (pcibr_int_bits & (1 << pcibr_int_bit)) {
757             void                   *int_addr;
758
759             /* if the interrupt line is now shared,
760              * do not disconnect it.
761              */
762             if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
763                 continue;
764
765             xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
766             pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;
767
768             PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
769                         "pcibr_intr_disconnect: disconnect int_bits=0x%x\n",
770                         pcibr_int_bits));
771
772             /* if we are sharing the interrupt line,
773              * connect us up; this closes the hole
774              * where the another pcibr_intr_alloc()
775              * was in progress as we disconnected.
776              */
777             if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
778                 continue;
779
780             intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
781             if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
782                 continue;
783
784             if (IS_PIC_SOFT(pcibr_soft))
785                 int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
786             else
787                 int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
788
789             xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
790                                 pcibr_intr_func, (intr_arg_t) intr_wrap,
791                                (xtalk_intr_setfunc_t)pcibr_setpciint,
792                                (void *)(long)pcibr_int_bit);
793             PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
794                         "pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
795                         pcibr_int_bit));
796         }
797 }
798
799 /*ARGSUSED */
800 vertex_hdl_t
801 pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
802 {
803     pcibr_soft_t            pcibr_soft = pcibr_intr->bi_soft;
804     unsigned                pcibr_int_bits = pcibr_intr->bi_ibits;
805     unsigned                pcibr_int_bit;
806
807     for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
808         if (pcibr_int_bits & (1 << pcibr_int_bit))
809             return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
810     return 0;
811 }
812
813 /* =====================================================================
814  *    INTERRUPT HANDLING
815  */
/*
 * Clear the Bridge widget-interrupt destination registers.  After this
 * call the Bridge no longer has a valid xtalk target for its widget
 * error interrupt; pcibr_setwidint() re-establishes one.
 */
void
pcibr_clearwidint(bridge_t *bridge)
{
    /* upper (target widget + high address bits) and lower address
     * halves are zeroed separately; each is a distinct MMIO store.
     */
    bridge->b_wid_int_upper = 0;
    bridge->b_wid_int_lower = 0;
}
822
823
/*
 * xtalk_intr_setfunc for Bridge widget interrupts: program the Bridge's
 * widget-interrupt destination registers (target widget number plus the
 * xtalk interrupt address) and the host-error vector.
 *
 * All interrupts from a single Bridge must target one destination; if a
 * conflicting destination is requested after the registers are already
 * set, the interrupt-placement heuristics have failed and we panic.
 */
void
pcibr_setwidint(xtalk_intr_t intr)
{
    xwidgetnum_t            targ = xtalk_intr_target_get(intr);
    iopaddr_t               addr = xtalk_intr_addr_get(intr);
    xtalk_intr_vector_t     vect = xtalk_intr_vector_get(intr);
    widgetreg_t             NEW_b_wid_int_upper, NEW_b_wid_int_lower;
    widgetreg_t             OLD_b_wid_int_upper, OLD_b_wid_int_lower;

    /* sfarg was registered as the bridge base by the caller */
    bridge_t               *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);

    /* upper register: target widget number in bits 19:16, plus the
     * high portion of the xtalk interrupt address.
     */
    NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
                               XTALK_ADDR_TO_UPPER(addr));
    NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);

    OLD_b_wid_int_upper = bridge->b_wid_int_upper;
    OLD_b_wid_int_lower = bridge->b_wid_int_lower;

    /* Verify that all interrupts from this Bridge are using a single PI */
    /* NOTE(review): the guard requires BOTH old register halves to be
     * non-zero before comparing; a prior setting whose lower half is 0
     * would be silently overwritten below -- confirm that combination
     * cannot occur in practice.
     */
    if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
        /*
         * Once set, these registers shouldn't change; they should
         * be set multiple times with the same values.
         *
         * If we're attempting to change these registers, it means
         * that our heuristics for allocating interrupts in a way
         * appropriate for IP35 have failed, and the admin needs to
         * explicitly direct some interrupts (or we need to make the
         * heuristics more clever).
         *
         * In practice, we hope this doesn't happen very often, if
         * at all.
         */
        if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
            (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
                printk(KERN_WARNING  "Interrupt allocation is too complex.\n");
                printk(KERN_WARNING  "Use explicit administrative interrupt targetting.\n");
                printk(KERN_WARNING  "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
                printk(KERN_WARNING  "NEW=0x%x/0x%x  OLD=0x%x/0x%x\n",
                        NEW_b_wid_int_upper, NEW_b_wid_int_lower,
                        OLD_b_wid_int_upper, OLD_b_wid_int_lower);
                PRINT_PANIC("PCI Bridge interrupt targetting error\n");
        }
    }

    /* commit destination and the vector delivered on widget errors */
    bridge->b_wid_int_upper = NEW_b_wid_int_upper;
    bridge->b_wid_int_lower = NEW_b_wid_int_lower;
    bridge->b_int_host_err = vect;

}
874
875 /*
876  * pcibr_intr_preset: called during mlreset time
877  * if the platform specific code needs to route
878  * one of the Bridge's xtalk interrupts before the
879  * xtalk infrastructure is available.
880  */
881 void
882 pcibr_xintr_preset(void *which_widget,
883                    int which_widget_intr,
884                    xwidgetnum_t targ,
885                    iopaddr_t addr,
886                    xtalk_intr_vector_t vect)
887 {
888     bridge_t               *bridge = (bridge_t *) which_widget;
889
890     if (which_widget_intr == -1) {
891         /* bridge widget error interrupt */
892         bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
893                                    XTALK_ADDR_TO_UPPER(addr));
894         bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
895         bridge->b_int_host_err = vect;
896 printk("pcibr_xintr_preset: b_wid_int_upper 0x%lx b_wid_int_lower 0x%lx b_int_host_err 0x%x\n",
897         ( (0x000F0000 & (targ << 16)) | XTALK_ADDR_TO_UPPER(addr)),
898         XTALK_ADDR_TO_LOWER(addr), vect);
899
900         /* turn on all interrupts except
901          * the PCI interrupt requests,
902          * at least at heart.
903          */
904         bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
905
906     } else {
907         /* routing a PCI device interrupt.
908          * targ and low 38 bits of addr must
909          * be the same as the already set
910          * value for the widget error interrupt.
911          */
912         bridge->b_int_addr[which_widget_intr].addr =
913             ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
914              (BRIDGE_INT_ADDR_FLD & vect));
915         /*
916          * now bridge can let it through;
917          * NB: still should be blocked at
918          * xtalk provider end, until the service
919          * function is set.
920          */
921         bridge->b_int_enable |= 1 << vect;
922     }
923     bridge->b_wid_tflush;               /* wait until Bridge PIO complete */
924 }
925
926
927 /*
928  * pcibr_intr_func()
929  *
930  * This is the pcibr interrupt "wrapper" function that is called,
931  * in interrupt context, to initiate the interrupt handler(s) registered
932  * (via pcibr_intr_alloc/connect) for the occurring interrupt. Non-threaded 
933  * handlers will be called directly, and threaded handlers will have their 
934  * thread woken up.
935  */
936 void
937 pcibr_intr_func(intr_arg_t arg)
938 {
939     pcibr_intr_wrap_t       wrap = (pcibr_intr_wrap_t) arg;
940     reg_p                   wrbf;
941     intr_func_t             func;
942     pcibr_intr_t            intr;
943     pcibr_intr_list_t       list;
944     int                     clearit;
945     int                     do_nonthreaded = 1;
946     int                     is_threaded = 0;
947     int                     x = 0;
948     pcibr_soft_t            pcibr_soft = wrap->iw_soft;
949     bridge_t               *bridge = pcibr_soft->bs_base;
950     uint64_t                p_enable = pcibr_soft->bs_int_enable;
951     int                     bit = wrap->iw_ibit;
952
953         /*
954          * PIC WAR.  PV#855272
955          * Early attempt at a workaround for the runaway
956          * interrupt problem.   Briefly disable the enable bit for
957          * this device.
958          */
959         if (IS_PIC_SOFT(pcibr_soft) &&
960                         PCIBR_WAR_ENABLED(PV855272, pcibr_soft)) {
961                 unsigned s;
962
963                 /* disable-enable interrupts for this bridge pin */
964
965                 p_enable &= ~(1 << bit);
966                 s = pcibr_lock(pcibr_soft);
967                 bridge->p_int_enable_64 = p_enable;
968                 p_enable |= (1 << bit);
969                 bridge->p_int_enable_64 = p_enable;
970                 pcibr_unlock(pcibr_soft, s);
971         }
972
973         /*
974          * If any handler is still running from a previous interrupt
975          * just return. If there's a need to call the handler(s) again,
976          * another interrupt will be generated either by the device or by
977          * pcibr_force_interrupt().
978          */
979
980         if (wrap->iw_hdlrcnt) {
981                 return;
982         }
983
984     /*
985      * Call all interrupt handlers registered.
986      * First, the pcibr_intrd threads for any threaded handlers will be
987      * awoken, then any non-threaded handlers will be called sequentially.
988      */
989         
990         clearit = 1;
991         while (do_nonthreaded) {
992             for (list = wrap->iw_list; list != NULL; list = list->il_next) {
993                 if ((intr = list->il_intr) && (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
994
995                     /*
996                      * This device may have initiated write
997                      * requests since the bridge last saw
998                      * an edge on this interrupt input; flushing
999                      * the buffer prior to invoking the handler
1000                      * should help but may not be sufficient if we 
1001                      * get more requests after the flush, followed
1002                      * by the card deciding it wants service, before
1003                      * the interrupt handler checks to see if things need
1004                      * to be done.
1005                      *
1006                      * There is a similar race condition if
1007                      * an interrupt handler loops around and
1008                      * notices further service is required.
1009                      * Perhaps we need to have an explicit
1010                      * call that interrupt handlers need to
1011                      * do between noticing that DMA to memory
1012                      * has completed, but before observing the
1013                      * contents of memory?
1014                      */
1015
1016                     if ((do_nonthreaded) && (!is_threaded)) {
1017                         /* Non-threaded -  Call the interrupt handler at interrupt level */
1018                         /* Only need to flush write buffers if sharing */
1019
1020                         if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
1021                             if ((x = *wrbf))    /* write request buffer flush */
1022 #ifdef SUPPORT_PRINTING_V_FORMAT
1023                                 printk(KERN_ALERT  "pcibr_intr_func %v: \n"
1024                                     "write buffer flush failed, wrbf=0x%x\n", 
1025                                     list->il_intr->bi_dev, wrbf);
1026 #else
1027                                 printk(KERN_ALERT  "pcibr_intr_func %p: \n"
1028                                     "write buffer flush failed, wrbf=0x%lx\n", 
1029                                     (void *)list->il_intr->bi_dev, (long) wrbf);
1030 #endif
1031                         }
1032                         func = intr->bi_func;
1033                         if ( func )
1034                                 func(intr->bi_arg);
1035                     }
1036                     clearit = 0;
1037                 }
1038             }
1039             do_nonthreaded = 0;
1040
1041             /*
1042              * If the non-threaded handler was the last to complete,
1043              * (i.e., no threaded handlers still running) force an
1044              * interrupt to avoid a potential deadlock situation.
1045              */
1046             if (wrap->iw_hdlrcnt == 0) {
1047                 pcibr_force_interrupt((pcibr_intr_t) wrap);
1048             }
1049         }
1050
1051         /* If there were no handlers,
1052          * disable the interrupt and return.
1053          * It will get enabled again after
1054          * a handler is connected.
1055          * If we don't do this, we would
1056          * sit here and spin through the
1057          * list forever.
1058          */
1059         if (clearit) {
1060             pcibr_soft_t            pcibr_soft = wrap->iw_soft;
1061             bridge_t               *bridge = pcibr_soft->bs_base;
1062             bridgereg_t             int_enable;
1063             bridgereg_t             mask = 1 << wrap->iw_ibit;
1064             unsigned long           s;
1065
1066             /* PIC BRINUGP WAR (PV# 854697):
1067              * On PIC we must write 64-bit MMRs with 64-bit stores
1068              */
1069             s = pcibr_lock(pcibr_soft);
1070             if (IS_PIC_SOFT(pcibr_soft) &&
1071                                 PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
1072                 int_enable = bridge->p_int_enable_64;
1073                 int_enable &= ~mask;
1074                 bridge->p_int_enable_64 = int_enable;
1075             } else {
1076                 int_enable = (uint64_t)bridge->b_int_enable;
1077                 int_enable &= ~mask;
1078                 bridge->b_int_enable = (bridgereg_t)int_enable;
1079             }
1080             bridge->b_wid_tflush;       /* wait until Bridge PIO complete */
1081             pcibr_unlock(pcibr_soft, s);
1082             return;
1083         }
1084 }