3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
7 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <asm/sn/sgi.h>
14 #include <asm/sn/sn_cpuid.h>
15 #include <asm/sn/addrs.h>
16 #include <asm/sn/arch.h>
17 #include <asm/sn/iograph.h>
18 #include <asm/sn/invent.h>
19 #include <asm/sn/hcl.h>
20 #include <asm/sn/labelcl.h>
21 #include <asm/sn/xtalk/xwidget.h>
22 #include <asm/sn/pci/bridge.h>
23 #include <asm/sn/pci/pciio.h>
24 #include <asm/sn/pci/pcibr.h>
25 #include <asm/sn/pci/pcibr_private.h>
26 #include <asm/sn/pci/pci_defs.h>
27 #include <asm/sn/prio.h>
28 #include <asm/sn/xtalk/xbow.h>
29 #include <asm/sn/ioc3.h>
30 #include <asm/sn/io.h>
31 #include <asm/sn/sn_private.h>
/* Compatibility shims: map the legacy IRIX rmap (resource map) allocator
 * names onto the local ATE (Address Translation Entry) map allocator
 * implementations used by this port. */
34 #define rmallocmap atemapalloc
35 #define rmfreemap atemapfree
36 #define rmfree atefree
37 #define rmalloc atealloc
/* compare_and_swap_ptr: if *location still holds old_ptr, store new_ptr.
 * The FIXME below records that this implementation is NOT atomic — it is a
 * plain read-compare-write, so concurrent callers can race.
 * NOTE(review): the success/failure return paths are elided in this view;
 * only the compare is visible. */
40 compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
42 FIXME("compare_and_swap_ptr : NOT ATOMIC");
43 if (*location == old_ptr) {
/* Forward declarations for the pcibr interrupt-management entry points
 * defined below, plus the externally-defined info accessor. */
52 unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
53 pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
54 void pcibr_intr_free(pcibr_intr_t);
55 void pcibr_setpciint(xtalk_intr_t);
56 int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
57 void pcibr_intr_disconnect(pcibr_intr_t);
59 vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
60 void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
61 void pcibr_intr_func(intr_arg_t);
63 extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
65 /* =====================================================================
66 * INTERRUPT MANAGEMENT
/* pcibr_intr_bits: translate a device's PCI slot number plus its requested
 * INTA/B/C/D line mask into a mask of Bridge PCI interrupt bit numbers.
 * Visible here: INTB/INTD map to bit (slot ^ 4).
 * NOTE(review): the INTA/INTC assignment and the bbits declaration/return
 * are elided in this view — presumably INTA/INTC map to the slot bit
 * itself; confirm against the full file. */
70 pcibr_intr_bits(pciio_info_t info,
71 pciio_intr_line_t lines, int nslots)
73 pciio_slot_t slot = PCIBR_INFO_SLOT_GET_INT(info);
77 * Currently favored mapping from PCI
78 * slot number and INTA/B/C/D to Bridge
79 * PCI Interrupt Bit Number:
93 if (lines & (PCIIO_INTR_LINE_A| PCIIO_INTR_LINE_C))
95 if (lines & (PCIIO_INTR_LINE_B| PCIIO_INTR_LINE_D))
96 bbits |= 1 << (slot ^ 4);
103 * Get the next wrapper pointer queued in the interrupt circular buffer.
/* Dequeues from ib_out with wraparound modulo IBUFSIZE; panics (via
 * PRINT_PANIC) if the buffer is empty (ib_in == ib_out).
 * NOTE(review): the `return wrap;` tail is elided in this view. */
106 pcibr_wrap_get(pcibr_intr_cbuf_t cbuf)
108 pcibr_intr_wrap_t wrap;
110 if (cbuf->ib_in == cbuf->ib_out)
111 PRINT_PANIC( "pcibr intr circular buffer empty, cbuf=0x%p, ib_in=ib_out=%d\n",
112 (void *)cbuf, cbuf->ib_out);
/* post-increment consumes the slot, then the index wraps at IBUFSIZE */
114 wrap = cbuf->ib_cbuf[cbuf->ib_out++];
115 cbuf->ib_out = cbuf->ib_out % IBUFSIZE;
120 * Queue a wrapper pointer in the interrupt circular buffer.
/* Enqueue under cbuf->ib_lock; panics if the buffer would become full
 * (next ib_in catching up to ib_out).
 * NOTE(review): the statement that advances cbuf->ib_in to `in` is elided
 * in this view (between the store and the unlock). */
123 pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
129 * Multiple CPUs could be executing this code simultaneously
130 * if a handler has registered multiple interrupt lines and
131 * the interrupts are directed to different CPUs.
/* lock protects ib_in/ib_cbuf against concurrent producers */
133 s = mutex_spinlock(&cbuf->ib_lock);
134 in = (cbuf->ib_in + 1) % IBUFSIZE;
135 if (in == cbuf->ib_out)
136 PRINT_PANIC( "pcibr intr circular buffer full, cbuf=0x%p, ib_in=%d\n",
137 (void *)cbuf, cbuf->ib_in);
139 cbuf->ib_cbuf[cbuf->ib_in] = wrap;
141 mutex_spinunlock(&cbuf->ib_lock, s);
146 * On SN systems there is a race condition between a PIO read response
147 * and DMA's. In rare cases, the read response may beat the DMA, causing
148 * the driver to think that data in memory is complete and meaningful.
149 * This code eliminates that race.
150 * This routine is called by the PIO read routines after doing the read.
151 * This routine then forces a fake interrupt on another line, which
152 * is logically associated with the slot that the PIO is addressed to.
153 * (see sn_dma_flush_init() )
154 * It then spins while watching the memory location that the interrupt
155 * is targetted to. When the interrupt response arrives, we are sure
156 * that the DMA has landed in memory and it is safe for the driver
160 extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
/* sn_dma_flush: close the PIO-read-vs-DMA race described in the comment
 * block above.  Decodes addr into nasid/widget/big-window, locates the
 * per-device flush record, forces a fake interrupt, and spins until the
 * interrupt response lands in memory (flush_addr == 0x10f), proving all
 * prior DMA has also landed. */
163 sn_dma_flush(unsigned long addr) {
166 volatile struct sn_flush_device_list *p;
/* decode the PIO address into node / widget / big-window number */
171 nasid = NASID_GET(addr);
172 wid_num = SWIN_WIDGETNUM(addr);
173 bwin = BWIN_WINDOWNUM(addr);
175 if (flush_nasid_list[nasid].widget_p == NULL) return;
/* NOTE(review): for big-window accesses the real widget number comes from
 * the node's ITTE mapping registers; the switch on `bwin` selecting among
 * these cases is elided in this view. */
180 wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
183 wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
186 wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
189 wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
192 wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
195 wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
198 wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
/* re-check after possible wid_num remap above */
202 if (flush_nasid_list[nasid].widget_p == NULL) return;
203 if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) return;
204 p = &flush_nasid_list[nasid].widget_p[wid_num][0];
206 // find a matching BAR
208 for (i=0; i<DEV_PER_WIDGET;i++) {
209 for (j=0; j<PCI_ROM_RESOURCE;j++) {
210 if (p->bar_list[j].start == 0) break;
211 if (addr >= p->bar_list[j].start && addr <= p->bar_list[j].end) break;
213 if (j < PCI_ROM_RESOURCE && p->bar_list[j].start != 0) break;
217 // if no matching BAR, return without doing anything.
219 if (i == DEV_PER_WIDGET) return;
221 spin_lock_irqsave(&p->flush_lock, flags);
225 // force an interrupt.
227 *(bridgereg_t *)(p->force_int_addr) = 1;
229 // wait for the interrupt to come back.
/* NOTE(review): unbounded busy-wait with interrupts disabled; relies on
 * the forced interrupt always writing 0x10f to flush_addr — a lost write
 * would hang this CPU.  Presumably flush_addr is reset elsewhere between
 * flushes; confirm against the full file. */
231 while (p->flush_addr != 0x10f);
233 // okay, everything is synched up.
234 spin_unlock_irqrestore(&p->flush_lock, flags);
239 EXPORT_SYMBOL(sn_dma_flush);
242 * There are end cases where a deadlock can occur if interrupt
243 * processing completes and the Bridge b_int_status bit is still set.
245 * One scenerio is if a second PCI interrupt occurs within 60ns of
246 * the previous interrupt being cleared. In this case the Bridge
247 * does not detect the transition, the Bridge b_int_status bit
248 * remains set, and because no transition was detected no interrupt
249 * packet is sent to the Hub/Heart.
251 * A second scenerio is possible when a b_int_status bit is being
252 * shared by multiple devices:
253 * Device #1 generates interrupt
254 * Bridge b_int_status bit set
255 * Device #2 generates interrupt
256 * interrupt processing begins
257 * ISR for device #1 runs and
259 * Device #1 generates interrupt
260 * ISR for device #2 runs and
262 * (b_int_status bit still set)
263 * interrupt processing completes
265 * Interrupt processing is now complete, but an interrupt is still
266 * outstanding for Device #1. But because there was no transition of
267 * the b_int_status bit, no interrupt packet will be generated and
268 * a deadlock will occur.
270 * To avoid these deadlock situations, this function is used
271 * to check if a specific Bridge b_int_status bit is set, and if so,
272 * cause the setting of the corresponding interrupt bit.
274 * On a XBridge (SN1) and PIC (SN2), we do this by writing the appropriate Bridge Force
275 * Interrupt register.
/* pcibr_force_interrupt: re-trigger any Bridge interrupt bits owned by
 * `intr` to break the b_int_status deadlocks described in the comment
 * block above.  On XBridge/PIC this writes the per-bit Force Interrupt
 * register.
 * NOTE(review): the non-XBridge/PIC path (if any) is elided in this view. */
278 pcibr_force_interrupt(pcibr_intr_t intr)
282 pcibr_soft_t pcibr_soft = intr->bi_soft;
283 bridge_t *bridge = pcibr_soft->bs_base;
/* walk all 8 Bridge interrupt pins; act on the ones this intr owns */
285 bits = intr->bi_ibits;
286 for (bit = 0; bit < 8; bit++) {
287 if (bits & (1 << bit)) {
289 PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
290 "pcibr_force_interrupt: bit=0x%x\n", bit));
292 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
293 bridge->b_force_pin[bit].intr = 1;
/* pcibr_intr_alloc: allocate a pcibr interrupt object for the PCI device
 * at pconn_vhdl covering the requested INTA-D `lines`.  For each Bridge
 * interrupt bit those lines map to, lazily allocates the shared xtalk
 * interrupt (CAS-published so racing allocators agree) and links a
 * pcibr_intr_list entry onto that bit's wrapper list, marking the bit
 * shared when a second entry appears.
 * NOTE(review): several statements are elided in this view, including the
 * allocations of pcibr_intr and intr_entry, the error-return paths, and
 * the closing of the per-bit loop. */
301 pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
302 device_desc_t dev_desc,
303 pciio_intr_line_t lines,
304 vertex_hdl_t owner_dev)
306 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
307 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
308 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
309 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
310 bridge_t *bridge = pcibr_soft->bs_base;
313 xtalk_intr_t *xtalk_intr_p;
314 pcibr_intr_t *pcibr_intr_p;
315 pcibr_intr_list_t *intr_list_p;
317 unsigned pcibr_int_bits;
318 unsigned pcibr_int_bit;
319 xtalk_intr_t xtalk_intr = (xtalk_intr_t)0;
321 pcibr_intr_t pcibr_intr;
322 pcibr_intr_list_t intr_entry;
323 pcibr_intr_list_t intr_list;
327 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
328 "pcibr_intr_alloc: %s%s%s%s%s\n",
329 !(lines & 15) ? " No INTs?" : "",
330 lines & 1 ? " INTA" : "",
331 lines & 2 ? " INTB" : "",
332 lines & 4 ? " INTC" : "",
333 lines & 8 ? " INTD" : ""));
/* initialize the new interrupt object (allocation elided in this view) */
339 pcibr_intr->bi_dev = pconn_vhdl;
340 pcibr_intr->bi_lines = lines;
341 pcibr_intr->bi_soft = pcibr_soft;
342 pcibr_intr->bi_ibits = 0; /* bits will be added below */
343 pcibr_intr->bi_func = 0; /* unset until connect */
344 pcibr_intr->bi_arg = 0; /* unset until connect */
345 pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
346 pcibr_intr->bi_mustruncpu = CPU_NONE;
347 pcibr_intr->bi_ibuf.ib_in = 0;
348 pcibr_intr->bi_ibuf.ib_out = 0;
349 mutex_spinlock_init(&pcibr_intr->bi_ibuf.ib_lock);
/* map requested INTA-D lines to Bridge interrupt bit mask */
350 pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines,
351 PCIBR_NUM_SLOTS(pcibr_soft));
355 * For each PCI interrupt line requested, figure
356 * out which Bridge PCI Interrupt Line it maps
357 * to, and make sure there are xtalk resources
360 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
361 "pcibr_intr_alloc: pcibr_int_bits: 0x%x\n", pcibr_int_bits));
362 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
363 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
364 xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
366 xtalk_intr = *xtalk_intr_p;
/* first user of this bridge bit: allocate the shared xtalk interrupt */
368 if (xtalk_intr == NULL) {
370 * This xtalk_intr_alloc is constrained for two reasons:
371 * 1) Normal interrupts and error interrupts need to be delivered
372 * through a single xtalk target widget so that there aren't any
373 * ordering problems with DMA, completion interrupts, and error
374 * interrupts. (Use of xconn_vhdl forces this.)
376 * 2) On SN1, addressing constraints on SN1 and Bridge force
377 * us to use a single PI number for all interrupts from a
378 * single Bridge. (SN1-specific code forces this).
382 * All code dealing with threaded PCI interrupt handlers
383 * is located at the pcibr level. Because of this,
384 * we always want the lower layers (hub/heart_intr_alloc,
385 * intr_level_connect) to treat us as non-threaded so we
386 * don't set up a duplicate threaded environment. We make
387 * this happen by calling a special xtalk interface.
389 xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc,
392 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
393 "pcibr_intr_alloc: xtalk_intr=0x%x\n", xtalk_intr));
395 /* both an assert and a runtime check on this:
396 * we need to check in non-DEBUG kernels, and
397 * the ASSERT gets us more information when
398 * we use DEBUG kernels.
400 ASSERT(xtalk_intr != NULL);
401 if (xtalk_intr == NULL) {
402 /* it is quite possible that our
403 * xtalk_intr_alloc failed because
404 * someone else got there first,
405 * and we can find their results
408 if (!*xtalk_intr_p) {
409 #ifdef SUPPORT_PRINTING_V_FORMAT
411 "pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
415 "pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
418 /* yes, we leak resources here. */
/* CAS publishes our xtalk_intr; winner programs the slot routing */
421 } else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
423 * now tell the bridge which slot is
424 * using this interrupt line.
426 int_dev = bridge->b_int_device;
427 int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
428 int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
429 bridge->b_int_device = int_dev; /* XXXMP */
431 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
432 "bridge intr bit %d clears my wrb\n",
435 /* someone else got one allocated first;
436 * free the one we just created, and
437 * retrieve the one they allocated.
439 xtalk_intr_free(xtalk_intr);
440 xtalk_intr = *xtalk_intr_p;
442 /* once xtalk_intr is set, we never clear it,
443 * so if the CAS fails above, this condition
444 * can "never happen" ...
448 "pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
450 /* yes, we leak resources here. */
454 pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;
/* build a list entry for this bit (allocation elided in this view) */
460 intr_entry->il_next = NULL;
461 intr_entry->il_intr = pcibr_intr;
462 intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
464 &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
466 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
467 "Bridge bit 0x%x wrap=0x%x\n", pcibr_int_bit,
468 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap));
/* lock-free insertion: CAS at each candidate position until we win */
470 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
471 /* we are the first interrupt on this bridge bit.
473 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
474 "INT 0x%x (bridge bit %d) allocated [FIRST]\n",
475 pcibr_int_bits, pcibr_int_bit));
478 intr_list = *intr_list_p;
479 pcibr_intr_p = &intr_list->il_intr;
480 if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
481 /* first entry on list was erased,
482 * and we replaced it, so we
483 * don't need our intr_entry.
486 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
487 "INT 0x%x (bridge bit %d) replaces erased first\n",
488 pcibr_int_bits, pcibr_int_bit));
491 intr_list_p = &intr_list->il_next;
492 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
493 /* we are the new second interrupt on this bit.
/* two or more entries now share this bridge bit */
495 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
496 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
497 "INT 0x%x (bridge bit %d) is new SECOND\n",
498 pcibr_int_bits, pcibr_int_bit));
502 pcibr_intr_p = &intr_list->il_intr;
503 if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
504 /* an entry on list was erased,
505 * and we replaced it, so we
506 * don't need our intr_entry.
510 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
511 "INT 0x%x (bridge bit %d) replaces erase Nth\n",
512 pcibr_int_bits, pcibr_int_bit));
515 intr_list_p = &intr_list->il_next;
516 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
517 /* entry appended to share list
519 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
520 "INT 0x%x (bridge bit %d) is new Nth\n",
521 pcibr_int_bits, pcibr_int_bit));
524 /* step to next record in chain
526 intr_list = *intr_list_p;
531 #if DEBUG && INTR_DEBUG
532 printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
/* record the hub IRQ and target CPU from the underlying xtalk interrupt */
534 hub_intr = (hub_intr_t)xtalk_intr;
535 pcibr_intr->bi_irq = hub_intr->i_bit;
536 pcibr_intr->bi_cpu = hub_intr->i_cpuid;
/* pcibr_intr_free: detach pcibr_intr from every bridge interrupt bit it
 * owns.  List entries are "erased" by CAS-ing il_intr back to NULL (the
 * entries themselves stay linked — see the replaces-erased logic in
 * pcibr_intr_alloc).  If the bit is not shared, the underlying xtalk
 * interrupt is freed and the bridge's slot-routing field is cleared.
 * NOTE(review): the for-loop header over the list and the assignments to
 * intr_shared/intr_list are partially elided in this view. */
542 pcibr_intr_free(pcibr_intr_t pcibr_intr)
544 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
545 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
546 unsigned pcibr_int_bit;
547 pcibr_intr_list_t intr_list;
549 xtalk_intr_t *xtalk_intrp;
551 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
552 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
554 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
556 intr_list = intr_list->il_next)
/* erase our entry in place: CAS il_intr from pcibr_intr back to NULL */
557 if (compare_and_swap_ptr((void **) &intr_list->il_intr,
561 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC,
563 "pcibr_intr_free: cleared hdlr from bit 0x%x\n",
566 /* If this interrupt line is not being shared between multiple
567 * devices release the xtalk interrupt resources.
570 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
571 xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
573 if ((!intr_shared) && (*xtalk_intrp)) {
575 bridge_t *bridge = pcibr_soft->bs_base;
578 xtalk_intr_free(*xtalk_intrp);
581 /* Clear the PCI device interrupt to bridge interrupt pin
584 int_dev = bridge->b_int_device;
585 int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
586 bridge->b_int_device = int_dev;
/* pcibr_setpciint: xtalk "setfunc" callback that programs a PIC bridge's
 * per-pin interrupt address register (passed via the sfarg) with the
 * interrupt vector (PIC_INT_ADDR_FLD, vector in bits 48+) and the target
 * host address (PIC_INT_ADDR_HOST).
 * NOTE(review): declarations of addr/vhdl/bridge/int_addr are elided in
 * this view; `bridge` appears computed but unused in the visible lines. */
595 pcibr_setpciint(xtalk_intr_t xtalk_intr)
598 xtalk_intr_vector_t vect;
603 addr = xtalk_intr_addr_get(xtalk_intr);
604 vect = xtalk_intr_vector_get(xtalk_intr);
605 vhdl = xtalk_intr_dev_get(xtalk_intr);
606 bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
/* sfarg carries the address of the bridge's per-pin int_addr register */
608 int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
609 *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
610 (PIC_INT_ADDR_HOST & addr));
/* pcibr_intr_connect: attach intr_func/intr_arg to a previously allocated
 * pcibr interrupt.  Marks the object connected, then for each owned
 * bridge bit routes the shared xtalk interrupt through the pcibr_intr_func
 * wrapper and finally enables the bits in the bridge's interrupt-enable
 * register (64-bit path for the PIC PV#854697 workaround).
 * NOTE(review): the early-return value, `continue` bodies, and loop/brace
 * closings are elided in this view. */
615 pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
617 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
618 bridge_t *bridge = pcibr_soft->bs_base;
619 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
620 unsigned pcibr_int_bit;
624 if (pcibr_intr == NULL)
627 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
628 "pcibr_intr_connect: intr_func=0x%x\n",
/* publish handler before flagging CONNECTED (readers test the flag) */
631 pcibr_intr->bi_func = intr_func;
632 pcibr_intr->bi_arg = intr_arg;
633 *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
636 * For each PCI interrupt line requested, figure
637 * out which Bridge PCI Interrupt Line it maps
638 * to, and make sure there are xtalk resources
641 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
642 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
643 pcibr_intr_wrap_t intr_wrap;
644 xtalk_intr_t xtalk_intr;
647 xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
648 intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
651 * If this interrupt line is being shared and the connect has
652 * already been done, no need to do it again.
654 if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
659 * Use the pcibr wrapper function to handle all Bridge interrupts
660 * regardless of whether the interrupt line is shared or not.
/* PIC uses 64-bit per-pin int_addr registers; classic Bridge 32-bit */
662 if (IS_PIC_SOFT(pcibr_soft))
663 int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
665 int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
667 xtalk_intr_connect(xtalk_intr, pcibr_intr_func, (intr_arg_t) intr_wrap,
668 (xtalk_intr_setfunc_t) pcibr_setpciint,
671 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
673 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
674 "pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
675 "pcibr_int_bit=0x%x\n", int_addr,
676 *(picreg_t *)int_addr,
680 /* PIC WAR. PV# 854697
681 * On PIC we must write 64-bit MMRs with 64-bit stores
683 s = pcibr_lock(pcibr_soft);
684 if (IS_PIC_SOFT(pcibr_soft) &&
685 PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
686 int_enable = bridge->p_int_enable_64;
687 int_enable |= pcibr_int_bits;
688 bridge->p_int_enable_64 = int_enable;
690 bridgereg_t int_enable;
692 int_enable = bridge->b_int_enable;
693 int_enable |= pcibr_int_bits;
694 bridge->b_int_enable = int_enable;
/* read-back flush: PIO writes above must reach the bridge before unlock */
696 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
697 pcibr_unlock(pcibr_soft, s);
/* pcibr_intr_disconnect: detach the handler from pcibr_intr.  Clears the
 * CONNECTED flag first so pcibr_intr_func stops dispatching, masks the
 * (non-shared) bits in the bridge enable register, disconnects the xtalk
 * interrupts, and — if a bit became shared while we were disconnecting —
 * immediately re-connects the wrapper to close that race window.
 * NOTE(review): several declarations (s, int_enable, int_addr), `continue`
 * statements, and brace closings are elided in this view. */
704 pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
706 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
707 bridge_t *bridge = pcibr_soft->bs_base;
708 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
709 unsigned pcibr_int_bit;
710 pcibr_intr_wrap_t intr_wrap;
714 /* Stop calling the function. Now.
716 *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
717 pcibr_intr->bi_func = 0;
718 pcibr_intr->bi_arg = 0;
720 * For each PCI interrupt line requested, figure
721 * out which Bridge PCI Interrupt Line it maps
722 * to, and disconnect the interrupt.
725 /* don't disable interrupts for lines that
726 * are shared between devices.
728 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
729 if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
730 (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
731 pcibr_int_bits &= ~(1 << pcibr_int_bit);
735 /* PIC WAR. PV# 854697
736 * On PIC we must write 64-bit MMRs with 64-bit stores
738 s = pcibr_lock(pcibr_soft);
739 if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
740 int_enable = bridge->p_int_enable_64;
741 int_enable &= ~pcibr_int_bits;
742 bridge->p_int_enable_64 = int_enable;
744 int_enable = (uint64_t)bridge->b_int_enable;
745 int_enable &= ~pcibr_int_bits;
746 bridge->b_int_enable = (bridgereg_t)int_enable;
/* read-back flush so the mask takes effect before we drop the lock */
748 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
749 pcibr_unlock(pcibr_soft, s);
751 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
752 "pcibr_intr_disconnect: disabled int_bits=0x%x\n",
755 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
756 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
759 /* if the interrupt line is now shared,
760 * do not disconnect it.
762 if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
765 xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
766 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;
768 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
769 "pcibr_intr_disconnect: disconnect int_bits=0x%x\n",
772 /* if we are sharing the interrupt line,
773 * connect us up; this closes the hole
774 * where the another pcibr_intr_alloc()
775 * was in progress as we disconnected.
777 if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
780 intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
/* double-check: bit may have stopped being shared in the meantime */
781 if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
784 if (IS_PIC_SOFT(pcibr_soft))
785 int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
787 int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
789 xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
790 pcibr_intr_func, (intr_arg_t) intr_wrap,
791 (xtalk_intr_setfunc_t)pcibr_setpciint,
792 (void *)(long)pcibr_int_bit);
793 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
794 "pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
/* pcibr_intr_cpu_get: return the CPU vertex targeted by the first bridge
 * interrupt bit this pcibr_intr owns (delegates to xtalk_intr_cpu_get).
 * NOTE(review): the fallthrough return for an empty bit mask is elided in
 * this view. */
801 pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
803 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
804 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
805 unsigned pcibr_int_bit;
807 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
808 if (pcibr_int_bits & (1 << pcibr_int_bit))
809 return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
813 /* =====================================================================
/* pcibr_clearwidint: reset the bridge widget-interrupt destination
 * registers to zero (un-routes the widget error interrupt; see
 * pcibr_setwidint, which treats non-zero values as already-programmed). */
817 pcibr_clearwidint(bridge_t *bridge)
819 bridge->b_wid_int_upper = 0;
820 bridge->b_wid_int_lower = 0;
/* pcibr_setwidint: xtalk setfunc that programs the bridge's widget
 * interrupt destination (target widget number in bits 16-19 of upper,
 * xtalk address split across upper/lower) and the error-interrupt vector.
 * Panics if the registers were already programmed with different values,
 * since all interrupts from one Bridge must use a single PI target. */
825 pcibr_setwidint(xtalk_intr_t intr)
827 xwidgetnum_t targ = xtalk_intr_target_get(intr);
828 iopaddr_t addr = xtalk_intr_addr_get(intr);
829 xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
830 widgetreg_t NEW_b_wid_int_upper, NEW_b_wid_int_lower;
831 widgetreg_t OLD_b_wid_int_upper, OLD_b_wid_int_lower;
/* sfarg carries the bridge base pointer */
833 bridge_t *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
835 NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
836 XTALK_ADDR_TO_UPPER(addr));
837 NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
839 OLD_b_wid_int_upper = bridge->b_wid_int_upper;
840 OLD_b_wid_int_lower = bridge->b_wid_int_lower;
842 /* Verify that all interrupts from this Bridge are using a single PI */
843 if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
845 * Once set, these registers shouldn't change; they should
846 * be set multiple times with the same values.
848 * If we're attempting to change these registers, it means
849 * that our heuristics for allocating interrupts in a way
850 * appropriate for IP35 have failed, and the admin needs to
851 * explicitly direct some interrupts (or we need to make the
852 * heuristics more clever).
854 * In practice, we hope this doesn't happen very often, if
857 if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
858 (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
859 printk(KERN_WARNING "Interrupt allocation is too complex.\n");
860 printk(KERN_WARNING "Use explicit administrative interrupt targetting.\n");
861 printk(KERN_WARNING "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
862 printk(KERN_WARNING "NEW=0x%x/0x%x OLD=0x%x/0x%x\n",
863 NEW_b_wid_int_upper, NEW_b_wid_int_lower,
864 OLD_b_wid_int_upper, OLD_b_wid_int_lower);
865 PRINT_PANIC("PCI Bridge interrupt targetting error\n");
869 bridge->b_wid_int_upper = NEW_b_wid_int_upper;
870 bridge->b_wid_int_lower = NEW_b_wid_int_lower;
871 bridge->b_int_host_err = vect;
876 * pcibr_intr_preset: called during mlreset time
877 * if the platform specific code needs to route
878 * one of the Bridge's xtalk interrupts before the
879 * xtalk infrastructure is available.
/* pcibr_xintr_preset: early (mlreset-time) routing of one of the Bridge's
 * xtalk interrupts before the xtalk infrastructure exists.  A
 * which_widget_intr of -1 routes the widget error interrupt via the
 * b_wid_int_upper/lower registers; otherwise the per-pin b_int_addr
 * register is programmed with the split host address and vector.
 * NOTE(review): the targ/addr parameter declarations (signature lines
 * 884-885) and the else keyword are elided in this view. */
882 pcibr_xintr_preset(void *which_widget,
883 int which_widget_intr,
886 xtalk_intr_vector_t vect)
888 bridge_t *bridge = (bridge_t *) which_widget;
890 if (which_widget_intr == -1) {
891 /* bridge widget error interrupt */
892 bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
893 XTALK_ADDR_TO_UPPER(addr));
894 bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
895 bridge->b_int_host_err = vect;
896 printk("pcibr_xintr_preset: b_wid_int_upper 0x%lx b_wid_int_lower 0x%lx b_int_host_err 0x%x\n",
897 ( (0x000F0000 & (targ << 16)) | XTALK_ADDR_TO_UPPER(addr)),
898 XTALK_ADDR_TO_LOWER(addr), vect);
900 /* turn on all interrupts except
901 * the PCI interrupt requests,
904 bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
907 /* routing a PCI device interrupt.
908 * targ and low 38 bits of addr must
909 * be the same as the already set
910 * value for the widget error interrupt.
912 bridge->b_int_addr[which_widget_intr].addr =
913 ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
914 (BRIDGE_INT_ADDR_FLD & vect));
916 * now bridge can let it through;
917 * NB: still should be blocked at
918 * xtalk provider end, until the service
921 bridge->b_int_enable |= 1 << vect;
/* read-back flush: ensure the PIO writes reach the bridge */
923 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
930 * This is the pcibr interrupt "wrapper" function that is called,
931 * in interrupt context, to initiate the interrupt handler(s) registered
932 * (via pcibr_intr_alloc/connect) for the occurring interrupt. Non-threaded
933 * handlers will be called directly, and threaded handlers will have their
/* pcibr_intr_func: the wrapper invoked (in interrupt context) for each
 * Bridge interrupt pin; dispatches every connected handler on the pin's
 * share list.  Applies the PV#855272 runaway-interrupt workaround
 * (briefly mask/unmask the pin on PIC), skips re-entry while a previous
 * handler pass is still running (iw_hdlrcnt), flushes write-request
 * buffers for shared pins before each non-threaded handler, forces a
 * fresh interrupt on completion to avoid the b_int_status deadlock, and
 * masks the pin entirely when no handlers are connected.
 * NOTE(review): this definition is heavily elided here and continues past
 * the end of the visible source; declarations (intr, func, wrbf, x, s,
 * is_threaded), handler invocation, hdlrcnt bookkeeping, and the
 * do_nonthreaded exit condition are not shown. */
937 pcibr_intr_func(intr_arg_t arg)
939 pcibr_intr_wrap_t wrap = (pcibr_intr_wrap_t) arg;
943 pcibr_intr_list_t list;
945 int do_nonthreaded = 1;
948 pcibr_soft_t pcibr_soft = wrap->iw_soft;
949 bridge_t *bridge = pcibr_soft->bs_base;
950 uint64_t p_enable = pcibr_soft->bs_int_enable;
951 int bit = wrap->iw_ibit;
955 * Early attempt at a workaround for the runaway
956 * interrupt problem. Briefly disable the enable bit for
959 if (IS_PIC_SOFT(pcibr_soft) &&
960 PCIBR_WAR_ENABLED(PV855272, pcibr_soft)) {
963 /* disable-enable interrupts for this bridge pin */
965 p_enable &= ~(1 << bit);
966 s = pcibr_lock(pcibr_soft);
967 bridge->p_int_enable_64 = p_enable;
968 p_enable |= (1 << bit);
969 bridge->p_int_enable_64 = p_enable;
970 pcibr_unlock(pcibr_soft, s);
974 * If any handler is still running from a previous interrupt
975 * just return. If there's a need to call the handler(s) again,
976 * another interrupt will be generated either by the device or by
977 * pcibr_force_interrupt().
980 if (wrap->iw_hdlrcnt) {
985 * Call all interrupt handlers registered.
986 * First, the pcibr_intrd threads for any threaded handlers will be
987 * awoken, then any non-threaded handlers will be called sequentially.
991 while (do_nonthreaded) {
992 for (list = wrap->iw_list; list != NULL; list = list->il_next) {
993 if ((intr = list->il_intr) && (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
996 * This device may have initiated write
997 * requests since the bridge last saw
998 * an edge on this interrupt input; flushing
999 * the buffer prior to invoking the handler
1000 * should help but may not be sufficient if we
1001 * get more requests after the flush, followed
1002 * by the card deciding it wants service, before
1003 * the interrupt handler checks to see if things need
1006 * There is a similar race condition if
1007 * an interrupt handler loops around and
1008 * notices further service is required.
1009 * Perhaps we need to have an explicit
1010 * call that interrupt handlers need to
1011 * do between noticing that DMA to memory
1012 * has completed, but before observing the
1013 * contents of memory?
1016 if ((do_nonthreaded) && (!is_threaded)) {
1017 /* Non-threaded - Call the interrupt handler at interrupt level */
1018 /* Only need to flush write buffers if sharing */
1020 if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
1021 if ((x = *wrbf)) /* write request buffer flush */
1022 #ifdef SUPPORT_PRINTING_V_FORMAT
1023 printk(KERN_ALERT "pcibr_intr_func %v: \n"
1024 "write buffer flush failed, wrbf=0x%x\n",
1025 list->il_intr->bi_dev, wrbf);
1027 printk(KERN_ALERT "pcibr_intr_func %p: \n"
1028 "write buffer flush failed, wrbf=0x%lx\n",
1029 (void *)list->il_intr->bi_dev, (long) wrbf);
1032 func = intr->bi_func;
1042 * If the non-threaded handler was the last to complete,
1043 * (i.e., no threaded handlers still running) force an
1044 * interrupt to avoid a potential deadlock situation.
1046 if (wrap->iw_hdlrcnt == 0) {
1047 pcibr_force_interrupt((pcibr_intr_t) wrap);
1051 /* If there were no handlers,
1052 * disable the interrupt and return.
1053 * It will get enabled again after
1054 * a handler is connected.
1055 * If we don't do this, we would
1056 * sit here and spin through the
1060 pcibr_soft_t pcibr_soft = wrap->iw_soft;
1061 bridge_t *bridge = pcibr_soft->bs_base;
1062 bridgereg_t int_enable;
1063 bridgereg_t mask = 1 << wrap->iw_ibit;
1066 /* PIC BRINUGP WAR (PV# 854697):
1067 * On PIC we must write 64-bit MMRs with 64-bit stores
1069 s = pcibr_lock(pcibr_soft);
1070 if (IS_PIC_SOFT(pcibr_soft) &&
1071 PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
1072 int_enable = bridge->p_int_enable_64;
1073 int_enable &= ~mask;
1074 bridge->p_int_enable_64 = int_enable;
1076 int_enable = (uint64_t)bridge->b_int_enable;
1077 int_enable &= ~mask;
1078 bridge->b_int_enable = (bridgereg_t)int_enable;
1080 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
1081 pcibr_unlock(pcibr_soft, s);