3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
7 * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <asm/sn/sgi.h>
14 #include <asm/sn/sn_cpuid.h>
15 #include <asm/sn/addrs.h>
16 #include <asm/sn/arch.h>
17 #include <asm/sn/iograph.h>
18 #include <asm/sn/invent.h>
19 #include <asm/sn/hcl.h>
20 #include <asm/sn/labelcl.h>
21 #include <asm/sn/xtalk/xwidget.h>
22 #include <asm/sn/pci/bridge.h>
23 #include <asm/sn/pci/pciio.h>
24 #include <asm/sn/pci/pcibr.h>
25 #include <asm/sn/pci/pcibr_private.h>
26 #include <asm/sn/pci/pci_defs.h>
27 #include <asm/sn/prio.h>
28 #include <asm/sn/xtalk/xbow.h>
29 #include <asm/sn/ioc3.h>
30 #include <asm/sn/eeprom.h>
31 #include <asm/sn/io.h>
32 #include <asm/sn/sn_private.h>
/*
 * Alias the generic resource-map ("rmap") allocator names onto the
 * bridge ATE (Address Translation Entry) map routines, so shared code
 * written against rmalloc/rmfree works with the ATE allocators.
 */
35 #define rmallocmap atemapalloc
36 #define rmfreemap atemapfree
37 #define rmfree atefree
38 #define rmalloc atealloc
/*
 * Forward declarations for the interrupt-management entry points
 * defined later in this file, plus the externally-defined info getter.
 */
41 unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
42 pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
43 void pcibr_intr_free(pcibr_intr_t);
44 void pcibr_setpciint(xtalk_intr_t);
45 int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
46 void pcibr_intr_disconnect(pcibr_intr_t);
48 devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
49 void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
50 void pcibr_intr_func(intr_arg_t);
52 extern pcibr_info_t pcibr_info_get(devfs_handle_t);
54 /* =====================================================================
55 * INTERRUPT MANAGEMENT
/*
 * pcibr_intr_bits: translate a PCI INTA..INTD line mask for a given
 * slot into a mask of Bridge PCI interrupt input bits ("bbits").
 * Lines A/C and B/D pair up; the B/D pair maps through (slot ^ 4).
 * NOTE(review): listing is elided here -- the A/C branch body, bbits
 * initialization, and the final return are not visible; presumably
 * A/C uses bit (1 << slot). TODO confirm against the full source.
 */
59 pcibr_intr_bits(pciio_info_t info,
60 pciio_intr_line_t lines, int nslots)
62 pciio_slot_t slot = PCIBR_INFO_SLOT_GET_INT(info);
66 * Currently favored mapping from PCI
67 * slot number and INTA/B/C/D to Bridge
68 * PCI Interrupt Bit Number:
82 if (lines & (PCIIO_INTR_LINE_A| PCIIO_INTR_LINE_C))
84 if (lines & (PCIIO_INTR_LINE_B| PCIIO_INTR_LINE_D))
85 bbits |= 1 << (slot ^ 4);
92 * Get the next wrapper pointer queued in the interrupt circular buffer.
/*
 * pcibr_wrap_get: dequeue the next interrupt-wrapper pointer from the
 * circular buffer.  Panics if the buffer is empty (ib_in == ib_out).
 * The consume index wraps modulo IBUFSIZE.
 * NOTE(review): no locking is visible here, and the return statement
 * is elided from this listing -- presumably "return wrap;" follows.
 */
95 pcibr_wrap_get(pcibr_intr_cbuf_t cbuf)
97 pcibr_intr_wrap_t wrap;
99 if (cbuf->ib_in == cbuf->ib_out)
100 PRINT_PANIC( "pcibr intr circular buffer empty, cbuf=0x%p, ib_in=ib_out=%d\n",
101 (void *)cbuf, cbuf->ib_out);
103 wrap = cbuf->ib_cbuf[cbuf->ib_out++];
104 cbuf->ib_out = cbuf->ib_out % IBUFSIZE;
109 * Queue a wrapper pointer in the interrupt circular buffer.
/*
 * pcibr_wrap_put: enqueue an interrupt-wrapper pointer into the
 * circular buffer under cbuf->ib_lock.  Panics if the buffer would
 * become full (next insert index == consume index).
 * NOTE(review): the statement advancing cbuf->ib_in to "in" is elided
 * from this listing; it presumably follows the store at line 128.
 */
112 pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
118 * Multiple CPUs could be executing this code simultaneously
119 * if a handler has registered multiple interrupt lines and
120 * the interrupts are directed to different CPUs.
122 s = mutex_spinlock(&cbuf->ib_lock);
123 in = (cbuf->ib_in + 1) % IBUFSIZE;
124 if (in == cbuf->ib_out)
125 PRINT_PANIC( "pcibr intr circular buffer full, cbuf=0x%p, ib_in=%d\n",
126 (void *)cbuf, cbuf->ib_in);
128 cbuf->ib_cbuf[cbuf->ib_in] = wrap;
130 mutex_spinunlock(&cbuf->ib_lock, s);
135 * There are end cases where a deadlock can occur if interrupt
136 * processing completes and the Bridge b_int_status bit is still set.
138 * One scenario is if a second PCI interrupt occurs within 60ns of
139 * the previous interrupt being cleared. In this case the Bridge
140 * does not detect the transition, the Bridge b_int_status bit
141 * remains set, and because no transition was detected no interrupt
142 * packet is sent to the Hub/Heart.
144 * A second scenario is possible when a b_int_status bit is being
145 * shared by multiple devices:
146 * Device #1 generates interrupt
147 * Bridge b_int_status bit set
148 * Device #2 generates interrupt
149 * interrupt processing begins
150 * ISR for device #1 runs and
152 * Device #1 generates interrupt
153 * ISR for device #2 runs and
155 * (b_int_status bit still set)
156 * interrupt processing completes
158 * Interrupt processing is now complete, but an interrupt is still
159 * outstanding for Device #1. But because there was no transition of
160 * the b_int_status bit, no interrupt packet will be generated and
161 * a deadlock will occur.
163 * To avoid these deadlock situations, this function is used
164 * to check if a specific Bridge b_int_status bit is set, and if so,
165 * cause the setting of the corresponding interrupt bit.
167 * On a XBridge (SN1), we do this by writing the appropriate Bridge Force
168 * Interrupt register. On SN0, or SN1 with an older Bridge, the Bridge
169 * Force Interrupt register does not exist, so we write the Hub
170 * INT_PEND_MOD register directly. Likewise for Octane, where we write the
171 * Heart Set Interrupt Status register directly.
/*
 * pcibr_force_interrupt: re-trigger a Bridge interrupt whose
 * b_int_status bit is still set, to break the deadlock scenarios
 * described in the comment block above.  On XBridge/PIC this writes
 * the per-pin Force Interrupt register; otherwise, if the status bit
 * is set, it resends the vector directly to the target CPU via
 * REMOTE_CPU_SEND_INTR.
 * NOTE(review): the #if matching the "#endif PIC_LATER" below is
 * elided from this listing, as are some declarations (bit, intr_bit,
 * cpu) -- the fallback path may be conditionally compiled.
 */
174 pcibr_force_interrupt(pcibr_intr_wrap_t wrap)
178 pcibr_soft_t pcibr_soft = wrap->iw_soft;
179 bridge_t *bridge = pcibr_soft->bs_base;
183 PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
184 "pcibr_force_interrupt: bit=0x%x\n", bit));
186 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
187 bridge->b_force_pin[bit].intr = 1;
188 } else if ((1 << bit) & *wrap->iw_stat) {
191 xtalk_intr_t xtalk_intr =
192 pcibr_soft->bs_intr[bit].bsi_xtalk_intr;
194 intr_bit = (short) xtalk_intr_vector_get(xtalk_intr);
195 cpu = xtalk_intr_cpuid_get(xtalk_intr);
196 REMOTE_CPU_SEND_INTR(cpu, intr_bit);
198 #endif /* PIC_LATER */
/*
 * pcibr_intr_alloc: allocate and initialize a pcibr interrupt object
 * for the PCI connection point pconn_vhdl, covering the INTA..INTD
 * lines requested in "lines".  For each Bridge interrupt bit the lines
 * map to, lazily allocate the shared xtalk interrupt resource (lockless
 * via compare_and_swap_ptr), program the Bridge's b_int_device slot
 * routing, and link this interrupt onto the per-bit wrapper list,
 * marking the bit shared when a second entry appears.
 * NOTE(review): this listing is elided -- the allocation of pcibr_intr
 * and intr_entry, the "is_threaded" computation, several loop/brace
 * closers, and the final return are not visible here.
 */
203 pcibr_intr_alloc(devfs_handle_t pconn_vhdl,
204 device_desc_t dev_desc,
205 pciio_intr_line_t lines,
206 devfs_handle_t owner_dev)
208 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
209 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
210 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
211 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
212 bridge_t *bridge = pcibr_soft->bs_base;
215 xtalk_intr_t *xtalk_intr_p;
216 pcibr_intr_t *pcibr_intr_p;
217 pcibr_intr_list_t *intr_list_p;
219 unsigned pcibr_int_bits;
220 unsigned pcibr_int_bit;
221 xtalk_intr_t xtalk_intr = (xtalk_intr_t)0;
223 pcibr_intr_t pcibr_intr;
224 pcibr_intr_list_t intr_entry;
225 pcibr_intr_list_t intr_list;
229 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
230 "pcibr_intr_alloc: %s%s%s%s%s\n",
231 !(lines & 15) ? " No INTs?" : "",
232 lines & 1 ? " INTA" : "",
233 lines & 2 ? " INTB" : "",
234 lines & 4 ? " INTC" : "",
235 lines & 8 ? " INTD" : ""));
/* Initialize the new interrupt object; handler/arg stay unset until
 * pcibr_intr_connect().  NOTE(review): the allocation that assigns
 * pcibr_intr is elided from this listing. */
241 pcibr_intr->bi_dev = pconn_vhdl;
242 pcibr_intr->bi_lines = lines;
243 pcibr_intr->bi_soft = pcibr_soft;
244 pcibr_intr->bi_ibits = 0; /* bits will be added below */
245 pcibr_intr->bi_func = 0; /* unset until connect */
246 pcibr_intr->bi_arg = 0; /* unset until connect */
247 pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
248 pcibr_intr->bi_mustruncpu = CPU_NONE;
249 pcibr_intr->bi_ibuf.ib_in = 0;
250 pcibr_intr->bi_ibuf.ib_out = 0;
251 mutex_spinlock_init(&pcibr_intr->bi_ibuf.ib_lock);
252 pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines,
253 PCIBR_NUM_SLOTS(pcibr_soft));
257 * For each PCI interrupt line requested, figure
258 * out which Bridge PCI Interrupt Line it maps
259 * to, and make sure there are xtalk resources
262 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
263 "pcibr_intr_alloc: pcibr_int_bits: 0x%x\n", pcibr_int_bits));
264 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
265 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
266 xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
268 xtalk_intr = *xtalk_intr_p;
270 if (xtalk_intr == NULL) {
272 * This xtalk_intr_alloc is constrained for two reasons:
273 * 1) Normal interrupts and error interrupts need to be delivered
274 * through a single xtalk target widget so that there aren't any
275 * ordering problems with DMA, completion interrupts, and error
276 * interrupts. (Use of xconn_vhdl forces this.)
278 * 2) On SN1, addressing constraints on SN1 and Bridge force
279 * us to use a single PI number for all interrupts from a
280 * single Bridge. (SN1-specific code forces this).
284 * All code dealing with threaded PCI interrupt handlers
285 * is located at the pcibr level. Because of this,
286 * we always want the lower layers (hub/heart_intr_alloc,
287 * intr_level_connect) to treat us as non-threaded so we
288 * don't set up a duplicate threaded environment. We make
289 * this happen by calling a special xtalk interface.
291 xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc,
294 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
295 "pcibr_intr_alloc: xtalk_intr=0x%x\n", xtalk_intr));
297 /* both an assert and a runtime check on this:
298 * we need to check in non-DEBUG kernels, and
299 * the ASSERT gets us more information when
300 * we use DEBUG kernels.
302 ASSERT(xtalk_intr != NULL);
303 if (xtalk_intr == NULL) {
304 /* it is quite possible that our
305 * xtalk_intr_alloc failed because
306 * someone else got there first,
307 * and we can find their results
310 if (!*xtalk_intr_p) {
311 #ifdef SUPPORT_PRINTING_V_FORMAT
313 "pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
317 "pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
320 /* yes, we leak resources here. */
323 } else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
325 * now tell the bridge which slot is
326 * using this interrupt line.
328 int_dev = bridge->b_int_device;
329 int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
330 int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
331 bridge->b_int_device = int_dev; /* XXXMP */
333 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
334 "bridge intr bit %d clears my wrb\n",
337 /* someone else got one allocated first;
338 * free the one we just created, and
339 * retrieve the one they allocated.
341 xtalk_intr_free(xtalk_intr);
342 xtalk_intr = *xtalk_intr_p;
344 /* once xtalk_intr is set, we never clear it,
345 * so if the CAS fails above, this condition
346 * can "never happen" ...
350 "pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
352 /* yes, we leak resources here. */
/* Record this Bridge bit in the interrupt object and chain a new list
 * entry onto the per-bit wrapper list (lock-free CAS walk below). */
359 pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;
362 intr_entry->il_next = NULL;
363 intr_entry->il_intr = pcibr_intr;
364 intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
366 &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
368 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
369 "Bridge bit 0x%x wrap=0x%x\n", pcibr_int_bit,
370 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap));
372 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
373 /* we are the first interrupt on this bridge bit.
375 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
376 "INT 0x%x (bridge bit %d) allocated [FIRST]\n",
377 pcibr_int_bits, pcibr_int_bit));
380 intr_list = *intr_list_p;
381 pcibr_intr_p = &intr_list->il_intr;
382 if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
383 /* first entry on list was erased,
384 * and we replaced it, so we
385 * don't need our intr_entry.
388 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
389 "INT 0x%x (bridge bit %d) replaces erased first\n",
390 pcibr_int_bits, pcibr_int_bit));
393 intr_list_p = &intr_list->il_next;
394 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
395 /* we are the new second interrupt on this bit.
397 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
398 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
399 "INT 0x%x (bridge bit %d) is new SECOND\n",
400 pcibr_int_bits, pcibr_int_bit));
404 pcibr_intr_p = &intr_list->il_intr;
405 if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
406 /* an entry on list was erased,
407 * and we replaced it, so we
408 * don't need our intr_entry.
412 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
413 "INT 0x%x (bridge bit %d) replaces erase Nth\n",
414 pcibr_int_bits, pcibr_int_bit));
417 intr_list_p = &intr_list->il_next;
418 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
419 /* entry appended to share list
421 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
422 "INT 0x%x (bridge bit %d) is new Nth\n",
423 pcibr_int_bits, pcibr_int_bit));
426 /* step to next record in chain
428 intr_list = *intr_list_p;
433 #if DEBUG && INTR_DEBUG
434 printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
/* Cache the hub irq/cpu from the last xtalk interrupt allocated.
 * NOTE(review): hub_intr's declaration is elided from this listing. */
436 hub_intr = (hub_intr_t)xtalk_intr;
437 pcibr_intr->bi_irq = hub_intr->i_bit;
438 pcibr_intr->bi_cpu = hub_intr->i_cpuid;
/*
 * pcibr_intr_free: detach pcibr_intr from every Bridge interrupt bit
 * it occupies.  For each bit: CAS the list entry's il_intr back to
 * NULL (the entry itself stays on the wrapper list for reuse); then,
 * if the bit is not shared, free the xtalk interrupt resource and
 * clear the bit's slot routing in b_int_device.
 * NOTE(review): listing is elided -- intr_shared's declaration, the
 * inner for-loop header, and several closers are not visible here.
 */
444 pcibr_intr_free(pcibr_intr_t pcibr_intr)
446 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
447 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
448 unsigned pcibr_int_bit;
449 pcibr_intr_list_t intr_list;
451 xtalk_intr_t *xtalk_intrp;
453 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
454 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
456 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
458 intr_list = intr_list->il_next)
459 if (compare_and_swap_ptr((void **) &intr_list->il_intr,
463 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC,
465 "pcibr_intr_free: cleared hdlr from bit 0x%x\n",
468 /* If this interrupt line is not being shared between multiple
469 * devices release the xtalk interrupt resources.
472 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
473 xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
475 if ((!intr_shared) && (*xtalk_intrp)) {
477 bridge_t *bridge = pcibr_soft->bs_base;
480 xtalk_intr_free(*xtalk_intrp);
483 /* Clear the PCI device interrupt to bridge interrupt pin
486 int_dev = bridge->b_int_device;
487 int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
488 bridge->b_int_device = int_dev;
/*
 * pcibr_setpciint: xtalk "setfunc" callback -- program the Bridge/PIC
 * per-pin interrupt address register (passed as the xtalk sfarg) with
 * the target address and vector of the xtalk interrupt.  PIC packs the
 * vector into bits 48+ of a 64-bit register; classic Bridge packs the
 * host field from addr>>30 plus the vector field.
 * NOTE(review): declarations of addr, vhdl, bridge and the picreg_t
 * int_addr are elided from this listing.
 */
497 pcibr_setpciint(xtalk_intr_t xtalk_intr)
500 xtalk_intr_vector_t vect;
504 addr = xtalk_intr_addr_get(xtalk_intr);
505 vect = xtalk_intr_vector_get(xtalk_intr);
506 vhdl = xtalk_intr_dev_get(xtalk_intr);
507 bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
509 if (is_pic(bridge)) {
511 int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
512 *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
513 (PIC_INT_ADDR_HOST & addr));
515 bridgereg_t *int_addr;
516 int_addr = (bridgereg_t *)xtalk_intr_sfarg_get(xtalk_intr);
517 *int_addr = ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
518 (BRIDGE_INT_ADDR_FLD & vect));
/*
 * pcibr_intr_connect: attach intr_func/intr_arg to the interrupt
 * object and wire each of its Bridge bits through the common
 * pcibr_intr_func wrapper (via xtalk_intr_connect), skipping bits a
 * sharer already connected.  Finally, enable the bits in the Bridge
 * interrupt-enable register under the pcibr lock, using a 64-bit
 * store on PIC (PV#854697 workaround).
 * NOTE(review): listing is elided -- the NULL-check's return, the
 * int_addr/int_enable/s declarations, the xtalk_intr_connect setfunc
 * argument tail, and else-branch braces are not visible here.
 */
524 pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
526 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
527 bridge_t *bridge = pcibr_soft->bs_base;
528 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
529 unsigned pcibr_int_bit;
533 if (pcibr_intr == NULL)
536 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
537 "pcibr_intr_connect: intr_func=0x%x\n",
540 pcibr_intr->bi_func = intr_func;
541 pcibr_intr->bi_arg = intr_arg;
542 *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
545 * For each PCI interrupt line requested, figure
546 * out which Bridge PCI Interrupt Line it maps
547 * to, and make sure there are xtalk resources
550 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
551 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
552 pcibr_intr_wrap_t intr_wrap;
553 xtalk_intr_t xtalk_intr;
556 xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
557 intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
560 * If this interrupt line is being shared and the connect has
561 * already been done, no need to do it again.
563 if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
568 * Use the pcibr wrapper function to handle all Bridge interrupts
569 * regardless of whether the interrupt line is shared or not.
571 if (IS_PIC_SOFT(pcibr_soft))
572 int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
574 int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
576 xtalk_intr_connect(xtalk_intr, pcibr_intr_func, (intr_arg_t) intr_wrap,
577 (xtalk_intr_setfunc_t) pcibr_setpciint,
580 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
582 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
583 "pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
584 "pcibr_int_bit=0x%x\n", int_addr,
586 *(picreg_t *)int_addr : *(bridgereg_t *)int_addr),
590 /* PIC WAR. PV# 854697
591 * On PIC we must write 64-bit MMRs with 64-bit stores
593 s = pcibr_lock(pcibr_soft);
594 if (IS_PIC_SOFT(pcibr_soft) &&
595 PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
596 int_enable = bridge->p_int_enable_64;
597 int_enable |= pcibr_int_bits;
598 bridge->p_int_enable_64 = int_enable;
600 bridgereg_t int_enable;
602 int_enable = bridge->b_int_enable;
603 int_enable |= pcibr_int_bits;
604 bridge->b_int_enable = int_enable;
606 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
607 pcibr_unlock(pcibr_soft, s);
/*
 * pcibr_intr_disconnect: detach the handler from the interrupt object.
 * Clears the CONNECTED flag and func/arg first (so the wrapper stops
 * dispatching immediately), removes shared bits from the disable mask,
 * disables the remaining bits in the Bridge enable register under the
 * pcibr lock (64-bit store on PIC, PV#854697), then disconnects each
 * non-shared bit from xtalk.  If a bit became shared while we were
 * disconnecting, it is re-connected through pcibr_intr_func to close
 * that race window.
 * NOTE(review): listing is elided -- int_enable/s/int_addr
 * declarations, "continue" statements after the iw_shared tests, and
 * brace closers are not visible here.
 */
614 pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
616 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
617 bridge_t *bridge = pcibr_soft->bs_base;
618 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
619 unsigned pcibr_int_bit;
620 pcibr_intr_wrap_t intr_wrap;
624 /* Stop calling the function. Now.
626 *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
627 pcibr_intr->bi_func = 0;
628 pcibr_intr->bi_arg = 0;
630 * For each PCI interrupt line requested, figure
631 * out which Bridge PCI Interrupt Line it maps
632 * to, and disconnect the interrupt.
635 /* don't disable interrupts for lines that
636 * are shared between devices.
638 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
639 if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
640 (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
641 pcibr_int_bits &= ~(1 << pcibr_int_bit);
645 /* PIC WAR. PV# 854697
646 * On PIC we must write 64-bit MMRs with 64-bit stores
648 s = pcibr_lock(pcibr_soft);
649 if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
650 int_enable = bridge->p_int_enable_64;
651 int_enable &= ~pcibr_int_bits;
652 bridge->p_int_enable_64 = int_enable;
654 int_enable = (uint64_t)bridge->b_int_enable;
655 int_enable &= ~pcibr_int_bits;
656 bridge->b_int_enable = (bridgereg_t)int_enable;
658 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
659 pcibr_unlock(pcibr_soft, s);
661 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
662 "pcibr_intr_disconnect: disabled int_bits=0x%x\n",
665 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
666 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
669 /* if the interrupt line is now shared,
670 * do not disconnect it.
672 if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
675 xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
676 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;
678 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
679 "pcibr_intr_disconnect: disconnect int_bits=0x%x\n",
682 /* if we are sharing the interrupt line,
683 * connect us up; this closes the hole
684 * where the another pcibr_intr_alloc()
685 * was in progress as we disconnected.
687 if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
690 intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
691 if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
694 if (IS_PIC_SOFT(pcibr_soft))
695 int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
697 int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);
699 xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
700 pcibr_intr_func, (intr_arg_t) intr_wrap,
701 (xtalk_intr_setfunc_t)pcibr_setpciint,
702 (void *)pcibr_int_bit);
703 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
704 "pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
/*
 * pcibr_intr_cpu_get: return the CPU handle servicing the first
 * (lowest-numbered) Bridge interrupt bit used by this interrupt.
 * NOTE(review): the fallthrough return for the no-bits case is elided
 * from this listing.
 */
711 pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
713 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
714 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
715 unsigned pcibr_int_bit;
717 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
718 if (pcibr_int_bits & (1 << pcibr_int_bit))
719 return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
723 /* =====================================================================
/*
 * pcibr_clearwidint: clear the Bridge widget interrupt destination
 * registers, so pcibr_setwidint's "unset" check (both zero) passes.
 */
727 pcibr_clearwidint(bridge_t *bridge)
729 bridge->b_wid_int_upper = 0;
730 bridge->b_wid_int_lower = 0;
/*
 * pcibr_setwidint: xtalk "setfunc" callback -- program the Bridge
 * widget interrupt destination (b_wid_int_upper/lower) from the xtalk
 * interrupt's target widget and address, and set the error-interrupt
 * vector.  If the registers were already set to different values, all
 * interrupts from this Bridge are not using a single PI: warn and
 * panic, since the Bridge can only target one destination.
 */
735 pcibr_setwidint(xtalk_intr_t intr)
737 xwidgetnum_t targ = xtalk_intr_target_get(intr);
738 iopaddr_t addr = xtalk_intr_addr_get(intr);
739 xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
740 widgetreg_t NEW_b_wid_int_upper, NEW_b_wid_int_lower;
741 widgetreg_t OLD_b_wid_int_upper, OLD_b_wid_int_lower;
743 bridge_t *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
745 NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
746 XTALK_ADDR_TO_UPPER(addr));
747 NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
749 OLD_b_wid_int_upper = bridge->b_wid_int_upper;
750 OLD_b_wid_int_lower = bridge->b_wid_int_lower;
752 /* Verify that all interrupts from this Bridge are using a single PI */
753 if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
755 * Once set, these registers shouldn't change; they should
756 * be set multiple times with the same values.
758 * If we're attempting to change these registers, it means
759 * that our heuristics for allocating interrupts in a way
760 * appropriate for IP35 have failed, and the admin needs to
761 * explicitly direct some interrupts (or we need to make the
762 * heuristics more clever).
764 * In practice, we hope this doesn't happen very often, if
767 if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
768 (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
769 printk(KERN_WARNING "Interrupt allocation is too complex.\n");
770 printk(KERN_WARNING "Use explicit administrative interrupt targetting.\n");
771 printk(KERN_WARNING "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
772 printk(KERN_WARNING "NEW=0x%x/0x%x OLD=0x%x/0x%x\n",
773 NEW_b_wid_int_upper, NEW_b_wid_int_lower,
774 OLD_b_wid_int_upper, OLD_b_wid_int_lower);
775 PRINT_PANIC("PCI Bridge interrupt targetting error\n");
779 bridge->b_wid_int_upper = NEW_b_wid_int_upper;
780 bridge->b_wid_int_lower = NEW_b_wid_int_lower;
781 bridge->b_int_host_err = vect;
783 printk("pcibr_setwidint: b_wid_int_upper 0x%x b_wid_int_lower 0x%x b_int_host_err 0x%x\n",
784 NEW_b_wid_int_upper, NEW_b_wid_int_lower, vect);
789 * pcibr_intr_preset: called during mlreset time
790 * if the platform specific code needs to route
791 * one of the Bridge's xtalk interrupts before the
792 * xtalk infrastructure is available.
/*
 * pcibr_xintr_preset: called at mlreset time to route one of the
 * Bridge's xtalk interrupts before the xtalk infrastructure exists.
 * which_widget_intr == -1 selects the widget error interrupt (programs
 * b_wid_int_upper/lower and the error vector, then enables everything
 * except the PCI request bits); otherwise it programs the per-pin
 * interrupt address and enables just that vector's bit.  Finishes with
 * a b_wid_tflush read to wait for the Bridge PIO to complete.
 * NOTE(review): the targ/addr parameters (lines 797-798) and the else
 * keyword/braces are elided from this listing.
 */
795 pcibr_xintr_preset(void *which_widget,
796 int which_widget_intr,
799 xtalk_intr_vector_t vect)
801 bridge_t *bridge = (bridge_t *) which_widget;
803 if (which_widget_intr == -1) {
804 /* bridge widget error interrupt */
805 bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
806 XTALK_ADDR_TO_UPPER(addr));
807 bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
808 bridge->b_int_host_err = vect;
809 printk("pcibr_xintr_preset: b_wid_int_upper 0x%lx b_wid_int_lower 0x%lx b_int_host_err 0x%x\n",
810 ( (0x000F0000 & (targ << 16)) | XTALK_ADDR_TO_UPPER(addr)),
811 XTALK_ADDR_TO_LOWER(addr), vect);
813 /* turn on all interrupts except
814 * the PCI interrupt requests,
817 bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
820 /* routing a PCI device interrupt.
821 * targ and low 38 bits of addr must
822 * be the same as the already set
823 * value for the widget error interrupt.
825 bridge->b_int_addr[which_widget_intr].addr =
826 ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
827 (BRIDGE_INT_ADDR_FLD & vect));
829 * now bridge can let it through;
830 * NB: still should be blocked at
831 * xtalk provider end, until the service
834 bridge->b_int_enable |= 1 << vect;
836 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
843 * This is the pcibr interrupt "wrapper" function that is called,
844 * in interrupt context, to initiate the interrupt handler(s) registered
845 * (via pcibr_intr_alloc/connect) for the occurring interrupt. Non-threaded
846 * handlers will be called directly, and threaded handlers will have their
850 pcibr_intr_func(intr_arg_t arg)
852 pcibr_intr_wrap_t wrap = (pcibr_intr_wrap_t) arg;
856 pcibr_intr_list_t list;
858 int do_nonthreaded = 1;
861 pcibr_soft_t pcibr_soft = wrap->iw_soft;
862 bridge_t *bridge = pcibr_soft->bs_base;
863 uint64_t p_enable = pcibr_soft->bs_int_enable;
864 int bit = wrap->iw_ibit;
868 * Early attempt at a workaround for the runaway
869 * interrupt problem. Briefly disable the enable bit for
872 if (IS_PIC_SOFT(pcibr_soft) &&
873 PCIBR_WAR_ENABLED(PV855272, pcibr_soft)) {
876 /* disable-enable interrupts for this bridge pin */
878 p_enable &= ~(1 << bit);
879 s = pcibr_lock(pcibr_soft);
880 bridge->p_int_enable_64 = p_enable;
881 p_enable |= (1 << bit);
882 bridge->p_int_enable_64 = p_enable;
883 pcibr_unlock(pcibr_soft, s);
887 * If any handler is still running from a previous interrupt
888 * just return. If there's a need to call the handler(s) again,
889 * another interrupt will be generated either by the device or by
890 * pcibr_force_interrupt().
893 if (wrap->iw_hdlrcnt) {
898 * Call all interrupt handlers registered.
899 * First, the pcibr_intrd threads for any threaded handlers will be
900 * awoken, then any non-threaded handlers will be called sequentially.
904 while (do_nonthreaded) {
905 for (list = wrap->iw_list; list != NULL; list = list->il_next) {
906 if ((intr = list->il_intr) && (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
909 * This device may have initiated write
910 * requests since the bridge last saw
911 * an edge on this interrupt input; flushing
912 * the buffer prior to invoking the handler
913 * should help but may not be sufficient if we
914 * get more requests after the flush, followed
915 * by the card deciding it wants service, before
916 * the interrupt handler checks to see if things need
919 * There is a similar race condition if
920 * an interrupt handler loops around and
921 * notices further service is required.
922 * Perhaps we need to have an explicit
923 * call that interrupt handlers need to
924 * do between noticing that DMA to memory
925 * has completed, but before observing the
926 * contents of memory?
929 if ((do_nonthreaded) && (!is_threaded)) {
930 /* Non-threaded - Call the interrupt handler at interrupt level */
931 /* Only need to flush write buffers if sharing */
933 if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
934 if ((x = *wrbf)) /* write request buffer flush */
935 #ifdef SUPPORT_PRINTING_V_FORMAT
936 printk(KERN_ALERT "pcibr_intr_func %v: \n"
937 "write buffer flush failed, wrbf=0x%x\n",
938 list->il_intr->bi_dev, wrbf);
940 printk(KERN_ALERT "pcibr_intr_func %p: \n"
941 "write buffer flush failed, wrbf=0x%lx\n",
942 (void *)list->il_intr->bi_dev, (long) wrbf);
945 func = intr->bi_func;
955 * If the non-threaded handler was the last to complete,
956 * (i.e., no threaded handlers still running) force an
957 * interrupt to avoid a potential deadlock situation.
959 if (wrap->iw_hdlrcnt == 0) {
960 pcibr_force_interrupt(wrap);
964 /* If there were no handlers,
965 * disable the interrupt and return.
966 * It will get enabled again after
967 * a handler is connected.
968 * If we don't do this, we would
969 * sit here and spin through the
973 pcibr_soft_t pcibr_soft = wrap->iw_soft;
974 bridge_t *bridge = pcibr_soft->bs_base;
975 bridgereg_t int_enable;
976 bridgereg_t mask = 1 << wrap->iw_ibit;
979 /* PIC BRINUGP WAR (PV# 854697):
980 * On PIC we must write 64-bit MMRs with 64-bit stores
982 s = pcibr_lock(pcibr_soft);
983 if (IS_PIC_SOFT(pcibr_soft) &&
984 PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
985 int_enable = bridge->p_int_enable_64;
987 bridge->p_int_enable_64 = int_enable;
989 int_enable = (uint64_t)bridge->b_int_enable;
991 bridge->b_int_enable = (bridgereg_t)int_enable;
993 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
994 pcibr_unlock(pcibr_soft, s);