2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
9 #include <linux/types.h>
10 #include <linux/module.h>
11 #include <asm/sn/sgi.h>
12 #include <asm/sn/arch.h>
13 #include <asm/sn/iograph.h>
14 #include <asm/sn/pci/pciio.h>
15 #include <asm/sn/pci/pcibr.h>
16 #include <asm/sn/pci/pcibr_private.h>
17 #include <asm/sn/pci/pci_defs.h>
18 #include <asm/sn/io.h>
19 #include <asm/sn/sn_private.h>
/*
 * compare_and_swap_ptr - install new_ptr at *location if it still holds
 * old_ptr; used below to resolve allocation races on shared pointers.
 * NOTE(review): per the FIXME this is NOT atomic -- callers tolerate the
 * race window or serialize externally; confirm before relying on it.
 */
23 compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
25 /* FIXME - compare_and_swap_ptr NOT ATOMIC */
26 if (*location == old_ptr) {
35 unsigned int pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
36 pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
37 void pcibr_intr_free(pcibr_intr_t);
38 void pcibr_setpciint(xtalk_intr_t);
39 int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
40 void pcibr_intr_disconnect(pcibr_intr_t);
42 vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
44 extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
46 /* =====================================================================
47 * INTERRUPT MANAGEMENT
/*
 * pcibr_intr_bits - map a device's PCI interrupt line selection (INTA-D
 * mask in 'lines') to a mask of Bridge PCI interrupt bits for the slot
 * identified by 'info'.  'nslots' is the slot count on this bus.
 */
51 pcibr_intr_bits(pciio_info_t info,
52 pciio_intr_line_t lines, int nslots)
54 pciio_slot_t slot = PCIBR_INFO_SLOT_GET_INT(info);
58 * Currently favored mapping from PCI
59 * slot number and INTA/B/C/D to Bridge
60 * PCI Interrupt Bit Number:
74 if (lines & (PCIIO_INTR_LINE_A| PCIIO_INTR_LINE_C))
76 if (lines & (PCIIO_INTR_LINE_B| PCIIO_INTR_LINE_D))
/* INTB/INTD use the "partner" bit: slot number with bit 2 flipped. */
77 bbits |= 1 << (slot ^ 4);
84 * On SN systems there is a race condition between a PIO read response
85 * and DMA's. In rare cases, the read response may beat the DMA, causing
86 * the driver to think that data in memory is complete and meaningful.
87 * This code eliminates that race.
88 * This routine is called by the PIO read routines after doing the read.
89 * This routine then forces a fake interrupt on another line, which
90 * is logically associated with the slot that the PIO is addressed to.
91 * (see sn_dma_flush_init() )
92 * It then spins while watching the memory location that the interrupt
93 * is targeted to. When the interrupt response arrives, we are sure
94 * that the DMA has landed in memory and it is safe for the driver
98 extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
/*
 * sn_dma_flush - force outstanding DMA for the device that owns PIO
 * address 'addr' to land in memory before the caller consumes the data
 * it just read (see the race description in the comment block above).
 */
101 sn_dma_flush(unsigned long addr)
105 volatile struct sn_flush_device_list *p;
110 nasid = NASID_GET(addr);
111 wid_num = SWIN_WIDGETNUM(addr);
112 bwin = BWIN_WINDOWNUM(addr);
/* No flush bookkeeping registered for this node: nothing to do. */
114 if (flush_nasid_list[nasid].widget_p == NULL) return;
116 unsigned long itte = flush_nasid_list[nasid].iio_itte[bwin];
/* Big-window access: recover the real widget number from the ITTE. */
118 wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
119 IIO_ITTE_WIDGET_MASK;
121 if (flush_nasid_list[nasid].widget_p == NULL) return;
122 if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) return;
123 p = &flush_nasid_list[nasid].widget_p[wid_num][0];
125 /* find a matching BAR */
127 for (i=0; i<DEV_PER_WIDGET;i++) {
128 for (j=0; j<PCI_ROM_RESOURCE;j++) {
/* bar_list entries are packed; a zero start terminates the list. */
129 if (p->bar_list[j].start == 0) break;
130 if (addr >= p->bar_list[j].start && addr <= p->bar_list[j].end) break;
132 if (j < PCI_ROM_RESOURCE && p->bar_list[j].start != 0) break;
136 /* if no matching BAR, return without doing anything. */
138 if (i == DEV_PER_WIDGET) return;
140 spin_lock_irqsave(&p->flush_lock, flags);
144 /* force an interrupt. */
146 *(volatile uint32_t *)(p->force_int_addr) = 1;
148 /* wait for the interrupt to come back. */
/* NOTE(review): busy-wait with interrupts disabled; 0x10f is presumably
 * the sentinel the interrupt path writes to flush_addr -- confirm, and
 * note there is no timeout if the interrupt never arrives. */
150 while (p->flush_addr != 0x10f);
152 /* okay, everything is synched up. */
153 spin_unlock_irqrestore(&p->flush_lock, flags);
156 EXPORT_SYMBOL(sn_dma_flush);
159 * There are end cases where a deadlock can occur if interrupt
160 * processing completes and the Bridge b_int_status bit is still set.
162 * One scenario is if a second PCI interrupt occurs within 60ns of
163 * the previous interrupt being cleared. In this case the Bridge
164 * does not detect the transition, the Bridge b_int_status bit
165 * remains set, and because no transition was detected no interrupt
166 * packet is sent to the Hub/Heart.
168 * A second scenario is possible when a b_int_status bit is being
169 * shared by multiple devices:
170 * Device #1 generates interrupt
171 * Bridge b_int_status bit set
172 * Device #2 generates interrupt
173 * interrupt processing begins
174 * ISR for device #1 runs and
176 * Device #1 generates interrupt
177 * ISR for device #2 runs and
179 * (b_int_status bit still set)
180 * interrupt processing completes
182 * Interrupt processing is now complete, but an interrupt is still
183 * outstanding for Device #1. But because there was no transition of
184 * the b_int_status bit, no interrupt packet will be generated and
185 * a deadlock will occur.
187 * To avoid these deadlock situations, this function is used
188 * to check if a specific Bridge b_int_status bit is set, and if so,
189 * cause the setting of the corresponding interrupt bit.
191 * On a XBridge (SN1) and PIC (SN2), we do this by writing the appropriate Bridge Force
192 * Interrupt register.
/*
 * pcibr_force_interrupt - re-trigger every Bridge interrupt bit that
 * 'intr' owns by writing the Bridge Force Interrupt register, closing
 * the lost-transition deadlock windows described in the comment above.
 */
195 pcibr_force_interrupt(pcibr_intr_t intr)
199 pcibr_soft_t pcibr_soft = intr->bi_soft;
201 bits = intr->bi_ibits;
/* Bridge has 8 PCI interrupt bits; poke each one this intr owns. */
202 for (bit = 0; bit < 8; bit++) {
203 if (bits & (1 << bit)) {
205 PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
206 "pcibr_force_interrupt: bit=0x%x\n", bit));
208 pcireg_force_intr_set(pcibr_soft, bit);
/*
 * pcibr_intr_alloc - allocate a pcibr interrupt object for the device at
 * 'pconn_vhdl' covering the PCI interrupt line(s) in 'lines'.  For each
 * Bridge interrupt bit those lines map to, ensure a xtalk interrupt
 * exists (allocating one if needed) and link this intr onto the bit's
 * wrapper list so the bit may be shared by multiple devices.
 */
215 pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
216 device_desc_t dev_desc,
217 pciio_intr_line_t lines,
218 vertex_hdl_t owner_dev)
220 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
221 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
222 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
223 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
226 xtalk_intr_t *xtalk_intr_p;
227 pcibr_intr_t *pcibr_intr_p;
228 pcibr_intr_list_t *intr_list_p;
230 unsigned pcibr_int_bits;
231 unsigned pcibr_int_bit;
232 xtalk_intr_t xtalk_intr = (xtalk_intr_t)0;
234 pcibr_intr_t pcibr_intr;
235 pcibr_intr_list_t intr_entry;
236 pcibr_intr_list_t intr_list;
238 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
239 "pcibr_intr_alloc: %s%s%s%s%s\n",
240 !(lines & 15) ? " No INTs?" : "",
241 lines & 1 ? " INTA" : "",
242 lines & 2 ? " INTB" : "",
243 lines & 4 ? " INTC" : "",
244 lines & 8 ? " INTD" : ""));
246 pcibr_intr = kmalloc(sizeof (*(pcibr_intr)), GFP_KERNEL);
249 memset(pcibr_intr, 0, sizeof (*(pcibr_intr)));
251 pcibr_intr->bi_dev = pconn_vhdl;
252 pcibr_intr->bi_lines = lines;
253 pcibr_intr->bi_soft = pcibr_soft;
254 pcibr_intr->bi_ibits = 0; /* bits will be added below */
255 pcibr_intr->bi_func = 0; /* unset until connect */
256 pcibr_intr->bi_arg = 0; /* unset until connect */
257 pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
258 pcibr_intr->bi_mustruncpu = CPU_NONE;
259 pcibr_intr->bi_ibuf.ib_in = 0;
260 pcibr_intr->bi_ibuf.ib_out = 0;
261 spin_lock_init(&pcibr_intr->bi_ibuf.ib_lock);
/* Translate the requested PCI line(s) into Bridge interrupt bit mask. */
263 pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info,
264 lines, PCIBR_NUM_SLOTS(pcibr_soft));
267 * For each PCI interrupt line requested, figure
268 * out which Bridge PCI Interrupt Line it maps
269 * to, and make sure there are xtalk resources
272 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
273 "pcibr_intr_alloc: pcibr_int_bits: 0x%x\n", pcibr_int_bits));
274 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
275 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
276 xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
278 xtalk_intr = *xtalk_intr_p;
/* First user of this Bridge bit: allocate the xtalk interrupt. */
280 if (xtalk_intr == NULL) {
282 * This xtalk_intr_alloc is constrained for two reasons:
283 * 1) Normal interrupts and error interrupts need to be delivered
284 * through a single xtalk target widget so that there aren't any
285 * ordering problems with DMA, completion interrupts, and error
286 * interrupts. (Use of xconn_vhdl forces this.)
288 * 2) On SN1, addressing constraints on SN1 and Bridge force
289 * us to use a single PI number for all interrupts from a
290 * single Bridge. (SN1-specific code forces this).
294 * All code dealing with threaded PCI interrupt handlers
295 * is located at the pcibr level. Because of this,
296 * we always want the lower layers (hub/heart_intr_alloc,
297 * intr_level_connect) to treat us as non-threaded so we
298 * don't set up a duplicate threaded environment. We make
299 * this happen by calling a special xtalk interface.
301 xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc,
304 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
305 "pcibr_intr_alloc: xtalk_intr=0x%lx\n", xtalk_intr));
307 /* both an assert and a runtime check on this:
308 * we need to check in non-DEBUG kernels, and
309 * the ASSERT gets us more information when
310 * we use DEBUG kernels.
312 ASSERT(xtalk_intr != NULL);
313 if (xtalk_intr == NULL) {
314 /* it is quite possible that our
315 * xtalk_intr_alloc failed because
316 * someone else got there first,
317 * and we can find their results
320 if (!*xtalk_intr_p) {
321 printk(KERN_ALERT "pcibr_intr_alloc %s: "
322 "unable to get xtalk interrupt resources",
323 pcibr_soft->bs_name);
324 /* yes, we leak resources here. */
/* Race resolution: only the winner of the swap publishes its xtalk_intr. */
327 } else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
329 * now tell the bridge which slot is
330 * using this interrupt line.
332 pcireg_intr_device_bit_clr(pcibr_soft,
333 BRIDGE_INT_DEV_MASK(pcibr_int_bit));
334 pcireg_intr_device_bit_set(pcibr_soft,
335 (pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit)));
337 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
338 "bridge intr bit %d clears my wrb\n",
341 /* someone else got one allocated first;
342 * free the one we just created, and
343 * retrieve the one they allocated.
345 xtalk_intr_free(xtalk_intr);
346 xtalk_intr = *xtalk_intr_p;
350 pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;
/* Build a share-list entry linking this intr to the Bridge bit. */
352 intr_entry = kmalloc(sizeof (*(intr_entry)), GFP_KERNEL);
354 printk(KERN_ALERT "pcibr_intr_alloc %s: "
355 "unable to get memory",
356 pcibr_soft->bs_name);
359 memset(intr_entry, 0, sizeof (*(intr_entry)));
361 intr_entry->il_next = NULL;
362 intr_entry->il_intr = pcibr_intr;
363 intr_entry->il_soft = pcibr_soft;
364 intr_entry->il_slot = pciio_slot;
366 &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
368 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
369 "Bridge bit 0x%x wrap=0x%lx\n", pcibr_int_bit,
370 &(pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap)));
/* Lock-free insertion: walk the list, filling the first erased or
 * NULL link we can win with compare_and_swap_ptr. */
372 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
373 /* we are the first interrupt on this bridge bit.
375 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
376 "INT 0x%x (bridge bit %d) allocated [FIRST]\n",
377 pcibr_int_bits, pcibr_int_bit));
380 intr_list = *intr_list_p;
381 pcibr_intr_p = &intr_list->il_intr;
382 if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
383 /* first entry on list was erased,
384 * and we replaced it, so we
385 * don't need our intr_entry.
388 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
389 "INT 0x%x (bridge bit %d) replaces erased first\n",
390 pcibr_int_bits, pcibr_int_bit));
393 intr_list_p = &intr_list->il_next;
394 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
395 /* we are the new second interrupt on this bit.
397 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
398 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
399 "INT 0x%x (bridge bit %d) is new SECOND\n",
400 pcibr_int_bits, pcibr_int_bit));
404 pcibr_intr_p = &intr_list->il_intr;
405 if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
406 /* an entry on list was erased,
407 * and we replaced it, so we
408 * don't need our intr_entry.
412 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
413 "INT 0x%x (bridge bit %d) replaces erase Nth\n",
414 pcibr_int_bits, pcibr_int_bit));
417 intr_list_p = &intr_list->il_next;
418 if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
419 /* entry appended to share list
421 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
422 "INT 0x%x (bridge bit %d) is new Nth\n",
423 pcibr_int_bits, pcibr_int_bit));
426 /* step to next record in chain
428 intr_list = *intr_list_p;
/* Publish IRQ/CPU from the last xtalk_intr touched in the loop above. */
433 hub_intr = (hub_intr_t)xtalk_intr;
434 pcibr_intr->bi_irq = hub_intr->i_bit;
435 pcibr_intr->bi_cpu = hub_intr->i_cpuid;
436 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
437 "pcibr_intr_alloc complete: pcibr_intr=0x%lx\n", pcibr_intr));
/*
 * pcibr_intr_free - undo pcibr_intr_alloc: detach this intr from each
 * Bridge bit's share list and, for bits that were never shared, release
 * the xtalk interrupt and clear the bit's slot routing in the Bridge.
 */
443 pcibr_intr_free(pcibr_intr_t pcibr_intr)
445 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
446 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
447 unsigned pcibr_int_bit;
448 pcibr_intr_list_t intr_list;
450 xtalk_intr_t *xtalk_intrp;
452 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
453 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
455 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
457 intr_list = intr_list->il_next)
/* "Erase" our list entry by swapping il_intr back to NULL; the
 * entry itself stays on the list for reuse (see alloc path). */
458 if (compare_and_swap_ptr((void **) &intr_list->il_intr,
462 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC,
464 "pcibr_intr_free: cleared hdlr from bit 0x%x\n",
467 /* If this interrupt line is not being shared between multiple
468 * devices release the xtalk interrupt resources.
471 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
472 xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
474 if ((!intr_shared) && (*xtalk_intrp)) {
476 xtalk_intr_free(*xtalk_intrp);
479 /* Clear the PCI device interrupt to bridge interrupt pin
482 pcireg_intr_device_bit_clr(pcibr_soft,
483 BRIDGE_INT_DEV_MASK(pcibr_int_bit));
/*
 * pcibr_setpciint - xtalk "setfunc" callback: program the Bridge's
 * per-bit interrupt address/vector registers from the xtalk interrupt's
 * current target address and vector.  The bus number and Bridge bit are
 * packed into the sfarg (bus in bit 3, bit number in bits 2:0).
 */
491 pcibr_setpciint(xtalk_intr_t xtalk_intr)
494 xtalk_intr_vector_t vect;
500 addr = xtalk_intr_addr_get(xtalk_intr);
501 vect = xtalk_intr_vector_get(xtalk_intr);
502 vhdl = xtalk_intr_dev_get(xtalk_intr);
504 /* bus and int_bits are stored in sfarg, bus bit3, int_bits bit2:0 */
505 pcibr_int_bit = *((int *)xtalk_intr_sfarg_get(xtalk_intr)) & 0x7;
506 bus_num = ((*((int *)xtalk_intr_sfarg_get(xtalk_intr)) & 0x8) >> 3);
508 bridge = pcibr_bridge_ptr_get(vhdl, bus_num);
509 pcireg_bridge_intr_addr_vect_set(bridge, pcibr_int_bit, vect);
510 pcireg_bridge_intr_addr_addr_set(bridge, pcibr_int_bit, addr);
/*
 * pcibr_intr_connect - attach 'intr_func'/'intr_arg' to a previously
 * allocated pcibr interrupt, wiring each of its Bridge bits to the
 * shared pcibr wrapper via xtalk_intr_connect and then enabling the
 * bits in the Bridge interrupt-enable register.
 */
515 pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
/* NOTE(review): pcibr_intr is dereferenced in these initializers before
 * the NULL check below -- the check as written can never help. */
517 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
518 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
519 unsigned pcibr_int_bit;
522 if (pcibr_intr == NULL)
525 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
526 "pcibr_intr_connect: intr_func=0x%lx, intr_arg=0x%lx\n",
527 intr_func, intr_arg));
529 pcibr_intr->bi_func = intr_func;
530 pcibr_intr->bi_arg = intr_arg;
/* Publish the connected flag with a volatile access so concurrent
 * readers see it; presumably paired with the disconnect path. */
531 *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
534 * For each PCI interrupt line requested, figure
535 * out which Bridge PCI Interrupt Line it maps
536 * to, and make sure there are xtalk resources
539 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
540 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
541 pcibr_intr_wrap_t intr_wrap;
542 xtalk_intr_t xtalk_intr;
545 xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
546 intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
549 * If this interrupt line is being shared and the connect has
550 * already been done, no need to do it again.
552 if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
557 * Use the pcibr wrapper function to handle all Bridge interrupts
558 * regardless of whether the interrupt line is shared or not.
560 int_addr = pcireg_intr_addr_addr(pcibr_soft, pcibr_int_bit);
/* Encode bus number (bit 3) and Bridge bit (bits 2:0) for
 * pcibr_setpciint's sfarg decoding. */
561 pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit =
562 ((pcibr_soft->bs_busnum << 3) | pcibr_int_bit);
563 xtalk_intr_connect(xtalk_intr,
565 (intr_arg_t) intr_wrap,
566 (xtalk_intr_setfunc_t) pcibr_setpciint,
567 &pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit);
569 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
571 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
572 "pcibr_setpciint: int_addr=0x%lx, *int_addr=0x%lx, "
573 "pcibr_int_bit=0x%x\n", int_addr,
574 pcireg_intr_addr_get(pcibr_soft, pcibr_int_bit),
/* Enable the bits under the bridge lock; the tflush read flushes
 * the PIO write before the lock is dropped. */
578 s = pcibr_lock(pcibr_soft);
579 pcireg_intr_enable_bit_set(pcibr_soft, pcibr_int_bits);
580 pcireg_tflush_get(pcibr_soft);
581 pcibr_unlock(pcibr_soft, s);
/*
 * pcibr_intr_disconnect - detach the handler from a pcibr interrupt:
 * clear the connected flag and handler fields, disable and disconnect
 * the Bridge bits this intr owned exclusively, and re-connect any bit
 * that is shared with another device so that device keeps working.
 */
588 pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
590 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
591 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
592 unsigned pcibr_int_bit;
593 pcibr_intr_wrap_t intr_wrap;
596 /* Stop calling the function. Now.
598 *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
599 pcibr_intr->bi_func = 0;
600 pcibr_intr->bi_arg = 0;
602 * For each PCI interrupt line requested, figure
603 * out which Bridge PCI Interrupt Line it maps
604 * to, and disconnect the interrupt.
607 /* don't disable interrupts for lines that
608 * are shared between devices.
610 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
611 if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
612 (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
613 pcibr_int_bits &= ~(1 << pcibr_int_bit);
/* Disable only the exclusively-owned bits, under the bridge lock. */
617 s = pcibr_lock(pcibr_soft);
618 pcireg_intr_enable_bit_clr(pcibr_soft, pcibr_int_bits);
619 pcireg_tflush_get(pcibr_soft); /* wait until Bridge PIO complete */
620 pcibr_unlock(pcibr_soft, s);
622 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
623 "pcibr_intr_disconnect: disabled int_bits=0x%x\n",
626 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
627 if (pcibr_int_bits & (1 << pcibr_int_bit)) {
629 /* if the interrupt line is now shared,
630 * do not disconnect it.
632 if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
635 xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
636 pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;
638 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
639 "pcibr_intr_disconnect: disconnect int_bits=0x%x\n",
642 /* if we are sharing the interrupt line,
643 * connect us up; this closes the hole
644 * where the another pcibr_intr_alloc()
645 * was in progress as we disconnected.
647 intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
648 if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
/* Re-connect with the same sfarg encoding used in connect:
 * bus number in bit 3, Bridge bit in bits 2:0. */
651 pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit =
652 ((pcibr_soft->bs_busnum << 3) | pcibr_int_bit);
653 xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
655 (intr_arg_t) intr_wrap,
656 (xtalk_intr_setfunc_t) pcibr_setpciint,
657 &pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit);
659 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
660 "pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
/*
 * pcibr_intr_cpu_get - return the CPU (vertex) handling the first
 * Bridge interrupt bit owned by 'pcibr_intr', via its xtalk interrupt.
 */
667 pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
669 pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
670 unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
671 unsigned pcibr_int_bit;
/* First set bit wins; all bits of one intr target the same widget. */
673 for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
674 if (pcibr_int_bits & (1 << pcibr_int_bit))
675 return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
679 /* =====================================================================
/*
 * pcibr_clearwidint - clear the Bridge's interrupt destination
 * register (no interrupts routed until pcibr_setwidint reprograms it).
 */
683 pcibr_clearwidint(pcibr_soft_t pcibr_soft)
685 pcireg_intr_dst_set(pcibr_soft, 0);
690 pcibr_setwidint(xtalk_intr_t intr)
692 xwidgetnum_t targ = xtalk_intr_target_get(intr);
693 iopaddr_t addr = xtalk_intr_addr_get(intr);
694 xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
696 pcibr_soft_t bridge = (pcibr_soft_t)xtalk_intr_sfarg_get(intr);
698 pcireg_intr_dst_target_id_set(bridge, targ);
699 pcireg_intr_dst_addr_set(bridge, addr);
700 pcireg_intr_host_err_set(bridge, vect);