3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
7 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/string.h>
14 #include <linux/interrupt.h>
15 #include <asm/sn/sgi.h>
16 #include <asm/sn/sn_sal.h>
17 #include <asm/sn/sn_cpuid.h>
18 #include <asm/sn/addrs.h>
19 #include <asm/sn/arch.h>
20 #include <asm/sn/iograph.h>
21 #include <asm/sn/invent.h>
22 #include <asm/sn/hcl.h>
23 #include <asm/sn/labelcl.h>
24 #include <asm/sn/klconfig.h>
25 #include <asm/sn/xtalk/xwidget.h>
26 #include <asm/sn/pci/bridge.h>
27 #include <asm/sn/pci/pciio.h>
28 #include <asm/sn/pci/pcibr.h>
29 #include <asm/sn/pci/pcibr_private.h>
30 #include <asm/sn/pci/pci_defs.h>
31 #include <asm/sn/prio.h>
32 #include <asm/sn/xtalk/xbow.h>
33 #include <asm/sn/ioc3.h>
34 #include <asm/sn/io.h>
35 #include <asm/sn/sn_private.h>
38 #define rmallocmap atemapalloc
39 #define rmfreemap atemapfree
40 #define rmfree atefree
41 #define rmalloc atealloc
45 * global variables to toggle the different levels of pcibr debugging.
46 * -pcibr_debug_mask is the mask of the different types of debugging
47 * you want to enable. See sys/PCI/pcibr_private.h
48 * -pcibr_debug_module is the module you want to trace. By default
49 * all modules are traced. For IP35 this value has the format of
50 * something like "001c10". For IP27 this value is a node number,
51 * i.e. "1", "2"... For IP30 this is undefined and should be set to
53 * -pcibr_debug_widget is the widget you want to trace. For IP27
54 * the widget isn't exposed in the hwpath so use the xio slot num.
55 * i.e. for 'io2' set pcibr_debug_widget to "2".
56 * -pcibr_debug_slot is the pci slot you want to trace.
58 uint32_t pcibr_debug_mask = 0x0; /* 0x00000000 to disable */
59 char *pcibr_debug_module = "all"; /* 'all' for all modules */
60 int pcibr_debug_widget = -1; /* '-1' for all widgets */
61 int pcibr_debug_slot = -1; /* '-1' for all slots */
64 * Macros related to the Lucent USS 302/312 usb timeout workaround. It
65 * appears that the lucent part can get into a retry loop if it sees a
66 * DAC on the bus during a pio read retry. The loop is broken after about
67 * 1ms, so we need to set up bridges holding this part to allow at least
71 #define USS302_TIMEOUT_WAR
73 #ifdef USS302_TIMEOUT_WAR
74 #define LUCENT_USBHC_VENDOR_ID_NUM 0x11c1
75 #define LUCENT_USBHC302_DEVICE_ID_NUM 0x5801
76 #define LUCENT_USBHC312_DEVICE_ID_NUM 0x5802
77 #define USS302_BRIDGE_TIMEOUT_HLD 4
80 int pcibr_devflag = D_MP;
82 /* kbrick widgetnum-to-bus layout */
83 int p_busnum[MAX_PORT_NUM] = { /* widget# */
84 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
95 pcibr_list_p pcibr_list = 0;
98 extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
99 extern long atoi(register char *p);
100 extern cnodeid_t nodevertex_to_cnodeid(vertex_hdl_t vhdl);
101 extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
102 extern struct map *atemapalloc(uint64_t);
103 extern void atefree(struct map *, size_t, uint64_t);
104 extern void atemapfree(struct map *);
105 extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
106 extern void free_pciio_dmamap(pcibr_dmamap_t);
107 extern void xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
109 #define ATE_WRITE() ate_write(pcibr_soft, ate_ptr, ate_count, ate)
110 #if PCIBR_FREEZE_TIME
111 #define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
113 #define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
114 #endif /* PCIBR_FREEZE_TIME */
116 #if PCIBR_FREEZE_TIME
117 #define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
119 #define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
122 /* =====================================================================
123 * Function Table of Contents
125 * The order of functions in this file has stopped
126 * making much sense. We might want to take a look
127 * at it some time and bring back some sanity, or
128 * perhaps bust this file into smaller chunks.
131 extern int do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
132 extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
134 extern int pcibr_wrb_flush(vertex_hdl_t);
135 extern int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
136 extern void pcibr_rrb_flush(vertex_hdl_t);
138 static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
139 void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
141 extern void pcibr_setwidint(xtalk_intr_t);
142 extern void pcibr_clearwidint(bridge_t *);
144 extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
145 pciio_space_t, int, int, int);
147 int pcibr_attach(vertex_hdl_t);
148 int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
149 int, pcibr_soft_t *);
150 int pcibr_detach(vertex_hdl_t);
151 int pcibr_close(vertex_hdl_t, int, int, cred_t *);
152 int pcibr_map(vertex_hdl_t, vhandl_t *, off_t, size_t, uint);
153 int pcibr_unmap(vertex_hdl_t, vhandl_t *);
154 int pcibr_ioctl(vertex_hdl_t, int, void *, int, struct cred *, int *);
155 int pcibr_pcix_rbars_calc(pcibr_soft_t);
156 extern int pcibr_init_ext_ate_ram(bridge_t *);
157 extern int pcibr_ate_alloc(pcibr_soft_t, int);
158 extern void pcibr_ate_free(pcibr_soft_t, int, int);
159 extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
161 extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
162 #if PCIBR_FREEZE_TIME
163 unsigned *freeze_time_ptr,
166 extern void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
167 extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
168 #if PCIBR_FREEZE_TIME
171 unsigned freeze_time_start,
176 pcibr_info_t pcibr_info_get(vertex_hdl_t);
178 static iopaddr_t pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
180 pcibr_piomap_t pcibr_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
181 void pcibr_piomap_free(pcibr_piomap_t);
182 caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
183 void pcibr_piomap_done(pcibr_piomap_t);
184 caddr_t pcibr_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
185 iopaddr_t pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
186 void pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
188 static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
189 extern bridge_ate_t pcibr_flags_to_ate(unsigned);
191 pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
192 void pcibr_dmamap_free(pcibr_dmamap_t);
193 extern bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
194 static iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
195 iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
196 void pcibr_dmamap_done(pcibr_dmamap_t);
197 cnodeid_t pcibr_get_dmatrans_node(vertex_hdl_t);
198 iopaddr_t pcibr_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
199 void pcibr_dmamap_drain(pcibr_dmamap_t);
200 void pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
201 void pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
202 iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
204 extern unsigned pcibr_intr_bits(pciio_info_t info,
205 pciio_intr_line_t lines, int nslots);
206 extern pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
207 extern void pcibr_intr_free(pcibr_intr_t);
208 extern void pcibr_setpciint(xtalk_intr_t);
209 extern int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
210 extern void pcibr_intr_disconnect(pcibr_intr_t);
212 extern vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
213 extern void pcibr_intr_func(intr_arg_t);
215 extern void print_bridge_errcmd(uint32_t, char *);
217 extern void pcibr_error_dump(pcibr_soft_t);
218 extern uint32_t pcibr_errintr_group(uint32_t);
219 extern void pcibr_pioerr_check(pcibr_soft_t);
220 extern void pcibr_error_intr_handler(int, void *, struct pt_regs *);
222 extern int pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
223 extern void pcibr_error_cleanup(pcibr_soft_t, int);
224 extern void pcibr_device_disable(pcibr_soft_t, int);
225 extern int pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
226 extern int pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
227 extern int pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
228 extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
229 extern int pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
230 void pcibr_provider_startup(vertex_hdl_t);
231 void pcibr_provider_shutdown(vertex_hdl_t);
233 int pcibr_reset(vertex_hdl_t);
234 pciio_endian_t pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
235 int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
236 pciio_priority_t pcibr_priority_set(vertex_hdl_t, pciio_priority_t);
237 int pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
239 extern cfg_p pcibr_config_addr(vertex_hdl_t, unsigned);
240 extern uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
241 extern void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
243 extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
244 extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
245 extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
246 extern void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
247 extern void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
248 extern void pcibr_hints_handsoff(vertex_hdl_t);
249 extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
251 extern int pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
252 extern int pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
253 extern int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
254 pcibr_slot_info_resp_t);
255 extern void pcibr_slot_func_info_return(pcibr_info_h, int,
256 pcibr_slot_func_info_resp_t);
257 extern int pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
258 extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
259 extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
260 extern int pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
261 extern int pcibr_slot_call_device_attach(vertex_hdl_t,
263 extern int pcibr_slot_call_device_detach(vertex_hdl_t,
265 extern int pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int,
267 extern int pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
270 extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
271 extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
274 * This is the file operation table for the pcibr driver.
275 * As each of the functions are implemented, put the
276 * appropriate function name below.
278 static int pcibr_mmap(struct file * file, struct vm_area_struct * vma);
279 static int pcibr_open(struct inode *, struct file *);
280 struct file_operations pcibr_fops = {
298 get_unmapped_area: NULL
301 /* =====================================================================
302 * Device(x) register management
305 /* pcibr_try_set_device: attempt to modify Device(x)
306 * for the specified slot on the specified bridge
307 * as requested in flags, limited to the specified
308 * bits. Returns which BRIDGE bits were in conflict,
309 * or ZERO if everything went OK.
311 * Caller MUST hold pcibr_lock when calling this function.
314 pcibr_try_set_device(pcibr_soft_t pcibr_soft,
320 pcibr_soft_slot_t slotp;
333 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
334 if (mask == BRIDGE_DEV_PMU_BITS)
335 xmask = XBRIDGE_DEV_PMU_BITS;
336 if (mask == BRIDGE_DEV_D64_BITS)
337 xmask = XBRIDGE_DEV_D64_BITS;
340 slotp = &pcibr_soft->bs_slot[slot];
342 s = pcibr_lock(pcibr_soft);
344 bridge = pcibr_soft->bs_base;
346 old = slotp->bss_device;
348 /* figure out what the desired
349 * Device(x) bits are based on
350 * the flags specified.
355 /* Currently, we inherit anything that
356 * the new caller has not specified in
357 * one way or another, unless we take
358 * action here to not inherit.
360 * This is needed for the "swap" stuff,
361 * since it could have been set via
362 * pcibr_endian_set -- altho note that
363 * any explicit PCIBR_BYTE_STREAM or
364 * PCIBR_WORD_VALUES will freely override
365 * the effect of that call (and vice
366 * versa, no protection either way).
368 * I want to get rid of pcibr_endian_set
369 * in favor of tracking DMA endianness
370 * using the flags specified when DMA
371 * channels are created.
374 #define BRIDGE_DEV_WRGA_BITS (BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
375 #define BRIDGE_DEV_SWAP_BITS (BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
377 /* Do not use Barrier, Write Gather,
378 * or Prefetch unless asked.
379 * Leave everything else as it
380 * was from the last time.
383 & ~BRIDGE_DEV_BARRIER
384 & ~BRIDGE_DEV_WRGA_BITS
388 /* Generic macro flags
390 if (flags & PCIIO_DMA_DATA) {
392 & ~BRIDGE_DEV_BARRIER) /* barrier off */
393 | BRIDGE_DEV_PREF; /* prefetch on */
396 if (flags & PCIIO_DMA_CMD) {
398 & ~BRIDGE_DEV_PREF) /* prefetch off */
399 & ~BRIDGE_DEV_WRGA_BITS) /* write gather off */
400 | BRIDGE_DEV_BARRIER; /* barrier on */
402 /* Generic detail flags
404 if (flags & PCIIO_WRITE_GATHER)
405 new |= BRIDGE_DEV_WRGA_BITS;
406 if (flags & PCIIO_NOWRITE_GATHER)
407 new &= ~BRIDGE_DEV_WRGA_BITS;
409 if (flags & PCIIO_PREFETCH)
410 new |= BRIDGE_DEV_PREF;
411 if (flags & PCIIO_NOPREFETCH)
412 new &= ~BRIDGE_DEV_PREF;
414 if (flags & PCIBR_WRITE_GATHER)
415 new |= BRIDGE_DEV_WRGA_BITS;
416 if (flags & PCIBR_NOWRITE_GATHER)
417 new &= ~BRIDGE_DEV_WRGA_BITS;
419 if (flags & PCIIO_BYTE_STREAM)
420 new |= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ?
421 BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
422 if (flags & PCIIO_WORD_VALUES)
423 new &= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ?
424 ~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;
426 /* Provider-specific flags
428 if (flags & PCIBR_PREFETCH)
429 new |= BRIDGE_DEV_PREF;
430 if (flags & PCIBR_NOPREFETCH)
431 new &= ~BRIDGE_DEV_PREF;
433 if (flags & PCIBR_PRECISE)
434 new |= BRIDGE_DEV_PRECISE;
435 if (flags & PCIBR_NOPRECISE)
436 new &= ~BRIDGE_DEV_PRECISE;
438 if (flags & PCIBR_BARRIER)
439 new |= BRIDGE_DEV_BARRIER;
440 if (flags & PCIBR_NOBARRIER)
441 new &= ~BRIDGE_DEV_BARRIER;
443 if (flags & PCIBR_64BIT)
444 new |= BRIDGE_DEV_DEV_SIZE;
445 if (flags & PCIBR_NO64BIT)
446 new &= ~BRIDGE_DEV_DEV_SIZE;
449 * PIC BRINGUP WAR (PV# 855271):
450 * Allow setting BRIDGE_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
451 * device. The bit is only intended for 64-bit devices and, on
452 * PIC, can cause problems for 32-bit devices.
454 if (IS_PIC_SOFT(pcibr_soft) && mask == BRIDGE_DEV_D64_BITS &&
455 PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
456 if (flags & PCIBR_VCHAN1) {
457 new |= BRIDGE_DEV_VIRTUAL_EN;
458 xmask |= BRIDGE_DEV_VIRTUAL_EN;
463 chg = old ^ new; /* what are we changing, */
464 chg &= xmask; /* of the interesting bits */
468 badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
469 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
470 badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
471 badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
473 badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
474 badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
476 bad = badpmu | badd32 | badd64;
480 /* some conflicts can be resolved by
481 * forcing the bit on. this may cause
482 * some performance degredation in
483 * the stream(s) that want the bit off,
484 * but the alternative is not allowing
485 * the new stream at all.
487 if ( (fix = bad & (BRIDGE_DEV_PRECISE |
488 BRIDGE_DEV_BARRIER)) ) {
490 /* don't change these bits if
491 * they are already set in "old"
495 /* some conflicts can be resolved by
496 * forcing the bit off. this may cause
497 * some performance degredation in
498 * the stream(s) that want the bit on,
499 * but the alternative is not allowing
500 * the new stream at all.
502 if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
503 BRIDGE_DEV_PREF)) ) {
505 /* don't change these bits if
506 * we wanted to turn them on.
510 /* conflicts in other bits mean
511 * we can not establish this DMA
512 * channel while the other(s) are
516 pcibr_unlock(pcibr_soft, s);
518 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
519 "pcibr_try_set_device: mod blocked by %x\n",
526 if (mask == BRIDGE_DEV_PMU_BITS)
527 slotp->bss_pmu_uctr++;
528 if (mask == BRIDGE_DEV_D32_BITS)
529 slotp->bss_d32_uctr++;
530 if (mask == BRIDGE_DEV_D64_BITS)
531 slotp->bss_d64_uctr++;
533 /* the value we want to write is the
534 * original value, with the bits for
535 * our selected changes flipped, and
536 * with any disabled features turned off.
538 new = old ^ chg; /* only change what we want to change */
540 if (slotp->bss_device == new) {
541 pcibr_unlock(pcibr_soft, s);
544 if ( IS_PIC_SOFT(pcibr_soft) ) {
545 bridge->b_device[slot].reg = new;
546 slotp->bss_device = new;
547 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
550 if (io_get_sh_swapper(NASID_GET(bridge))) {
551 BRIDGE_REG_SET32((&bridge->b_device[slot].reg)) = __swab32(new);
552 slotp->bss_device = new;
553 BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
555 bridge->b_device[slot].reg = new;
556 slotp->bss_device = new;
557 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
560 pcibr_unlock(pcibr_soft, s);
563 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
564 "pcibr_try_set_device: Device(%d): %x\n",
565 slot, new, device_bits));
567 printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
/*
 * pcibr_release_device: drop one reference on the Device(x) mapping
 * class ("mask") previously acquired via pcibr_try_set_device, by
 * decrementing the matching per-slot use counter under the bridge lock.
 * NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical.
 */
573 pcibr_release_device(pcibr_soft_t pcibr_soft,
577 pcibr_soft_slot_t slotp;
580 slotp = &pcibr_soft->bs_slot[slot];
582 s = pcibr_lock(pcibr_soft);
584 if (mask == BRIDGE_DEV_PMU_BITS)
585 slotp->bss_pmu_uctr--;
586 if (mask == BRIDGE_DEV_D32_BITS)
587 slotp->bss_d32_uctr--;
588 if (mask == BRIDGE_DEV_D64_BITS)
589 slotp->bss_d64_uctr--;
591 pcibr_unlock(pcibr_soft, s);
595 * flush write gather buffer for slot
/*
 * Reading the per-slot write-request buffer register forces the bridge
 * to drain any gathered writes for that slot; the value read is
 * discarded (wrf is volatile only to keep the read from being elided).
 * NOTE(review): sparse extract -- interior lines are missing.
 */
598 pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
603 volatile uint32_t wrf;
604 s = pcibr_lock(pcibr_soft);
605 bridge = pcibr_soft->bs_base;
607 if ( IS_PIC_SOFT(pcibr_soft) ) {
608 wrf = bridge->b_wr_req_buf[slot].reg;
/* non-PIC: use the 32-bit accessor, byte-swapped if the shub swapper is on */
611 if (io_get_sh_swapper(NASID_GET(bridge))) {
612 wrf = BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
614 wrf = bridge->b_wr_req_buf[slot].reg;
617 pcibr_unlock(pcibr_soft, s);
620 /* =====================================================================
621 * Bridge (pcibr) "Device Driver" entry points
626 * open/close mmap/munmap interface would be used by processes
627 * that plan to map the PCI bridge, and muck around with the
628 * registers. This is dangerous to do, and will be allowed
629 * to a select brand of programs. Typically these are
630 * diagnostics programs, or some user level commands we may
631 * write to do some weird things.
632 * To start with expect them to have root privileges.
633 * We will ask for more later.
/* Character-device open entry point; only the signature survives in this
 * extract (body lines 638-643 are missing). */
637 pcibr_open(struct inode *x, struct file *y)
/* Device close entry point; only the signature survives in this extract. */
644 pcibr_close(vertex_hdl_t dev, int oflag, int otyp, cred_t *crp)
/*
 * pcibr_mmap: map the bridge's register space into a user vma.
 * Recovers the pcibr vertex stashed on the file (hwgfs dentry data or
 * file->private_data), strips the uncached-address bits from the kernel
 * pointer to get a physical address, and remaps it noncached into the vma.
 * NOTE(review): sparse extract -- interior lines are missing.
 */
650 pcibr_mmap(struct file * file, struct vm_area_struct * vma)
652 vertex_hdl_t pcibr_vhdl;
653 pcibr_soft_t pcibr_soft;
655 unsigned long phys_addr;
658 #ifdef CONFIG_HWGFS_FS
659 pcibr_vhdl = (vertex_hdl_t) file->f_dentry->d_fsdata;
661 pcibr_vhdl = (vertex_hdl_t) file->private_data;
663 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
664 bridge = pcibr_soft->bs_base;
665 phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
666 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
667 vma->vm_flags |= VM_RESERVED | VM_IO;
668 error = io_remap_page_range(vma, phys_addr, vma->vm_start,
669 vma->vm_end - vma->vm_start,
/*
 * pcibr_map: map bridge register space for a user process (v_mapphys),
 * and -- Bridge ASIC only -- enable flash-prom writes when the mapped
 * range covers the external flash. NOTE(review): sparse extract --
 * interior lines are missing; code left byte-identical.
 */
676 pcibr_map(vertex_hdl_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
679 vertex_hdl_t vhdl = dev_to_vhdl(dev);
680 vertex_hdl_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
681 pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
682 bridge_t *bridge = pcibr_soft->bs_base;
684 hwgraph_vertex_unref(pcibr_vhdl);
687 len = ctob(btoc(len)); /* Make len page aligned */
688 error = v_mapphys(vt, (void *) ((__psunsigned_t) bridge + off), len);
691 * If the offset being mapped corresponds to the flash prom
692 * base, and if the mapping succeeds, and if the user
693 * has requested the protections to be WRITE, enable the
694 * flash prom to be written.
696 * XXX- deprecate this in favor of using the
697 * real flash driver ...
699 if (IS_BRIDGE_SOFT(pcibr_soft) && !error &&
700 ((off == BRIDGE_EXTERNAL_FLASH) ||
701 (len > BRIDGE_EXTERNAL_FLASH))) {
705 * ensure that we write and read without any interruption.
706 * The read following the write is required for the Bridge war
/* write path honors the shub swapper; read-back works around the
 * bridge invalid-address bug */
710 if (io_get_sh_swapper(NASID_GET(bridge))) {
711 BRIDGE_REG_SET32((&bridge->b_wid_control)) |= __swab32(BRIDGE_CTRL_FLASH_WR_EN);
712 BRIDGE_REG_GET32((&bridge->b_wid_control)); /* inval addr bug war */
714 bridge->b_wid_control |= BRIDGE_CTRL_FLASH_WR_EN;
715 bridge->b_wid_control; /* inval addr bug war */
/*
 * pcibr_unmap: undo pcibr_map; if this was the last mapping and flash
 * writes were enabled, turn BRIDGE_CTRL_FLASH_WR_EN back off (PIC and
 * non-PIC register-access variants). NOTE(review): sparse extract --
 * interior lines are missing; code left byte-identical.
 */
724 pcibr_unmap(vertex_hdl_t dev, vhandl_t *vt)
726 vertex_hdl_t pcibr_vhdl = hwgraph_connectpt_get((vertex_hdl_t) dev);
727 pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
728 bridge_t *bridge = pcibr_soft->bs_base;
730 hwgraph_vertex_unref(pcibr_vhdl);
732 if ( IS_PIC_SOFT(pcibr_soft) ) {
734 * If flashprom write was enabled, disable it, as
735 * this is the last unmap.
737 if (IS_BRIDGE_SOFT(pcibr_soft) &&
738 (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN)) {
742 * ensure that we write and read without any interruption.
743 * The read following the write is required for the Bridge war
746 bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
747 bridge->b_wid_control; /* inval addr bug war */
/* non-PIC path: same logic via the 32-bit (possibly swapped) accessors */
752 if (io_get_sh_swapper(NASID_GET(bridge))) {
753 if (BRIDGE_REG_GET32((&bridge->b_wid_control)) & BRIDGE_CTRL_FLASH_WR_EN) {
757 * ensure that we write and read without any interruption.
758 * The read following the write is required for the Bridge war
761 BRIDGE_REG_SET32((&bridge->b_wid_control)) &= __swab32((unsigned int)~BRIDGE_CTRL_FLASH_WR_EN);
762 BRIDGE_REG_GET32((&bridge->b_wid_control)); /* inval addr bug war */
765 if (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN) {
769 * ensure that we write and read without any interruption.
770 * The read following the write is required for the Bridge war
773 bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
774 bridge->b_wid_control; /* inval addr bug war */
783 /* This is special case code used by grio. There are plans to make
784 * this a bit more general in the future, but till then this should
/*
 * pcibr_device_slot_get: walk up the hwgraph connect-point chain from a
 * device vertex until a vertex with pciio info is found, and return its
 * internal slot number (PCIIO_SLOT_NONE if none found). Each traversal
 * step drops the reference taken on the previous vertex.
 * NOTE(review): sparse extract -- interior lines are missing.
 */
788 pcibr_device_slot_get(vertex_hdl_t dev_vhdl)
790 char devname[MAXDEVNAME];
792 pciio_info_t pciio_info;
793 pciio_slot_t slot = PCIIO_SLOT_NONE;
795 vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
797 /* run back along the canonical path
798 * until we find a PCI connection point.
800 tdev = hwgraph_connectpt_get(dev_vhdl);
801 while (tdev != GRAPH_VERTEX_NONE) {
802 pciio_info = pciio_info_chk(tdev);
804 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
807 hwgraph_vertex_unref(tdev);
808 tdev = hwgraph_connectpt_get(tdev);
810 hwgraph_vertex_unref(tdev);
/* ioctl entry point; only the first signature line survives in this extract. */
817 pcibr_ioctl(vertex_hdl_t dev,
/* Return the pcibr-specific info for a PCI connection vertex; a thin
 * cast over the generic pciio_info_get(). */
828 pcibr_info_get(vertex_hdl_t vhdl)
830 return (pcibr_info_t) pciio_info_get(vhdl);
/*
 * pcibr_device_info_new: allocate and register pciio/pcibr info for one
 * device function on a slot, assign its default INTA-D interrupt bit
 * mapping, and record it in the slot's sparse per-function info array.
 * NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical.
 */
834 pcibr_device_info_new(
835 pcibr_soft_t pcibr_soft,
837 pciio_function_t rfunc,
838 pciio_vendor_id_t vendor,
839 pciio_device_id_t device)
841 pcibr_info_t pcibr_info;
842 pciio_function_t func;
/* PCIIO_FUNC_NONE is treated as function 0 for indexing purposes */
845 func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
848 * Create a pciio_info_s for this device. pciio_device_info_new()
849 * will set the c_slot (which is supposed to represent the external
850 * slot (i.e the slot number silk screened on the back of the I/O
851 * brick)). So for PIC we need to adjust this "internal slot" num
852 * passed into us, into its external representation. See comment
853 * for the PCIBR_DEVICE_TO_SLOT macro for more information.
856 pciio_device_info_new(&pcibr_info->f_c, pcibr_soft->bs_vhdl,
857 PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
858 rfunc, vendor, device);
859 pcibr_info->f_dev = slot;
861 /* Set PCI bus number */
862 pcibr_info->f_bus = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
864 if (slot != PCIIO_SLOT_NONE) {
867 * Currently favored mapping from PCI
868 * slot number and INTA/B/C/D to Bridge
869 * PCI Interrupt Bit Number:
881 * XXX- allow pcibr_hints to override default
882 * XXX- allow ADMIN to override pcibr_hints
/* default INTA..INTD -> bridge interrupt bit: (slot + 4*pin) mod 8 */
884 for (ibit = 0; ibit < 4; ++ibit)
885 pcibr_info->f_ibit[ibit] =
886 (slot + 4 * ibit) & 7;
889 * Record the info in the sparse func info space.
891 if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
892 pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
/* NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical, comments only added. */
899 * pcibr_device_unregister
900 * This frees up any hardware resources reserved for this PCI device
901 * and removes any PCI infrastructural information setup for it.
902 * This is usually used at the time of shutting down of the PCI card.
905 pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
907 pciio_info_t pciio_info;
908 vertex_hdl_t pcibr_vhdl;
910 pcibr_soft_t pcibr_soft;
912 int count_vchan0, count_vchan1;
917 pciio_info = pciio_info_get(pconn_vhdl);
919 pcibr_vhdl = pciio_info_master_get(pciio_info);
920 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
922 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
923 bridge = pcibr_soft->bs_base;
925 /* Clear all the hardware xtalk resources for this device */
926 xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
928 /* Flush all the rrbs */
929 pcibr_rrb_flush(pconn_vhdl);
932 * If the RRB configuration for this slot has changed, set it
933 * back to the boot-time default
935 if (pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] >= 0) {
937 s = pcibr_lock(pcibr_soft);
939 /* PIC NOTE: If this is a BRIDGE, VCHAN2 & VCHAN3 will be zero so
940 * no need to conditionalize this (ie. "if (IS_PIC_SOFT())" ).
/* return every RRB currently assigned to this slot to the reserve pool */
942 pcibr_soft->bs_rrb_res[slot] = pcibr_soft->bs_rrb_res[slot] +
943 pcibr_soft->bs_rrb_valid[slot][VCHAN0] +
944 pcibr_soft->bs_rrb_valid[slot][VCHAN1] +
945 pcibr_soft->bs_rrb_valid[slot][VCHAN2] +
946 pcibr_soft->bs_rrb_valid[slot][VCHAN3];
948 /* Free the rrbs allocated to this slot, both the normal & virtual */
949 do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);
951 count_vchan0 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0];
952 count_vchan1 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN1];
954 pcibr_unlock(pcibr_soft, s);
/* re-allocate the boot-time default RRB counts outside the lock */
956 pcibr_rrb_alloc(pconn_vhdl, &count_vchan0, &count_vchan1);
960 /* Flush the write buffers !! */
961 error_call = pcibr_wrb_flush(pconn_vhdl);
966 /* Clear the information specific to the slot */
967 error_call = pcibr_slot_info_free(pcibr_vhdl, slot);
/* NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical, comments only added. */
977 * pcibr_driver_reg_callback
978 * CDL will call this function for each device found in the PCI
979 * registry that matches the vendor/device IDs supported by
980 * the driver being registered. The device's connection vertex
981 * and the driver's attach function return status enable the
982 * slot's device status to be set.
985 pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
986 int key1, int key2, int error)
988 pciio_info_t pciio_info;
989 pcibr_info_t pcibr_info;
990 vertex_hdl_t pcibr_vhdl;
992 pcibr_soft_t pcibr_soft;
994 /* Do not set slot status for vendor/device ID wildcard drivers */
995 if ((key1 == -1) || (key2 == -1))
998 pciio_info = pciio_info_get(pconn_vhdl);
999 pcibr_info = pcibr_info_get(pconn_vhdl);
1001 pcibr_vhdl = pciio_info_master_get(pciio_info);
1002 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1004 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
1007 /* This may be a loadable driver so lock out any pciconfig actions */
1008 mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
/* record attach status; "error" presumably selects the INCMPLT vs CMPLT
 * branch below -- the conditional lines are missing from this extract */
1011 pcibr_info->f_att_det_error = error;
1013 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
1016 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
1018 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
1022 /* Release the bus lock */
1023 mrunlock(pcibr_soft->bs_bus_lock);
/* NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical. Mirror image of pcibr_driver_reg_callback for detach. */
1028 * pcibr_driver_unreg_callback
1029 * CDL will call this function for each device found in the PCI
1030 * registry that matches the vendor/device IDs supported by
1031 * the driver being unregistered. The device's connection vertex
1032 * and the driver's detach function return status enable the
1033 * slot's device status to be set.
1036 pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
1037 int key1, int key2, int error)
1039 pciio_info_t pciio_info;
1040 pcibr_info_t pcibr_info;
1041 vertex_hdl_t pcibr_vhdl;
1043 pcibr_soft_t pcibr_soft;
1045 /* Do not set slot status for vendor/device ID wildcard drivers */
1046 if ((key1 == -1) || (key2 == -1))
1049 pciio_info = pciio_info_get(pconn_vhdl);
1050 pcibr_info = pcibr_info_get(pconn_vhdl);
1052 pcibr_vhdl = pciio_info_master_get(pciio_info);
1053 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1055 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
1058 /* This may be a loadable driver so lock out any pciconfig actions */
1059 mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
1062 pcibr_info->f_att_det_error = error;
1064 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
1067 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
1069 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
1073 /* Release the bus lock */
1074 mrunlock(pcibr_soft->bs_bus_lock);
/* NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical, comments only added. */
1079 * build a convenience link path in the
1080 * form of ".../<iobrick>/bus/<busnum>"
1082 * returns 1 on success, 0 otherwise
1084 * depends on hwgraph separator == '/'
1087 pcibr_bus_cnvlink(vertex_hdl_t f_c)
1089 char dst[MAXDEVNAME];
1094 vertex_hdl_t nvtx, svtx;
1097 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
1099 if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
1102 /* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
1104 /* find the widget number */
1105 xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
/* +7 skips past "/xtalk/" to the widget-number digits */
1108 widgetnum = atoi(xp+7);
1109 if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
1112 /* remove "/pci/direct" from path */
1113 cp = strstr(dst, "/" EDGE_LBL_PCI "/" EDGE_LBL_DIRECT);
1118 /* get the vertex for the widget */
1119 if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))
1122 *xp = (char)NULL; /* remove "/xtalk/..." from path */
1124 /* dst example now == /hw/module/001c02/Pbrick */
1126 /* get the bus number */
1128 strcat(dst, EDGE_LBL_BUS);
1129 sprintf(pcibus, "%d", p_busnum[widgetnum]);
1131 /* link to bus to widget */
1132 rv = hwgraph_path_add(NULL, dp, &nvtx);
1133 if (GRAPH_SUCCESS == rv)
1134 rv = hwgraph_edge_add(nvtx, svtx, pcibus);
1136 return (rv == GRAPH_SUCCESS);
/* NOTE(review): sparse extract -- interior lines are missing; code left
 * byte-identical, comments only touched. */
1141 * pcibr_attach: called every time the crosstalk
1142 * infrastructure is asked to initialize a widget
1143 * that matches the part number we handed to the
1144 * registration routine above.
1148 pcibr_attach(vertex_hdl_t xconn_vhdl)
1152 vertex_hdl_t pcibr_vhdl;
1155 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
/* get a PIO-translated kernel address covering the bridge registers */
1157 bridge = (bridge_t *)
1158 xtalk_piotrans_addr(xconn_vhdl, NULL,
1159 0, sizeof(bridge_t), 0);
1161 * Create the vertex for the PCI bus, which we
1162 * will also use to hold the pcibr_soft and
1163 * which will be the "master" vertex for all the
1164 * pciio connection points we will hang off it.
1165 * This needs to happen before we call nic_bridge_vertex_info
1166 * as some of the *_vmc functions need access to the edges.
1168 * Opening this vertex will provide access to
1169 * the Bridge registers themselves.
1171 rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
1172 ASSERT(rc == GRAPH_SUCCESS);
1174 pciio_provider_register(pcibr_vhdl, &pcibr_provider);
1175 pciio_provider_startup(pcibr_vhdl);
/* all real work happens in pcibr_attach2 (bus 0, no softp returned) */
1177 return pcibr_attach2(xconn_vhdl, bridge, pcibr_vhdl, 0, NULL);
/*
 * pcibr_attach2: the real bridge-bus bring-up.  Creates the controller
 * char device, allocates and populates the per-bus soft state
 * (pcibr_soft), programs the bridge/PIC hardware registers (requester
 * ID, dirmap, page size, ATEs, interrupt enables plus several bringup
 * workarounds), builds the resource window maps and the "no-slot"
 * connection point, then walks every slot initializing and attaching
 * the devices found there.
 *
 * NOTE(review): this chunk is a sampled excerpt; braces, comment
 * delimiters and some statements between the numbered lines are not
 * visible.  All visible code is kept byte-identical; comments added
 * below only describe what the visible lines themselves do.
 */
1183 pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
1184 vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
1187 vertex_hdl_t ctlr_vhdl;
1190 pcibr_soft_t pcibr_soft;
1191 pcibr_info_t pcibr_info;
1192 xwidget_info_t info;
1193 xtalk_intr_t xtalk_intr;
1196 vertex_hdl_t noslot_conn;
1197 char devnm[MAXDEVNAME], *s;
1198 pcibr_hints_t pcibr_hints;
1199 uint64_t int_enable;
1200 bridgereg_t int_enable_32;
1201 picreg_t int_enable_64;
1202 unsigned rrb_fixed = 0;
1207 int fast_back_to_back_enable;
/* Forward declarations for klconfig brick-query helpers used below. */
1210 int iobrick_type_get_nasid(nasid_t nasid);
1211 int iobrick_module_get_nasid(nasid_t nasid);
1213 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1214 "pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
/* Expose this bus as a character device ("controller" edge) backed by
 * pcibr_fops; the vertex handle itself is the driver-private data. */
1217 ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
1219 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
1220 (struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
1221 ASSERT(ctlr_vhdl != NULL);
1224 * Get the hint structure; if some NIC callback
1225 * marked this vertex as "hands-off" then we
1226 * just return here, before doing anything else.
1228 pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
1230 if (pcibr_hints && pcibr_hints->ph_hands_off)
1231 return -1; /* generic operations disabled */
/* Read the widget ID register and record the ASIC revision on the
 * vertex so pcibr_asic_rev() can retrieve it later. */
1233 id = bridge->b_wid_id;
1234 rev = XWIDGET_PART_REV_NUM(id);
1236 hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
1239 * allocate soft state structure, fill in some
1240 * fields, and hook it up to our vertex.
1244 *ret_softp = pcibr_soft;
1245 BZERO(pcibr_soft, sizeof *pcibr_soft);
1246 pcibr_soft_set(pcibr_vhdl, pcibr_soft);
1247 pcibr_soft->bs_conn = xconn_vhdl;
1248 pcibr_soft->bs_vhdl = pcibr_vhdl;
1249 pcibr_soft->bs_base = bridge;
1250 pcibr_soft->bs_rev_num = rev;
1251 pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
1253 pcibr_soft->bs_min_slot = 0; /* lowest possible slot# */
1254 pcibr_soft->bs_max_slot = 7; /* highest possible slot# */
1255 pcibr_soft->bs_busnum = busnum;
/* NOTE(review): bridge type is hard-wired to PIC here, yet the switch
 * below still carries BRIDGE/XBRIDGE arms and later code tests
 * IS_PIC_SOFT()/IS_XBRIDGE_SOFT(); presumably type detection lived in
 * lines not visible in this excerpt — confirm against the full file. */
1256 pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
1257 switch(pcibr_soft->bs_bridge_type) {
1258 case PCIBR_BRIDGETYPE_BRIDGE:
1259 pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
1260 pcibr_soft->bs_bridge_mode = 0; /* speed is not available in bridge */
1262 case PCIBR_BRIDGETYPE_PIC:
/* PIC buses expose only 4 slots; derive bus mode (PCI-X active/speed)
 * from the 64-bit widget status register. */
1263 pcibr_soft->bs_min_slot = 0;
1264 pcibr_soft->bs_max_slot = 3;
1265 pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
1266 pcibr_soft->bs_bridge_mode =
1267 (((bridge->p_wid_stat_64 & PIC_STAT_PCIX_SPEED) >> 33) |
1268 ((bridge->p_wid_stat_64 & PIC_STAT_PCIX_ACTIVE) >> 33));
1270 /* We have to clear PIC's write request buffer to avoid parity
1271 * errors. See PV#854845.
1276 for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
1277 bridge->p_wr_req_lower[i] = 0;
1278 bridge->p_wr_req_upper[i] = 0;
1279 bridge->p_wr_req_parity[i] = 0;
1284 case PCIBR_BRIDGETYPE_XBRIDGE:
1285 pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
1286 pcibr_soft->bs_bridge_mode =
1287 ((bridge->b_wid_control & BRIDGE_CTRL_PCI_SPEED) >> 3);
1291 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1292 "pcibr_attach2: pcibr_soft=0x%x, mode=0x%x\n",
1293 pcibr_soft, pcibr_soft->bs_bridge_mode));
1294 pcibr_soft->bsi_err_intr = 0;
1296 /* Bridges up through REV C
1297 * are unable to set the direct
1298 * byteswappers to BYTE_STREAM.
1300 if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
1301 pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
1302 pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
1306 * link all the pcibr_soft structs
/* Debug-only global list of all bridge soft structs (PCIBR_SOFT_LIST). */
1312 self->bl_soft = pcibr_soft;
1313 self->bl_vhdl = pcibr_vhdl;
1314 self->bl_next = pcibr_list;
1317 #endif /* PCIBR_SOFT_LIST */
1320 * get the name of this bridge vertex and keep the info. Use this
1321 * only where it is really needed now: like error interrupts.
1323 s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
/* NOTE(review): kmalloc result is used by strcpy without a NULL check;
 * an allocation failure here would oops.  Flagging, not fixing, since
 * the surrounding error-handling convention is not visible. */
1324 pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
1325 strcpy(pcibr_soft->bs_name, s);
1327 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1328 "pcibr_attach2: %s ASIC: rev %s (code=0x%x)\n",
1329 IS_XBRIDGE_SOFT(pcibr_soft) ? "XBridge" :
1330 IS_PIC_SOFT(pcibr_soft) ? "PIC" : "Bridge",
1331 (rev == BRIDGE_PART_REV_A) ? "A" :
1332 (rev == BRIDGE_PART_REV_B) ? "B" :
1333 (rev == BRIDGE_PART_REV_C) ? "C" :
1334 (rev == BRIDGE_PART_REV_D) ? "D" :
1335 (rev == XBRIDGE_PART_REV_A) ? "A" :
1336 (rev == XBRIDGE_PART_REV_B) ? "B" :
1337 (IS_PIC_PART_REV_A(rev)) ? "A" :
1338 "unknown", rev, pcibr_soft->bs_name));
/* Cache our crosstalk widget identity: own xid, master vertex, and the
 * master's widget id (used for DMA direct-map targeting below). */
1340 info = xwidget_info_get(xconn_vhdl);
1341 pcibr_soft->bs_xid = xwidget_info_id_get(info);
1342 pcibr_soft->bs_master = xwidget_info_master_get(info);
1343 pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
1345 pcibr_soft->bs_first_slot = pcibr_soft->bs_min_slot;
1346 pcibr_soft->bs_last_slot = pcibr_soft->bs_max_slot;
1348 * Bridge can only reset slots 0, 1, 2, and 3. Ibrick internal
1349 * slots 4, 5, 6, and 7 must be reset as a group, so do not
1352 pcibr_soft->bs_last_reset = 3;
/* Narrow the usable slot range according to the physical brick type
 * (read from klconfig via the bridge's NASID). */
1354 nasid = NASID_GET(bridge);
1356 if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
1357 printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
1358 (unsigned int)pcibr_soft->bs_bricktype);
1360 pcibr_soft->bs_moduleid = iobrick_module_get_nasid(nasid);
1362 if (pcibr_soft->bs_bricktype > 0) {
1363 switch (pcibr_soft->bs_bricktype) {
1364 case MODULE_PXBRICK:
1365 case MODULE_IXBRICK:
1366 pcibr_soft->bs_first_slot = 0;
1367 pcibr_soft->bs_last_slot = 1;
1368 pcibr_soft->bs_last_reset = 1;
1370 /* If Bus 1 has IO9 then there are 4 devices in that bus. Note
1371 * we figure this out from klconfig since the kernel has yet to
1374 if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
1375 lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
1378 if (brd->brd_flags & LOCAL_MASTER_IO6) {
1379 pcibr_soft->bs_last_slot = 3;
1380 pcibr_soft->bs_last_reset = 3;
1382 brd = KLCF_NEXT(brd);
1387 pcibr_soft->bs_first_slot = 1;
1388 pcibr_soft->bs_last_slot = 2;
1389 pcibr_soft->bs_last_reset = 2;
1394 * Here's the current baseio layout for SN1 style systems:
1396 * 0 1 2 3 4 5 6 7 slot#
1398 * x scsi x x ioc3 usb x x O300 Ibrick
1400 * x == never occupied
1401 * E == external (add-in) slot
1404 pcibr_soft->bs_first_slot = 1; /* Ibrick first slot == 1 */
1405 if (pcibr_soft->bs_xid == 0xe) {
1406 pcibr_soft->bs_last_slot = 2;
1407 pcibr_soft->bs_last_reset = 2;
1409 pcibr_soft->bs_last_slot = 6;
1416 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1417 "pcibr_attach2: %cbrick, slots %d-%d\n",
1418 MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid),
1419 pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
1423 * Initialize bridge and bus locks
1425 spin_lock_init(&pcibr_soft->bs_lock);
1427 mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
1430 * If we have one, process the hints structure.
1433 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
1434 "pcibr_attach2: pcibr_hints=0x%x\n", pcibr_hints));
1436 rrb_fixed = pcibr_hints->ph_rrb_fixed;
1438 pcibr_soft->bs_rrb_fixed = rrb_fixed;
1440 if (pcibr_hints->ph_intr_bits) {
1441 pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
/* Apply per-slot host/guest hints: ph_host_slot is 1-based, 0 means
 * "no host", hence the -1 adjustment. */
1444 for (slot = pcibr_soft->bs_min_slot;
1445 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1446 int hslot = pcibr_hints->ph_host_slot[slot] - 1;
1449 pcibr_soft->bs_slot[slot].host_slot = slot;
1451 pcibr_soft->bs_slot[slot].has_host = 1;
1452 pcibr_soft->bs_slot[slot].host_slot = hslot;
1457 * Set-up initial values for state fields
1459 for (slot = pcibr_soft->bs_min_slot;
1460 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1461 pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
1462 pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
1463 pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
1464 pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
1465 pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
1466 pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
/* Initialize the 8 per-interrupt-bit wrapper structs that route bridge
 * interrupt bits to their (not-yet-connected) handlers. */
1469 for (ibit = 0; ibit < 8; ++ibit) {
1470 pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
1471 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
1472 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
1473 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat =
1474 &(bridge->b_int_status);
1475 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
1476 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
1477 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
1478 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
1482 * connect up our error handler. PIC has 2 busses (thus resulting in 2
1483 * pcibr_soft structs under 1 widget), so only register a xwidget error
1484 * handler for PIC's bus0. NOTE: for PIC pcibr_error_handler_wrapper()
1485 * is a wrapper routine we register that will call the real error handler
1486 * pcibr_error_handler() with the correct pcibr_soft struct.
1488 if (IS_PIC_SOFT(pcibr_soft)) {
1490 xwidget_error_register(xconn_vhdl, pcibr_error_handler_wrapper, pcibr_soft);
1493 xwidget_error_register(xconn_vhdl, pcibr_error_handler, pcibr_soft);
1497 * Initialize various Bridge registers.
1501 * On pre-Rev.D bridges, set the PCI_RETRY_CNT
1502 * to zero to avoid dropping stores. (#475347)
1504 if (rev < BRIDGE_PART_REV_D)
1505 bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
1508 * Clear all pending interrupts.
1510 bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
1512 /* Initialize some PIC specific registers. */
1513 if (IS_PIC_SOFT(pcibr_soft)) {
1514 picreg_t pic_ctrl_reg = bridge->p_wid_control_64;
1516 /* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
1517 pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
1518 pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
1519 pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
1520 pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
/* Keep coherent, strictly-ordered transactions (no-snoop and relaxed
 * ordering both cleared). */
1522 pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
1523 pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
1525 /* enable parity checking on PICs internal RAM */
1526 pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
1527 pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
1528 /* PIC BRINGUP WAR (PV# 862253): dont enable write request
1531 if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
1532 pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
1535 bridge->p_wid_control_64 = pic_ctrl_reg;
1539 * Until otherwise set up,
1540 * assume all interrupts are
1541 * from slot 7(Bridge/Xbridge) or 3(PIC).
1542 * XXX. Not sure why we're doing this, made change for PIC
1543 * just to avoid setting reserved bits.
1545 if (IS_PIC_SOFT(pcibr_soft))
1546 bridge->b_int_device = (uint32_t) 0x006db6db;
1548 bridge->b_int_device = (uint32_t) 0xffffffff;
1556 int num_entries = 0;
1561 /* Set the Bridge's 32-bit PCI to XTalk
1562 * Direct Map register to the most useful
1563 * value we can determine. Note that we
1564 * must use a single xid for all of:
1565 * direct-mapped 32-bit DMA accesses
1566 * direct-mapped 64-bit DMA accesses
1567 * DMA accesses through the PMU
1569 * This is the only way to guarantee that
1570 * completion interrupts will reach a CPU
1571 * after all DMA data has reached memory.
1572 * (Of course, there may be a few special
1573 * drivers/controlers that explicitly manage
1574 * this ordering problem.)
1577 cnodeid = 0; /* default node id */
1578 nasid = COMPACT_TO_NASID_NODEID(cnodeid);
1579 paddr = NODE_OFFSET(nasid) + 0;
1581 /* currently, we just assume that if we ask
1582 * for a DMA mapping to "zero" the XIO
1583 * host will transmute this into a request
1584 * for the lowest hunk of memory.
1586 xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
1589 if (xbase != XIO_NOWHERE) {
/* A "packed" XIO address carries its own target port; otherwise fall
 * back to our master's widget id. */
1590 if (XIO_PACKED(xbase)) {
1591 xport = XIO_PORT(xbase);
1592 xbase = XIO_ADDR(xbase);
1594 xport = pcibr_soft->bs_mxid;
1596 offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
1597 xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
1599 dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
1602 dirmap |= BRIDGE_DIRMAP_OFF & xbase;
1603 else if (offset >= (512 << 20))
1604 dirmap |= BRIDGE_DIRMAP_ADD512;
1606 bridge->b_dir_map = dirmap;
1609 * Set bridge's idea of page size according to the system's
1610 * idea of "IO page size". TBD: The idea of IO page size
1611 * should really go away.
1614 * ensure that we write and read without any interruption.
1615 * The read following the write is required for the Bridge war
1617 spl_level = splhi();
1618 #if IOPGSIZE == 4096
1619 if (IS_PIC_SOFT(pcibr_soft)) {
1620 bridge->p_wid_control_64 &= ~BRIDGE_CTRL_PAGE_SIZE;
1622 bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
1624 #elif IOPGSIZE == 16384
1625 if (IS_PIC_SOFT(pcibr_soft)) {
1626 bridge->p_wid_control_64 |= BRIDGE_CTRL_PAGE_SIZE;
1628 bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
/* Deliberate compile-time error for unsupported IOPGSIZE values. */
1631 <<<Unable to deal with IOPGSIZE >>>;
1633 bridge->b_wid_control; /* inval addr bug war */
1636 /* Initialize internal mapping entries */
1637 for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
1638 bridge->b_int_ate_ram[entry].wr = 0;
1642 * Determine if there's external mapping SSRAM on this
1643 * bridge. Set up Bridge control register appropriately,
1644 * inititlize SSRAM, and set software up to manage RAM
1645 * entries as an allocatable resource.
1647 * Currently, we just use the rm* routines to manage ATE
1648 * allocation. We should probably replace this with a
1649 * Best Fit allocator.
1651 * For now, if we have external SSRAM, avoid using
1652 * the internal ssram: we can't turn PREFETCH on
1653 * when we use the internal SSRAM; and besides,
1654 * this also guarantees that no allocation will
1655 * straddle the internal/external line, so we
1656 * can increment ATE write addresses rather than
1657 * recomparing against BRIDGE_INTERNAL_ATES every
1661 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
1664 num_entries = pcibr_init_ext_ate_ram(bridge);
1666 /* we always have 128 ATEs (512 for Xbridge) inside the chip
1667 * even if disabled for debugging.
1669 pcibr_soft->bs_int_ate_map = rmallocmap(pcibr_soft->bs_int_ate_size);
1670 pcibr_ate_free(pcibr_soft, 0, pcibr_soft->bs_int_ate_size);
1672 if (num_entries > pcibr_soft->bs_int_ate_size) {
1673 #if PCIBR_ATE_NOTBOTH /* for debug -- forces us to use external ates */
1674 printk("pcibr_attach: disabling internal ATEs.\n");
1675 pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
1677 pcibr_soft->bs_ext_ate_map = rmallocmap(num_entries);
1678 pcibr_ate_free(pcibr_soft, pcibr_soft->bs_int_ate_size,
1679 num_entries - pcibr_soft->bs_int_ate_size);
1681 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_vhdl,
1682 "pcibr_attach2: %d ATEs, %d internal & %d external\n",
1683 num_entries ? num_entries : pcibr_soft->bs_int_ate_size,
1684 pcibr_soft->bs_int_ate_size,
1685 num_entries ? num_entries-pcibr_soft->bs_int_ate_size : 0));
1693 * now figure the *real* xtalk base address
1694 * that dirmap sends us to.
1696 dirmap = bridge->b_dir_map;
1697 if (dirmap & BRIDGE_DIRMAP_OFF)
1698 xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
1699 << BRIDGE_DIRMAP_OFF_ADDRSHFT;
1700 else if (dirmap & BRIDGE_DIRMAP_ADD512)
1705 pcibr_soft->bs_dir_xbase = xbase;
1707 /* it is entirely possible that we may, at this
1708 * point, have our dirmap pointing somewhere
1709 * other than our "master" port.
1711 pcibr_soft->bs_dir_xport =
1712 (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
1715 /* pcibr sources an error interrupt;
1716 * figure out where to send it.
1718 * If any interrupts are enabled in bridge,
1719 * then the prom set us up and our interrupt
1720 * has already been reconnected in mlreset
1723 * Need to set the D_INTR_ISERR flag
1724 * in the dev_desc used for allocating the
1725 * error interrupt, so our interrupt will
1726 * be properly routed and prioritized.
1728 * If our crosstalk provider wants to
1729 * fix widget error interrupts to specific
1730 * destinations, D_INTR_ISERR is how it
1734 xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
/* Re-target the allocated hub interrupt onto the fixed SGI_PCIBR_ERROR
 * vector, releasing the level the allocator reserved. */
1736 int irq = ((hub_intr_t)xtalk_intr)->i_bit;
1737 int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
1739 intr_unreserve_level(cpu, irq);
1740 ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
1742 ASSERT(xtalk_intr != NULL);
1744 pcibr_soft->bsi_err_intr = xtalk_intr;
1747 * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
1748 * in order to work around some addressing limitations. In order
1749 * for that fire wall to work properly, we need to make sure we
1750 * start from a known clean state.
1752 pcibr_clearwidint(bridge);
1754 xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
1755 (intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
/* NOTE(review): request_irq return value is ignored; a failure would
 * leave bridge errors unserviced silently. */
1757 request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
1758 (intr_arg_t) pcibr_soft);
1760 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
1761 "pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
1762 bridge->b_wid_int_upper, bridge->b_wid_int_lower));
1765 * now we can start handling error interrupts;
1766 * enable all of them.
1767 * NOTE: some PCI ints may already be enabled.
1769 /* We read the INT_ENABLE register as a 64bit picreg_t for PIC and a
1770 * 32bit bridgereg_t for BRIDGE, but always process the result as a
1771 * 64bit value so the code can be "common" for both PIC and BRIDGE...
1773 if (IS_PIC_SOFT(pcibr_soft)) {
1774 int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
1775 int_enable = (uint64_t)int_enable_64;
1777 int_enable = (uint64_t)0x7ffffeff7ffffeff;
1780 int_enable_32 = bridge->b_int_enable | (BRIDGE_ISR_ERRORS & 0xffffffff);
1781 int_enable = ((uint64_t)int_enable_32 & 0xffffffff);
1783 int_enable = (uint64_t)0x7ffffeff;
1788 #if BRIDGE_ERROR_INTR_WAR
1789 if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
1791 * We commonly get master timeouts when talking to ql.
1792 * We also see RESP_XTALK_ERROR and LLP_TX_RETRY interrupts.
1793 * Insure that these are all disabled for now.
1795 int_enable &= ~(BRIDGE_IMR_PCI_MST_TIMEOUT |
1796 BRIDGE_ISR_RESP_XTLK_ERR |
1797 BRIDGE_ISR_LLP_TX_RETRY);
1799 if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
1800 int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
1802 #endif /* BRIDGE_ERROR_INTR_WAR */
1804 #ifdef QL_SCSI_CTRL_WAR /* for IP30 only */
1805 /* Really a QL rev A issue, but all newer hearts have newer QLs.
1806 * Forces all IO6/MSCSI to be new.
1808 if (heart_rev() == HEART_REV_A)
1809 int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
1812 #ifdef BRIDGE1_TIMEOUT_WAR
1813 if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
1815 * Turn off these interrupts. They can't be trusted in bridge 1
1817 int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT |
1818 BRIDGE_IMR_UNEXP_RESP);
1822 /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
1823 * locked out to be freed up sooner (by timing out) so that the
1824 * read tnums are never completely used up.
1826 if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
1827 int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
1828 int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;
1830 bridge->b_wid_req_timeout = 0x750;
1834 * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
1835 * RRB0, RRB8, RRB1, and RRB9. Assign them to DEVICE[2|3]--VCHAN3
1836 * so they are not used
1838 if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
1839 bridge->b_even_resp |= 0x000f000f;
1840 bridge->b_odd_resp |= 0x000f000f;
/* Commit the computed enable mask through the width-appropriate
 * register, then flush the PIO pipe with a tflush read. */
1843 if (IS_PIC_SOFT(pcibr_soft)) {
1844 bridge->p_int_enable_64 = (picreg_t)int_enable;
1846 bridge->b_int_enable = (bridgereg_t)int_enable;
1848 bridge->b_int_mode = 0; /* do not send "clear interrupt" packets */
1850 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
1853 * Depending on the rev of bridge, disable certain features.
1854 * Easiest way seems to be to force the PCIBR_NOwhatever
1855 * flag to be on for all DMA calls, which overrides any
1856 * PCIBR_whatever flag or even the setting of whatever
1857 * from the PCIIO_DMA_class flags (or even from the other
1858 * PCIBR flags, since NO overrides YES).
1860 pcibr_soft->bs_dma_flags = 0;
1863 * Always completely disabled for REV.A;
1864 * at "pcibr_prefetch_enable_rev", anyone
1865 * asking for PCIIO_PREFETCH gets it.
1866 * Between these two points, you have to ask
1867 * for PCIBR_PREFETCH, which promises that
1868 * your driver knows about known Bridge WARs.
1870 if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
1871 pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
1872 else if (pcibr_soft->bs_rev_num <
1873 (BRIDGE_WIDGET_PART_NUM << 4))
1874 pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
1876 /* WRITE_GATHER: Disabled */
1877 if (pcibr_soft->bs_rev_num <
1878 (BRIDGE_WIDGET_PART_NUM << 4))
1879 pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
1881 /* PIC only supports 64-bit direct mapping in PCI-X mode. Since
1882 * all PCI-X devices that initiate memory transactions must be
1883 * capable of generating 64-bit addressed, we force 64-bit DMAs.
1885 if (IS_PCIX(pcibr_soft)) {
1886 pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
/* Build the PIO window resource maps (I/O, small-window, memory),
 * reserving the 16MB region the PROM pre-assigned at 0x0ws00000. */
1891 pciio_win_map_t win_map_p;
1892 iopaddr_t prom_base_addr = pcibr_soft->bs_xid << 24;
1893 int prom_base_size = 0x1000000;
1894 iopaddr_t prom_base_limit = prom_base_addr + prom_base_size;
1896 /* Allocate resource maps based on bus page size; for I/O and memory
1897 * space, free all pages except those in the base area and in the
1898 * range set by the PROM.
1900 * PROM creates BAR addresses in this format: 0x0ws00000 where w is
1901 * the widget number and s is the device register offset for the slot.
1904 win_map_p = &pcibr_soft->bs_io_win_map;
1905 pciio_device_win_map_new(win_map_p,
1906 PCIBR_BUS_IO_MAX + 1,
1908 pciio_device_win_populate(win_map_p,
1910 prom_base_addr - PCIBR_BUS_IO_BASE);
1911 pciio_device_win_populate(win_map_p,
1913 (PCIBR_BUS_IO_MAX + 1) - prom_base_limit);
1915 win_map_p = &pcibr_soft->bs_swin_map;
1916 pciio_device_win_map_new(win_map_p,
1917 PCIBR_BUS_SWIN_MAX + 1,
1918 PCIBR_BUS_SWIN_PAGE);
1919 pciio_device_win_populate(win_map_p,
1920 PCIBR_BUS_SWIN_BASE,
1921 (PCIBR_BUS_SWIN_MAX + 1) - PCIBR_BUS_SWIN_PAGE);
1923 win_map_p = &pcibr_soft->bs_mem_win_map;
1924 pciio_device_win_map_new(win_map_p,
1925 PCIBR_BUS_MEM_MAX + 1,
1926 PCIBR_BUS_MEM_PAGE);
1927 pciio_device_win_populate(win_map_p,
1929 prom_base_addr - PCIBR_BUS_MEM_BASE);
1930 pciio_device_win_populate(win_map_p,
1932 (PCIBR_BUS_MEM_MAX + 1) - prom_base_limit);
1935 /* build "no-slot" connection point
1937 pcibr_info = pcibr_device_info_new
1938 (pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
1939 PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
1940 noslot_conn = pciio_device_info_register
1941 (pcibr_vhdl, &pcibr_info->f_c);
1943 /* Remember the no slot connection point info for tearing it
1944 * down during detach.
1946 pcibr_soft->bs_noslot_conn = noslot_conn;
1947 pcibr_soft->bs_noslot_info = pcibr_info;
/* NOTE(review): unconditionally enabled here; presumably the actual
 * per-device capability check happens in code not visible in this
 * excerpt — confirm against the full file. */
1949 fast_back_to_back_enable = 1;
1953 if (fast_back_to_back_enable) {
1955 * All devices on the bus are capable of fast back to back, so
1956 * we need to set the fast back to back bit in all devices on
1957 * the bus that are capable of doing such accesses.
/* Per-slot bring-up: discover, carve address space, program device
 * registers, then (PCI-X only) distribute the read-buffer RBARs. */
1962 for (slot = pcibr_soft->bs_min_slot;
1963 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1964 /* Find out what is out there */
1965 (void)pcibr_slot_info_init(pcibr_vhdl,slot);
1967 for (slot = pcibr_soft->bs_min_slot;
1968 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1969 /* Set up the address space for this slot in the PCI land */
1970 (void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
1972 for (slot = pcibr_soft->bs_min_slot;
1973 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1974 /* Setup the device register */
1975 (void)pcibr_slot_device_init(pcibr_vhdl, slot);
1977 if (IS_PCIX(pcibr_soft)) {
1978 pcibr_soft->bs_pcix_rbar_inuse = 0;
1979 pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
1980 pcibr_soft->bs_pcix_rbar_percent_allowed =
1981 pcibr_pcix_rbars_calc(pcibr_soft);
1983 for (slot = pcibr_soft->bs_min_slot;
1984 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1985 /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
1986 (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
1989 /* Set up convenience links */
1990 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
1991 pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);
1993 for (slot = pcibr_soft->bs_min_slot;
1994 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1995 /* Setup host/guest relations */
1996 (void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
1998 /* Handle initial RRB management for Bridge and Xbridge */
1999 pcibr_initial_rrb(pcibr_vhdl,
2000 pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
2002 { /* Before any drivers get called that may want to re-allocate
2003 * RRB's, let's get some special cases pre-allocated. Drivers
2004 * may override these pre-allocations, but by doing pre-allocations
2005 * now we're assured not to step all over what the driver intended.
2007 * Note: Someday this should probably be moved over to pcibr_rrb.c
2010 * Each Pbrick PCI bus only has slots 1 and 2. Similarly for
2011 * widget 0xe on Ibricks. Allocate RRB's accordingly.
2013 if (pcibr_soft->bs_bricktype > 0) {
2014 switch (pcibr_soft->bs_bricktype) {
2016 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2017 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
2020 /* port 0xe on the Ibrick only has slots 1 and 2 */
2021 if (pcibr_soft->bs_xid == 0xe) {
2022 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2023 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
2026 /* allocate one RRB for the serial port */
2027 do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
2030 case MODULE_PXBRICK:
2031 case MODULE_IXBRICK:
2033 * If the IO9 is in the PXBrick (bus1, slot1) allocate
2034 * RRBs to all the devices
/* 0x10A9/0x100A is the SGI IO9 base-I/O card's vendor/device ID. */
2036 if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
2037 (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
2038 (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
2039 do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
2040 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
2041 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
2042 do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
2044 do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
2045 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2052 if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
2053 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2056 } /* OK Special RRB allocations are done. */
/* Finally hand each slot (and the no-slot vertex) to the generic
 * pciio device-attach machinery so drivers bind. */
2058 for (slot = pcibr_soft->bs_min_slot;
2059 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
2060 /* Call the device attach */
2061 (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
2063 pciio_device_attach(noslot_conn, (int)0);
2070 * Detach the bridge device from the hwgraph after cleaning out all the
2071 * underlying vertices.
/*
 * Teardown mirror of pcibr_attach2: quiesce interrupts, detach each
 * slot, release soft-state resources, and unhook the hwgraph edges.
 * NOTE(review): sampled excerpt — braces/returns between the numbered
 * lines are not visible; code kept byte-identical.
 */
2075 pcibr_detach(vertex_hdl_t xconn)
2078 vertex_hdl_t pcibr_vhdl;
2079 pcibr_soft_t pcibr_soft;
2083 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));
2085 /* Get the bridge vertex from its xtalk connection point */
2086 if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
2089 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
2090 bridge = pcibr_soft->bs_base;
/* Mask every bridge interrupt under the bus lock before tearing
 * anything down (64-bit enable reg for PIC, 32-bit otherwise). */
2093 s = pcibr_lock(pcibr_soft);
2094 /* Disable the interrupts from the bridge */
2095 if (IS_PIC_SOFT(pcibr_soft)) {
2096 bridge->p_int_enable_64 = 0;
2098 bridge->b_int_enable = 0;
2100 pcibr_unlock(pcibr_soft, s);
2102 /* Detach all the PCI devices talking to this bridge */
2103 for (slot = pcibr_soft->bs_min_slot;
2104 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
2105 pcibr_slot_detach(pcibr_vhdl, slot, 0, (char *)NULL, (int *)NULL);
2108 /* Unregister the no-slot connection point */
2109 pciio_device_info_unregister(pcibr_vhdl,
2110 &(pcibr_soft->bs_noslot_info->f_c));
/* Release soft-state resources allocated in attach: lock, the
 * kmalloc'ed vertex name, and both ATE allocator maps. */
2112 spin_lock_destroy(&pcibr_soft->bs_lock);
2113 kfree(pcibr_soft->bs_name);
2115 /* Error handler gets unregistered when the widget info is
2118 /* Free the soft ATE maps */
2119 if (pcibr_soft->bs_int_ate_map)
2120 rmfreemap(pcibr_soft->bs_int_ate_map);
2121 if (pcibr_soft->bs_ext_ate_map)
2122 rmfreemap(pcibr_soft->bs_ext_ate_map);
2124 /* Disconnect the error interrupt and free the xtalk resources
2125 * associated with it.
2127 xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
2128 xtalk_intr_free(pcibr_soft->bsi_err_intr);
2130 /* Clear the software state maintained by the bridge driver for this
2134 /* Remove the Bridge revision labelled info */
2135 (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
2136 /* Remove the character device associated with this bridge */
2137 (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
2138 /* Remove the PCI bridge vertex */
2139 (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
/*
 * Look up the bridge ASIC revision that pcibr_attach2 stored on the
 * bus vertex (INFO_LBL_PCIBR_ASIC_REV), reached via the device's
 * "master" edge.  NOTE(review): sampled excerpt — the return
 * statements between the numbered lines are not visible here.
 */
2145 pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
2147 vertex_hdl_t pcibr_vhdl;
2149 arbitrary_info_t ainfo;
2151 if (GRAPH_SUCCESS !=
2152 hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
2155 tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
2158 * Any hwgraph function that returns a vertex handle will implicity
2159 * increment that vertex's reference count. The caller must explicity
2160 * decrement the vertex's referece count after the last reference to
2163 * Decrement reference count incremented by call to hwgraph_traverse().
2166 hwgraph_vertex_unref(pcibr_vhdl);
2168 if (tmp_vhdl != GRAPH_SUCCESS)
/*
 * Flush the bridge's write-gather buffer for the slot behind the given
 * PCI connection point: resolve the pciio info to our pcibr_soft and
 * slot number, then delegate to pcibr_device_write_gather_flush().
 */
2174 pcibr_write_gather_flush(vertex_hdl_t pconn_vhdl)
2176 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2177 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2179 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2180 pcibr_device_write_gather_flush(pcibr_soft, slot);
2184 /* =====================================================================
2189 pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
2191 pciio_space_t space,
2196 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2197 pciio_info_t pciio_info = &pcibr_info->f_c;
2198 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2199 bridge_t *bridge = pcibr_soft->bs_base;
2201 unsigned bar; /* which BASE reg on device is decoding */
2202 iopaddr_t xio_addr = XIO_NOWHERE;
2203 iopaddr_t base; /* base of devio(x) mapped area on PCI */
2204 iopaddr_t limit; /* base of devio(x) mapped area on PCI */
2206 pciio_space_t wspace; /* which space device is decoding */
2207 iopaddr_t wbase; /* base of device decode on PCI */
2208 size_t wsize; /* size of device decode on PCI */
2210 int try; /* DevIO(x) window scanning order control */
2211 int maxtry, halftry;
2212 int win; /* which DevIO(x) window is being used */
2213 pciio_space_t mspace; /* target space for devio(x) register */
2214 iopaddr_t mbase; /* base of devio(x) mapped area on PCI */
2215 size_t msize; /* size of devio(x) mapped area on PCI */
2216 size_t mmask; /* addr bits stored in Device(x) */
2221 s = pcibr_lock(pcibr_soft);
2223 if (pcibr_soft->bs_slot[slot].has_host) {
2224 slot = pcibr_soft->bs_slot[slot].host_slot;
2225 pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
2228 * Special case for dual-slot pci devices such as ioc3 on IP27
2229 * baseio. In these cases, pconn_vhdl should never be for a pci
2230 * function on a subordiate PCI bus, so we can safely reset pciio_info
2231 * to be the info struct embedded in pcibr_info. Failure to do this
2232 * results in using a bogus pciio_info_t for calculations done later
2236 pciio_info = &pcibr_info->f_c;
2238 if (space == PCIIO_SPACE_NONE)
2241 if (space == PCIIO_SPACE_CFG) {
2243 * Usually, the first mapping
2244 * established to a PCI device
2245 * is to its config space.
2247 * In any case, we definitely
2248 * do NOT need to worry about
2249 * PCI BASE registers, and
2250 * MUST NOT attempt to point
2251 * the DevIO(x) window at
2254 if (((flags & PCIIO_BYTE_STREAM) == 0) &&
2255 ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
2256 xio_addr = pci_addr + PCIBR_TYPE0_CFG_DEV(pcibr_soft, slot);
2260 if (space == PCIIO_SPACE_ROM) {
2261 /* PIO to the Expansion Rom.
2262 * Driver is responsible for
2263 * enabling and disabling
2266 wbase = pciio_info->c_rbase;
2267 wsize = pciio_info->c_rsize;
2270 * While the driver should know better
2271 * than to attempt to map more space
2272 * than the device is decoding, he might
2273 * do it; better to bail out here.
2275 if ((pci_addr + req_size) > wsize)
2279 space = PCIIO_SPACE_MEM;
2282 * reduce window mappings to raw
2283 * space mappings (maybe allocating
2284 * windows), and try for DevIO(x)
2285 * usage (setting it if it is available).
2287 bar = space - PCIIO_SPACE_WIN0;
2289 wspace = pciio_info->c_window[bar].w_space;
2290 if (wspace == PCIIO_SPACE_NONE)
2293 /* get PCI base and size */
2294 wbase = pciio_info->c_window[bar].w_base;
2295 wsize = pciio_info->c_window[bar].w_size;
2298 * While the driver should know better
2299 * than to attempt to map more space
2300 * than the device is decoding, he might
2301 * do it; better to bail out here.
2303 if ((pci_addr + req_size) > wsize)
2306 /* shift from window relative to
2307 * decoded space relative.
2314 /* Scan all the DevIO(x) windows twice looking for one
2315 * that can satisfy our request. The first time through,
2316 * only look at assigned windows; the second time, also
2317 * look at PCIIO_SPACE_NONE windows. Arrange the order
2318 * so we always look at our own window first.
2320 * We will not attempt to satisfy a single request
2321 * by concatenating multiple windows.
2323 maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
2324 halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
2325 for (try = 0; try < maxtry; ++try) {
2329 /* calculate win based on slot, attempt, and max possible
2331 win = (try + slot) % PCIBR_NUM_SLOTS(pcibr_soft);
2333 /* If this DevIO(x) mapping area can provide
2334 * a mapping to this address, use it.
2336 msize = (win < 2) ? 0x200000 : 0x100000;
2338 if (space != PCIIO_SPACE_IO)
2339 mmask &= 0x3FFFFFFF;
2341 offset = pci_addr & (msize - 1);
2343 /* If this window can't possibly handle that request,
2344 * go on to the next window.
2346 if (((pci_addr & (msize - 1)) + req_size) > msize)
2349 devreg = pcibr_soft->bs_slot[win].bss_device;
2351 /* Is this window "nailed down"?
2352 * If not, maybe we can use it.
2353 * (only check this the second time through)
2355 mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
2356 if ((try > halftry) && (mspace == PCIIO_SPACE_NONE)) {
2358 /* If this is the primary DevIO(x) window
2359 * for some other device, skip it.
2361 if ((win != slot) &&
2362 (PCIIO_VENDOR_ID_NONE !=
2363 pcibr_soft->bs_slot[win].bss_vendor_id))
2366 /* It's a free window, and we fit in it.
2367 * Set up Device(win) to our taste.
2369 mbase = pci_addr & mmask;
2371 /* check that we would really get from
2374 if ((mbase | offset) != pci_addr)
2377 devreg &= ~BRIDGE_DEV_OFF_MASK;
2378 if (space != PCIIO_SPACE_IO)
2379 devreg |= BRIDGE_DEV_DEV_IO_MEM;
2381 devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
2382 devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
2384 /* default is WORD_VALUES.
2385 * if you specify both,
2386 * operation is undefined.
2388 if (flags & PCIIO_BYTE_STREAM)
2389 devreg |= BRIDGE_DEV_DEV_SWAP;
2391 devreg &= ~BRIDGE_DEV_DEV_SWAP;
2393 if (pcibr_soft->bs_slot[win].bss_device != devreg) {
2394 if ( IS_PIC_SOFT(pcibr_soft) ) {
2395 bridge->b_device[win].reg = devreg;
2396 pcibr_soft->bs_slot[win].bss_device = devreg;
2397 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
2400 if (io_get_sh_swapper(NASID_GET(bridge))) {
2401 BRIDGE_REG_SET32((&bridge->b_device[win].reg)) = __swab32(devreg);
2402 pcibr_soft->bs_slot[win].bss_device = devreg;
2403 BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
2405 bridge->b_device[win].reg = devreg;
2406 pcibr_soft->bs_slot[win].bss_device = devreg;
2407 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
2412 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
2413 "pcibr_addr_pci_to_xio: Device(%d): %x\n",
2414 win, devreg, device_bits));
2417 pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
2418 pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
2419 xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
2421 /* Increment this DevIO's use count */
2422 pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
2424 /* Save the DevIO register index used to access this BAR */
2426 pcibr_info->f_window[bar].w_devio_index = win;
2429 * The kernel only allows functions to have so many variable args,
2430 * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printk
2431 * arguments fails so sprintf() it into a temporary string.
2433 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2435 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
2436 "slot %d allocates DevIO(%d) Device(%d) set to %x\n",
2437 space, space_desc, pci_addr, pci_addr + req_size - 1,
2438 slot, win, win, devreg, device_bits);
2440 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
2441 "slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
2442 (unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
2443 (unsigned int)slot, win, win, (unsigned long)devreg);
2445 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2448 } /* endif DevIO(x) not pointed */
2449 mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
2451 /* Now check for request incompat with DevIO(x)
2453 if ((mspace != space) ||
2454 (pci_addr < mbase) ||
2455 ((pci_addr + req_size) > (mbase + msize)) ||
2456 ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
2457 (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
2460 /* DevIO(x) window is pointed at PCI space
2461 * that includes our target. Calculate the
2462 * final XIO address, release the lock and
2465 xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
2467 /* Increment this DevIO's use count */
2468 pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
2470 /* Save the DevIO register index used to access this BAR */
2472 pcibr_info->f_window[bar].w_devio_index = win;
2474 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2476 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
2477 "slot %d uses DevIO(%d)\n", space, space_desc, pci_addr,
2478 pci_addr + req_size - 1, slot, win);
2480 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2487 * Accesses to device decode
2488 * areas that do not fit
2489 * within the DevIO(x) space are
2490 * modified to be accesses via
2491 * the direct mapping areas.
2493 * If necessary, drivers can
2494 * explicitly ask for mappings
2495 * into these address spaces,
2496 * but this should never be needed.
2498 case PCIIO_SPACE_MEM: /* "mem space" */
2499 case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
2500 if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
2501 base = PICBRIDGE0_PCI_MEM32_BASE;
2502 limit = PICBRIDGE0_PCI_MEM32_LIMIT;
2503 } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
2504 base = PICBRIDGE1_PCI_MEM32_BASE;
2505 limit = PICBRIDGE1_PCI_MEM32_LIMIT;
2506 } else { /* Bridge/Xbridge */
2507 base = BRIDGE_PCI_MEM32_BASE;
2508 limit = BRIDGE_PCI_MEM32_LIMIT;
2511 if ((pci_addr + base + req_size - 1) <= limit)
2512 xio_addr = pci_addr + base;
2515 case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
2516 if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
2517 base = PICBRIDGE0_PCI_MEM64_BASE;
2518 limit = PICBRIDGE0_PCI_MEM64_LIMIT;
2519 } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
2520 base = PICBRIDGE1_PCI_MEM64_BASE;
2521 limit = PICBRIDGE1_PCI_MEM64_LIMIT;
2522 } else { /* Bridge/Xbridge */
2523 base = BRIDGE_PCI_MEM64_BASE;
2524 limit = BRIDGE_PCI_MEM64_LIMIT;
2527 if ((pci_addr + base + req_size - 1) <= limit)
2528 xio_addr = pci_addr + base;
2531 case PCIIO_SPACE_IO: /* "i/o space" */
2533 * PIC bridges do not support big-window aliases into PCI I/O space
2535 if (IS_PIC_SOFT(pcibr_soft)) {
2536 xio_addr = XIO_NOWHERE;
2540 /* Bridge Hardware Bug WAR #482741:
2541 * The 4G area that maps directly from
2542 * XIO space to PCI I/O space is busted
2543 * until Bridge Rev D.
2545 if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
2546 ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
2547 BRIDGE_PCI_IO_LIMIT))
2548 xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
2552 /* Check that "Direct PIO" byteswapping matches,
2553 * try to change it if it does not.
2555 if (xio_addr != XIO_NOWHERE) {
2556 unsigned bst; /* nonzero to set bytestream */
2557 unsigned *bfp; /* addr of record of how swapper is set */
2558 unsigned swb; /* which control bit to mung */
2559 unsigned bfo; /* current swapper setting */
2560 unsigned bfn; /* desired swapper setting */
2562 bfp = ((space == PCIIO_SPACE_IO)
2563 ? (&pcibr_soft->bs_pio_end_io)
2564 : (&pcibr_soft->bs_pio_end_mem));
2568 bst = flags & PCIIO_BYTE_STREAM;
2570 bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
2572 if (bfn == bfo) { /* we already match. */
2574 } else if (bfo != 0) { /* we have a conflict. */
2575 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2577 sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap conflict in %x, "
2578 "was%s%s, want%s%s\n", space, space_desc,
2579 bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
2580 bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
2581 bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
2582 bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
2584 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2586 xio_addr = XIO_NOWHERE;
2587 } else { /* OK to make the change. */
2588 swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
2589 if ( IS_PIC_SOFT(pcibr_soft) ) {
2590 picreg_t octl, nctl;
2591 octl = bridge->p_wid_control_64;
2592 nctl = bst ? octl | (uint64_t)swb : octl & ((uint64_t)~swb);
2594 if (octl != nctl) /* make the change if any */
2595 bridge->b_wid_control = nctl;
2598 picreg_t octl, nctl;
2599 if (io_get_sh_swapper(NASID_GET(bridge))) {
2600 octl = BRIDGE_REG_GET32((&bridge->b_wid_control));
2601 nctl = bst ? octl | swb : octl & ~swb;
2603 if (octl != nctl) /* make the change if any */
2604 BRIDGE_REG_SET32((&bridge->b_wid_control)) = __swab32(nctl);
2606 octl = bridge->b_wid_control;
2607 nctl = bst ? octl | swb : octl & ~swb;
2609 if (octl != nctl) /* make the change if any */
2610 bridge->b_wid_control = nctl;
2613 *bfp = bfn; /* record the assignment */
2615 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2617 sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap for %x set "
2618 "to%s%s\n", space, space_desc,
2619 bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
2620 bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
2622 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2627 pcibr_unlock(pcibr_soft, s);
/*
 * pcibr_piomap_alloc: set up a PIO mapping that lets the caller
 * address [pci_addr .. pci_addr + req_size) in the given PCI space
 * through this bridge.  The PCI address is first translated to an
 * XIO address (shared logic with pcibr_piotrans_addr), an idle
 * pcibr_piomap entry from the per-device list is recycled when one
 * exists, and the mapping is backed by an xtalk PIO map on the
 * bridge's xtalk connection point.
 *
 * Returns the pcibr_piomap on success; error paths (elided in this
 * view) return early after logging via PCIBR_DEBUG_ALWAYS.
 */
2633 pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
2634 device_desc_t dev_desc,
2635 pciio_space_t space,
2638 size_t req_size_max,
2641 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2642 pciio_info_t pciio_info = &pcibr_info->f_c;
2643 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2644 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2645 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2647 pcibr_piomap_t *mapptr;
2648 pcibr_piomap_t maplist;
2649 pcibr_piomap_t pcibr_piomap;
2651 xtalk_piomap_t xtalk_piomap;
2654 /* Make sure that the req sizes are non-zero */
2655 if ((req_size < 1) || (req_size_max < 1)) {
2656 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2657 "pcibr_piomap_alloc: req_size | req_size_max < 1\n"));
2662 * Code to translate slot/space/addr
2663 * into xio_addr is common between
2664 * this routine and pcibr_piotrans_addr.
2666 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2668 if (xio_addr == XIO_NOWHERE) {
2669 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2670 "pcibr_piomap_alloc: xio_addr == XIO_NOWHERE\n"));
2674 /* Check the piomap list to see if there is already an allocated
2675 * piomap entry but not in use. If so use that one. Otherwise
2676 * allocate a new piomap entry and add it to the piomap list
2678 mapptr = &(pcibr_info->f_piomap);
2680 s = pcibr_lock(pcibr_soft);
2681 for (pcibr_piomap = *mapptr;
2682 pcibr_piomap != NULL;
2683 pcibr_piomap = pcibr_piomap->bp_next) {
/* bp_mapsz == 0 marks an idle entry that can be recycled
 * (see pcibr_piomap_free, which clears bp_mapsz). */
2684 if (pcibr_piomap->bp_mapsz == 0)
2691 pcibr_unlock(pcibr_soft, s);
/* (Re)initialize the chosen piomap entry for this request. */
2695 pcibr_piomap->bp_dev = pconn_vhdl;
2696 pcibr_piomap->bp_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, pciio_slot);
2697 pcibr_piomap->bp_flags = flags;
2698 pcibr_piomap->bp_space = space;
2699 pcibr_piomap->bp_pciaddr = pci_addr;
2700 pcibr_piomap->bp_mapsz = req_size;
2701 pcibr_piomap->bp_soft = pcibr_soft;
2702 pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
/* NOTE(review): the list insertion below appears to link a newly
 * allocated entry at the head of f_piomap; the allocation itself
 * is on lines elided from this view — confirm against full source. */
2705 s = pcibr_lock(pcibr_soft);
2707 pcibr_piomap->bp_next = maplist;
2708 *mapptr = pcibr_piomap;
2710 pcibr_unlock(pcibr_soft, s);
/* Back the mapping with an xtalk PIO map on the xtalk connection. */
2715 xtalk_piomap_alloc(xconn_vhdl, 0,
2717 req_size, req_size_max,
2718 flags & PIOMAP_FLAGS);
2720 pcibr_piomap->bp_xtalk_addr = xio_addr;
2721 pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
/* Failure path (condition elided): clearing bp_mapsz releases the
 * entry back to the idle pool. */
2723 pcibr_piomap->bp_mapsz = 0;
2728 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2729 "pcibr_piomap_alloc: map=0x%x\n", pcibr_piomap));
2731 return pcibr_piomap;
/*
 * pcibr_piomap_free: release a PIO mapping obtained from
 * pcibr_piomap_alloc.  Frees the underlying xtalk PIO map and
 * clears bp_mapsz so the pcibr_piomap entry itself stays on the
 * per-device list, marked idle for reuse by a later alloc.
 */
2736 pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
2738 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2739 "pcibr_piomap_free: map=0x%x\n", pcibr_piomap));
2741 xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
2742 pcibr_piomap->bp_xtalk_pio = 0;
/* bp_mapsz == 0 is the "idle" marker checked by pcibr_piomap_alloc. */
2743 pcibr_piomap->bp_mapsz = 0;
/*
 * pcibr_piomap_addr: return a CPU-usable address for a PCI address
 * inside an established PIO map.  Offsets the map's base XIO address
 * by (pci_addr - bp_pciaddr) and hands the result to the xtalk layer.
 */
2748 pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
2753 addr = xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
2754 pcibr_piomap->bp_xtalk_addr +
2755 pci_addr - pcibr_piomap->bp_pciaddr,
/* NOTE(review): the debug tag below says "pcibr_piomap_free" but
 * this routine is pcibr_piomap_addr — looks like a copy/paste slip
 * in the message text; consider fixing in a follow-up change. */
2757 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2758 "pcibr_piomap_free: map=0x%x, addr=0x%x\n",
2759 pcibr_piomap, addr));
/*
 * pcibr_piomap_done: signal completion of PIO through this map.
 * Pure pass-through to the xtalk layer's "done" hook for the
 * backing xtalk PIO map.
 */
2766 pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
2768 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2769 "pcibr_piomap_done: map=0x%x\n", pcibr_piomap));
2770 xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
/*
 * pcibr_piotrans_addr: "fast path" PIO translation with no map
 * object.  Translates the PCI address to XIO via the same
 * pcibr_addr_pci_to_xio() used by pcibr_piomap_alloc, then asks
 * the xtalk layer for a direct translated address.  Returns a
 * failure indication (elided here) when the translation yields
 * XIO_NOWHERE.
 */
2775 pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,
2776 device_desc_t dev_desc,
2777 pciio_space_t space,
2782 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2783 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2784 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2785 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2790 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2792 if (xio_addr == XIO_NOWHERE) {
2793 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2794 "pcibr_piotrans_addr: xio_addr == XIO_NOWHERE\n"));
2798 addr = xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
2799 PCIBR_DEBUG((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2800 "pcibr_piotrans_addr: xio_addr=0x%x, addr=0x%x\n",
2806 * PIO Space allocation and management.
2807 * Allocate and Manage the PCI PIO space (mem and io space)
2808 * This routine is pretty simplistic at this time, and
2809 * does pretty trivial management of allocation and freeing.
2810 * The current scheme is prone to fragmentation.
2811 * Change the scheme to use bitmaps.
/*
 * pcibr_piospace_alloc: hand out a chunk of PCI bus address space
 * (IO or MEM/MEM32) of at least req_size bytes with the requested
 * alignment.  A previously allocated, logically-free chunk of the
 * right space/size/alignment is reused when available; otherwise a
 * new range is obtained via pcibr_bus_addr_alloc() and recorded on
 * the per-device f_piospace list.  Runs under the bridge lock.
 */
2816 pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
2817 device_desc_t dev_desc,
2818 pciio_space_t space,
2822 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2823 pciio_info_t pciio_info = &pcibr_info->f_c;
2824 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2826 pciio_piospace_t piosp;
2829 iopaddr_t start_addr;
2833 * Check for proper alignment
/* Alignment must be at least a page (NBPP) and a power of two. */
2835 ASSERT(alignment >= NBPP);
2836 ASSERT((alignment & (alignment - 1)) == 0);
2838 align_mask = alignment - 1;
2839 s = pcibr_lock(pcibr_soft);
2842 * First look if a previously allocated chunk exists.
2844 if ((piosp = pcibr_info->f_piospace)) {
2846 * Look through the list for a right sized free chunk.
/* Reuse requires matching space, sufficient size, and a start
 * address already satisfying the requested alignment. */
2850 (piosp->space == space) &&
2851 (piosp->count >= req_size) &&
2852 !(piosp->start & align_mask)) {
2854 pcibr_unlock(pcibr_soft, s);
2855 return piosp->start;
2857 piosp = piosp->next;
2863 * Allocate PCI bus address, usually for the Universe chip driver;
2864 * do not pass window info since the actual PCI bus address
2865 * space will never be freed. The space may be reused after it
2866 * is logically released by pcibr_piospace_free().
2869 case PCIIO_SPACE_IO:
2870 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2872 0, req_size, alignment);
2875 case PCIIO_SPACE_MEM:
2876 case PCIIO_SPACE_MEM32:
2877 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2879 0, req_size, alignment);
/* Unsupported space: unlock, log, and fail (return elided). */
2884 pcibr_unlock(pcibr_soft, s);
2885 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2886 "pcibr_piospace_alloc: unknown space %d\n", space));
2891 * If too big a request, reject it.
2894 pcibr_unlock(pcibr_soft, s);
2895 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2896 "pcibr_piospace_alloc: request 0x%x to big\n", req_size));
/* Record the new chunk at the head of the per-device list so that
 * pcibr_piospace_free() can later mark it logically free. */
2902 piosp->space = space;
2903 piosp->start = start_addr;
2904 piosp->count = req_size;
2905 piosp->next = pcibr_info->f_piospace;
2906 pcibr_info->f_piospace = piosp;
2908 pcibr_unlock(pcibr_soft, s);
2910 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2911 "pcibr_piospace_alloc: piosp=0x%x\n", piosp));
/*
 * pcibr_piospace_free: logically release a chunk previously handed
 * out by pcibr_piospace_alloc.  Only whole chunks may be freed: the
 * list is searched for an entry whose start matches 'pciaddr'; a
 * size mismatch is reported via printk but the whole chunk is freed
 * anyway.  The underlying PCI bus address range is never returned
 * to the bus allocator — the chunk merely becomes reusable.
 */
2918 pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
2919 pciio_space_t space,
2923 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2925 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
2928 pciio_piospace_t piosp;
2933 * Look through the bridge data structures for the pciio_piospace_t
2934 * structure corresponding to 'pciaddr'
2936 s = pcibr_lock(pcibr_soft);
2937 piosp = pcibr_info->f_piospace;
2940 * Piospace free can only be for the complete
2941 * chunk and not parts of it..
2943 if (piosp->start == pciaddr) {
2944 if (piosp->count == req_size)
2947 * Improper size passed for freeing..
2948 * Print a message and break;
2950 hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
2951 printk(KERN_WARNING "pcibr_piospace_free: error");
2952 printk(KERN_WARNING "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
2953 name, req_size, piosp->count);
2954 printk(KERN_WARNING "Freeing 0x%lx instead", piosp->count);
2957 piosp = piosp->next;
/* No entry matched 'pciaddr': log and bail out (surrounding
 * debug-call lines elided in this view). */
2962 "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
2964 pcibr_unlock(pcibr_soft, s);
2968 pcibr_unlock(pcibr_soft, s);
2970 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2971 "pcibr_piospace_free: piosp=0x%x\n", piosp));
2975 /* =====================================================================
2978 * The Bridge ASIC provides three methods of doing
2979 * DMA: via a "direct map" register available in
2980 * 32-bit PCI space (which selects a contiguous 2G
2981 * address space on some other widget), via
2982 * "direct" addressing via 64-bit PCI space (all
2983 * destination information comes from the PCI
2984 * address, including transfer attributes), and via
2985 * a "mapped" region that allows a bunch of
2986 * different small mappings to be established with
2989 * For efficiency, we most prefer to use the 32-bit
2990 * direct mapping facility, since it requires no
2991 * resource allocations. The advantage of using the
2992 * PMU over the 64-bit direct is that single-cycle
2993 * PCI addressing can be used; the advantage of
2994 * using 64-bit direct over PMU addressing is that
2995 * we do not have to allocate entries in the PMU.
2999 * Convert PCI-generic software flags and Bridge-specific software flags
3000 * into Bridge-specific Direct Map attribute bits.
/*
 * pcibr_flags_to_d64: translate generic pciio flags plus
 * bridge-specific pcibr flags into the attribute bits carried in
 * the upper part of a 64-bit direct-mapped PCI DMA address
 * (barrier, prefetch, precise, virtual-channel, and — on
 * XBridge/PIC — byte-swap).  Later flags override earlier ones,
 * so provider-specific PCIBR_* settings win over the generic
 * PCIIO_DMA_DATA / PCIIO_DMA_CMD presets.
 */
3003 pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
3005 iopaddr_t attributes = 0;
3007 /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
3009 ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
3012 /* Generic macro flags
3014 if (flags & PCIIO_DMA_DATA) { /* standard data channel */
3015 attributes &= ~PCI64_ATTR_BAR; /* no barrier bit */
3016 attributes |= PCI64_ATTR_PREF; /* prefetch on */
3018 if (flags & PCIIO_DMA_CMD) { /* standard command channel */
3019 attributes |= PCI64_ATTR_BAR; /* barrier bit on */
3020 attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
3022 /* Generic detail flags
3024 if (flags & PCIIO_PREFETCH)
3025 attributes |= PCI64_ATTR_PREF;
3026 if (flags & PCIIO_NOPREFETCH)
3027 attributes &= ~PCI64_ATTR_PREF;
3029 /* the swap bit is in the address attributes for xbridge */
3030 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3031 if (flags & PCIIO_BYTE_STREAM)
3032 attributes |= PCI64_ATTR_SWAP;
3033 if (flags & PCIIO_WORD_VALUES)
3034 attributes &= ~PCI64_ATTR_SWAP;
3037 /* Provider-specific flags
3039 if (flags & PCIBR_BARRIER)
3040 attributes |= PCI64_ATTR_BAR;
3041 if (flags & PCIBR_NOBARRIER)
3042 attributes &= ~PCI64_ATTR_BAR;
3044 if (flags & PCIBR_PREFETCH)
3045 attributes |= PCI64_ATTR_PREF;
3046 if (flags & PCIBR_NOPREFETCH)
3047 attributes &= ~PCI64_ATTR_PREF;
3049 if (flags & PCIBR_PRECISE)
3050 attributes |= PCI64_ATTR_PREC;
3051 if (flags & PCIBR_NOPRECISE)
3052 attributes &= ~PCI64_ATTR_PREC;
3054 if (flags & PCIBR_VCHAN1)
3055 attributes |= PCI64_ATTR_VIRTUAL;
3056 if (flags & PCIBR_VCHAN0)
3057 attributes &= ~PCI64_ATTR_VIRTUAL;
3059 /* PIC in PCI-X mode only supports barrier & swap */
/* In PCI-X mode all other attribute bits are stripped. */
3060 if (IS_PCIX(pcibr_soft)) {
3061 attributes &= (PCI64_ATTR_BAR | PCI64_ATTR_SWAP);
3064 return (attributes);
/*
 * pcibr_dmamap_alloc: allocate a DMA mapping resource for up to
 * req_size_max bytes on this PCI connection point.  Strategy, in
 * order of preference as visible below:
 *
 *   1. 64-bit direct (PCIIO_DMA_A64): no mapping resources needed;
 *      attributes are encoded into the PCI address itself via
 *      pcibr_flags_to_d64().
 *   2. 32-bit direct (PCIIO_FIXED): uses the bridge's direct-map
 *      window; individual address translations may still fail if a
 *      target lies outside the direct32 range.
 *   3. Page-mapped (PMU): allocates ATEs from the mapping RAM and
 *      builds a prototype ATE for later per-address use.
 *
 * On every successful path an RRB (read response buffer) check may
 * top up the slot's RRB allocation.  On total failure the xtalk
 * map and the pre-allocated pcibr_dmamap are released.
 *
 * NOTE(review): this listing elides a number of source lines
 * (error returns, some closing braces); comments describe only
 * what is visible here.
 */
3069 pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
3070 device_desc_t dev_desc,
3071 size_t req_size_max,
3074 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3075 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3076 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
3078 xwidgetnum_t xio_port;
3080 xtalk_dmamap_t xtalk_dmamap;
3081 pcibr_dmamap_t pcibr_dmamap;
3086 /* merge in forced flags */
3087 flags |= pcibr_soft->bs_dma_flags;
3090 * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
3091 * can be called within an interrupt thread.
3093 pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
3098 xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
3099 flags & DMAMAP_FLAGS);
3100 if (!xtalk_dmamap) {
3101 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3102 "pcibr_dmamap_alloc: xtalk_dmamap_alloc failed\n"));
/* Return the pre-allocated map to its pool before failing. */
3103 free_pciio_dmamap(pcibr_dmamap);
3106 xio_port = pcibr_soft->bs_mxid;
3107 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
/* Common bookkeeping shared by all three mapping strategies. */
3109 pcibr_dmamap->bd_dev = pconn_vhdl;
3110 pcibr_dmamap->bd_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot);
3111 pcibr_dmamap->bd_soft = pcibr_soft;
3112 pcibr_dmamap->bd_xtalk = xtalk_dmamap;
3113 pcibr_dmamap->bd_max_size = req_size_max;
3114 pcibr_dmamap->bd_xio_port = xio_port;
/* --- Strategy 1: 64-bit direct mapping --- */
3116 if (flags & PCIIO_DMA_A64) {
3117 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
3122 /* Device is capable of A64 operations,
3123 * and the attributes of the DMA are
3124 * consistent with any previous DMA
3125 * mappings using shared resources.
3128 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
3130 pcibr_dmamap->bd_flags = flags;
3131 pcibr_dmamap->bd_xio_addr = 0;
3132 pcibr_dmamap->bd_pci_addr = pci_addr;
3134 /* If in PCI mode, make sure we have an RRB (or two).
3136 if (IS_PCI(pcibr_soft) &&
3137 !(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
3138 if (flags & PCIBR_VCHAN1)
3140 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
3141 if (have_rrbs < 2) {
/* Prefetching transfers want a deeper RRB allocation. */
3142 if (pci_addr & PCI64_ATTR_PREF)
3146 if (have_rrbs < min_rrbs)
3147 do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
3148 min_rrbs - have_rrbs);
3151 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3152 "pcibr_dmamap_alloc: using direct64, map=0x%x\n",
3154 return pcibr_dmamap;
3156 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3157 "pcibr_dmamap_alloc: unable to use direct64\n"));
3159 /* PIC only supports 64-bit direct mapping in PCI-X mode. */
3160 if (IS_PCIX(pcibr_soft)) {
/* direct64 unavailable: drop the A64 request and fall through. */
3165 flags &= ~PCIIO_DMA_A64;
/* --- Strategy 2: 32-bit direct mapping (PCIIO_FIXED) --- */
3167 if (flags & PCIIO_FIXED) {
3168 /* warning: mappings may fail later,
3169 * if direct32 can't get to the address.
3171 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
3172 /* User desires DIRECT A32 operations,
3173 * and the attributes of the DMA are
3174 * consistent with any previous DMA
3175 * mappings using shared resources.
3176 * Mapping calls may fail if target
3177 * is outside the direct32 range.
3179 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3180 "pcibr_dmamap_alloc: using direct32, map=0x%x\n",
3182 pcibr_dmamap->bd_flags = flags;
3183 pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
3184 pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
3185 return pcibr_dmamap;
3187 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3188 "pcibr_dmamap_alloc: unable to use direct32\n"));
3190 /* If the user demands FIXED and we can't
3191 * give it to him, fail.
3193 xtalk_dmamap_free(xtalk_dmamap);
3194 free_pciio_dmamap(pcibr_dmamap);
/* --- Strategy 3: page-mapped DMA via PMU ATEs --- */
3198 * Allocate Address Translation Entries from the mapping RAM.
3199 * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
3200 * the maximum number of ATEs is based on the worst-case
3201 * scenario, where the requested target is in the
3202 * last byte of an ATE; thus, mapping IOPGSIZE+2
3203 * does end up requiring three ATEs.
3205 if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
3206 ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
3207 +req_size_max /* max mapping bytes */
3208 - 1) + 1; /* round UP */
3209 } else { /* assume requested target is page aligned */
3210 ate_count = IOPG(req_size_max /* max mapping bytes */
3211 - 1) + 1; /* round UP */
3214 ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
3216 if (ate_index != -1) {
3217 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
3218 bridge_ate_t ate_proto;
3222 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3223 "pcibr_dmamap_alloc: using PMU, ate_index=%d, "
3224 "pcibr_dmamap=0x%x\n", ate_index, pcibr_dmamap));
3226 ate_proto = pcibr_flags_to_ate(flags);
3228 pcibr_dmamap->bd_flags = flags;
3229 pcibr_dmamap->bd_pci_addr =
3230 PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
3232 * for xbridge the byte-swap bit == bit 29 of PCI address
3234 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3235 if (flags & PCIIO_BYTE_STREAM)
3236 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
3238 * If swap was set in bss_device in pcibr_endian_set()
3239 * we need to change the address bit.
3241 if (pcibr_soft->bs_slot[slot].bss_device &
3242 BRIDGE_DEV_SWAP_PMU)
3243 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
3244 if (flags & PCIIO_WORD_VALUES)
3245 ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
3247 pcibr_dmamap->bd_xio_addr = 0;
3248 pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
3249 pcibr_dmamap->bd_ate_index = ate_index;
3250 pcibr_dmamap->bd_ate_count = ate_count;
3251 pcibr_dmamap->bd_ate_proto = ate_proto;
3253 /* Make sure we have an RRB (or two).
3255 if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
3256 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
3257 if (have_rrbs < 2) {
3258 if (ate_proto & ATE_PREF)
3262 if (have_rrbs < min_rrbs)
3263 do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
3264 min_rrbs - have_rrbs);
/* External (SSRAM) ATEs on classic Bridge need the slot's PCI
 * command register shadowed for later flushing. */
3267 if (ate_index >= pcibr_soft->bs_int_ate_size &&
3268 !IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3269 bridge_t *bridge = pcibr_soft->bs_base;
3270 volatile unsigned *cmd_regp;
3274 pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
3276 s = pcibr_lock(pcibr_soft);
3277 cmd_regp = pcibr_slot_config_addr(bridge, slot,
3279 if ( IS_PIC_SOFT(pcibr_soft) ) {
3280 cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3283 if (io_get_sh_swapper(NASID_GET(bridge))) {
3284 BRIDGE_REG_SET32((&cmd_reg)) = __swab32(*cmd_regp);
3286 cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3289 pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
3290 pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
3291 pcibr_unlock(pcibr_soft, s);
3293 return pcibr_dmamap;
3295 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3296 "pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
/* Device bits could not be set: give the ATEs back. */
3299 pcibr_ate_free(pcibr_soft, ate_index, ate_count);
3301 /* total failure: sorry, you just can't
3302 * get from here to there that way.
3304 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3305 "pcibr_dmamap_alloc: complete failure.\n"));
3306 xtalk_dmamap_free(xtalk_dmamap);
3307 free_pciio_dmamap(pcibr_dmamap);
/*
 * pcibr_dmamap_free: tear down a DMA mapping from
 * pcibr_dmamap_alloc.  Drops the external-ATE activity count when
 * the map was busy and SSRAM-backed, frees the backing xtalk map,
 * releases the Device(x) register bits claimed for direct64 or PMU
 * use, returns any ATEs, and finally recycles the pcibr_dmamap
 * into its pre-allocated pool.
 */
3313 pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
3315 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
3316 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3317 pcibr_dmamap->bd_slot);
3319 unsigned flags = pcibr_dmamap->bd_flags;
3321 /* Make sure that bss_ext_ates_active
3322 * is properly kept up to date.
/* Both BUSY and SSRAM must be set for the counter to be held. */
3325 if (PCIBR_DMAMAP_BUSY & flags)
3326 if (PCIBR_DMAMAP_SSRAM & flags)
3327 atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
3329 xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
3331 if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
3332 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
/* bd_ate_count != 0 means the PMU path was used. */
3334 if (pcibr_dmamap->bd_ate_count) {
3335 pcibr_ate_free(pcibr_dmamap->bd_soft,
3336 pcibr_dmamap->bd_ate_index,
3337 pcibr_dmamap->bd_ate_count);
3338 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
3341 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3342 "pcibr_dmamap_free: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3344 free_pciio_dmamap(pcibr_dmamap);
3348 * pcibr_addr_xio_to_pci: given a PIO range, hand
3349 * back the corresponding base PCI MEM address;
3350 * this is used to short-circuit DMA requests that
3351 * loop back onto this PCI bus.
/*
 * pcibr_addr_xio_to_pci: reverse translation — given an XIO range
 * [xio_addr .. xio_addr + req_size), return the PCI MEM address it
 * corresponds to on this bus.  Checks the MEM32/MEM64 big windows
 * (PIC bus 0, PIC bus 1, or classic Bridge/XBridge bases), then the
 * per-slot DevIO(x) windows, reconstructing the PCI base from the
 * slot's Device(x) register.  Used to short-circuit DMA that loops
 * back onto this same PCI bus.
 */
3354 pcibr_addr_xio_to_pci(pcibr_soft_t soft,
3358 iopaddr_t xio_lim = xio_addr + req_size - 1;
3362 if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
3363 if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
3364 (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
3365 pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
3368 if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
3369 (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
3370 pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
3373 } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
3374 if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
3375 (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
3376 pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
3379 if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
3380 (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
3381 pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
3385 if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
3386 (xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
3387 pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
3390 if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
3391 (xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
3392 pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
/* Not in a big window: scan the DevIO(x) small windows; the range
 * must lie entirely within a single slot's window. */
3396 for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
3397 if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
3398 (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
/* Rebuild the window's PCI base from Device(x) OFF bits, then
 * add the offset into the window.  Only MEM-decoding windows
 * produce a valid answer; IO windows yield PCI_NOWHERE. */
3401 dev = soft->bs_slot[slot].bss_device;
3402 pci_addr = dev & BRIDGE_DEV_OFF_MASK;
3403 pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
3404 pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
3405 return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
/*
 * pcibr_dmamap_addr: produce the PCI bus address a device should use to
 * DMA to/from the physical range [paddr, paddr + req_size) through a
 * previously allocated DMA map.
 *
 * Visible flow: the xtalk layer converts paddr to an XIO address, then
 * the PCI address is formed one of four ways depending on the map flags:
 *   - loopback: the XIO target is this bridge itself;
 *   - 64-bit direct (PCIIO_DMA_A64): attributes pre-bundled at alloc time;
 *   - 32-bit fixed direct (PCIIO_FIXED): bounded by BRIDGE_DMA_DIRECT_SIZE;
 *   - page-mapped: program ATE entries covering the range.
 * Returns the PCI address, or 0/PCI_NOWHERE on failure paths.
 *
 * NOTE(review): this chunk is an excerpt with lines elided; comments
 * below describe only the code that is visible here.
 */
3412 pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
3416 pcibr_soft_t pcibr_soft;
3418 xwidgetnum_t xio_port;
3422 ASSERT(pcibr_dmamap != NULL);
3423 ASSERT(req_size > 0);
3424 ASSERT(req_size <= pcibr_dmamap->bd_max_size);
3426 pcibr_soft = pcibr_dmamap->bd_soft;
3428 flags = pcibr_dmamap->bd_flags;
/* Translate to an XIO (crosstalk) address.  A "packed" result carries
 * the destination widget port in the upper bits; otherwise fall back to
 * the port recorded when the map was allocated. */
3430 xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
3431 if (XIO_PACKED(xio_addr)) {
3432 xio_port = XIO_PORT(xio_addr);
3433 xio_addr = XIO_ADDR(xio_addr);
3435 xio_port = pcibr_dmamap->bd_xio_port;
3437 /* If this DMA is to an address that
3438 * refers back to this Bridge chip,
3439 * reduce it back to the correct
3442 if (xio_port == pcibr_soft->bs_xid) {
3443 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
/* 64-bit direct mapping: target widget and attribute bits are OR-ed
 * into the pre-computed bd_pci_addr. */
3444 } else if (flags & PCIIO_DMA_A64) {
3446 * always use 64-bit direct mapping,
3447 * which always works.
3448 * Device(x) was set up during
3449 * dmamap allocation.
3452 /* attributes are already bundled up into bd_pci_addr.
3454 pci_addr = pcibr_dmamap->bd_pci_addr
3455 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
3458 /* Bridge Hardware WAR #482836:
3459 * If the transfer is not cache aligned
3460 * and the Bridge Rev is <= B, force
3461 * prefetch to be off.
3463 if (flags & PCIBR_NOPREFETCH)
3464 pci_addr &= ~PCI64_ATTR_PREF;
3466 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
3467 pcibr_dmamap->bd_dev,
3468 "pcibr_dmamap_addr: (direct64): wanted paddr [0x%x..0x%x] "
3469 "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
3470 paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
/* 32-bit fixed direct mapping: validate DIDN and that the request lies
 * inside the window starting at bd_xio_addr; 0 signals failure. */
3472 } else if (flags & PCIIO_FIXED) {
3474 * always use 32-bit direct mapping,
3476 * Device(x) was set up during
3477 * dmamap allocation.
3480 if (xio_port != pcibr_soft->bs_dir_xport)
3481 pci_addr = 0; /* wrong DIDN */
3482 else if (xio_addr < pcibr_dmamap->bd_xio_addr)
3483 pci_addr = 0; /* out of range */
3484 else if ((xio_addr + req_size) >
3485 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
3486 pci_addr = 0; /* out of range */
3488 pci_addr = pcibr_dmamap->bd_pci_addr +
3489 xio_addr - pcibr_dmamap->bd_xio_addr;
3491 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
3492 pcibr_dmamap->bd_dev,
3493 "pcibr_dmamap_addr (direct32): wanted paddr [0x%x..0x%x] "
3494 "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
3495 paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
/* Page-mapped (ATE/PMU) case: build an ATE prototype covering the
 * request and write it into the map's reserved ATE range. */
3498 bridge_t *bridge = pcibr_soft->bs_base;
3499 iopaddr_t offset = IOPGOFF(xio_addr);
3500 bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
/* Number of I/O pages touched by [offset, offset + req_size). */
3501 int ate_count = IOPG(offset + req_size - 1) + 1;
3503 int ate_index = pcibr_dmamap->bd_ate_index;
3504 unsigned cmd_regs[8];
3507 #if PCIBR_FREEZE_TIME
3508 int ate_total = ate_count;
3509 unsigned freeze_time;
3511 bridge_ate_p ate_ptr = pcibr_dmamap->bd_ate_ptr;
3514 /* Bridge Hardware WAR #482836:
3515 * If the transfer is not cache aligned
3516 * and the Bridge Rev is <= B, force
3517 * prefetch to be off.
3519 if (flags & PCIBR_NOPREFETCH)
3520 ate_proto &= ~ATE_PREF;
3523 | (xio_port << ATE_TIDSHIFT)
3524 | (xio_addr - offset);
3526 pci_addr = pcibr_dmamap->bd_pci_addr + offset;
3528 /* Fill in our mapping registers
3529 * with the appropriate xtalk data,
3530 * and hand back the PCI address.
3533 ASSERT(ate_count > 0);
3534 if (ate_count <= pcibr_dmamap->bd_ate_count) {
/* Reading b_wid_tflush stalls until bridge PIO writes complete;
 * the non-PIC path may need a 32-bit swapped register access. */
3538 if ( IS_PIC_SOFT(pcibr_soft) ) {
3539 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
3542 if (io_get_sh_swapper(NASID_GET(bridge))) {
3543 BRIDGE_REG_GET32((&bridge->b_wid_tflush));
3545 bridge->b_wid_tflush;
3548 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3549 "pcibr_dmamap_addr (PMU) : wanted paddr "
3550 "[0x%x..0x%x] returning PCI 0x%x\n",
3551 paddr, paddr + req_size - 1, pci_addr));
3554 /* The number of ATE's required is greater than the number
3555 * allocated for this map. One way this can happen is if
3556 * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
3557 * flag, and then when that map is used (right now), the
3558 * target address tells us we really did need to roundup.
3559 * The other possibility is that the map is just plain too
3560 * small to handle the requested target area.
3562 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3563 "pcibr_dmamap_addr (PMU) : wanted paddr "
3564 "[0x%x..0x%x] ate_count 0x%x bd_ate_count 0x%x "
3565 "ATE's required > number allocated\n",
3566 paddr, paddr + req_size - 1,
3567 ate_count, pcibr_dmamap->bd_ate_count));
/*
 * pcibr_dmamap_done: finish one use of a DMA map.  Clears the map's
 * BUSY flag, drops the per-slot external-ATE active count when SSRAM
 * ATEs were in use, and forwards completion to the xtalk layer.
 * ATE entries are intentionally left valid (see comment below).
 */
3577 pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
3580 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
3581 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3584 * We could go through and invalidate ATEs here;
3585 * for performance reasons, we don't.
3586 * We also don't enforce the strict alternation
3587 * between _addr/_list and _done, but Hub does.
3590 if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
3591 pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
3593 if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
3594 atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
3596 xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
3598 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3599 "pcibr_dmamap_done: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3604 * For each bridge, the DIR_OFF value in the Direct Mapping Register
3605 * determines the PCI to Crosstalk memory mapping to be used for all
3606 * 32-bit Direct Mapping memory accesses. This mapping can be to any
3607 * node in the system. This function will return that compact node id.
/*
 * pcibr_get_dmatrans_node: compact node id of the direct-map target,
 * derived from the NASID bits embedded in this bus's bs_dir_xbase.
 */
3612 pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
3615 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3616 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3618 return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
/*
 * pcibr_dmatrans_addr: map-free ("translation") DMA setup.  Compute a
 * PCI bus address for [paddr, paddr + req_size) without allocating a
 * pcibr_dmamap, trying in order: loopback to this bridge, 64-bit direct
 * mapping (if the caller allows PCIIO_DMA_A64 and Device(x) bits can be
 * made to agree), then 32-bit direct mapping through the DIR_OFF window.
 * Returns 0 when no acceptable PCI address can be produced.
 *
 * NOTE(review): excerpt with elided lines; comments describe only the
 * visible code.
 */
3623 pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
3624 device_desc_t dev_desc,
3629 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3630 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3631 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
3632 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3633 pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
3635 xwidgetnum_t xio_port;
3643 /* merge in forced flags */
3644 flags |= pcibr_soft->bs_dma_flags;
/* Ask the xtalk layer for the XIO address of the physical range. */
3646 xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
3647 flags & DMAMAP_FLAGS);
3649 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3650 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3651 "xtalk_dmatrans_addr failed with 0x%x\n",
3652 paddr, paddr + req_size - 1, xio_addr));
3656 * find which XIO port this goes to.
3658 if (XIO_PACKED(xio_addr)) {
3659 if (xio_addr == XIO_NOWHERE) {
3660 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3661 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3662 "xtalk_dmatrans_addr failed with XIO_NOWHERE\n",
3663 paddr, paddr + req_size - 1));
3666 xio_port = XIO_PORT(xio_addr);
3667 xio_addr = XIO_ADDR(xio_addr);
3670 xio_port = pcibr_soft->bs_mxid;
3673 * If this DMA comes back to us,
3674 * return the PCI MEM address on
3675 * which it would land, or NULL
3676 * if the target is something
3677 * on bridge other than PCI MEM.
3679 if (xio_port == pcibr_soft->bs_xid) {
3680 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
3681 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3682 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3683 "xio_port=0x%x, pci_addr=0x%x\n",
3684 paddr, paddr + req_size - 1, xio_port, pci_addr));
3687 /* If the caller can use A64, try to
3688 * satisfy the request with the 64-bit
3689 * direct map. This can fail if the
3690 * configuration bits in Device(x)
3691 * conflict with our flags.
3694 if (flags & PCIIO_DMA_A64) {
/* Fast path: slot already has a D64 base established with matching
 * flags; just OR in the XIO address and target widget. */
3695 pci_addr = slotp->bss_d64_base;
3696 if (!(flags & PCIBR_VCHAN1))
3697 flags |= PCIBR_VCHAN0;
3698 if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
3699 (flags == slotp->bss_d64_flags)) {
3701 pci_addr |= xio_addr
3702 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
/* NOTE(review): debug-only guard keyed to a specific XIO address;
 * its purpose is not evident from this excerpt. */
3705 if (xio_addr != 0x20000000)
3707 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3708 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3709 "xio_port=0x%x, direct64: pci_addr=0x%x\n",
3710 paddr, paddr + req_size - 1, xio_addr, pci_addr));
/* Slow path: try to (re)program Device(x) for D64 with our flags,
 * caching the result in the slot for next time. */
3713 if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
3714 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
3715 slotp->bss_d64_flags = flags;
3716 slotp->bss_d64_base = pci_addr;
3717 pci_addr |= xio_addr
3718 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
3720 /* If in PCI mode, make sure we have an RRB (or two).
3722 if (IS_PCI(pcibr_soft) &&
3723 !(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
3724 if (flags & PCIBR_VCHAN1)
3726 have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
3727 if (have_rrbs < 2) {
3728 if (pci_addr & PCI64_ATTR_PREF)
3732 if (have_rrbs < min_rrbs)
3733 do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, vchan,
3734 min_rrbs - have_rrbs);
3738 if (xio_addr != 0x20000000)
3740 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3741 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3742 "xio_port=0x%x, direct64: pci_addr=0x%x, "
3743 "new flags: 0x%x\n", paddr, paddr + req_size - 1,
3744 xio_addr, pci_addr, (uint64_t) flags));
3748 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3749 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3750 "xio_port=0x%x, Unable to set direct64 Device(x) bits\n",
3751 paddr, paddr + req_size - 1, xio_addr));
3753 /* PIC only supports 64-bit direct mapping in PCI-X mode */
3754 if (IS_PCIX(pcibr_soft)) {
3758 /* our flags conflict with Device(x). try direct32*/
3759 flags = flags & ~(PCIIO_DMA_A64 | PCIBR_VCHAN0);
3761 /* Try to satisfy the request with the 32-bit direct
3762 * map. This can fail if the configuration bits in
3763 * Device(x) conflict with our flags, or if the
3764 * target address is outside where DIR_OFF points.
/* Direct32 window is 2GB (1ULL << 31) starting at bs_dir_xbase;
 * reject requests that fall outside it or target the wrong port. */
3767 size_t map_size = 1ULL << 31;
3768 iopaddr_t xio_base = pcibr_soft->bs_dir_xbase;
3769 iopaddr_t offset = xio_addr - xio_base;
3770 iopaddr_t endoff = req_size + offset;
3772 if ((req_size > map_size) ||
3773 (xio_addr < xio_base) ||
3774 (xio_port != pcibr_soft->bs_dir_xport) ||
3775 (endoff > map_size)) {
3777 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3778 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3779 "xio_port=0x%x, xio region outside direct32 target\n",
3780 paddr, paddr + req_size - 1, xio_addr));
3782 pci_addr = slotp->bss_d32_base;
3783 if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
3784 (flags == slotp->bss_d32_flags)) {
3788 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3789 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3790 "xio_port=0x%x, direct32: pci_addr=0x%x\n",
3791 paddr, paddr + req_size - 1, xio_addr, pci_addr));
/* As for D64: program Device(x) for D32 and cache the base/flags. */
3795 if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {
3797 pci_addr = PCI32_DIRECT_BASE;
3798 slotp->bss_d32_flags = flags;
3799 slotp->bss_d32_base = pci_addr;
3802 /* Make sure we have an RRB (or two).
3804 if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
3805 have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
3806 if (have_rrbs < 2) {
3807 if (slotp->bss_device & BRIDGE_DEV_PREF)
3811 if (have_rrbs < min_rrbs)
3812 do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot,
3813 vchan, min_rrbs - have_rrbs);
3817 if (xio_addr != 0x20000000)
3819 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3820 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3821 "xio_port=0x%x, direct32: pci_addr=0x%x, "
3822 "new flags: 0x%x\n", paddr, paddr + req_size - 1,
3823 xio_addr, pci_addr, (uint64_t) flags));
3827 /* our flags conflict with Device(x).
3829 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3830 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3831 "xio_port=0x%x, Unable to set direct32 Device(x) bits\n",
3832 paddr, paddr + req_size - 1, xio_port));
/* All strategies failed: caller must fall back to an allocated map. */
3836 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3837 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3838 "xio_port=0x%x, No acceptable PCI address found\n",
3839 paddr, paddr + req_size - 1, xio_port));
/* pcibr_dmamap_drain: thin pass-through — drain outstanding DMA for
 * this map at the xtalk layer. */
3845 pcibr_dmamap_drain(pcibr_dmamap_t map)
3847 xtalk_dmamap_drain(map->bd_xtalk);
/* pcibr_dmaaddr_drain: thin pass-through — resolve the bus's xtalk
 * connection vertex and drain DMA for the given physical range there. */
3851 pcibr_dmaaddr_drain(vertex_hdl_t pconn_vhdl,
3855 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3856 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3857 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
3859 xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
/* pcibr_dmalist_drain: thin pass-through — drain DMA for an address
 * list via the bus's xtalk connection vertex. */
3863 pcibr_dmalist_drain(vertex_hdl_t pconn_vhdl,
3866 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3867 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3868 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
3870 xtalk_dmalist_drain(xconn_vhdl, list);
3874 * Get the starting PCIbus address out of the given DMA map.
3875 * This function is supposed to be used by a close friend of PCI bridge
3876 * since it relies on the fact that the starting address of the map is fixed at
3877 * the allocation time in the current implementation of PCI bridge.
/* Simple accessor: returns the map's cached starting PCI address. */
3880 pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
3882 return (pcibr_dmamap->bd_pci_addr);
3885 /* =====================================================================
3886 * CONFIGURATION MANAGEMENT
/* Provider startup/shutdown hooks required by the pciio provider
 * interface.  NOTE(review): bodies are elided in this excerpt; they
 * appear to be empty stubs — confirm against the full source. */
3890 pcibr_provider_startup(vertex_hdl_t pcibr)
3896 pcibr_provider_shutdown(vertex_hdl_t pcibr)
/*
 * pcibr_reset: reset the PCI device in the given slot.  Visible flow:
 * redirect guest slots to their host slot, save per-function config
 * state under the bus lock, pulse the slot's reset (via the L1
 * controller on bricks, or by toggling BRIDGE_CTRL_RST_PIN in the
 * widget control register), then restore BAR windows and the PCI
 * command register for each function.
 * NOTE(review): excerpt with elided lines (including a PIC_LATER
 * "unimplemented" arm at the end); comments cover visible code only.
 */
3901 pcibr_reset(vertex_hdl_t conn)
3904 pciio_info_t pciio_info = pciio_info_get(conn);
3905 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3906 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3907 bridge_t *bridge = pcibr_soft->bs_base;
3912 pcibr_info_h pcibr_infoh;
3913 pcibr_info_t pcibr_info;
3916 #endif /* PIC_LATER */
/* Guest slot in a dual-slot config: operate on the host slot instead. */
3920 if (pcibr_soft->bs_slot[pciio_slot].has_host) {
3921 pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
3922 pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
3925 if ((pciio_slot >= pcibr_soft->bs_first_slot) &&
3926 (pciio_slot <= pcibr_soft->bs_last_reset)) {
/* Save each function's config-space state before resetting. */
3927 s = pcibr_lock(pcibr_soft);
3928 nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
3929 pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
3930 for (f = 0; f < nf; ++f)
3932 cfgctl[f] = pcibr_func_config_get(bridge, pciio_slot, f,
/* Brick hardware: ask the L1 system controller to reset the slot. */
3935 error = iobrick_pci_slot_rst(pcibr_soft->bs_l1sc,
3936 pcibr_widget_to_bus(pcibr_soft->bs_vhdl),
3937 PCIBR_DEVICE_TO_SLOT(pcibr_soft,pciio_slot),
/* Otherwise pulse the slot's reset pin low then high. */
3940 ctlreg = bridge->b_wid_control;
3941 bridge->b_wid_control = ctlreg & ~BRIDGE_CTRL_RST_PIN(pciio_slot);
3943 bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST_PIN(pciio_slot);
/* Restore each function's BAR windows and command register. */
3946 for (f = 0; f < nf; ++f)
3947 if ((pcibr_info = pcibr_infoh[f]))
3948 for (win = 0; win < 6; ++win)
3949 if (pcibr_info->f_window[win].w_base != 0)
3950 pcibr_func_config_set(bridge, pciio_slot, f,
3951 PCI_CFG_BASE_ADDR(win) / 4,
3952 pcibr_info->f_window[win].w_base);
3953 for (f = 0; f < nf; ++f)
3955 pcibr_func_config_set(bridge, pciio_slot, f,
3956 PCI_CFG_COMMAND / 4,
3958 pcibr_unlock(pcibr_soft, s);
3965 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, conn,
3966 "pcibr_reset unimplemented for slot %d\n", conn, pciio_slot));
3967 #endif /* PIC_LATER */
/*
 * pcibr_endian_set: arrange the bridge's hardware byte-swapping for a
 * slot so the caller sees its desired endianness.  If the device's
 * endianness differs from the desired one, the Device(x) SWAP bits are
 * set; otherwise cleared.  The Device(x) register is only rewritten
 * when the cached value actually changes, under the bus lock.
 * NOTE(review): excerpt with elided lines; comments cover visible code.
 */
3972 pcibr_endian_set(vertex_hdl_t pconn_vhdl,
3973 pciio_endian_t device_end,
3974 pciio_endian_t desired_end)
3976 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3977 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3978 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3983 * Bridge supports hardware swapping; so we can always
3984 * arrange for the caller's desired endianness.
3987 s = pcibr_lock(pcibr_soft);
3988 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
3989 if (device_end != desired_end)
3990 devreg |= BRIDGE_DEV_SWAP_BITS;
3992 devreg &= ~BRIDGE_DEV_SWAP_BITS;
3994 /* NOTE- if we ever put SWAP bits
3995 * onto the disabled list, we will
3996 * have to change the logic here.
/* Write-through only on change: update hardware Device(x), mirror the
 * value in software, then stall on b_wid_tflush until the PIO lands.
 * The non-PIC path may need explicitly swapped 32-bit accesses. */
3998 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
3999 bridge_t *bridge = pcibr_soft->bs_base;
4001 if ( IS_PIC_SOFT(pcibr_soft) ) {
4002 bridge->b_device[pciio_slot].reg = devreg;
4003 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4004 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4007 if (io_get_sh_swapper(NASID_GET(bridge))) {
4008 BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
4009 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4010 BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
4012 bridge->b_device[pciio_slot].reg = devreg;
4013 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4014 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4018 pcibr_unlock(pcibr_soft, s);
4021 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
4022 "pcibr_endian_set: Device(%d): %x\n",
4023 pciio_slot, devreg, device_bits));
4025 printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
4030 /* This (re)sets the GBR and REALTIME bits and also keeps track of how
4031 * many sets are outstanding. Reset succeeds only if the number of outstanding
/*
 * pcibr_priority_bits_set: raise or lower a slot's arbitration priority
 * (GBR / REAL_TIME Device(x) bits), reference-counted per slot via
 * bss_pri_uctr so nested HIGH requests are balanced before the bits are
 * cleared.  Returns a PRIO_* status (rc initialized to PRIO_SUCCESS).
 * NOTE(review): excerpt with elided lines; comments cover visible code.
 */
4035 pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
4036 pciio_slot_t pciio_slot,
4037 pciio_priority_t device_prio)
4041 bridgereg_t rtbits = 0;
4043 int rc = PRIO_SUCCESS;
4045 /* in dual-slot configurations, the host and the
4046 * guest have separate DMA resources, so they
4047 * have separate requirements for priority bits.
4050 counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
4053 * Bridge supports PCI notions of LOW and HIGH priority
4054 * arbitration rings via a "REAL_TIME" bit in the per-device
4055 * Bridge register. The "GBR" bit controls access to the GBR
4056 * ring on the xbow. These two bits are (re)set together.
4058 * XXX- Bug in Rev B Bridge Si:
4059 * Symptom: Prefetcher starts operating incorrectly. This happens
4060 * due to corruption of the address storage ram in the prefetcher
4061 * when a non-real time PCI request is pulled and a real-time one is
4062 * put in it's place. Workaround: Use only a single arbitration ring
4063 * on PCI bus. GBR and RR can still be uniquely used per
4064 * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
/* Rev B silicon workaround: never set DEV_RT on that revision. */
4067 if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
4068 rtbits |= BRIDGE_DEV_RT;
4070 /* NOTE- if we ever put DEV_RT or DEV_GBR on
4071 * the disabled list, we will have to take
4072 * it into account here.
/* HIGH: set bits on the 0->1 transition of the counter;
 * LOW: clear bits only when the counter drains back to 0. */
4075 s = pcibr_lock(pcibr_soft);
4076 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
4077 if (device_prio == PCI_PRIO_HIGH) {
4078 if ((++*counter == 1)) {
4084 } else if (device_prio == PCI_PRIO_LOW) {
4087 else if (--*counter == 0)
/* Write-through only on change, mirroring pcibr_endian_set's
 * PIC vs. swapped-access register update sequence. */
4091 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
4092 bridge_t *bridge = pcibr_soft->bs_base;
4094 if ( IS_PIC_SOFT(pcibr_soft) ) {
4095 bridge->b_device[pciio_slot].reg = devreg;
4096 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4097 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4100 if (io_get_sh_swapper(NASID_GET(bridge))) {
4101 BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
4102 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4103 BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
4105 bridge->b_device[pciio_slot].reg = devreg;
4106 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4107 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4111 pcibr_unlock(pcibr_soft, s);
/* pcibr_priority_set: public wrapper — resolve the slot from the
 * connection vertex and delegate to pcibr_priority_bits_set, ignoring
 * its status. */
4117 pcibr_priority_set(vertex_hdl_t pconn_vhdl,
4118 pciio_priority_t device_prio)
4120 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4121 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4122 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4124 (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
4130 * Interfaces to allow special (e.g. SGI) drivers to set/clear
4131 * Bridge-specific device flags. Many flags are modified through
4132 * PCI-generic interfaces; we don't allow them to be directly
4133 * manipulated here. Only flags that at this point seem pretty
4134 * Bridge-specific can be set through these special interfaces.
4135 * We may add more flags as the need arises, or remove flags and
4136 * create PCI-generic interfaces as the need arises.
4138 * Returns 0 on failure, 1 on success
/*
 * pcibr_device_flags_set: translate PCIBR_* device flags into Device(x)
 * register set/clear masks and apply them for the slot.  Each feature
 * has a positive and a NO variant mapping to the same register bit.
 * NOTE(review): excerpt with elided lines; comments cover visible code.
 */
4141 pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
4142 pcibr_device_flags_t flags)
4144 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4145 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4146 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4147 bridgereg_t set = 0;
4148 bridgereg_t clr = 0;
4150 ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
/* Build set/clr masks; WRITE_GATHER affects both the PMU and DIR
 * write-gather enable bits (two separate if-pairs below). */
4152 if (flags & PCIBR_WRITE_GATHER)
4153 set |= BRIDGE_DEV_PMU_WRGA_EN;
4154 if (flags & PCIBR_NOWRITE_GATHER)
4155 clr |= BRIDGE_DEV_PMU_WRGA_EN;
4157 if (flags & PCIBR_WRITE_GATHER)
4158 set |= BRIDGE_DEV_DIR_WRGA_EN;
4159 if (flags & PCIBR_NOWRITE_GATHER)
4160 clr |= BRIDGE_DEV_DIR_WRGA_EN;
4162 if (flags & PCIBR_PREFETCH)
4163 set |= BRIDGE_DEV_PREF;
4164 if (flags & PCIBR_NOPREFETCH)
4165 clr |= BRIDGE_DEV_PREF;
4167 if (flags & PCIBR_PRECISE)
4168 set |= BRIDGE_DEV_PRECISE;
4169 if (flags & PCIBR_NOPRECISE)
4170 clr |= BRIDGE_DEV_PRECISE;
4172 if (flags & PCIBR_BARRIER)
4173 set |= BRIDGE_DEV_BARRIER;
4174 if (flags & PCIBR_NOBARRIER)
4175 clr |= BRIDGE_DEV_BARRIER;
4177 if (flags & PCIBR_64BIT)
4178 set |= BRIDGE_DEV_DEV_SIZE;
4179 if (flags & PCIBR_NO64BIT)
4180 clr |= BRIDGE_DEV_DEV_SIZE;
/* Apply masks under the bus lock; write-through only on change,
 * using the same PIC vs. swapped-access sequence as elsewhere. */
4186 s = pcibr_lock(pcibr_soft);
4187 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
4188 devreg = (devreg & ~clr) | set;
4189 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
4190 bridge_t *bridge = pcibr_soft->bs_base;
4192 if ( IS_PIC_SOFT(pcibr_soft) ) {
4193 bridge->b_device[pciio_slot].reg = devreg;
4194 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4195 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4198 if (io_get_sh_swapper(NASID_GET(bridge))) {
4199 BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
4200 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4201 BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
4203 bridge->b_device[pciio_slot].reg = devreg;
4204 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4205 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4209 pcibr_unlock(pcibr_soft, s);
4211 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
4212 "pcibr_device_flags_set: Device(%d): %x\n",
4213 pciio_slot, devreg, device_bits));
4215 printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
4222 * PIC has 16 RBARs per bus; meaning it can have a total of 16 outstanding
4223 * split transactions. If the functions on the bus have requested a total
4224 * of 16 or less, then we can give them what they requested (ie. 100%).
4225 * Otherwise we have make sure each function can get at least one buffer
4226 * and then divide the rest of the buffers up among the functions as ``A
4227 * PERCENTAGE OF WHAT THEY REQUESTED'' (i.e. 0% - 100% of a function's
4228 * pcix_type0_status.max_out_split). This percentage does not include the
4229 * one RBAR that all functions get by default.
/*
 * pcibr_pcix_rbars_calc: compute (and return) the percentage of its
 * requested extra RBARs each PCI-X function on this bus may receive.
 * 0 when functions outnumber RBARs (oversubscribed, logged below).
 * NOTE(review): excerpt with elided lines — the no-function and
 * 100%-satisfiable arms are not visible here.
 */
4232 pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
4234 /* 'percent_allowed' is the percentage of requested RBARs that functions
4235 * are allowed, ***less the 1 RBAR that all functions get by default***
4237 int percent_allowed;
4239 if (pcibr_soft->bs_pcix_num_funcs) {
4240 if (pcibr_soft->bs_pcix_num_funcs > NUM_RBAR) {
4242 "%lx: Must oversubscribe Read Buffer Attribute Registers"
4243 "(RBAR). Bus has %d RBARs but %d funcs need them.\n",
4244 (unsigned long)pcibr_soft->bs_vhdl, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
4245 percent_allowed = 0;
/* Leftover RBARs (after the 1-per-function default) as a percentage
 * of the total requested split transactions. */
4247 percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
4248 pcibr_soft->bs_pcix_split_tot);
4250 /* +1 to percentage to solve rounding errors that occur because
4251 * we're not doing fractional math. (ie. ((3 * 66%) / 100) = 1)
4252 * but should be "2" if doing true fractional math. NOTE: Since
4253 * the greatest number of outstanding transactions a function
4254 * can request is 32, this "+1" will always work (i.e. we won't
4255 * accidentally oversubscribe the RBARs because of this rounding
4256 * of the percentage).
4258 percent_allowed=(percent_allowed > 100) ? 100 : percent_allowed+1;
4264 return(percent_allowed);
/*
 * pcibr_provider: the pciio provider operations table for this bridge
 * driver.  Entries are positional (PIO, DMA, interrupt, lifecycle,
 * config, error/driver-registration hooks); NULL entries mark
 * operations this provider does not implement.
 */
4267 pciio_provider_t pcibr_provider =
4269 (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
4270 (pciio_piomap_free_f *) pcibr_piomap_free,
4271 (pciio_piomap_addr_f *) pcibr_piomap_addr,
4272 (pciio_piomap_done_f *) pcibr_piomap_done,
4273 (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
4274 (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
4275 (pciio_piospace_free_f *) pcibr_piospace_free,
4277 (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
4278 (pciio_dmamap_free_f *) pcibr_dmamap_free,
4279 (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
4280 (pciio_dmamap_done_f *) pcibr_dmamap_done,
4281 (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
4282 (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
4283 (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
4284 (pciio_dmalist_drain_f *) pcibr_dmalist_drain,
4286 (pciio_intr_alloc_f *) pcibr_intr_alloc,
4287 (pciio_intr_free_f *) pcibr_intr_free,
4288 (pciio_intr_connect_f *) pcibr_intr_connect,
4289 (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
4290 (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
4292 (pciio_provider_startup_f *) pcibr_provider_startup,
4293 (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
4294 (pciio_reset_f *) pcibr_reset,
4295 (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
4296 (pciio_endian_set_f *) pcibr_endian_set,
4297 (pciio_priority_set_f *) pcibr_priority_set,
4298 (pciio_config_get_f *) pcibr_config_get,
4299 (pciio_config_set_f *) pcibr_config_set,
4300 (pciio_error_devenable_f *) 0,
4301 (pciio_error_extract_f *) 0,
4302 (pciio_driver_reg_callback_f *) 0,
4303 (pciio_driver_unreg_callback_f *) 0,
4304 (pciio_device_unregister_f *) pcibr_device_unregister,
4305 (pciio_dma_enabled_f *) pcibr_dma_enabled,
/* pcibr_dma_enabled: thin pass-through — ask the xtalk layer whether
 * DMA is enabled on this bus's crosstalk connection. */
4309 pcibr_dma_enabled(vertex_hdl_t pconn_vhdl)
4311 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4312 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4315 return xtalk_dma_enabled(pcibr_soft->bs_conn);
4320 * pcibr_debug() is used to print pcibr debug messages to the console. A
4321 * user enables tracing by setting the following global variables:
4323 * pcibr_debug_mask -Bitmask of what to trace. see pcibr_private.h
4324 * pcibr_debug_module -Module to trace. 'all' means trace all modules
4325 * pcibr_debug_widget -Widget to trace. '-1' means trace all widgets
4326 * pcibr_debug_slot -Slot to trace. '-1' means trace all slots
4328 * 'type' is the type of debugging that the current PCIBR_DEBUG macro is
4329 * tracing. 'vhdl' (which can be NULL) is the vhdl associated with the
4330 * debug statement. If there is a 'vhdl' associated with this debug
4331 * statement, it is parsed to obtain the module, widget, and slot. If the
4332 * globals above match the PCIBR_DEBUG params, then the debug info in the
4333 * parameter 'format' is sent to the console.
/*
 * NOTE(review): excerpt with elided lines (e.g. the widget/slot parse
 * bodies and the final vsnprintf/printk are not fully visible);
 * comments below cover only the visible code.
 */
4336 pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
4338 char hwpath[MAXDEVNAME] = "\0";
4339 char copy_of_hwpath[MAXDEVNAME];
4340 char *module = "all";
/* Only do the (relatively expensive) hwpath parsing when this message
 * type is enabled in the global mask. */
4345 if (pcibr_debug_mask & type) {
4347 if (!hwgraph_vertex_name_get(vhdl, hwpath, MAXDEVNAME)) {
/* Extract the module token from a copy (strsep mutates its input). */
4350 if (strcmp(module, pcibr_debug_module)) {
4352 (void)strcpy(copy_of_hwpath, hwpath);
4353 cp = strstr(copy_of_hwpath, "/module/");
4355 cp += strlen("/module");
4356 module = strsep(&cp, "/");
4359 if (pcibr_debug_widget != -1) {
4360 cp = strstr(hwpath, "/xtalk/");
4362 cp += strlen("/xtalk/");
4366 if (pcibr_debug_slot != -1) {
4367 cp = strstr(hwpath, "/pci/");
4369 cp += strlen("/pci/");
/* Emit only when there is no vhdl filter, or module/widget/slot all
 * match the user-set trace globals. */
4375 if ((vhdl == NULL) ||
4376 (!strcmp(module, pcibr_debug_module) &&
4377 (widget == pcibr_debug_widget) &&
4378 (slot == pcibr_debug_slot))) {
4380 printk("PCIBR_DEBUG<%d>\t: %s :", cpuid(), hwpath);
4382 printk("PCIBR_DEBUG\t: %s :", hwpath);
4385 * Kernel printk translates to this 3 line sequence.
4386 * Since we have a variable length argument list, we
4387 * need to call printk this way rather than directly
4389 va_start(ap, format);
4397 isIO9(nasid_t nasid) {
4398 lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
4401 if (brd->brd_flags & LOCAL_MASTER_IO6) {
4404 brd = KLCF_NEXT(brd);
4406 /* if it's dual ported, check the peer also */
4407 nasid = NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer;
4408 if (nasid < 0) return 0;
4409 brd = (lboard_t *)KL_CONFIG_INFO(nasid);
4411 if (brd->brd_flags & LOCAL_MASTER_IO6) {
4414 brd = KLCF_NEXT(brd);