3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
7 * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <asm/sn/sgi.h>
14 #include <asm/sn/sn_cpuid.h>
15 #include <asm/sn/addrs.h>
16 #include <asm/sn/arch.h>
17 #include <asm/sn/iograph.h>
18 #include <asm/sn/invent.h>
19 #include <asm/sn/hcl.h>
20 #include <asm/sn/labelcl.h>
21 #include <asm/sn/xtalk/xwidget.h>
22 #include <asm/sn/pci/bridge.h>
23 #include <asm/sn/pci/pciio.h>
24 #include <asm/sn/pci/pcibr.h>
25 #include <asm/sn/pci/pcibr_private.h>
26 #include <asm/sn/pci/pci_defs.h>
27 #include <asm/sn/prio.h>
28 #include <asm/sn/xtalk/xbow.h>
29 #include <asm/sn/ioc3.h>
30 #include <asm/sn/eeprom.h>
31 #include <asm/sn/io.h>
32 #include <asm/sn/sn_private.h>
/*
 * Alias the legacy "rmap" resource-map allocator names onto the ATE
 * (Address Translation Entry) map routines used by this driver.
 */
35 #define rmallocmap atemapalloc
36 #define rmfreemap atemapfree
37 #define rmfree atefree
38 #define rmalloc atealloc
42 * global variables to toggle the different levels of pcibr debugging.
43 * -pcibr_debug_mask is the mask of the different types of debugging
44 * you want to enable. See sys/PCI/pcibr_private.h
45 * -pcibr_debug_module is the module you want to trace. By default
46 * all modules are traced. For IP35 this value has the format of
47 * something like "001c10". For IP27 this value is a node number,
48 * i.e. "1", "2"... For IP30 this is undefined and should be set to
50 * -pcibr_debug_widget is the widget you want to trace. For IP27
51 * the widget isn't exposed in the hwpath so use the xio slot num.
52 * i.e. for 'io2' set pcibr_debug_widget to "2".
53 * -pcibr_debug_slot is the pci slot you want to trace.
/* Runtime-tunable pcibr debug controls; their semantics are described
 * in the block comment above. */
55 uint32_t pcibr_debug_mask = 0x0; /* 0x00000000 to disable */
56 char *pcibr_debug_module = "all"; /* 'all' for all modules */
57 int pcibr_debug_widget = -1; /* '-1' for all widgets */
58 int pcibr_debug_slot = -1; /* '-1' for all slots */
61 * Macros related to the Lucent USS 302/312 usb timeout workaround. It
62 * appears that the Lucent part can get into a retry loop if it sees a
63 * DAC on the bus during a pio read retry. The loop is broken after about
64 * 1ms, so we need to set up bridges holding this part to allow at least
68 #define USS302_TIMEOUT_WAR
70 #ifdef USS302_TIMEOUT_WAR
/* PCI vendor/device IDs identifying the Lucent USS-302/312 USB host
 * controllers, plus the bridge timeout hold value applied as the
 * workaround (see comment above). */
71 #define LUCENT_USBHC_VENDOR_ID_NUM 0x11c1
72 #define LUCENT_USBHC302_DEVICE_ID_NUM 0x5801
73 #define LUCENT_USBHC312_DEVICE_ID_NUM 0x5802
74 #define USS302_BRIDGE_TIMEOUT_HLD 4
/* Driver flags; D_MP presumably marks the driver multiprocessor-safe
 * -- TODO confirm against local driver-flag conventions. */
77 int pcibr_devflag = D_MP;
80 * This is the file operation table for the pcibr driver.
81 * As each of the functions are implemented, put the
82 * appropriate function name below.
84 struct file_operations pcibr_fops = {
103 /* kbrick widgetnum-to-bus layout */
104 int p_busnum[MAX_PORT_NUM] = { /* widget# */
105 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
108 0, 0, /* 0xa - 0xb */
/* Head of the driver's list of pcibr instances (0 == empty);
 * presumably linked at attach time -- list manipulation not visible
 * in this fragment. */
116 pcibr_list_p pcibr_list = 0;
119 extern int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
120 extern int hub_device_flags_set(devfs_handle_t widget_dev, hub_widget_flags_t flags);
121 extern long atoi(register char *p);
122 extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t vhdl);
123 extern char *dev_to_name(devfs_handle_t dev, char *buf, uint buflen);
124 extern struct map *atemapalloc(uint64_t);
125 extern void atefree(struct map *, size_t, uint64_t);
126 extern void atemapfree(struct map *);
127 extern pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
128 extern void free_pciio_dmamap(pcibr_dmamap_t);
129 extern void xwidget_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
131 #define ATE_WRITE() ate_write(pcibr_soft, ate_ptr, ate_count, ate)
132 #if PCIBR_FREEZE_TIME
133 #define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
135 #define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
136 #endif /* PCIBR_FREEZE_TIME */
138 #if PCIBR_FREEZE_TIME
139 #define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
141 #define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
144 /* =====================================================================
145 * Function Table of Contents
147 * The order of functions in this file has stopped
148 * making much sense. We might want to take a look
149 * at it some time and bring back some sanity, or
150 * perhaps bust this file into smaller chunks.
153 extern int do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
154 extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
156 extern int pcibr_wrb_flush(devfs_handle_t);
157 extern int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
158 extern void pcibr_rrb_flush(devfs_handle_t);
160 static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
161 void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
163 extern void pcibr_setwidint(xtalk_intr_t);
164 extern void pcibr_clearwidint(bridge_t *);
166 extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
167 pciio_space_t, int, int, int);
169 void pcibr_init(void);
170 int pcibr_attach(devfs_handle_t);
171 int pcibr_attach2(devfs_handle_t, bridge_t *, devfs_handle_t,
172 int, pcibr_soft_t *);
173 int pcibr_detach(devfs_handle_t);
174 int pcibr_open(devfs_handle_t *, int, int, cred_t *);
175 int pcibr_close(devfs_handle_t, int, int, cred_t *);
176 int pcibr_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
177 int pcibr_unmap(devfs_handle_t, vhandl_t *);
178 int pcibr_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
179 int pcibr_pcix_rbars_calc(pcibr_soft_t);
180 extern int pcibr_init_ext_ate_ram(bridge_t *);
181 extern int pcibr_ate_alloc(pcibr_soft_t, int);
182 extern void pcibr_ate_free(pcibr_soft_t, int, int);
183 extern int pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl);
185 extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
186 #if PCIBR_FREEZE_TIME
187 unsigned *freeze_time_ptr,
190 extern void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
191 extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
192 #if PCIBR_FREEZE_TIME
195 unsigned freeze_time_start,
200 pcibr_info_t pcibr_info_get(devfs_handle_t);
202 static iopaddr_t pcibr_addr_pci_to_xio(devfs_handle_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
204 pcibr_piomap_t pcibr_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
205 void pcibr_piomap_free(pcibr_piomap_t);
206 caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
207 void pcibr_piomap_done(pcibr_piomap_t);
208 caddr_t pcibr_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
209 iopaddr_t pcibr_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
210 void pcibr_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
212 static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
213 extern bridge_ate_t pcibr_flags_to_ate(unsigned);
215 pcibr_dmamap_t pcibr_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
216 void pcibr_dmamap_free(pcibr_dmamap_t);
217 extern bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
218 static iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
219 iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
220 alenlist_t pcibr_dmamap_list(pcibr_dmamap_t, alenlist_t, unsigned);
221 void pcibr_dmamap_done(pcibr_dmamap_t);
222 cnodeid_t pcibr_get_dmatrans_node(devfs_handle_t);
223 iopaddr_t pcibr_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
224 alenlist_t pcibr_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
225 void pcibr_dmamap_drain(pcibr_dmamap_t);
226 void pcibr_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
227 void pcibr_dmalist_drain(devfs_handle_t, alenlist_t);
228 iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
230 extern unsigned pcibr_intr_bits(pciio_info_t info,
231 pciio_intr_line_t lines, int nslots);
232 extern pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
233 extern void pcibr_intr_free(pcibr_intr_t);
234 extern void pcibr_setpciint(xtalk_intr_t);
235 extern int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
236 extern void pcibr_intr_disconnect(pcibr_intr_t);
238 extern devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
239 extern void pcibr_intr_func(intr_arg_t);
241 extern void print_bridge_errcmd(uint32_t, char *);
243 extern void pcibr_error_dump(pcibr_soft_t);
244 extern uint32_t pcibr_errintr_group(uint32_t);
245 extern void pcibr_pioerr_check(pcibr_soft_t);
246 extern void pcibr_error_intr_handler(int, void *, struct pt_regs *);
248 extern int pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
249 extern void pcibr_error_cleanup(pcibr_soft_t, int);
250 extern void pcibr_device_disable(pcibr_soft_t, int);
251 extern int pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
252 extern int pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
253 extern int pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
254 extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
255 extern int pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
256 void pcibr_provider_startup(devfs_handle_t);
257 void pcibr_provider_shutdown(devfs_handle_t);
259 int pcibr_reset(devfs_handle_t);
260 pciio_endian_t pcibr_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
261 int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
262 pciio_priority_t pcibr_priority_set(devfs_handle_t, pciio_priority_t);
263 int pcibr_device_flags_set(devfs_handle_t, pcibr_device_flags_t);
265 extern cfg_p pcibr_config_addr(devfs_handle_t, unsigned);
266 extern uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
267 extern void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
269 extern pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
270 extern void pcibr_hints_fix_rrbs(devfs_handle_t);
271 extern void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
272 extern void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
273 extern void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
274 extern void pcibr_hints_handsoff(devfs_handle_t);
275 extern void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
277 extern int pcibr_slot_reset(devfs_handle_t,pciio_slot_t);
278 extern int pcibr_slot_info_init(devfs_handle_t,pciio_slot_t);
279 extern int pcibr_slot_info_free(devfs_handle_t,pciio_slot_t);
280 extern int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
281 pcibr_slot_info_resp_t);
282 extern void pcibr_slot_func_info_return(pcibr_info_h, int,
283 pcibr_slot_func_info_resp_t);
284 extern int pcibr_slot_addr_space_init(devfs_handle_t,pciio_slot_t);
285 extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
286 extern int pcibr_slot_device_init(devfs_handle_t, pciio_slot_t);
287 extern int pcibr_slot_guest_info_init(devfs_handle_t,pciio_slot_t);
288 extern int pcibr_slot_call_device_attach(devfs_handle_t,
290 extern int pcibr_slot_call_device_detach(devfs_handle_t,
292 extern int pcibr_slot_attach(devfs_handle_t, pciio_slot_t, int,
294 extern int pcibr_slot_detach(devfs_handle_t, pciio_slot_t, int,
296 extern int pcibr_is_slot_sys_critical(devfs_handle_t, pciio_slot_t);
298 extern int pcibr_slot_initial_rrb_alloc(devfs_handle_t, pciio_slot_t);
299 extern int pcibr_initial_rrb(devfs_handle_t, pciio_slot_t, pciio_slot_t);
302 /* =====================================================================
303 * Device(x) register management
306 /* pcibr_try_set_device: attempt to modify Device(x)
307 * for the specified slot on the specified bridge
308 * as requested in flags, limited to the specified
309 * bits. Returns which BRIDGE bits were in conflict,
310 * or ZERO if everything went OK.
312 * Note: this function acquires pcibr_lock itself; the caller must
312 * not already hold it.
/*
 * pcibr_try_set_device: attempt to modify the Device(x) register for
 * 'slot', limited to the bits in 'mask', as requested by 'flags'.
 * Returns the BRIDGE bits that were in conflict with other users of
 * the slot, or ZERO if the register was updated successfully.
 * Note: pcibr_lock is acquired inside this function (see below).
 */
315 pcibr_try_set_device(pcibr_soft_t pcibr_soft,
321     pcibr_soft_slot_t slotp;
/* On XBridge/PIC, the PMU and D64 bit groups have wider equivalents. */
334     if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
335 if (mask == BRIDGE_DEV_PMU_BITS)
336 xmask = XBRIDGE_DEV_PMU_BITS;
337 if (mask == BRIDGE_DEV_D64_BITS)
338 xmask = XBRIDGE_DEV_D64_BITS;
341 slotp = &pcibr_soft->bs_slot[slot];
343 s = pcibr_lock(pcibr_soft);
345 bridge = pcibr_soft->bs_base;
347 old = slotp->bss_device;
349 /* figure out what the desired
350 * Device(x) bits are based on
351 * the flags specified.
356 /* Currently, we inherit anything that
357 * the new caller has not specified in
358 * one way or another, unless we take
359 * action here to not inherit.
361 * This is needed for the "swap" stuff,
362 * since it could have been set via
363 * pcibr_endian_set -- altho note that
364 * any explicit PCIBR_BYTE_STREAM or
365 * PCIBR_WORD_VALUES will freely override
366 * the effect of that call (and vice
367 * versa, no protection either way).
369 * I want to get rid of pcibr_endian_set
370 * in favor of tracking DMA endianness
371 * using the flags specified when DMA
372 * channels are created.
375 #define BRIDGE_DEV_WRGA_BITS (BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
376 #define BRIDGE_DEV_SWAP_BITS (BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
378 /* Do not use Barrier, Write Gather,
379 * or Prefetch unless asked.
380 * Leave everything else as it
381 * was from the last time.
384 & ~BRIDGE_DEV_BARRIER
385 & ~BRIDGE_DEV_WRGA_BITS
389 /* Generic macro flags
391 if (flags & PCIIO_DMA_DATA) {
393 & ~BRIDGE_DEV_BARRIER) /* barrier off */
394 | BRIDGE_DEV_PREF; /* prefetch on */
397 if (flags & PCIIO_DMA_CMD) {
399 & ~BRIDGE_DEV_PREF) /* prefetch off */
400 & ~BRIDGE_DEV_WRGA_BITS) /* write gather off */
401 | BRIDGE_DEV_BARRIER; /* barrier on */
403 /* Generic detail flags
405 if (flags & PCIIO_WRITE_GATHER)
406 new |= BRIDGE_DEV_WRGA_BITS;
407 if (flags & PCIIO_NOWRITE_GATHER)
408 new &= ~BRIDGE_DEV_WRGA_BITS;
410 if (flags & PCIIO_PREFETCH)
411 new |= BRIDGE_DEV_PREF;
412 if (flags & PCIIO_NOPREFETCH)
413 new &= ~BRIDGE_DEV_PREF;
415 if (flags & PCIBR_WRITE_GATHER)
416 new |= BRIDGE_DEV_WRGA_BITS;
417 if (flags & PCIBR_NOWRITE_GATHER)
418 new &= ~BRIDGE_DEV_WRGA_BITS;
420 if (flags & PCIIO_BYTE_STREAM)
421 new |= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ?
422 BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
423 if (flags & PCIIO_WORD_VALUES)
424 new &= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ?
425 ~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;
427 /* Provider-specific flags
429 if (flags & PCIBR_PREFETCH)
430 new |= BRIDGE_DEV_PREF;
431 if (flags & PCIBR_NOPREFETCH)
432 new &= ~BRIDGE_DEV_PREF;
434 if (flags & PCIBR_PRECISE)
435 new |= BRIDGE_DEV_PRECISE;
436 if (flags & PCIBR_NOPRECISE)
437 new &= ~BRIDGE_DEV_PRECISE;
439 if (flags & PCIBR_BARRIER)
440 new |= BRIDGE_DEV_BARRIER;
441 if (flags & PCIBR_NOBARRIER)
442 new &= ~BRIDGE_DEV_BARRIER;
444 if (flags & PCIBR_64BIT)
445 new |= BRIDGE_DEV_DEV_SIZE;
446 if (flags & PCIBR_NO64BIT)
447 new &= ~BRIDGE_DEV_DEV_SIZE;
450 * PIC BRINGUP WAR (PV# 855271):
451 * Allow setting BRIDGE_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
452 * device. The bit is only intended for 64-bit devices and, on
453 * PIC, can cause problems for 32-bit devices.
455 if (IS_PIC_SOFT(pcibr_soft) && mask == BRIDGE_DEV_D64_BITS &&
456 PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
457 if (flags & PCIBR_VCHAN1) {
458 new |= BRIDGE_DEV_VIRTUAL_EN;
459 xmask |= BRIDGE_DEV_VIRTUAL_EN;
/* Determine which of the interesting bits this request would change,
 * then see whether any existing user of the slot objects. */
464 chg = old ^ new; /* what are we changing, */
465 chg &= xmask; /* of the interesting bits */
469 badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
470 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
471 badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
472 badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
474 badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
475 badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
477 bad = badpmu | badd32 | badd64;
481 /* some conflicts can be resolved by
482 * forcing the bit on. this may cause
483 * some performance degradation in
484 * the stream(s) that want the bit off,
485 * but the alternative is not allowing
486 * the new stream at all.
488 if ( (fix = bad & (BRIDGE_DEV_PRECISE |
489 BRIDGE_DEV_BARRIER)) ) {
491 /* don't change these bits if
492 * they are already set in "old"
496 /* some conflicts can be resolved by
497 * forcing the bit off. this may cause
498 * some performance degradation in
499 * the stream(s) that want the bit on,
500 * but the alternative is not allowing
501 * the new stream at all.
503 if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
504 BRIDGE_DEV_PREF)) ) {
506 /* don't change these bits if
507 * we wanted to turn them on.
511 /* conflicts in other bits mean
512 * we can not establish this DMA
513 * channel while the other(s) are
517 pcibr_unlock(pcibr_soft, s);
519 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
520 "pcibr_try_set_device: mod blocked by %x\n",
/* No unresolvable conflict: account for the new user of the slot. */
527 if (mask == BRIDGE_DEV_PMU_BITS)
528 slotp->bss_pmu_uctr++;
529 if (mask == BRIDGE_DEV_D32_BITS)
530 slotp->bss_d32_uctr++;
531 if (mask == BRIDGE_DEV_D64_BITS)
532 slotp->bss_d64_uctr++;
534 /* the value we want to write is the
535 * original value, with the bits for
536 * our selected changes flipped, and
537 * with any disabled features turned off.
539 new = old ^ chg; /* only change what we want to change */
541 if (slotp->bss_device == new) {
542 pcibr_unlock(pcibr_soft, s);
/* Write Device(x) out to the hardware; register access width and
 * byte order depend on the ASIC (PIC vs. swapped/unswapped Bridge). */
545 if ( IS_PIC_SOFT(pcibr_soft) ) {
546 bridge->b_device[slot].reg = new;
547 slotp->bss_device = new;
548 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
551 if (io_get_sh_swapper(NASID_GET(bridge))) {
552 BRIDGE_REG_SET32((&bridge->b_device[slot].reg)) = __swab32(new);
553 slotp->bss_device = new;
554 BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
556 bridge->b_device[slot].reg = new;
557 slotp->bss_device = new;
558 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
561 pcibr_unlock(pcibr_soft, s);
564 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
565 "pcibr_try_set_device: Device(%d): %x\n",
566 slot, new, device_bits));
568 printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
/*
 * pcibr_release_device: drop the per-slot use count matching the DMA
 * channel class in 'mask' (PMU, D32 or D64), under pcibr_lock.
 * Counterpart of the use-count increments in pcibr_try_set_device.
 */
574 pcibr_release_device(pcibr_soft_t pcibr_soft,
578 pcibr_soft_slot_t slotp;
581 slotp = &pcibr_soft->bs_slot[slot];
583 s = pcibr_lock(pcibr_soft);
585 if (mask == BRIDGE_DEV_PMU_BITS)
586 slotp->bss_pmu_uctr--;
587 if (mask == BRIDGE_DEV_D32_BITS)
588 slotp->bss_d32_uctr--;
589 if (mask == BRIDGE_DEV_D64_BITS)
590 slotp->bss_d64_uctr--;
592 pcibr_unlock(pcibr_soft, s);
596 * flush write gather buffer for slot
/*
 * Flush the write gather buffer for 'slot' by reading the slot's
 * write-request buffer register under pcibr_lock; the register access
 * form depends on the ASIC (PIC vs. swapped/unswapped Bridge).
 */
599 pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
604 volatile uint32_t wrf;
605 s = pcibr_lock(pcibr_soft);
606 bridge = pcibr_soft->bs_base;
608 if ( IS_PIC_SOFT(pcibr_soft) ) {
609 wrf = bridge->b_wr_req_buf[slot].reg;
612 if (io_get_sh_swapper(NASID_GET(bridge))) {
613 wrf = BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
615 wrf = bridge->b_wr_req_buf[slot].reg;
618 pcibr_unlock(pcibr_soft, s);
621 /* =====================================================================
622 * Bridge (pcibr) "Device Driver" entry points
627 * pcibr_init: called once during system startup or
628 * when a loadable driver is loaded.
630 * The driver_register function should normally
631 * be in _reg, not _init. But the pcibr driver is
632 * required by devinit before the _reg routines
633 * are called, so this is an exception.
638 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INIT, NULL, "pcibr_init()\n"));
640 xwidget_driver_register(XBRIDGE_WIDGET_PART_NUM,
641 XBRIDGE_WIDGET_MFGR_NUM,
644 xwidget_driver_register(BRIDGE_WIDGET_PART_NUM,
645 BRIDGE_WIDGET_MFGR_NUM,
651 * open/close mmap/munmap interface would be used by processes
652 * that plan to map the PCI bridge, and muck around with the
653 * registers. This is dangerous to do, and will be allowed
654 * to a select brand of programs. Typically these are
655 * diagnostics programs, or some user level commands we may
656 * write to do some weird things.
657 * To start with expect them to have root privileges.
658 * We will ask for more later.
662 pcibr_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
669 pcibr_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
/*
 * pcibr_map: mmap entry point.  Maps the Bridge register space at
 * byte offset 'off' into the caller's address space via v_mapphys();
 * on a Bridge ASIC, a successful mapping that covers the external
 * flash prom also enables flash writes (legacy behavior -- see the
 * XXX note below about the real flash driver).
 */
676 pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
679 devfs_handle_t vhdl = dev_to_vhdl(dev);
680 devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
681 pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
682 bridge_t *bridge = pcibr_soft->bs_base;
684 hwgraph_vertex_unref(pcibr_vhdl);
687 len = ctob(btoc(len)); /* Make len page aligned */
688 error = v_mapphys(vt, (void *) ((__psunsigned_t) bridge + off), len);
691 * If the offset being mapped corresponds to the flash prom
692 * base, and if the mapping succeeds, and if the user
693 * has requested the protections to be WRITE, enable the
694 * flash prom to be written.
696 * XXX- deprecate this in favor of using the
697 * real flash driver ...
699 if (IS_BRIDGE_SOFT(pcibr_soft) && !error &&
700 ((off == BRIDGE_EXTERNAL_FLASH) ||
701 (len > BRIDGE_EXTERNAL_FLASH))) {
705 * ensure that we write and read without any interruption.
706 * The read following the write is required for the Bridge war
710 if (io_get_sh_swapper(NASID_GET(bridge))) {
711 BRIDGE_REG_SET32((&bridge->b_wid_control)) |= __swab32(BRIDGE_CTRL_FLASH_WR_EN);
712 BRIDGE_REG_GET32((&bridge->b_wid_control)); /* inval addr bug war */
714 bridge->b_wid_control |= BRIDGE_CTRL_FLASH_WR_EN;
715 bridge->b_wid_control; /* inval addr bug war */
/*
 * pcibr_unmap: munmap entry point.  If flash prom writing was enabled
 * by pcibr_map, turn BRIDGE_CTRL_FLASH_WR_EN back off (with the
 * read-after-write required by the Bridge workaround).
 */
724 pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
726 devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t) dev);
727 pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
728 bridge_t *bridge = pcibr_soft->bs_base;
730 hwgraph_vertex_unref(pcibr_vhdl);
732 if ( IS_PIC_SOFT(pcibr_soft) ) {
734 * If flashprom write was enabled, disable it, as
735 * this is the last unmap.
737 if (IS_BRIDGE_SOFT(pcibr_soft) &&
738 (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN)) {
742 * ensure that we write and read without any interruption.
743 * The read following the write is required for the Bridge war
746 bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
747 bridge->b_wid_control; /* inval addr bug war */
/* Non-PIC path: honor the node's byte-swapper setting. */
752 if (io_get_sh_swapper(NASID_GET(bridge))) {
753 if (BRIDGE_REG_GET32((&bridge->b_wid_control)) & BRIDGE_CTRL_FLASH_WR_EN) {
757 * ensure that we write and read without any interruption.
758 * The read following the write is required for the Bridge war
761 BRIDGE_REG_SET32((&bridge->b_wid_control)) &= __swab32((unsigned int)~BRIDGE_CTRL_FLASH_WR_EN);
762 BRIDGE_REG_GET32((&bridge->b_wid_control)); /* inval addr bug war */
765 if (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN) {
769 * ensure that we write and read without any interruption.
770 * The read following the write is required for the Bridge war
773 bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
774 bridge->b_wid_control; /* inval addr bug war */
783 /* This is special case code used by grio. There are plans to make
784 * this a bit more general in the future, but till then this should
/*
 * Walk back up the hwgraph from dev_vhdl, following connection
 * points, until a vertex carrying pciio info is found; return its
 * internal slot number, or PCIIO_SLOT_NONE if no PCI connection
 * point exists on the path.
 */
788 pcibr_device_slot_get(devfs_handle_t dev_vhdl)
790 char devname[MAXDEVNAME];
792 pciio_info_t pciio_info;
793 pciio_slot_t slot = PCIIO_SLOT_NONE;
795 vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
797 /* run back along the canonical path
798 * until we find a PCI connection point.
800 tdev = hwgraph_connectpt_get(dev_vhdl);
801 while (tdev != GRAPH_VERTEX_NONE) {
802 pciio_info = pciio_info_chk(tdev);
804 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
807 hwgraph_vertex_unref(tdev);
808 tdev = hwgraph_connectpt_get(tdev);
810 hwgraph_vertex_unref(tdev);
817 pcibr_ioctl(devfs_handle_t dev,
/* Return the pcibr_info for a PCI connection vertex; a thin cast
 * around the generic pciio_info_get(). */
828 pcibr_info_get(devfs_handle_t vhdl)
830 return (pcibr_info_t) pciio_info_get(vhdl);
/*
 * Build a pcibr_info for (slot, rfunc, vendor, device): register it
 * with the generic pciio layer (translating the internal slot number
 * to its external form), record the PCI bus number, assign the
 * default interrupt-bit routing for the slot, and store the info in
 * the slot's sparse function-info array.
 */
834 pcibr_device_info_new(
835 pcibr_soft_t pcibr_soft,
837 pciio_function_t rfunc,
838 pciio_vendor_id_t vendor,
839 pciio_device_id_t device)
841 pcibr_info_t pcibr_info;
842 pciio_function_t func;
/* PCIIO_FUNC_NONE maps to function 0 for indexing purposes. */
845 func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
848 * Create a pciio_info_s for this device. pciio_device_info_new()
849 * will set the c_slot (which is supposed to represent the external
850 * slot (i.e the slot number silk screened on the back of the I/O
851 * brick)). So for PIC we need to adjust this "internal slot" num
852 * passed into us, into its external representation. See comment
853 * for the PCIBR_DEVICE_TO_SLOT macro for more information.
856 pciio_device_info_new(&pcibr_info->f_c, pcibr_soft->bs_vhdl,
857 PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
858 rfunc, vendor, device);
859 pcibr_info->f_dev = slot;
861 /* Set PCI bus number */
862 pcibr_info->f_bus = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
864 if (slot != PCIIO_SLOT_NONE) {
867 * Currently favored mapping from PCI
868 * slot number and INTA/B/C/D to Bridge
869 * PCI Interrupt Bit Number:
881 * XXX- allow pcibr_hints to override default
882 * XXX- allow ADMIN to override pcibr_hints
884 for (ibit = 0; ibit < 4; ++ibit)
885 pcibr_info->f_ibit[ibit] =
886 (slot + 4 * ibit) & 7;
889 * Record the info in the sparse func info space.
891 if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
892 pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
899 * pcibr_device_unregister
900 * This frees up any hardware resources reserved for this PCI device
901 * and removes any PCI infrastructural information setup for it.
902 * This is usually used at the time of shutting down of the PCI card.
905 pcibr_device_unregister(devfs_handle_t pconn_vhdl)
907 pciio_info_t pciio_info;
908 devfs_handle_t pcibr_vhdl;
910 pcibr_soft_t pcibr_soft;
912 int count_vchan0, count_vchan1;
/* Resolve the slot and owning bus (master vertex) for this device. */
917 pciio_info = pciio_info_get(pconn_vhdl);
919 pcibr_vhdl = pciio_info_master_get(pciio_info);
920 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
922 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
923 bridge = pcibr_soft->bs_base;
925 /* Clear all the hardware xtalk resources for this device */
926 xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
928 /* Flush all the rrbs */
929 pcibr_rrb_flush(pconn_vhdl);
932 * If the RRB configuration for this slot has changed, set it
933 * back to the boot-time default
935 if (pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] >= 0) {
937 s = pcibr_lock(pcibr_soft);
939 /* PIC NOTE: If this is a BRIDGE, VCHAN2 & VCHAN3 will be zero so
940 * no need to conditionalize this (ie. "if (IS_PIC_SOFT())" ).
942 pcibr_soft->bs_rrb_res[slot] = pcibr_soft->bs_rrb_res[slot] +
943 pcibr_soft->bs_rrb_valid[slot][VCHAN0] +
944 pcibr_soft->bs_rrb_valid[slot][VCHAN1] +
945 pcibr_soft->bs_rrb_valid[slot][VCHAN2] +
946 pcibr_soft->bs_rrb_valid[slot][VCHAN3];
948 /* Free the rrbs allocated to this slot, both the normal & virtual */
949 do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);
951 count_vchan0 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0];
952 count_vchan1 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN1];
954 pcibr_unlock(pcibr_soft, s);
/* Re-allocate the boot-time default RRB counts for both vchans. */
956 pcibr_rrb_alloc(pconn_vhdl, &count_vchan0, &count_vchan1);
960 /* Flush the write buffers !! */
961 error_call = pcibr_wrb_flush(pconn_vhdl);
966 /* Clear the information specific to the slot */
967 error_call = pcibr_slot_info_free(pcibr_vhdl, slot);
977 * pcibr_driver_reg_callback
978 * CDL will call this function for each device found in the PCI
979 * registry that matches the vendor/device IDs supported by
980 * the driver being registered. The device's connection vertex
981 * and the driver's attach function return status enable the
982 * slot's device status to be set.
985 pcibr_driver_reg_callback(devfs_handle_t pconn_vhdl,
986 int key1, int key2, int error)
988 pciio_info_t pciio_info;
989 pcibr_info_t pcibr_info;
990 devfs_handle_t pcibr_vhdl;
992 pcibr_soft_t pcibr_soft;
994 /* Do not set slot status for vendor/device ID wildcard drivers */
995 if ((key1 == -1) || (key2 == -1))
998 pciio_info = pciio_info_get(pconn_vhdl);
999 pcibr_info = pcibr_info_get(pconn_vhdl);
1001 pcibr_vhdl = pciio_info_master_get(pciio_info);
1002 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1004 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
1007 /* This may be a loadable driver so lock out any pciconfig actions */
1008 mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
/* Remember the attach result so it can be reported later. */
1011 pcibr_info->f_att_det_error = error;
/* Replace any previous status bits with the new startup result. */
1013 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
1016 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
1018 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
1022 /* Release the bus lock */
1023 mrunlock(pcibr_soft->bs_bus_lock);
1028 * pcibr_driver_unreg_callback
1029 * CDL will call this function for each device found in the PCI
1030 * registry that matches the vendor/device IDs supported by
1031 * the driver being unregistered. The device's connection vertex
1032 * and the driver's detach function return status enable the
1033 * slot's device status to be set.
1036 pcibr_driver_unreg_callback(devfs_handle_t pconn_vhdl,
1037 int key1, int key2, int error)
1039 pciio_info_t pciio_info;
1040 pcibr_info_t pcibr_info;
1041 devfs_handle_t pcibr_vhdl;
1043 pcibr_soft_t pcibr_soft;
1045 /* Do not set slot status for vendor/device ID wildcard drivers */
1046 if ((key1 == -1) || (key2 == -1))
1049 pciio_info = pciio_info_get(pconn_vhdl);
1050 pcibr_info = pcibr_info_get(pconn_vhdl);
1052 pcibr_vhdl = pciio_info_master_get(pciio_info);
1053 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1055 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
1058 /* This may be a loadable driver so lock out any pciconfig actions */
1059 mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
/* Remember the detach result so it can be reported later. */
1062 pcibr_info->f_att_det_error = error;
/* Replace any previous status bits with the new shutdown result. */
1064 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
1067 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
1069 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
1073 /* Release the bus lock */
1074 mrunlock(pcibr_soft->bs_bus_lock);
1079 * build a convenience link path in the
1080 * form of ".../<iobrick>/bus/<busnum>"
1082 * returns 1 on success, 0 otherwise
1084 * depends on hwgraph separator == '/'
1087 pcibr_bus_cnvlink(devfs_handle_t f_c)
1089 char dst[MAXDEVNAME];
1094 devfs_handle_t nvtx, svtx;
1097 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
1099 if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
1102 /* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
1104 /* find the widget number */
1105 xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
/* xp+7 skips "/xtalk/" to reach the widget number digits. */
1108 widgetnum = atoi(xp+7);
1109 if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
1112 /* remove "/pci/direct" from path */
1113 cp = strstr(dst, "/" EDGE_LBL_PCI "/" EDGE_LBL_DIRECT);
1118 /* get the vertex for the widget */
1119 if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))
1122 *xp = (char)NULL; /* remove "/xtalk/..." from path */
1124 /* dst example now == /hw/module/001c02/Pbrick */
1126 /* get the bus number */
1128 strcat(dst, EDGE_LBL_BUS);
1129 sprintf(pcibus, "%d", p_busnum[widgetnum]);
1131 /* link the bus number edge to the widget vertex */
1132 rv = hwgraph_path_add(NULL, dp, &nvtx);
1133 if (GRAPH_SUCCESS == rv)
1134 rv = hwgraph_edge_add(nvtx, svtx, pcibus);
1136 return (rv == GRAPH_SUCCESS);
1141 * pcibr_attach: called every time the crosstalk
1142 * infrastructure is asked to initialize a widget
1143 * that matches the part number we handed to the
1144 * registration routine above.
1148 pcibr_attach(devfs_handle_t xconn_vhdl)
1152 devfs_handle_t pcibr_vhdl;
1155 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
/* Get a PIO mapping to the bridge's register space. */
1157 bridge = (bridge_t *)
1158 xtalk_piotrans_addr(xconn_vhdl, NULL,
1159 0, sizeof(bridge_t), 0);
1161 * Create the vertex for the PCI bus, which we
1162 * will also use to hold the pcibr_soft and
1163 * which will be the "master" vertex for all the
1164 * pciio connection points we will hang off it.
1165 * This needs to happen before we call nic_bridge_vertex_info
1166 * as some of the *_vmc functions need access to the edges.
1168 * Opening this vertex will provide access to
1169 * the Bridge registers themselves.
1171 rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
1172 ASSERT(rc == GRAPH_SUCCESS);
1174 pciio_provider_register(pcibr_vhdl, &pcibr_provider);
1175 pciio_provider_startup(pcibr_vhdl);
/* Delegate the bulk of the work to pcibr_attach2 (bus 0, no softp). */
1177 return pcibr_attach2(xconn_vhdl, bridge, pcibr_vhdl, 0, NULL);
/*
 * pcibr_attach2: second-stage attach shared by Bridge, XBridge and PIC.
 * Allocates and populates the pcibr_soft state for 'pcibr_vhdl', programs
 * the bridge hardware (requester ID, direct map, IO page size, ATE RAM,
 * interrupt enables, rev-specific workarounds), builds per-slot software
 * state, and attaches every device found on the bus.
 * 'busnum' selects which of PIC's two buses this is; 'ret_softp', when
 * non-NULL, receives the allocated pcibr_soft pointer.
 */
1183 pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
1184 devfs_handle_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
1187 devfs_handle_t ctlr_vhdl;
1190 pcibr_soft_t pcibr_soft;
1191 pcibr_info_t pcibr_info;
1192 xwidget_info_t info;
1193 xtalk_intr_t xtalk_intr;
1196 devfs_handle_t noslot_conn;
1197 char devnm[MAXDEVNAME], *s;
1198 pcibr_hints_t pcibr_hints;
1199 uint64_t int_enable;
1200 bridgereg_t int_enable_32;
1201 picreg_t int_enable_64;
1202 unsigned rrb_fixed = 0;
1207 int fast_back_to_back_enable;
/* local prototypes for the brick-identification helpers used below */
1210 int iobrick_type_get_nasid(nasid_t nasid);
1211 int iobrick_module_get_nasid(nasid_t nasid);
1212 extern unsigned char Is_pic_on_this_nasid[512];
1215 async_attach_t aa = NULL;
1217 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1218 "pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
1220 aa = async_attach_get_info(xconn_vhdl);
/* Register the per-bus "controller" character-device vertex under the
 * PCI vertex. */
1223 ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER,
1224 0, DEVFS_FL_AUTO_DEVNUM,
1226 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
1229 ASSERT(ctlr_vhdl != NULL);
1232 * Get the hint structure; if some NIC callback
1233 * marked this vertex as "hands-off" then we
1234 * just return here, before doing anything else.
1236 pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
1238 if (pcibr_hints && pcibr_hints->ph_hands_off)
1239 return -1; /* generic operations disabled */
/* Read the widget ID register; extract the ASIC revision and publish it
 * as labelled info on the vertex (consumed by pcibr_asic_rev()). */
1241 id = bridge->b_wid_id;
1242 rev = XWIDGET_PART_REV_NUM(id);
1244 hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
1247 * allocate soft state structure, fill in some
1248 * fields, and hook it up to our vertex.
1252 *ret_softp = pcibr_soft;
1253 BZERO(pcibr_soft, sizeof *pcibr_soft);
1254 pcibr_soft_set(pcibr_vhdl, pcibr_soft);
1255 pcibr_soft->bs_conn = xconn_vhdl;
1256 pcibr_soft->bs_vhdl = pcibr_vhdl;
1257 pcibr_soft->bs_base = bridge;
1258 pcibr_soft->bs_rev_num = rev;
1259 pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
1261 pcibr_soft->bs_min_slot = 0; /* lowest possible slot# */
1262 pcibr_soft->bs_max_slot = 7; /* highest possible slot# */
1263 pcibr_soft->bs_busnum = busnum;
/* Classify the ASIC; the switch below then applies type-specific setup. */
1264 if (is_xbridge(bridge)) {
1265 pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_XBRIDGE;
1266 } else if (is_pic(bridge)) {
1267 pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
1269 pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_BRIDGE;
1271 switch(pcibr_soft->bs_bridge_type) {
1272 case PCIBR_BRIDGETYPE_BRIDGE:
1273 pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
1274 pcibr_soft->bs_bridge_mode = 0; /* speed is not available in bridge */
1276 case PCIBR_BRIDGETYPE_PIC:
/* PIC exposes only 4 physical slots per bus */
1277 pcibr_soft->bs_min_slot = 0;
1278 pcibr_soft->bs_max_slot = 3;
1279 pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
/* bus mode = PCI-X speed and active bits from the 64-bit status reg */
1280 pcibr_soft->bs_bridge_mode =
1281 (((bridge->p_wid_stat_64 & PIC_STAT_PCIX_SPEED) >> 33) |
1282 ((bridge->p_wid_stat_64 & PIC_STAT_PCIX_ACTIVE) >> 33));
1284 /* We have to clear PIC's write request buffer to avoid parity
1285 * errors. See PV#854845.
1290 for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
1291 bridge->p_wr_req_lower[i] = 0;
1292 bridge->p_wr_req_upper[i] = 0;
1293 bridge->p_wr_req_parity[i] = 0;
1298 case PCIBR_BRIDGETYPE_XBRIDGE:
1299 pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
1300 pcibr_soft->bs_bridge_mode =
1301 ((bridge->b_wid_control & BRIDGE_CTRL_PCI_SPEED) >> 3);
1305 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1306 "pcibr_attach2: pcibr_soft=0x%x, mode=0x%x\n",
1307 pcibr_soft, pcibr_soft->bs_bridge_mode));
1308 pcibr_soft->bsi_err_intr = 0;
1310 /* Bridges up through REV C
1311 * are unable to set the direct
1312 * byteswappers to BYTE_STREAM.
1314 if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
1315 pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
1316 pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
1320 * link all the pcibr_soft structs
1326 self->bl_soft = pcibr_soft;
1327 self->bl_vhdl = pcibr_vhdl;
1328 self->bl_next = pcibr_list;
1331 #endif /* PCIBR_SOFT_LIST */
1334 * get the name of this bridge vertex and keep the info. Use this
1335 * only where it is really needed now: like error interrupts.
1337 s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
/* NOTE(review): kmalloc(GFP_KERNEL) return is not checked before the
 * strcpy below -- an OOM here would oops. */
1338 pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
1339 strcpy(pcibr_soft->bs_name, s);
1341 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1342 "pcibr_attach2: %s ASIC: rev %s (code=0x%x)\n",
1343 IS_XBRIDGE_SOFT(pcibr_soft) ? "XBridge" :
1344 IS_PIC_SOFT(pcibr_soft) ? "PIC" : "Bridge",
1345 (rev == BRIDGE_PART_REV_A) ? "A" :
1346 (rev == BRIDGE_PART_REV_B) ? "B" :
1347 (rev == BRIDGE_PART_REV_C) ? "C" :
1348 (rev == BRIDGE_PART_REV_D) ? "D" :
1349 (rev == XBRIDGE_PART_REV_A) ? "A" :
1350 (rev == XBRIDGE_PART_REV_B) ? "B" :
1351 (IS_PIC_PART_REV_A(rev)) ? "A" :
1352 "unknown", rev, pcibr_soft->bs_name));
/* Cache this widget's xtalk identity: own xid, master vertex, master xid. */
1354 info = xwidget_info_get(xconn_vhdl);
1355 pcibr_soft->bs_xid = xwidget_info_id_get(info);
1356 pcibr_soft->bs_master = xwidget_info_master_get(info);
1357 pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
1359 pcibr_soft->bs_first_slot = pcibr_soft->bs_min_slot;
1360 pcibr_soft->bs_last_slot = pcibr_soft->bs_max_slot;
1362 * Bridge can only reset slots 0, 1, 2, and 3. Ibrick internal
1363 * slots 4, 5, 6, and 7 must be reset as a group, so do not
1366 pcibr_soft->bs_last_reset = 3;
1368 nasid = NASID_GET(bridge);
1370 /* set whether it is a PIC or not */
1371 Is_pic_on_this_nasid[nasid] = (IS_PIC_SOFT(pcibr_soft)) ? 1 : 0;
1374 if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
1375 printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
1376 (unsigned int)pcibr_soft->bs_bricktype);
1378 pcibr_soft->bs_moduleid = iobrick_module_get_nasid(nasid);
/* Narrow the usable/resettable slot range per brick type. */
1380 if (pcibr_soft->bs_bricktype > 0) {
1381 switch (pcibr_soft->bs_bricktype) {
1382 case MODULE_PXBRICK:
1383 pcibr_soft->bs_first_slot = 0;
1384 pcibr_soft->bs_last_slot = 1;
1385 pcibr_soft->bs_last_reset = 1;
1387 case MODULE_PEBRICK:
1389 pcibr_soft->bs_first_slot = 1;
1390 pcibr_soft->bs_last_slot = 2;
1391 pcibr_soft->bs_last_reset = 2;
1396 * Here's the current baseio layout for SN1 style systems:
1398 * 0 1 2 3 4 5 6 7 slot#
1400 * x scsi x x ioc3 usb x x O300 Ibrick
1402 * x == never occupied
1403 * E == external (add-in) slot
1406 pcibr_soft->bs_first_slot = 1; /* Ibrick first slot == 1 */
1407 if (pcibr_soft->bs_xid == 0xe) {
1408 pcibr_soft->bs_last_slot = 2;
1409 pcibr_soft->bs_last_reset = 2;
1411 pcibr_soft->bs_last_slot = 6;
1418 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1419 "pcibr_attach2: %cbrick, slots %d-%d\n",
1420 MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid),
1421 pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
1425 * Initialize bridge and bus locks
1427 spin_lock_init(&pcibr_soft->bs_lock);
1429 mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
1432 * If we have one, process the hints structure.
1435 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
1436 "pcibr_attach2: pcibr_hints=0x%x\n", pcibr_hints));
1438 rrb_fixed = pcibr_hints->ph_rrb_fixed;
1440 pcibr_soft->bs_rrb_fixed = rrb_fixed;
1442 if (pcibr_hints->ph_intr_bits) {
1443 pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
1446 for (slot = pcibr_soft->bs_min_slot;
1447 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* ph_host_slot[] appears to be 1-based, so 0 yields hslot == -1,
 * i.e. "no host hint" -- TODO confirm against pcibr_hints layout. */
1448 int hslot = pcibr_hints->ph_host_slot[slot] - 1;
1451 pcibr_soft->bs_slot[slot].host_slot = slot;
1453 pcibr_soft->bs_slot[slot].has_host = 1;
1454 pcibr_soft->bs_slot[slot].host_slot = hslot;
1459 * Set-up initial values for state fields
1461 for (slot = pcibr_soft->bs_min_slot;
1462 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1463 pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
1464 pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
1465 pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
1466 pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
1467 pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
1468 pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
/* One interrupt-wrap record per bridge interrupt bit (8 of them). */
1471 for (ibit = 0; ibit < 8; ++ibit) {
1472 pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
1473 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
1474 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
1475 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat =
1476 &(bridge->b_int_status);
1477 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
1478 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
1479 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
1480 pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
1484 * connect up our error handler. PIC has 2 busses (thus resulting in 2
1485 * pcibr_soft structs under 1 widget), so only register a xwidget error
1486 * handler for PIC's bus0. NOTE: for PIC pcibr_error_handler_wrapper()
1487 * is a wrapper routine we register that will call the real error handler
1488 * pcibr_error_handler() with the correct pcibr_soft struct.
1490 if (IS_PIC_SOFT(pcibr_soft)) {
1492 xwidget_error_register(xconn_vhdl, pcibr_error_handler_wrapper, pcibr_soft);
1495 xwidget_error_register(xconn_vhdl, pcibr_error_handler, pcibr_soft);
1499 * Initialize various Bridge registers.
1503 * On pre-Rev.D bridges, set the PCI_RETRY_CNT
1504 * to zero to avoid dropping stores. (#475347)
1506 if (rev < BRIDGE_PART_REV_D)
1507 bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
1510 * Clear all pending interrupts.
1512 bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
1514 /* Initialize some PIC specific registers. */
1515 if (IS_PIC_SOFT(pcibr_soft)) {
1516 picreg_t pic_ctrl_reg = bridge->p_wid_control_64;
1518 /* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
1519 pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
1520 pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
1521 pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
1522 pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
1524 pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
1525 pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
1527 /* enable parity checking on PICs internal RAM */
1528 pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
1529 pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
1530 /* PIC BRINGUP WAR (PV# 862253): don't enable write request
1533 if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
1534 pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
/* write the accumulated control value back in one store */
1537 bridge->p_wid_control_64 = pic_ctrl_reg;
1541 * Until otherwise set up,
1542 * assume all interrupts are
1543 * from slot 7(Bridge/Xbridge) or 3(PIC).
1544 * XXX. Not sure why we're doing this, made change for PIC
1545 * just to avoid setting reserved bits.
1547 if (IS_PIC_SOFT(pcibr_soft))
1548 bridge->b_int_device = (uint32_t) 0x006db6db;
1550 bridge->b_int_device = (uint32_t) 0xffffffff;
/* --- inner scope: direct-map, IO page size and ATE setup --- */
1558 int num_entries = 0;
1564 devfs_handle_t node_vhdl;
1565 char vname[MAXDEVNAME];
1568 /* Set the Bridge's 32-bit PCI to XTalk
1569 * Direct Map register to the most useful
1570 * value we can determine. Note that we
1571 * must use a single xid for all of:
1572 * direct-mapped 32-bit DMA accesses
1573 * direct-mapped 64-bit DMA accesses
1574 * DMA accesses through the PMU
1576 * This is the only way to guarantee that
1577 * completion interrupts will reach a CPU
1578 * after all DMA data has reached memory.
1579 * (Of course, there may be a few special
1580 * drivers/controllers that explicitly manage
1581 * this ordering problem.)
1584 cnodeid = 0; /* default node id */
1586 * Determine the base address node id to be used for all 32-bit
1587 * Direct Mapping I/O. The default is node 0, but this can be changed
1588 * via a DEVICE_ADMIN directive and the PCIBUS_DMATRANS_NODE
1589 * attribute in the irix.sm config file. A device driver can obtain
1590 * this node value via a call to pcibr_get_dmatrans_node().
1593 // This probably needs to be addressed - pfg
1594 node_val = device_admin_info_get(pcibr_vhdl, ADMIN_LBL_DMATRANS_NODE);
1595 if (node_val != NULL) {
1596 node_vhdl = hwgraph_path_to_vertex(node_val);
1597 if (node_vhdl != GRAPH_VERTEX_NONE) {
1598 cnodeid = nodevertex_to_cnodeid(node_vhdl);
1600 if ((node_vhdl == GRAPH_VERTEX_NONE) || (cnodeid == CNODEID_NONE)) {
1602 vertex_to_name(pcibr_vhdl, vname, sizeof(vname));
1603 printk(KERN_WARNING "Invalid hwgraph node path specified:\n"
1604 " DEVICE_ADMIN: %s %s=%s\n",
1605 vname, ADMIN_LBL_DMATRANS_NODE, node_val);
1608 #endif /* PIC_LATER */
1609 nasid = COMPACT_TO_NASID_NODEID(cnodeid);
1610 paddr = NODE_OFFSET(nasid) + 0;
1612 /* currently, we just assume that if we ask
1613 * for a DMA mapping to "zero" the XIO
1614 * host will transmute this into a request
1615 * for the lowest hunk of memory.
1617 xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
1620 if (xbase != XIO_NOWHERE) {
1621 if (XIO_PACKED(xbase)) {
1622 xport = XIO_PORT(xbase);
1623 xbase = XIO_ADDR(xbase);
1625 xport = pcibr_soft->bs_mxid;
1627 offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
1628 xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
1630 dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
1633 dirmap |= BRIDGE_DIRMAP_OFF & xbase;
1634 else if (offset >= (512 << 20))
1635 dirmap |= BRIDGE_DIRMAP_ADD512;
1637 bridge->b_dir_map = dirmap;
1640 * Set bridge's idea of page size according to the system's
1641 * idea of "IO page size". TBD: The idea of IO page size
1642 * should really go away.
1645 * ensure that we write and read without any interruption.
1646 * The read following the write is required for the Bridge war
1648 spl_level = splhi();
1649 #if IOPGSIZE == 4096
1650 if (IS_PIC_SOFT(pcibr_soft)) {
1651 bridge->p_wid_control_64 &= ~BRIDGE_CTRL_PAGE_SIZE;
1653 bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
1655 #elif IOPGSIZE == 16384
1656 if (IS_PIC_SOFT(pcibr_soft)) {
1657 bridge->p_wid_control_64 |= BRIDGE_CTRL_PAGE_SIZE;
1659 bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
/* deliberate syntax error: fail the build for unsupported IO page sizes */
1662 <<<Unable to deal with IOPGSIZE >>>;
1664 bridge->b_wid_control; /* inval addr bug war */
1667 /* Initialize internal mapping entries */
1668 for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
1669 bridge->b_int_ate_ram[entry].wr = 0;
1673 * Determine if there's external mapping SSRAM on this
1674 * bridge. Set up Bridge control register appropriately,
1675 * initialize SSRAM, and set software up to manage RAM
1676 * entries as an allocatable resource.
1678 * Currently, we just use the rm* routines to manage ATE
1679 * allocation. We should probably replace this with a
1680 * Best Fit allocator.
1682 * For now, if we have external SSRAM, avoid using
1683 * the internal ssram: we can't turn PREFETCH on
1684 * when we use the internal SSRAM; and besides,
1685 * this also guarantees that no allocation will
1686 * straddle the internal/external line, so we
1687 * can increment ATE write addresses rather than
1688 * recomparing against BRIDGE_INTERNAL_ATES every
1692 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
1695 num_entries = pcibr_init_ext_ate_ram(bridge);
1697 /* we always have 128 ATEs (512 for Xbridge) inside the chip
1698 * even if disabled for debugging.
1700 pcibr_soft->bs_int_ate_map = rmallocmap(pcibr_soft->bs_int_ate_size);
1701 pcibr_ate_free(pcibr_soft, 0, pcibr_soft->bs_int_ate_size);
1703 if (num_entries > pcibr_soft->bs_int_ate_size) {
1704 #if PCIBR_ATE_NOTBOTH /* for debug -- forces us to use external ates */
1705 printk("pcibr_attach: disabling internal ATEs.\n");
1706 pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
1708 pcibr_soft->bs_ext_ate_map = rmallocmap(num_entries);
1709 pcibr_ate_free(pcibr_soft, pcibr_soft->bs_int_ate_size,
1710 num_entries - pcibr_soft->bs_int_ate_size);
1712 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_vhdl,
1713 "pcibr_attach2: %d ATEs, %d internal & %d external\n",
1714 num_entries ? num_entries : pcibr_soft->bs_int_ate_size,
1715 pcibr_soft->bs_int_ate_size,
1716 num_entries ? num_entries-pcibr_soft->bs_int_ate_size : 0));
1724 * now figure the *real* xtalk base address
1725 * that dirmap sends us to.
1727 dirmap = bridge->b_dir_map;
1728 if (dirmap & BRIDGE_DIRMAP_OFF)
1729 xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
1730 << BRIDGE_DIRMAP_OFF_ADDRSHFT;
1731 else if (dirmap & BRIDGE_DIRMAP_ADD512)
1736 pcibr_soft->bs_dir_xbase = xbase;
1738 /* it is entirely possible that we may, at this
1739 * point, have our dirmap pointing somewhere
1740 * other than our "master" port.
1742 pcibr_soft->bs_dir_xport =
1743 (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
1746 /* pcibr sources an error interrupt;
1747 * figure out where to send it.
1749 * If any interrupts are enabled in bridge,
1750 * then the prom set us up and our interrupt
1751 * has already been reconnected in mlreset
1754 * Need to set the D_INTR_ISERR flag
1755 * in the dev_desc used for allocating the
1756 * error interrupt, so our interrupt will
1757 * be properly routed and prioritized.
1759 * If our crosstalk provider wants to
1760 * fix widget error interrupts to specific
1761 * destinations, D_INTR_ISERR is how it
1765 xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
1766 ASSERT(xtalk_intr != NULL);
1768 pcibr_soft->bsi_err_intr = xtalk_intr;
1771 * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
1772 * in order to work around some addressing limitations. In order
1773 * for that fire wall to work properly, we need to make sure we
1774 * start from a known clean state.
1776 pcibr_clearwidint(bridge);
1778 xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
1779 (intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
1781 #ifdef BUS_INT_WAR_NOT_YET
1782 request_irq(CPU_VECTOR_TO_IRQ(((hub_intr_t)xtalk_intr)->i_cpuid,
1783 ((hub_intr_t)xtalk_intr)->i_bit),
1784 (intr_func_t)pcibr_error_intr_handler, 0, "PCIBR error",
1785 (intr_arg_t) pcibr_soft);
1788 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
1789 "pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
1790 bridge->b_wid_int_upper, bridge->b_wid_int_lower));
1793 * now we can start handling error interrupts;
1794 * enable all of them.
1795 * NOTE: some PCI ints may already be enabled.
1797 /* We read the INT_ENABLE register as a 64bit picreg_t for PIC and a
1798 * 32bit bridgereg_t for BRIDGE, but always process the result as a
1799 * 64bit value so the code can be "common" for both PIC and BRIDGE...
1801 if (IS_PIC_SOFT(pcibr_soft)) {
1802 int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
1803 int_enable = (uint64_t)int_enable_64;
1805 int_enable_32 = bridge->b_int_enable | (BRIDGE_ISR_ERRORS & 0xffffffff);
1806 int_enable = ((uint64_t)int_enable_32 & 0xffffffff);
1808 #ifdef BUS_INT_WAR_NOT_YET
1810 extern void sn_add_polled_interrupt(int irq, int interval);
1812 sn_add_polled_interrupt(CPU_VECTOR_TO_IRQ(((hub_intr_t)xtalk_intr)->i_cpuid,
1813 ((hub_intr_t)xtalk_intr)->i_bit), 20000);
/* --- rev- and platform-specific interrupt-enable workarounds follow --- */
1818 #if BRIDGE_ERROR_INTR_WAR
1819 if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
1821 * We commonly get master timeouts when talking to ql.
1822 * We also see RESP_XTALK_ERROR and LLP_TX_RETRY interrupts.
1823 * Ensure that these are all disabled for now.
1825 int_enable &= ~(BRIDGE_IMR_PCI_MST_TIMEOUT |
1826 BRIDGE_ISR_RESP_XTLK_ERR |
1827 BRIDGE_ISR_LLP_TX_RETRY);
1829 if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
1830 int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
1832 #endif /* BRIDGE_ERROR_INTR_WAR */
1834 #ifdef QL_SCSI_CTRL_WAR /* for IP30 only */
1835 /* Really a QL rev A issue, but all newer hearts have newer QLs.
1836 * Forces all IO6/MSCSI to be new.
1838 if (heart_rev() == HEART_REV_A)
1839 int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
1842 #ifdef BRIDGE1_TIMEOUT_WAR
1843 if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
1845 * Turn off these interrupts. They can't be trusted in bridge 1
1847 int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT |
1848 BRIDGE_IMR_UNEXP_RESP);
1852 #ifdef BRIDGE_B_DATACORR_WAR
1854 /* WAR panic for Rev B silent data corruption.
1855 * PIOERR turned off here because there is a problem
1856 * with not re-arming it in pcibr_error_intr_handler.
1857 * We don't get LLP error interrupts if we don't
1858 * re-arm PIOERR interrupts! Just disable them here
1861 if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_B) {
1862 int_enable |= BRIDGE_IMR_LLP_REC_CBERR;
1863 int_enable &= ~BRIDGE_ISR_PCIBUS_PIOERR;
1865 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1866 "Turning on LLP_REC_CBERR for Rev B Bridge.\n"));
1870 /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
1871 * locked out to be freed up sooner (by timing out) so that the
1872 * read tnums are never completely used up.
1874 if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
1875 int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
1876 int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;
1878 bridge->b_wid_req_timeout = 0x750;
1882 * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
1883 * RRB0, RRB8, RRB1, and RRB9. Assign them to DEVICE[2|3]--VCHAN3
1884 * so they are not used
1886 if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
1887 bridge->b_even_resp |= 0x000f000f;
1888 bridge->b_odd_resp |= 0x000f000f;
/* commit the accumulated interrupt-enable mask to the hardware */
1891 if (IS_PIC_SOFT(pcibr_soft)) {
1892 bridge->p_int_enable_64 = (picreg_t)int_enable;
1894 bridge->b_int_enable = (bridgereg_t)int_enable;
1896 bridge->b_int_mode = 0; /* do not send "clear interrupt" packets */
1898 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
1901 * Depending on the rev of bridge, disable certain features.
1902 * Easiest way seems to be to force the PCIBR_NOwhatever
1903 * flag to be on for all DMA calls, which overrides any
1904 * PCIBR_whatever flag or even the setting of whatever
1905 * from the PCIIO_DMA_class flags (or even from the other
1906 * PCIBR flags, since NO overrides YES).
1908 pcibr_soft->bs_dma_flags = 0;
1911 * Always completely disabled for REV.A;
1912 * at "pcibr_prefetch_enable_rev", anyone
1913 * asking for PCIIO_PREFETCH gets it.
1914 * Between these two points, you have to ask
1915 * for PCIBR_PREFETCH, which promises that
1916 * your driver knows about known Bridge WARs.
1918 if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
1919 pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
1920 else if (pcibr_soft->bs_rev_num <
1921 (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_prefetch_enable_rev))
1922 pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
1925 * Disabled up to but not including the
1926 * rev number in pcibr_wg_enable_rev. There
1927 * is no "WAR range" as with prefetch.
1929 if (pcibr_soft->bs_rev_num <
1930 (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_wg_enable_rev))
1931 pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
1933 /* PIC only supports 64-bit direct mapping in PCI-X mode. Since
1934 * all PCI-X devices that initiate memory transactions must be
1935 * capable of generating 64-bit addresses, we force 64-bit DMAs.
1937 if (IS_PCIX(pcibr_soft)) {
1938 pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
/* --- inner scope: carve up PCI I/O / small-window / memory space maps,
 * reserving the 16MB region the PROM used for this widget's BARs --- */
1943 pciio_win_map_t win_map_p;
1944 iopaddr_t prom_base_addr = pcibr_soft->bs_xid << 24;
1945 int prom_base_size = 0x1000000;
1946 iopaddr_t prom_base_limit = prom_base_addr + prom_base_size;
1948 /* Allocate resource maps based on bus page size; for I/O and memory
1949 * space, free all pages except those in the base area and in the
1950 * range set by the PROM.
1952 * PROM creates BAR addresses in this format: 0x0ws00000 where w is
1953 * the widget number and s is the device register offset for the slot.
1956 win_map_p = &pcibr_soft->bs_io_win_map;
1957 pciio_device_win_map_new(win_map_p,
1958 PCIBR_BUS_IO_MAX + 1,
1960 pciio_device_win_populate(win_map_p,
1962 prom_base_addr - PCIBR_BUS_IO_BASE);
1963 pciio_device_win_populate(win_map_p,
1965 (PCIBR_BUS_IO_MAX + 1) - prom_base_limit);
1967 win_map_p = &pcibr_soft->bs_swin_map;
1968 pciio_device_win_map_new(win_map_p,
1969 PCIBR_BUS_SWIN_MAX + 1,
1970 PCIBR_BUS_SWIN_PAGE);
1971 pciio_device_win_populate(win_map_p,
1972 PCIBR_BUS_SWIN_BASE,
1973 (PCIBR_BUS_SWIN_MAX + 1) - PCIBR_BUS_SWIN_PAGE);
1975 win_map_p = &pcibr_soft->bs_mem_win_map;
1976 pciio_device_win_map_new(win_map_p,
1977 PCIBR_BUS_MEM_MAX + 1,
1978 PCIBR_BUS_MEM_PAGE);
1979 pciio_device_win_populate(win_map_p,
1981 prom_base_addr - PCIBR_BUS_MEM_BASE);
1982 pciio_device_win_populate(win_map_p,
1984 (PCIBR_BUS_MEM_MAX + 1) - prom_base_limit);
1987 /* build "no-slot" connection point
1989 pcibr_info = pcibr_device_info_new
1990 (pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
1991 PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
1992 noslot_conn = pciio_device_info_register
1993 (pcibr_vhdl, &pcibr_info->f_c);
1995 /* Remember the no slot connection point info for tearing it
1996 * down during detach.
1998 pcibr_soft->bs_noslot_conn = noslot_conn;
1999 pcibr_soft->bs_noslot_info = pcibr_info;
/* assume fast back-to-back capable until proven otherwise -- presumably
 * cleared by per-slot probing code not shown in this excerpt; verify */
2001 fast_back_to_back_enable = 1;
2005 if (fast_back_to_back_enable) {
2007 * All devices on the bus are capable of fast back to back, so
2008 * we need to set the fast back to back bit in all devices on
2009 * the bus that are capable of doing such accesses.
/* --- per-slot bring-up: probe, address space, device register --- */
2014 for (slot = pcibr_soft->bs_min_slot;
2015 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
2016 /* Find out what is out there */
2017 (void)pcibr_slot_info_init(pcibr_vhdl,slot);
2019 for (slot = pcibr_soft->bs_min_slot;
2020 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
2021 /* Set up the address space for this slot in the PCI land */
2022 (void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
2024 for (slot = pcibr_soft->bs_min_slot;
2025 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
2026 /* Setup the device register */
2027 (void)pcibr_slot_device_init(pcibr_vhdl, slot);
2029 if (IS_PCIX(pcibr_soft)) {
2030 pcibr_soft->bs_pcix_rbar_inuse = 0;
2031 pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
2032 pcibr_soft->bs_pcix_rbar_percent_allowed =
2033 pcibr_pcix_rbars_calc(pcibr_soft);
2035 for (slot = pcibr_soft->bs_min_slot;
2036 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
2037 /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
2038 (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
2041 /* Set up convenience links */
2042 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
2043 pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);
2045 for (slot = pcibr_soft->bs_min_slot;
2046 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
2047 /* Setup host/guest relations */
2048 (void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
2050 /* Handle initial RRB management for Bridge and Xbridge */
2051 pcibr_initial_rrb(pcibr_vhdl,
2052 pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
2054 { /* Before any drivers get called that may want to re-allocate
2055 * RRB's, let's get some special cases pre-allocated. Drivers
2056 * may override these pre-allocations, but by doing pre-allocations
2057 * now we're assured not to step all over what the driver intended.
2059 * Note: Someday this should probably be moved over to pcibr_rrb.c
2062 * Each Pbrick PCI bus only has slots 1 and 2. Similarly for
2063 * widget 0xe on Ibricks. Allocate RRB's accordingly.
2065 if (pcibr_soft->bs_bricktype > 0) {
2066 switch (pcibr_soft->bs_bricktype) {
2067 case MODULE_PXBRICK:
2069 * If the IO9 is in the PXBrick (bus1, slot1) allocate
2070 * RRBs to all the devices
/* 0x10A9/0x100A == SGI IO9 base-IO card in slot 0 */
2072 if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
2073 (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
2074 (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
2075 do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
2076 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
2077 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
2078 do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
2080 do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
2081 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2085 case MODULE_PEBRICK:
2087 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2088 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
2091 /* port 0xe on the Ibrick only has slots 1 and 2 */
2092 if (pcibr_soft->bs_xid == 0xe) {
2093 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2094 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
2097 /* allocate one RRB for the serial port */
2098 do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
2105 if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
2106 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
2109 } /* OK Special RRB allocations are done. */
2111 for (slot = pcibr_soft->bs_min_slot;
2112 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
2113 /* Call the device attach */
2114 (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
2117 #if (defined(USS302_TIMEOUT_WAR))
2119 * If this bridge holds a Lucent USS-302 or USS-312 pci/usb controller,
2120 * increase the Bridge PCI retry backoff interval. This part seems
2121 * to go away for long periods of time if a DAC appears on the bus during
2122 * a read command that is being retried.
2128 for (slot = pcibr_soft->bs_min_slot;
2129 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
2130 if (pcibr_soft->bs_slot[slot].bss_vendor_id ==
2131 LUCENT_USBHC_VENDOR_ID_NUM &&
2132 (pcibr_soft->bs_slot[slot].bss_device_id ==
2133 LUCENT_USBHC302_DEVICE_ID_NUM ||
2134 pcibr_soft->bs_slot[slot].bss_device_id ==
2135 LUCENT_USBHC312_DEVICE_ID_NUM)) {
2137 "pcibr_attach: %x Bus holds a usb part - setting"
2138 "bridge PCI_RETRY_HLD to %d\n",
2139 pcibr_vhdl, USS302_BRIDGE_TIMEOUT_HLD);
2141 bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_HLD_MASK;
2142 bridge->b_bus_timeout |=
2143 BRIDGE_BUS_PCI_RETRY_HLD(USS302_BRIDGE_TIMEOUT_HLD);
2146 * Have to consider the read response timer in the hub II as well
2149 hubii_ixtt_get(xconn_vhdl, &ixtt);
2152 * bump rrsp_ps to allow at least 1ms for read
2153 * responses from this widget
2156 ixtt.ii_ixtt_fld_s.i_rrsp_ps = 20000;
2157 hubii_ixtt_set(xconn_vhdl, &ixtt);
2160 * print the current setting
2163 hubii_ixtt_get(xconn_vhdl, &ixtt);
2164 printk( "Setting hub ixtt.rrsp_ps field to 0x%x\n",
2165 ixtt.ii_ixtt_fld_s.i_rrsp_ps);
2167 break; /* only need to do it once */
2171 #endif /* (defined(USS302_TIMEOUT_WAR)) */
2173 FIXME("pcibr_attach: Call do_pcibr_rrb_autoalloc nicinfo\n");
2174 #endif /* PIC_LATER */
/* propagate async-attach context to the no-slot connection, then attach it */
2177 async_attach_add_info(noslot_conn, aa);
2179 pciio_device_attach(noslot_conn, (int)0);
2182 * Tear down pointer to async attach info -- async threads for
2183 * bridge's descendants may be running but the bridge's work is done.
2186 async_attach_del_info(xconn_vhdl);
2193 * Detach the bridge device from the hwgraph after cleaning out all the
2194 * underlying vertices.
2198 pcibr_detach(devfs_handle_t xconn)
2201 devfs_handle_t pcibr_vhdl;
2202 pcibr_soft_t pcibr_soft;
2206 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));
2208 /* Get the bridge vertex from its xtalk connection point */
2209 if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
2212 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
2213 bridge = pcibr_soft->bs_base;
/* Mask all bridge interrupts under the bridge lock before tearing
 * anything down, so no handler runs against half-freed state. */
2216 s = pcibr_lock(pcibr_soft);
2217 /* Disable the interrupts from the bridge */
2218 if (IS_PIC_SOFT(pcibr_soft)) {
2219 bridge->p_int_enable_64 = 0;
2221 bridge->b_int_enable = 0;
2223 pcibr_unlock(pcibr_soft, s);
2225 /* Detach all the PCI devices talking to this bridge */
2226 for (slot = pcibr_soft->bs_min_slot;
2227 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
2228 pcibr_slot_detach(pcibr_vhdl, slot, 0, (char *)NULL, (int *)NULL);
2231 /* Unregister the no-slot connection point */
2232 pciio_device_info_unregister(pcibr_vhdl,
2233 &(pcibr_soft->bs_noslot_info->f_c));
2235 spin_lock_destroy(&pcibr_soft->bs_lock);
/* bs_name was kmalloc'd in pcibr_attach2 */
2236 kfree(pcibr_soft->bs_name);
2238 /* Error handler gets unregistered when the widget info is
2241 /* Free the soft ATE maps */
2242 if (pcibr_soft->bs_int_ate_map)
2243 rmfreemap(pcibr_soft->bs_int_ate_map);
2244 if (pcibr_soft->bs_ext_ate_map)
2245 rmfreemap(pcibr_soft->bs_ext_ate_map);
2247 /* Disconnect the error interrupt and free the xtalk resources
2248 * associated with it.
2250 xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
2251 xtalk_intr_free(pcibr_soft->bsi_err_intr);
2253 /* Clear the software state maintained by the bridge driver for this
2257 /* Remove the Bridge revision labelled info */
2258 (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
2259 /* Remove the character device associated with this bridge */
2260 (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
2261 /* Remove the PCI bridge vertex */
2262 (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
/*
 * pcibr_asic_rev: look up the bridge ASIC revision that pcibr_attach
 * recorded on the master vertex as INFO_LBL_PCIBR_ASIC_REV.
 * Walks EDGE_LBL_MASTER from the PCI connection point, reads the
 * label, then drops the vertex reference taken by the traversal.
 */
2268 pcibr_asic_rev(devfs_handle_t pconn_vhdl)
2270 devfs_handle_t pcibr_vhdl;
2272 arbitrary_info_t ainfo;
2274 if (GRAPH_SUCCESS !=
2275 hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
2278 tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
2281 * Any hwgraph function that returns a vertex handle will implicitly
2282 * increment that vertex's reference count. The caller must explicitly
2283 * decrement the vertex's reference count after the last reference to
2286 * Decrement reference count incremented by call to hwgraph_traverse().
2289 hwgraph_vertex_unref(pcibr_vhdl);
2291 if (tmp_vhdl != GRAPH_SUCCESS)
/*
 * pcibr_write_gather_flush: flush any write-gather buffer pending for
 * the slot behind 'pconn_vhdl' by delegating to
 * pcibr_device_write_gather_flush() with the resolved soft state/slot.
 */
2297 pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
2299 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2300 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2302 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2303 pcibr_device_write_gather_flush(pcibr_soft, slot);
2307 /* =====================================================================
/*
 * pcibr_addr_pci_to_xio: translate a (slot, space, pci_addr, req_size)
 * request into the XIO address that reaches it through this bridge, or
 * XIO_NOWHERE on failure.  Visible strategy: CFG and ROM spaces are
 * handled specially; windowed spaces (WINx) are reduced to raw MEM/IO;
 * MEM/IO requests are satisfied by claiming or reusing a DevIO(x)
 * window, falling back to the direct-mapped regions; finally the
 * "direct PIO" byte-swap control is reconciled with the request's
 * BYTE_STREAM/WORD_VALUES flag.  Runs under pcibr_lock throughout.
 * (Listing is sampled; lines are missing between statements.)
 */
2312 pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
2314 pciio_space_t space,
2319 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2320 pciio_info_t pciio_info = &pcibr_info->f_c;
2321 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2322 bridge_t *bridge = pcibr_soft->bs_base;
2324 unsigned bar; /* which BASE reg on device is decoding */
2325 iopaddr_t xio_addr = XIO_NOWHERE;
2327 pciio_space_t wspace; /* which space device is decoding */
2328 iopaddr_t wbase; /* base of device decode on PCI */
2329 size_t wsize; /* size of device decode on PCI */
2331 int try; /* DevIO(x) window scanning order control */
2332 int maxtry, halftry;
2333 int win; /* which DevIO(x) window is being used */
2334 pciio_space_t mspace; /* target space for devio(x) register */
2335 iopaddr_t mbase; /* base of devio(x) mapped area on PCI */
2336 size_t msize; /* size of devio(x) mapped area on PCI */
2337 size_t mmask; /* addr bits stored in Device(x) */
2342 s = pcibr_lock(pcibr_soft);
/* redirect to the host slot of a multi-slot device, if any */
2344 if (pcibr_soft->bs_slot[slot].has_host) {
2345 slot = pcibr_soft->bs_slot[slot].host_slot;
2346 pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
2349 * Special case for dual-slot pci devices such as ioc3 on IP27
2350 * baseio. In these cases, pconn_vhdl should never be for a pci
2351 * function on a subordinate PCI bus, so we can safely reset pciio_info
2352 * to be the info struct embedded in pcibr_info. Failure to do this
2353 * results in using a bogus pciio_info_t for calculations done later
2357 pciio_info = &pcibr_info->f_c;
2359 if (space == PCIIO_SPACE_NONE)
2362 if (space == PCIIO_SPACE_CFG) {
2364 * Usually, the first mapping
2365 * established to a PCI device
2366 * is to its config space.
2368 * In any case, we definitely
2369 * do NOT need to worry about
2370 * PCI BASE registers, and
2371 * MUST NOT attempt to point
2372 * the DevIO(x) window at
2375 if (((flags & PCIIO_BYTE_STREAM) == 0) &&
2376 ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
2377 xio_addr = pci_addr + PCIBR_TYPE0_CFG_DEV(pcibr_soft, slot);
2381 if (space == PCIIO_SPACE_ROM) {
2382 /* PIO to the Expansion Rom.
2383 * Driver is responsible for
2384 * enabling and disabling
2387 wbase = pciio_info->c_rbase;
2388 wsize = pciio_info->c_rsize;
2391 * While the driver should know better
2392 * than to attempt to map more space
2393 * than the device is decoding, he might
2394 * do it; better to bail out here.
2396 if ((pci_addr + req_size) > wsize)
/* ROM decodes are reached through MEM space from here on */
2400 space = PCIIO_SPACE_MEM;
2403 * reduce window mappings to raw
2404 * space mappings (maybe allocating
2405 * windows), and try for DevIO(x)
2406 * usage (setting it if it is available).
2408 bar = space - PCIIO_SPACE_WIN0;
2410 wspace = pciio_info->c_window[bar].w_space;
2411 if (wspace == PCIIO_SPACE_NONE)
2414 /* get PCI base and size */
2415 wbase = pciio_info->c_window[bar].w_base;
2416 wsize = pciio_info->c_window[bar].w_size;
2419 * While the driver should know better
2420 * than to attempt to map more space
2421 * than the device is decoding, he might
2422 * do it; better to bail out here.
2424 if ((pci_addr + req_size) > wsize)
2427 /* shift from window relative to
2428 * decoded space relative.
2435 /* Scan all the DevIO(x) windows twice looking for one
2436 * that can satisfy our request. The first time through,
2437 * only look at assigned windows; the second time, also
2438 * look at PCIIO_SPACE_NONE windows. Arrange the order
2439 * so we always look at our own window first.
2441 * We will not attempt to satisfy a single request
2442 * by concatenating multiple windows.
2444 maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
2445 halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
2446 for (try = 0; try < maxtry; ++try) {
2450 /* calculate win based on slot, attempt, and max possible
2452 win = (try + slot) % PCIBR_NUM_SLOTS(pcibr_soft);
2454 /* If this DevIO(x) mapping area can provide
2455 * a mapping to this address, use it.
2457 msize = (win < 2) ? 0x200000 : 0x100000;
2459 if (space != PCIIO_SPACE_IO)
2460 mmask &= 0x3FFFFFFF;
2462 offset = pci_addr & (msize - 1);
2464 /* If this window can't possibly handle that request,
2465 * go on to the next window.
2467 if (((pci_addr & (msize - 1)) + req_size) > msize)
2470 devreg = pcibr_soft->bs_slot[win].bss_device;
2472 /* Is this window "nailed down"?
2473 * If not, maybe we can use it.
2474 * (only check this the second time through)
2476 mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
2477 if ((try > halftry) && (mspace == PCIIO_SPACE_NONE)) {
2479 /* If this is the primary DevIO(x) window
2480 * for some other device, skip it.
2482 if ((win != slot) &&
2483 (PCIIO_VENDOR_ID_NONE !=
2484 pcibr_soft->bs_slot[win].bss_vendor_id))
2487 /* It's a free window, and we fit in it.
2488 * Set up Device(win) to our taste.
2490 mbase = pci_addr & mmask;
2492 /* check that we would really get from
2495 if ((mbase | offset) != pci_addr)
2498 devreg &= ~BRIDGE_DEV_OFF_MASK;
2499 if (space != PCIIO_SPACE_IO)
2500 devreg |= BRIDGE_DEV_DEV_IO_MEM;
2502 devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
2503 devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
2505 /* default is WORD_VALUES.
2506 * if you specify both,
2507 * operation is undefined.
2509 if (flags & PCIIO_BYTE_STREAM)
2510 devreg |= BRIDGE_DEV_DEV_SWAP;
2512 devreg &= ~BRIDGE_DEV_DEV_SWAP;
/* write the new Device(win) image to hardware, keeping the shadow
 * copy in bss_device in sync; the tflush read orders the PIO. */
2514 if (pcibr_soft->bs_slot[win].bss_device != devreg) {
2515 if ( IS_PIC_SOFT(pcibr_soft) ) {
2516 bridge->b_device[win].reg = devreg;
2517 pcibr_soft->bs_slot[win].bss_device = devreg;
2518 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
2521 if (io_get_sh_swapper(NASID_GET(bridge))) {
2522 BRIDGE_REG_SET32((&bridge->b_device[win].reg)) = __swab32(devreg);
2523 pcibr_soft->bs_slot[win].bss_device = devreg;
2524 BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
2526 bridge->b_device[win].reg = devreg;
2527 pcibr_soft->bs_slot[win].bss_device = devreg;
2528 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
2533 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
2534 "pcibr_addr_pci_to_xio: Device(%d): %x\n",
2535 win, devreg, device_bits));
2537 printk("pcibr_addr_pci_to_xio: Device(%d): %x\n", win, devreg);
2540 pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
2541 pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
2542 xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
2544 /* Increment this DevIO's use count */
2545 pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
2547 /* Save the DevIO register index used to access this BAR */
2549 pcibr_info->f_window[bar].w_devio_index = win;
2552 * The kernel only allows functions to have so many variable args,
2553 * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printk
2554 * arguments fails so sprintf() it into a temporary string.
2556 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2558 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
2559 "slot %d allocates DevIO(%d) Device(%d) set to %x\n",
2560 space, space_desc, pci_addr, pci_addr + req_size - 1,
2561 slot, win, win, devreg, device_bits);
2563 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
2564 "slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
2565 (unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
2566 (unsigned int)slot, win, win, (unsigned long)devreg);
2568 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2571 } /* endif DevIO(x) not pointed */
2572 mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
2574 /* Now check for request incompat with DevIO(x)
2576 if ((mspace != space) ||
2577 (pci_addr < mbase) ||
2578 ((pci_addr + req_size) > (mbase + msize)) ||
2579 ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
2580 (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
2583 /* DevIO(x) window is pointed at PCI space
2584 * that includes our target. Calculate the
2585 * final XIO address, release the lock and
2588 xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
2590 /* Increment this DevIO's use count */
2591 pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
2593 /* Save the DevIO register index used to access this BAR */
2595 pcibr_info->f_window[bar].w_devio_index = win;
2597 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2599 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
2600 "slot %d uses DevIO(%d)\n", space, space_desc, pci_addr,
2601 pci_addr + req_size - 1, slot, win);
2603 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2610 * Accesses to device decode
2611 * areas that do not fit
2612 * within the DevIO(x) space are
2613 * modified to be accesses via
2614 * the direct mapping areas.
2616 * If necessary, drivers can
2617 * explicitly ask for mappings
2618 * into these address spaces,
2619 * but this should never be needed.
2621 case PCIIO_SPACE_MEM: /* "mem space" */
2622 case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
2623 if ((pci_addr + BRIDGE_PCI_MEM32_BASE + req_size - 1) <=
2624 BRIDGE_PCI_MEM32_LIMIT)
2625 xio_addr = pci_addr + BRIDGE_PCI_MEM32_BASE;
2628 case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
2629 if ((pci_addr + BRIDGE_PCI_MEM64_BASE + req_size - 1) <=
2630 BRIDGE_PCI_MEM64_LIMIT)
2631 xio_addr = pci_addr + BRIDGE_PCI_MEM64_BASE;
2634 case PCIIO_SPACE_IO: /* "i/o space" */
2635 /* Bridge Hardware Bug WAR #482741:
2636 * The 4G area that maps directly from
2637 * XIO space to PCI I/O space is busted
2638 * until Bridge Rev D.
2640 if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
2641 ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
2642 BRIDGE_PCI_IO_LIMIT))
2643 xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
2647 /* Check that "Direct PIO" byteswapping matches,
2648 * try to change it if it does not.
2650 if (xio_addr != XIO_NOWHERE) {
2651 unsigned bst; /* nonzero to set bytestream */
2652 unsigned *bfp; /* addr of record of how swapper is set */
2653 unsigned swb; /* which control bit to mung */
2654 unsigned bfo; /* current swapper setting */
2655 unsigned bfn; /* desired swapper setting */
2657 bfp = ((space == PCIIO_SPACE_IO)
2658 ? (&pcibr_soft->bs_pio_end_io)
2659 : (&pcibr_soft->bs_pio_end_mem));
2663 bst = flags & PCIIO_BYTE_STREAM;
2665 bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
2667 if (bfn == bfo) { /* we already match. */
2669 } else if (bfo != 0) { /* we have a conflict. */
2670 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2672 sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap conflict in %x, "
2673 "was%s%s, want%s%s\n", space, space_desc,
2674 bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
2675 bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
2676 bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
2677 bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
2679 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2681 xio_addr = XIO_NOWHERE;
2682 } else { /* OK to make the change. */
2683 swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
2684 if ( IS_PIC_SOFT(pcibr_soft) ) {
2685 picreg_t octl, nctl;
2686 octl = bridge->p_wid_control_64;
2687 nctl = bst ? octl | (uint64_t)swb : octl & ((uint64_t)~swb);
2689 if (octl != nctl) /* make the change if any */
2690 bridge->b_wid_control = nctl;
2693 picreg_t octl, nctl;
2694 if (io_get_sh_swapper(NASID_GET(bridge))) {
2695 octl = BRIDGE_REG_GET32((&bridge->b_wid_control));
2696 nctl = bst ? octl | swb : octl & ~swb;
2698 if (octl != nctl) /* make the change if any */
2699 BRIDGE_REG_SET32((&bridge->b_wid_control)) = __swab32(nctl);
2701 octl = bridge->b_wid_control;
2702 nctl = bst ? octl | swb : octl & ~swb;
2704 if (octl != nctl) /* make the change if any */
2705 bridge->b_wid_control = nctl;
2708 *bfp = bfn; /* record the assignment */
2710 if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2712 sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap for %x set "
2713 "to%s%s\n", space, space_desc,
2714 bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
2715 bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
2717 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2722 pcibr_unlock(pcibr_soft, s);
/*
 * pcibr_piomap_alloc: allocate a PIO mapping object for the given
 * device/space/address range.  Translates the request to an XIO
 * address via pcibr_addr_pci_to_xio(), reuses an idle entry from the
 * per-device piomap list when one exists (bp_mapsz == 0 marks idle),
 * otherwise links a fresh entry onto the list, then backs it with an
 * xtalk piomap.  Returns the map, or failure if translation or the
 * xtalk allocation fails.  (Sampled listing; lines are missing.)
 */
2728 pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
2729 device_desc_t dev_desc,
2730 pciio_space_t space,
2733 size_t req_size_max,
2736 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2737 pciio_info_t pciio_info = &pcibr_info->f_c;
2738 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2739 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2740 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
2742 pcibr_piomap_t *mapptr;
2743 pcibr_piomap_t maplist;
2744 pcibr_piomap_t pcibr_piomap;
2746 xtalk_piomap_t xtalk_piomap;
2749 /* Make sure that the req sizes are non-zero */
2750 if ((req_size < 1) || (req_size_max < 1)) {
2751 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2752 "pcibr_piomap_alloc: req_size | req_size_max < 1\n"));
2757 * Code to translate slot/space/addr
2758 * into xio_addr is common between
2759 * this routine and pcibr_piotrans_addr.
2761 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2763 if (xio_addr == XIO_NOWHERE) {
2764 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2765 "pcibr_piomap_alloc: xio_addr == XIO_NOWHERE\n"));
2769 /* Check the piomap list to see if there is already an allocated
2770 * piomap entry but not in use. If so use that one. Otherwise
2771 * allocate a new piomap entry and add it to the piomap list
2773 mapptr = &(pcibr_info->f_piomap);
2775 s = pcibr_lock(pcibr_soft);
2776 for (pcibr_piomap = *mapptr;
2777 pcibr_piomap != NULL;
2778 pcibr_piomap = pcibr_piomap->bp_next) {
2779 if (pcibr_piomap->bp_mapsz == 0)
2786 pcibr_unlock(pcibr_soft, s);
/* fill in the (new or recycled) map entry */
2790 pcibr_piomap->bp_dev = pconn_vhdl;
2791 pcibr_piomap->bp_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, pciio_slot);
2792 pcibr_piomap->bp_flags = flags;
2793 pcibr_piomap->bp_space = space;
2794 pcibr_piomap->bp_pciaddr = pci_addr;
2795 pcibr_piomap->bp_mapsz = req_size;
2796 pcibr_piomap->bp_soft = pcibr_soft;
2797 pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
2800 s = pcibr_lock(pcibr_soft);
2802 pcibr_piomap->bp_next = maplist;
2803 *mapptr = pcibr_piomap;
2805 pcibr_unlock(pcibr_soft, s);
2810 xtalk_piomap_alloc(xconn_vhdl, 0,
2812 req_size, req_size_max,
2813 flags & PIOMAP_FLAGS);
2815 pcibr_piomap->bp_xtalk_addr = xio_addr;
2816 pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
/* mark the entry idle again on xtalk allocation failure */
2818 pcibr_piomap->bp_mapsz = 0;
2823 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2824 "pcibr_piomap_alloc: map=0x%x\n", pcibr_piomap));
2826 return pcibr_piomap;
/*
 * pcibr_piomap_free: release the xtalk resources behind a PIO map and
 * mark the entry idle (bp_mapsz = 0) so pcibr_piomap_alloc can
 * recycle it; the entry itself stays on the per-device list.
 */
2831 pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
2833 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2834 "pcibr_piomap_free: map=0x%x\n", pcibr_piomap));
2836 xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
2837 pcibr_piomap->bp_xtalk_pio = 0;
2838 pcibr_piomap->bp_mapsz = 0;
/*
 * pcibr_piomap_addr: return a kernel virtual address for 'pci_addr'
 * within an established PIO map, by offsetting into the underlying
 * xtalk piomap relative to the map's base PCI address.
 *
 * Fix: the debug message previously said "pcibr_piomap_free", a
 * copy-paste error from the routine above; it now names this routine.
 */
2843 pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
2848 addr = xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
2849 pcibr_piomap->bp_xtalk_addr +
2850 pci_addr - pcibr_piomap->bp_pciaddr,
2852 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2853 "pcibr_piomap_addr: map=0x%x, addr=0x%x\n",
2854 pcibr_piomap, addr));
/*
 * pcibr_piomap_done: signal completion of PIO through this map by
 * forwarding to the underlying xtalk piomap.
 */
2861 pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
2863 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2864 "pcibr_piomap_done: map=0x%x\n", pcibr_piomap));
2865 xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
/*
 * pcibr_piotrans_addr: one-shot PIO translation with no map object.
 * Converts the PCI request to an XIO address (shared logic with
 * pcibr_piomap_alloc) and hands it to xtalk_piotrans_addr().
 */
2870 pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
2871 device_desc_t dev_desc,
2872 pciio_space_t space,
2877 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2878 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2879 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2880 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
2885 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2887 if (xio_addr == XIO_NOWHERE) {
2888 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2889 "pcibr_piotrans_addr: xio_addr == XIO_NOWHERE\n"));
2893 addr = xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
2894 PCIBR_DEBUG((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2895 "pcibr_piotrans_addr: xio_addr=0x%x, addr=0x%x\n",
2901 * PIO Space allocation and management.
2902 * Allocate and Manage the PCI PIO space (mem and io space)
2903 * This routine is pretty simplistic at this time, and
2904 * does pretty trivial management of allocation and freeing.
2905 * The current scheme is prone to fragmentation.
2906 * Change the scheme to use bitmaps.
/*
 * pcibr_piospace_alloc: hand out a chunk of PCI PIO space (IO or MEM)
 * of at least 'req_size' bytes with the requested power-of-two,
 * page-or-larger alignment.  A free chunk from the per-device
 * f_piospace list is reused when one matches space/size/alignment;
 * otherwise fresh bus address space is carved out with
 * pcibr_bus_addr_alloc() and recorded on the list.
 *
 * Fix: debug message typo "to big" corrected to "too big".
 * (Sampled listing; lines are missing between statements.)
 */
2911 pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
2912 device_desc_t dev_desc,
2913 pciio_space_t space,
2917 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2918 pciio_info_t pciio_info = &pcibr_info->f_c;
2919 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2921 pciio_piospace_t piosp;
2924 iopaddr_t start_addr;
2928 * Check for proper alignment
2930 ASSERT(alignment >= NBPP);
2931 ASSERT((alignment & (alignment - 1)) == 0);
2933 align_mask = alignment - 1;
2934 s = pcibr_lock(pcibr_soft);
2937 * First look if a previously allocated chunk exists.
2939 if ((piosp = pcibr_info->f_piospace)) {
2941 * Look through the list for a right sized free chunk.
2945 (piosp->space == space) &&
2946 (piosp->count >= req_size) &&
2947 !(piosp->start & align_mask)) {
2949 pcibr_unlock(pcibr_soft, s);
2950 return piosp->start;
2952 piosp = piosp->next;
2958 * Allocate PCI bus address, usually for the Universe chip driver;
2959 * do not pass window info since the actual PCI bus address
2960 * space will never be freed. The space may be reused after it
2961 * is logically released by pcibr_piospace_free().
2964 case PCIIO_SPACE_IO:
2965 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2967 0, req_size, alignment);
2970 case PCIIO_SPACE_MEM:
2971 case PCIIO_SPACE_MEM32:
2972 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2974 0, req_size, alignment);
2979 pcibr_unlock(pcibr_soft, s);
2980 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2981 "pcibr_piospace_alloc: unknown space %d\n", space));
2986 * If too big a request, reject it.
2989 pcibr_unlock(pcibr_soft, s);
2990 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2991 "pcibr_piospace_alloc: request 0x%x too big\n", req_size));
2997 piosp->space = space;
2998 piosp->start = start_addr;
2999 piosp->count = req_size;
3000 piosp->next = pcibr_info->f_piospace;
3001 pcibr_info->f_piospace = piosp;
3003 pcibr_unlock(pcibr_soft, s);
3005 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
3006 "pcibr_piospace_alloc: piosp=0x%x\n", piosp));
/*
 * pcibr_piospace_free: logically release a chunk previously returned
 * by pcibr_piospace_alloc.  Walks the per-device f_piospace list for
 * an entry whose start matches 'pciaddr'; a size mismatch is reported
 * but the whole chunk is freed anyway (partial frees are not
 * supported).  The underlying bus address space is never returned to
 * the allocator — the chunk is only marked reusable on the list.
 */
3013 pcibr_piospace_free(devfs_handle_t pconn_vhdl,
3014 pciio_space_t space,
3018 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
3020 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
3023 pciio_piospace_t piosp;
3028 * Look through the bridge data structures for the pciio_piospace_t
3029 * structure corresponding to 'pciaddr'
3031 s = pcibr_lock(pcibr_soft);
3032 piosp = pcibr_info->f_piospace;
3035 * Piospace free can only be for the complete
3036 * chunk and not parts of it..
3038 if (piosp->start == pciaddr) {
3039 if (piosp->count == req_size)
3042 * Improper size passed for freeing..
3043 * Print a message and break;
3045 hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
3046 printk(KERN_WARNING "pcibr_piospace_free: error");
3047 printk(KERN_WARNING "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
3048 name, req_size, piosp->count);
3049 printk(KERN_WARNING "Freeing 0x%lx instead", piosp->count);
3052 piosp = piosp->next;
3057 "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
3059 pcibr_unlock(pcibr_soft, s);
3063 pcibr_unlock(pcibr_soft, s);
3065 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
3066 "pcibr_piospace_free: piosp=0x%x\n", piosp));
3070 /* =====================================================================
3073 * The Bridge ASIC provides three methods of doing
3074 * DMA: via a "direct map" register available in
3075 * 32-bit PCI space (which selects a contiguous 2G
3076 * address space on some other widget), via
3077 * "direct" addressing via 64-bit PCI space (all
3078 * destination information comes from the PCI
3079 * address, including transfer attributes), and via
3080 * a "mapped" region that allows a bunch of
3081 * different small mappings to be established with
3084 * For efficiency, we most prefer to use the 32-bit
3085 * direct mapping facility, since it requires no
3086 * resource allocations. The advantage of using the
3087 * PMU over the 64-bit direct is that single-cycle
3088 * PCI addressing can be used; the advantage of
3089 * using 64-bit direct over PMU addressing is that
3090 * we do not have to allocate entries in the PMU.
3094 * Convert PCI-generic software flags and Bridge-specific software flags
3095 * into Bridge-specific Direct Map attribute bits.
/*
 * Flags are applied in increasing order of specificity: the generic
 * macro flags (DMA_DATA/DMA_CMD) first, then generic detail flags,
 * then provider-specific PCIBR_* flags, so later groups override
 * earlier ones.  Returns the PCI64_ATTR_* attribute bits to be OR'd
 * into a 64-bit direct-mapped DMA address.
 */
3098 pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
3100 iopaddr_t attributes = 0;
3102 /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
3104 ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
3107 /* Generic macro flags
3109 if (flags & PCIIO_DMA_DATA) { /* standard data channel */
3110 attributes &= ~PCI64_ATTR_BAR; /* no barrier bit */
3111 attributes |= PCI64_ATTR_PREF; /* prefetch on */
3113 if (flags & PCIIO_DMA_CMD) { /* standard command channel */
3114 attributes |= PCI64_ATTR_BAR; /* barrier bit on */
3115 attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
3117 /* Generic detail flags
3119 if (flags & PCIIO_PREFETCH)
3120 attributes |= PCI64_ATTR_PREF;
3121 if (flags & PCIIO_NOPREFETCH)
3122 attributes &= ~PCI64_ATTR_PREF;
3124 /* the swap bit is in the address attributes for xbridge */
3125 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3126 if (flags & PCIIO_BYTE_STREAM)
3127 attributes |= PCI64_ATTR_SWAP;
3128 if (flags & PCIIO_WORD_VALUES)
3129 attributes &= ~PCI64_ATTR_SWAP;
3132 /* Provider-specific flags
3134 if (flags & PCIBR_BARRIER)
3135 attributes |= PCI64_ATTR_BAR;
3136 if (flags & PCIBR_NOBARRIER)
3137 attributes &= ~PCI64_ATTR_BAR;
3139 if (flags & PCIBR_PREFETCH)
3140 attributes |= PCI64_ATTR_PREF;
3141 if (flags & PCIBR_NOPREFETCH)
3142 attributes &= ~PCI64_ATTR_PREF;
3144 if (flags & PCIBR_PRECISE)
3145 attributes |= PCI64_ATTR_PREC;
3146 if (flags & PCIBR_NOPRECISE)
3147 attributes &= ~PCI64_ATTR_PREC;
3149 if (flags & PCIBR_VCHAN1)
3150 attributes |= PCI64_ATTR_VIRTUAL;
3151 if (flags & PCIBR_VCHAN0)
3152 attributes &= ~PCI64_ATTR_VIRTUAL;
3154 /* PIC in PCI-X mode only supports barrier & swap */
3155 if (IS_PCIX(pcibr_soft)) {
3156 attributes &= (PCI64_ATTR_BAR | PCI64_ATTR_SWAP);
3159 return (attributes);
/*
 * pcibr_dmamap_alloc: create a DMA map for up to 'req_size_max' bytes
 * for the device at 'pconn_vhdl'.  Visible preference order:
 *   1. 64-bit direct (PCIIO_DMA_A64) if the slot's Device register can
 *      be set compatibly;
 *   2. 32-bit direct (PCIIO_FIXED) through the direct-map window;
 *   3. page-mapped via ATEs from the mapping RAM (PMU), with optional
 *      round-up for a worst-case unaligned start.
 * Each successful path also tops up the slot's RRBs when they are not
 * fixed.  On total failure the xtalk map and the pre-allocated map
 * structure are released.  (Sampled listing; lines are missing.)
 */
3164 pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl,
3165 device_desc_t dev_desc,
3166 size_t req_size_max,
3169 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3170 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3171 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
3173 xwidgetnum_t xio_port;
3175 xtalk_dmamap_t xtalk_dmamap;
3176 pcibr_dmamap_t pcibr_dmamap;
3181 /* merge in forced flags */
3182 flags |= pcibr_soft->bs_dma_flags;
3185 * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
3186 * can be called within an interrupt thread.
3188 pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
3193 xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
3194 flags & DMAMAP_FLAGS);
3195 if (!xtalk_dmamap) {
3196 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3197 "pcibr_dmamap_alloc: xtalk_dmamap_alloc failed\n"));
3198 free_pciio_dmamap(pcibr_dmamap);
3201 xio_port = pcibr_soft->bs_mxid;
3202 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3204 pcibr_dmamap->bd_dev = pconn_vhdl;
3205 pcibr_dmamap->bd_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot);
3206 pcibr_dmamap->bd_soft = pcibr_soft;
3207 pcibr_dmamap->bd_xtalk = xtalk_dmamap;
3208 pcibr_dmamap->bd_max_size = req_size_max;
3209 pcibr_dmamap->bd_xio_port = xio_port;
3211 if (flags & PCIIO_DMA_A64) {
3212 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
3217 /* Device is capable of A64 operations,
3218 * and the attributes of the DMA are
3219 * consistent with any previous DMA
3220 * mappings using shared resources.
3223 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
3225 pcibr_dmamap->bd_flags = flags;
3226 pcibr_dmamap->bd_xio_addr = 0;
3227 pcibr_dmamap->bd_pci_addr = pci_addr;
3229 /* If in PCI mode, make sure we have an RRB (or two).
3231 if (IS_PCI(pcibr_soft) &&
3232 !(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
3233 if (flags & PCIBR_VCHAN1)
3235 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
3236 if (have_rrbs < 2) {
3237 if (pci_addr & PCI64_ATTR_PREF)
3241 if (have_rrbs < min_rrbs)
3242 do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
3243 min_rrbs - have_rrbs);
3246 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3247 "pcibr_dmamap_alloc: using direct64, map=0x%x\n",
3249 return pcibr_dmamap;
3251 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3252 "pcibr_dmamap_alloc: unable to use direct64\n"));
3254 /* PIC only supports 64-bit direct mapping in PCI-X mode. */
3255 if (IS_PCIX(pcibr_soft)) {
3260 flags &= ~PCIIO_DMA_A64;
3262 if (flags & PCIIO_FIXED) {
3263 /* warning: mappings may fail later,
3264 * if direct32 can't get to the address.
3266 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
3267 /* User desires DIRECT A32 operations,
3268 * and the attributes of the DMA are
3269 * consistent with any previous DMA
3270 * mappings using shared resources.
3271 * Mapping calls may fail if target
3272 * is outside the direct32 range.
3274 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3275 "pcibr_dmamap_alloc: using direct32, map=0x%x\n",
3277 pcibr_dmamap->bd_flags = flags;
3278 pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
3279 pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
3280 return pcibr_dmamap;
3282 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
3283 "pcibr_dmamap_alloc: unable to use direct32\n"));
3285 /* If the user demands FIXED and we can't
3286 * give it to him, fail.
3288 xtalk_dmamap_free(xtalk_dmamap);
3289 free_pciio_dmamap(pcibr_dmamap);
3293 * Allocate Address Translation Entries from the mapping RAM.
3294 * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
3295 * the maximum number of ATEs is based on the worst-case
3296 * scenario, where the requested target is in the
3297 * last byte of an ATE; thus, mapping IOPGSIZE+2
3298 * does end up requiring three ATEs.
3300 if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
3301 ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
3302 +req_size_max /* max mapping bytes */
3303 - 1) + 1; /* round UP */
3304 } else { /* assume requested target is page aligned */
3305 ate_count = IOPG(req_size_max /* max mapping bytes */
3306 - 1) + 1; /* round UP */
3309 ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
3311 if (ate_index != -1) {
3312 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
3313 bridge_ate_t ate_proto;
3317 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3318 "pcibr_dmamap_alloc: using PMU, ate_index=%d, "
3319 "pcibr_dmamap=0x%x\n", ate_index, pcibr_dmamap));
3321 ate_proto = pcibr_flags_to_ate(flags);
3323 pcibr_dmamap->bd_flags = flags;
3324 pcibr_dmamap->bd_pci_addr =
3325 PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
3327 * for xbridge the byte-swap bit == bit 29 of PCI address
3329 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3330 if (flags & PCIIO_BYTE_STREAM)
3331 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
3333 * If swap was set in bss_device in pcibr_endian_set()
3334 * we need to change the address bit.
3336 if (pcibr_soft->bs_slot[slot].bss_device &
3337 BRIDGE_DEV_SWAP_PMU)
3338 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
3339 if (flags & PCIIO_WORD_VALUES)
3340 ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
3342 pcibr_dmamap->bd_xio_addr = 0;
3343 pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
3344 pcibr_dmamap->bd_ate_index = ate_index;
3345 pcibr_dmamap->bd_ate_count = ate_count;
3346 pcibr_dmamap->bd_ate_proto = ate_proto;
3348 /* Make sure we have an RRB (or two).
3350 if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
3351 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
3352 if (have_rrbs < 2) {
3353 if (ate_proto & ATE_PREF)
3357 if (have_rrbs < min_rrbs)
3358 do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
3359 min_rrbs - have_rrbs);
/* external (SSRAM) ATEs on non-xbridge parts: shadow the slot's
 * command register so later flushes can reach it quickly */
3362 if (ate_index >= pcibr_soft->bs_int_ate_size &&
3363 !IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3364 bridge_t *bridge = pcibr_soft->bs_base;
3365 volatile unsigned *cmd_regp;
3369 pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
3371 s = pcibr_lock(pcibr_soft);
3372 cmd_regp = pcibr_slot_config_addr(bridge, slot,
3374 if ( IS_PIC_SOFT(pcibr_soft) ) {
3375 cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3378 if (io_get_sh_swapper(NASID_GET(bridge))) {
3379 BRIDGE_REG_SET32((&cmd_reg)) = __swab32(*cmd_regp);
3381 cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3384 pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
3385 pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
3386 pcibr_unlock(pcibr_soft, s);
3388 return pcibr_dmamap;
3390 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3391 "pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
3394 pcibr_ate_free(pcibr_soft, ate_index, ate_count);
3396 /* total failure: sorry, you just can't
3397 * get from here to there that way.
3399 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3400 "pcibr_dmamap_alloc: complete failure.\n"));
3401 xtalk_dmamap_free(xtalk_dmamap);
3402 free_pciio_dmamap(pcibr_dmamap);
/*
 * pcibr_dmamap_free: release all resources behind a DMA map — the
 * external-ATE activity counter (if this map was BUSY on SSRAM ATEs),
 * the xtalk map, the D64 or PMU claim on the slot's Device register,
 * any ATEs, and finally the map structure itself.
 */
3408 pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
3410 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
3411 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3412 pcibr_dmamap->bd_slot);
3414 unsigned flags = pcibr_dmamap->bd_flags;
3416 /* Make sure that bss_ext_ates_active
3417 * is properly kept up to date.
3420 if (PCIBR_DMAMAP_BUSY & flags)
3421 if (PCIBR_DMAMAP_SSRAM & flags)
3422 atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
3424 xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
3426 if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
3427 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
3429 if (pcibr_dmamap->bd_ate_count) {
3430 pcibr_ate_free(pcibr_dmamap->bd_soft,
3431 pcibr_dmamap->bd_ate_index,
3432 pcibr_dmamap->bd_ate_count);
3433 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
3436 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3437 "pcibr_dmamap_free: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3439 free_pciio_dmamap(pcibr_dmamap);
/*
 * Reverse-translate an XIO (crosstalk) address range that targets this
 * bridge back into the PCI MEM address it decodes to. Used to
 * short-circuit DMA that loops back onto this PCI bus. Checks, in
 * order: the MEM32 window, the MEM64 window, then each slot's DEVIO
 * window (whose PCI base comes from the Device(x) OFF field).
 * NOTE(review): fragmented excerpt — the fall-through failure return
 * (presumably PCI_NOWHERE) is on lines not visible here; confirm.
 */
3443 * pcibr_addr_xio_to_pci: given a PIO range, hand
3444 * back the corresponding base PCI MEM address;
3445 * this is used to short-circuit DMA requests that
3446 * loop back onto this PCI bus.
3449 pcibr_addr_xio_to_pci(pcibr_soft_t soft,
3453 iopaddr_t xio_lim = xio_addr + req_size - 1;
/* whole range inside the 32-bit PCI MEM window? */
3457 if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
3458 (xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
3459 pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
/* whole range inside the 64-bit PCI MEM window? */
3462 if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
3463 (xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
3464 pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
/* otherwise search the per-slot DEVIO windows */
3467 for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
3468 if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
3469 (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
3472 dev = soft->bs_slot[slot].bss_device;
3473 pci_addr = dev & BRIDGE_DEV_OFF_MASK;
3474 pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
3475 pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
/* DEVIO hits are valid only when the window is mapped to PCI MEM space */
3476 return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
/*
 * pcibr_dmamap_addr: establish the DMA mapping for [paddr, paddr+req_size)
 * through a previously allocated map and return the PCI address the
 * device should use. Four cases, selected by where the xtalk layer says
 * the memory lives and by the map's flags:
 *   1) loopback to this bridge  -> pcibr_addr_xio_to_pci()
 *   2) PCIIO_DMA_A64            -> 64-bit direct (attributes pre-bundled
 *                                  in bd_pci_addr at alloc time)
 *   3) PCIIO_FIXED              -> 32-bit direct window with range checks
 *   4) otherwise                -> page-mapped via PMU ATEs
 * NOTE(review): fragmented excerpt (interior lines missing) — exact
 * branch nesting and the ATE-write loop are partly out of view.
 */
3483 pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
3487 pcibr_soft_t pcibr_soft;
3489 xwidgetnum_t xio_port;
3493 ASSERT(pcibr_dmamap != NULL);
3494 ASSERT(req_size > 0);
3495 ASSERT(req_size <= pcibr_dmamap->bd_max_size);
3497 pcibr_soft = pcibr_dmamap->bd_soft;
3499 flags = pcibr_dmamap->bd_flags;
/* let the xtalk layer translate; a packed result carries its own port */
3501 xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
3502 if (XIO_PACKED(xio_addr)) {
3503 xio_port = XIO_PORT(xio_addr);
3504 xio_addr = XIO_ADDR(xio_addr);
3506 xio_port = pcibr_dmamap->bd_xio_port;
3508 /* If this DMA is to an address that
3509 * refers back to this Bridge chip,
3510 * reduce it back to the correct
3513 if (xio_port == pcibr_soft->bs_xid) {
3514 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
3515 } else if (flags & PCIIO_DMA_A64) {
3517 * always use 64-bit direct mapping,
3518 * which always works.
3519 * Device(x) was set up during
3520 * dmamap allocation.
3523 /* attributes are already bundled up into bd_pci_addr.
3525 pci_addr = pcibr_dmamap->bd_pci_addr
3526 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
3529 /* Bridge Hardware WAR #482836:
3530 * If the transfer is not cache aligned
3531 * and the Bridge Rev is <= B, force
3532 * prefetch to be off.
3534 if (flags & PCIBR_NOPREFETCH)
3535 pci_addr &= ~PCI64_ATTR_PREF;
3537 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
3538 pcibr_dmamap->bd_dev,
3539 "pcibr_dmamap_addr: (direct64): wanted paddr [0x%x..0x%x] "
3540 "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
3541 paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
/* direct32: must hit this bridge's direct-map port and stay inside the
 * BRIDGE_DMA_DIRECT_SIZE window; pci_addr == 0 signals failure here.
 */
3543 } else if (flags & PCIIO_FIXED) {
3545 * always use 32-bit direct mapping,
3547 * Device(x) was set up during
3548 * dmamap allocation.
3551 if (xio_port != pcibr_soft->bs_dir_xport)
3552 pci_addr = 0; /* wrong DIDN */
3553 else if (xio_addr < pcibr_dmamap->bd_xio_addr)
3554 pci_addr = 0; /* out of range */
3555 else if ((xio_addr + req_size) >
3556 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
3557 pci_addr = 0; /* out of range */
3559 pci_addr = pcibr_dmamap->bd_pci_addr +
3560 xio_addr - pcibr_dmamap->bd_xio_addr;
3562 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
3563 pcibr_dmamap->bd_dev,
3564 "pcibr_dmamap_addr (direct32): wanted paddr [0x%x..0x%x] "
3565 "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
3566 paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
/* PMU (page-mapped) path: one ATE per I/O page covering the request */
3569 bridge_t *bridge = pcibr_soft->bs_base;
3570 iopaddr_t offset = IOPGOFF(xio_addr);
3571 bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
3572 int ate_count = IOPG(offset + req_size - 1) + 1;
3574 int ate_index = pcibr_dmamap->bd_ate_index;
3575 unsigned cmd_regs[8];
3578 #if PCIBR_FREEZE_TIME
3579 int ate_total = ate_count;
3580 unsigned freeze_time;
3582 bridge_ate_p ate_ptr = pcibr_dmamap->bd_ate_ptr;
3585 /* Bridge Hardware WAR #482836:
3586 * If the transfer is not cache aligned
3587 * and the Bridge Rev is <= B, force
3588 * prefetch to be off.
3590 if (flags & PCIBR_NOPREFETCH)
3591 ate_proto &= ~ATE_PREF;
3594 | (xio_port << ATE_TIDSHIFT)
3595 | (xio_addr - offset);
3597 pci_addr = pcibr_dmamap->bd_pci_addr + offset;
3599 /* Fill in our mapping registers
3600 * with the appropriate xtalk data,
3601 * and hand back the PCI address.
3604 ASSERT(ate_count > 0);
3605 if (ate_count <= pcibr_dmamap->bd_ate_count) {
/* reading b_wid_tflush stalls until prior bridge PIO writes complete;
 * non-PIC parts may need the 32-bit swapped access variant.
 */
3609 if ( IS_PIC_SOFT(pcibr_soft) ) {
3610 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
3613 if (io_get_sh_swapper(NASID_GET(bridge))) {
3614 BRIDGE_REG_GET32((&bridge->b_wid_tflush));
3616 bridge->b_wid_tflush;
3619 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3620 "pcibr_dmamap_addr (PMU) : wanted paddr "
3621 "[0x%x..0x%x] returning PCI 0x%x\n",
3622 paddr, paddr + req_size - 1, pci_addr));
3625 /* The number of ATE's required is greater than the number
3626 * allocated for this map. One way this can happen is if
3627 * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
3628 * flag, and then when that map is used (right now), the
3629 * target address tells us we really did need to roundup.
3630 * The other possibility is that the map is just plain too
3631 * small to handle the requested target area.
3633 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3634 "pcibr_dmamap_addr (PMU) : wanted paddr "
3635 "[0x%x..0x%x] ate_count 0x%x bd_ate_count 0x%x "
3636 "ATE's required > number allocated\n",
3637 paddr, paddr + req_size - 1,
3638 ate_count, pcibr_dmamap->bd_ate_count));
/*
 * pcibr_dmamap_list: scatter/gather variant of pcibr_dmamap_addr.
 * Translates an address/length list (alenlist) through the xtalk layer,
 * then rewrites each block into its PCI DMA address: loopback blocks go
 * through pcibr_addr_xio_to_pci(), direct64 blocks get the attribute
 * bits OR'd in, and everything else is written into PMU ATEs (sharing
 * an ATE when consecutive blocks fall in the same I/O page). With
 * PCIIO_INPLACE the input list is rewritten; otherwise a fresh list is
 * created. Pairs every ATE_FREEZE with an ATE_THAW, including on all
 * failure paths. Returns the PCI alenlist, or (presumably) NULL on
 * failure — the error-exit lines are not visible in this excerpt.
 * NOTE(review): fragmented listing; goto/cleanup structure between
 * visible lines is assumed — confirm against the full source.
 */
3648 pcibr_dmamap_list(pcibr_dmamap_t pcibr_dmamap,
3649 alenlist_t palenlist,
3652 pcibr_soft_t pcibr_soft;
3653 bridge_t *bridge=NULL;
3655 unsigned al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
3656 int inplace = flags & PCIIO_INPLACE;
3658 alenlist_t pciio_alenlist = 0;
3659 alenlist_t xtalk_alenlist;
3666 bridge_ate_p ate_ptr = (bridge_ate_p)0;
3667 bridge_ate_t ate_proto = (bridge_ate_t)0;
3668 bridge_ate_t ate_prev;
3670 alenaddr_t xio_addr;
3671 xwidgetnum_t xio_port;
3673 alenaddr_t new_addr;
3674 unsigned cmd_regs[8];
3677 #if PCIBR_FREEZE_TIME
3678 unsigned freeze_time;
3680 int ate_freeze_done = 0; /* To pair ATE_THAW
3681 * with an ATE_FREEZE
3684 pcibr_soft = pcibr_dmamap->bd_soft;
/* first get the list translated to XIO space */
3686 xtalk_alenlist = xtalk_dmamap_list(pcibr_dmamap->bd_xtalk, palenlist,
3687 flags & DMAMAP_FLAGS);
3688 if (!xtalk_alenlist) {
3689 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3690 "pcibr_dmamap_list: xtalk_dmamap_list() failed, "
3691 "pcibr_dmamap=0x%x\n", pcibr_dmamap));
3694 alenlist_cursor_init(xtalk_alenlist, 0, NULL);
/* INPLACE rewrites the xtalk list; otherwise build a new output list */
3697 pciio_alenlist = xtalk_alenlist;
3699 pciio_alenlist = alenlist_create(al_flags);
3700 if (!pciio_alenlist) {
3701 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3702 "pcibr_dmamap_list: alenlist_create() failed, "
3703 "pcibr_dmamap=0x%lx\n", (unsigned long)pcibr_dmamap));
3708 direct64 = pcibr_dmamap->bd_flags & PCIIO_DMA_A64;
3710 bridge = pcibr_soft->bs_base;
3711 ate_ptr = pcibr_dmamap->bd_ate_ptr;
3712 ate_index = pcibr_dmamap->bd_ate_index;
3713 ate_proto = pcibr_dmamap->bd_ate_proto;
3715 ate_freeze_done = 1; /* Remember that we need to do an ATE_THAW */
3717 pci_addr = pcibr_dmamap->bd_pci_addr;
3719 ate_prev = 0; /* matches no valid ATEs */
/* walk each (addr, length) block of the translated list */
3720 while (ALENLIST_SUCCESS ==
3721 alenlist_get(xtalk_alenlist, NULL, 0,
3722 &xio_addr, &length, al_flags)) {
3723 if (XIO_PACKED(xio_addr)) {
3724 xio_port = XIO_PORT(xio_addr);
3725 xio_addr = XIO_ADDR(xio_addr);
3727 xio_port = pcibr_dmamap->bd_xio_port;
/* block loops back onto this bridge: reverse-translate it */
3729 if (xio_port == pcibr_soft->bs_xid) {
3730 new_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, length);
3731 if (new_addr == PCI_NOWHERE) {
3732 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3733 "pcibr_dmamap_list: pcibr_addr_xio_to_pci failed, "
3734 "pcibr_dmamap=0x%x\n", pcibr_dmamap));
3737 } else if (direct64) {
3738 new_addr = pci_addr | xio_addr
3739 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
3741 /* Bridge Hardware WAR #482836:
3742 * If the transfer is not cache aligned
3743 * and the Bridge Rev is <= B, force
3744 * prefetch to be off.
3746 if (flags & PCIBR_NOPREFETCH)
3747 new_addr &= ~PCI64_ATTR_PREF;
3750 /* calculate the ate value for
3751 * the first address. If it
3752 * matches the previous
3753 * ATE written (ie. we had
3754 * multiple blocks in the
3755 * same IOPG), then back up
3756 * and reuse that ATE.
3758 * We are NOT going to
3759 * aggressively try to
3760 * reuse any other ATEs.
3762 offset = IOPGOFF(xio_addr);
3764 | (xio_port << ATE_TIDSHIFT)
3765 | (xio_addr - offset);
3766 if (ate == ate_prev) {
3767 PCIBR_DEBUG((PCIBR_DEBUG_ATE, pcibr_dmamap->bd_dev,
3768 "pcibr_dmamap_list: ATE share\n"));
3771 pci_addr -= IOPGSIZE;
3773 new_addr = pci_addr + offset;
3775 /* Fill in the hardware ATEs
3776 * that contain this block.
3778 ate_count = IOPG(offset + length - 1) + 1;
3779 ate_total += ate_count;
3781 /* Ensure that this map contains enough ATE's */
3782 if (ate_total > pcibr_dmamap->bd_ate_count) {
3783 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_dmamap->bd_dev,
3784 "pcibr_dmamap_list :\n"
3785 "\twanted xio_addr [0x%x..0x%x]\n"
3786 "\tate_total 0x%x bd_ate_count 0x%x\n"
3787 "\tATE's required > number allocated\n",
3788 xio_addr, xio_addr + length - 1,
3789 ate_total, pcibr_dmamap->bd_ate_count));
/* advance the ATE cursor; ate_count<<IOPFNSHIFT converts pages->bytes */
3795 ate_index += ate_count;
3796 ate_ptr += ate_count;
3798 ate_count <<= IOPFNSHIFT;
3800 pci_addr += ate_count;
3803 /* write the PCI DMA address
3804 * out to the scatter-gather list.
3807 if (ALENLIST_SUCCESS !=
3808 alenlist_replace(pciio_alenlist, NULL,
3809 &new_addr, &length, al_flags)) {
3810 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3811 "pcibr_dmamap_list: alenlist_replace() failed, "
3812 "pcibr_dmamap=0x%x\n", pcibr_dmamap));
3817 if (ALENLIST_SUCCESS !=
3818 alenlist_append(pciio_alenlist,
3819 new_addr, length, al_flags)) {
3820 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3821 "pcibr_dmamap_list: alenlist_append() failed, "
3822 "pcibr_dmamap=0x%x\n", pcibr_dmamap));
3828 alenlist_done(xtalk_alenlist);
3830 /* Reset the internal cursor of the alenlist to be returned back
3833 alenlist_cursor_init(pciio_alenlist, 0, NULL);
3836 /* In case an ATE_FREEZE was done do the ATE_THAW to unroll all the
3837 * changes that ATE_FREEZE has done to implement the external SSRAM
3840 if (ate_freeze_done) {
3842 if ( IS_PIC_SOFT(pcibr_soft) ) {
3843 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
3846 if (io_get_sh_swapper(NASID_GET(bridge))) {
3847 BRIDGE_REG_GET32((&bridge->b_wid_tflush));
3849 bridge->b_wid_tflush;
3853 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3854 "pcibr_dmamap_list: pcibr_dmamap=0x%x, pciio_alenlist=0x%x\n",
3855 pcibr_dmamap, pciio_alenlist));
3857 return pciio_alenlist;
3860 /* There are various points of failure after doing an ATE_FREEZE
3861 * We need to do an ATE_THAW. Otherwise the ATEs are locked forever.
3862 * The decision to do an ATE_THAW needs to be based on whether a
3863 * an ATE_FREEZE was done before.
3865 if (ate_freeze_done) {
3867 if ( IS_PIC_SOFT(pcibr_soft) ) {
3868 bridge->b_wid_tflush;
3871 if (io_get_sh_swapper(NASID_GET(bridge))) {
3872 BRIDGE_REG_GET32((&bridge->b_wid_tflush));
3874 bridge->b_wid_tflush;
3878 if (pciio_alenlist && !inplace)
3879 alenlist_destroy(pciio_alenlist);
/*
 * pcibr_dmamap_done: mark a DMA mapping complete. Clears the BUSY flag
 * and, if external SSRAM ATEs were in use, drops the per-slot
 * bss_ext_ates_active counter; then notifies the xtalk layer. ATEs are
 * deliberately NOT invalidated here (see original comment below).
 * NOTE(review): fragmented excerpt; brace structure between visible
 * lines is assumed.
 */
3885 pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
3888 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
3889 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3892 * We could go through and invalidate ATEs here;
3893 * for performance reasons, we don't.
3894 * We also don't enforce the strict alternation
3895 * between _addr/_list and _done, but Hub does.
3898 if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
3899 pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
3901 if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
3902 atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
3904 xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
3906 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3907 "pcibr_dmamap_done: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3912 * For each bridge, the DIR_OFF value in the Direct Mapping Register
3913 * determines the PCI to Crosstalk memory mapping to be used for all
3914 * 32-bit Direct Mapping memory accesses. This mapping can be to any
3915 * node in the system. This function will return that compact node id.
/* Look up the bridge soft state for this connection point and convert
 * the NASID embedded in its direct-map XIO base into a compact node id.
 */
3920 pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
3923 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3924 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3926 return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
/*
 * pcibr_dmatrans_addr: map-free ("translation") DMA setup. Produces a
 * PCI address for [paddr, paddr+req_size) without allocating a
 * pcibr_dmamap, by using the bridge's direct-mapped windows:
 *   - loopback onto this bridge -> pcibr_addr_xio_to_pci()
 *   - PCIIO_DMA_A64 -> 64-bit direct map; caches per-slot d64 base/flags
 *     in slotp and auto-allocates RRBs in PCI mode
 *   - otherwise -> 32-bit direct map (2GB window at bs_dir_xbase),
 *     likewise caching per-slot d32 state and auto-allocating RRBs
 * Falls through with debug traces when no mapping is acceptable.
 * NOTE(review): fragmented excerpt — failure-return statements and
 * several closing braces are on lines not visible here; confirm flow
 * against the full source.
 */
3931 pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
3932 device_desc_t dev_desc,
3937 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3938 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3939 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
3940 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3941 pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
3943 xwidgetnum_t xio_port;
3951 /* merge in forced flags */
3952 flags |= pcibr_soft->bs_dma_flags;
3954 xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
3955 flags & DMAMAP_FLAGS);
3957 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3958 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3959 "xtalk_dmatrans_addr failed with 0x%x\n",
3960 paddr, paddr + req_size - 1, xio_addr));
3964 * find which XIO port this goes to.
3966 if (XIO_PACKED(xio_addr)) {
3967 if (xio_addr == XIO_NOWHERE) {
3968 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3969 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3970 "xtalk_dmatrans_addr failed with XIO_NOWHERE\n",
3971 paddr, paddr + req_size - 1));
3974 xio_port = XIO_PORT(xio_addr);
3975 xio_addr = XIO_ADDR(xio_addr);
3978 xio_port = pcibr_soft->bs_mxid;
3981 * If this DMA comes back to us,
3982 * return the PCI MEM address on
3983 * which it would land, or NULL
3984 * if the target is something
3985 * on bridge other than PCI MEM.
3987 if (xio_port == pcibr_soft->bs_xid) {
3988 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
3989 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
3990 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
3991 "xio_port=0x%x, pci_addr=0x%x\n",
3992 paddr, paddr + req_size - 1, xio_port, pci_addr));
3995 /* If the caller can use A64, try to
3996 * satisfy the request with the 64-bit
3997 * direct map. This can fail if the
3998 * configuration bits in Device(x)
3999 * conflict with our flags.
4002 if (flags & PCIIO_DMA_A64) {
4003 pci_addr = slotp->bss_d64_base;
/* default to virtual channel 0 unless the caller asked for VCHAN1 */
4004 if (!(flags & PCIBR_VCHAN1))
4005 flags |= PCIBR_VCHAN0;
/* fast path: this slot already has a d64 base with identical flags */
4006 if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
4007 (flags == slotp->bss_d64_flags)) {
4009 pci_addr |= xio_addr
4010 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
4013 if (xio_addr != 0x20000000)
4015 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4016 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4017 "xio_port=0x%x, direct64: pci_addr=0x%x\n",
4018 paddr, paddr + req_size - 1, xio_addr, pci_addr));
/* slow path: try to claim the D64 Device(x) bits for these flags */
4021 if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
4022 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
4023 slotp->bss_d64_flags = flags;
4024 slotp->bss_d64_base = pci_addr;
4025 pci_addr |= xio_addr
4026 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
4028 /* If in PCI mode, make sure we have an RRB (or two).
4030 if (IS_PCI(pcibr_soft) &&
4031 !(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
4032 if (flags & PCIBR_VCHAN1)
4034 have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
4035 if (have_rrbs < 2) {
4036 if (pci_addr & PCI64_ATTR_PREF)
4040 if (have_rrbs < min_rrbs)
4041 do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, vchan,
4042 min_rrbs - have_rrbs);
4046 if (xio_addr != 0x20000000)
4048 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4049 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4050 "xio_port=0x%x, direct64: pci_addr=0x%x, "
4051 "new flags: 0x%x\n", paddr, paddr + req_size - 1,
4052 xio_addr, pci_addr, (uint64_t) flags));
4056 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4057 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4058 "xio_port=0x%x, Unable to set direct64 Device(x) bits\n",
4059 paddr, paddr + req_size - 1, xio_addr));
4061 /* PIC only supports 64-bit direct mapping in PCI-X mode */
4062 if (IS_PCIX(pcibr_soft)) {
4066 /* our flags conflict with Device(x). try direct32*/
4067 flags = flags & ~(PCIIO_DMA_A64 | PCIBR_VCHAN0);
4069 /* Try to satisfy the request with the 32-bit direct
4070 * map. This can fail if the configuration bits in
4071 * Device(x) conflict with our flags, or if the
4072 * target address is outside where DIR_OFF points.
4075 size_t map_size = 1ULL << 31;
4076 iopaddr_t xio_base = pcibr_soft->bs_dir_xbase;
4077 iopaddr_t offset = xio_addr - xio_base;
4078 iopaddr_t endoff = req_size + offset;
/* request must lie wholly inside the 2GB direct32 window and target
 * the direct-map crosstalk port.
 */
4080 if ((req_size > map_size) ||
4081 (xio_addr < xio_base) ||
4082 (xio_port != pcibr_soft->bs_dir_xport) ||
4083 (endoff > map_size)) {
4085 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4086 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4087 "xio_port=0x%x, xio region outside direct32 target\n",
4088 paddr, paddr + req_size - 1, xio_addr));
4090 pci_addr = slotp->bss_d32_base;
4091 if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
4092 (flags == slotp->bss_d32_flags)) {
4096 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4097 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4098 "xio_port=0x%x, direct32: pci_addr=0x%x\n",
4099 paddr, paddr + req_size - 1, xio_addr, pci_addr));
4103 if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {
4105 pci_addr = PCI32_DIRECT_BASE;
4106 slotp->bss_d32_flags = flags;
4107 slotp->bss_d32_base = pci_addr;
4110 /* Make sure we have an RRB (or two).
4112 if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
4113 have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
4114 if (have_rrbs < 2) {
4115 if (slotp->bss_device & BRIDGE_DEV_PREF)
4119 if (have_rrbs < min_rrbs)
4120 do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot,
4121 vchan, min_rrbs - have_rrbs);
4125 if (xio_addr != 0x20000000)
4127 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4128 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4129 "xio_port=0x%x, direct32: pci_addr=0x%x, "
4130 "new flags: 0x%x\n", paddr, paddr + req_size - 1,
4131 xio_addr, pci_addr, (uint64_t) flags));
4135 /* our flags conflict with Device(x).
4137 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4138 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4139 "xio_port=0x%x, Unable to set direct32 Device(x) bits\n",
4140 paddr, paddr + req_size - 1, xio_port));
4144 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4145 "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
4146 "xio_port=0x%x, No acceptable PCI address found\n",
4147 paddr, paddr + req_size - 1, xio_port));
/*
 * pcibr_dmatrans_list: scatter/gather variant of pcibr_dmatrans_addr.
 * Decides up front whether the whole list goes through the 64-bit or
 * 32-bit direct map (claiming the matching Device(x) bits if this slot
 * isn't already configured compatibly), translates the list through the
 * xtalk layer, then rewrites each block to its PCI address. Loopback
 * blocks are reverse-translated; direct32 blocks are range-checked
 * against the 2GB window. On success the cached per-slot d64/d32 state
 * is updated and the PCI alenlist returned; on failure any Device(x)
 * bits claimed here (relbits) are released and a non-inplace output
 * list destroyed.
 * NOTE(review): fragmented excerpt — error-exit labels/returns are on
 * lines not visible here; confirm flow against the full source.
 */
4154 pcibr_dmatrans_list(devfs_handle_t pconn_vhdl,
4155 device_desc_t dev_desc,
4156 alenlist_t palenlist,
4159 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4160 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4161 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
4162 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4163 pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
4164 xwidgetnum_t xio_port;
4166 alenlist_t pciio_alenlist = 0;
4167 alenlist_t xtalk_alenlist = 0;
4174 alenaddr_t xio_addr;
4179 alenaddr_t pci_addr;
4181 unsigned relbits = 0;
4183 /* merge in forced flags */
4184 flags |= pcibr_soft->bs_dma_flags;
4186 inplace = flags & PCIIO_INPLACE;
4187 direct64 = flags & PCIIO_DMA_A64;
4188 al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
/* direct64 setup: 48-bit window, reuse or claim the slot's d64 state */
4191 map_size = 1ull << 48;
4193 pci_base = slotp->bss_d64_base;
4194 if ((pci_base != PCIBR_D64_BASE_UNSET) &&
4195 (flags == slotp->bss_d64_flags)) {
4196 /* reuse previous base info */
4197 } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS) < 0) {
4198 /* DMA configuration conflict */
4199 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4200 "pcibr_dmatrans_list: DMA configuration conflict "
4201 "for direct64, flags=0x%x\n", flags));
/* remember which Device(x) bits we claimed so a failure can release them */
4204 relbits = BRIDGE_DEV_D64_BITS;
4206 pcibr_flags_to_d64(flags, pcibr_soft);
/* direct32 setup: 2GB window at bs_dir_xbase */
4209 xio_base = pcibr_soft->bs_dir_xbase;
4210 map_size = 1ull << 31;
4211 pci_base = slotp->bss_d32_base;
4212 if ((pci_base != PCIBR_D32_BASE_UNSET) &&
4213 (flags == slotp->bss_d32_flags)) {
4214 /* reuse previous base info */
4215 } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS) < 0) {
4216 /* DMA configuration conflict */
4217 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4218 "pcibr_dmatrans_list: DMA configuration conflict "
4219 "for direct32, flags=0x%x\n", flags));
4222 relbits = BRIDGE_DEV_D32_BITS;
4223 pci_base = PCI32_DIRECT_BASE;
4227 xtalk_alenlist = xtalk_dmatrans_list(xconn_vhdl, 0, palenlist,
4228 flags & DMAMAP_FLAGS);
4229 if (!xtalk_alenlist) {
4230 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4231 "pcibr_dmatrans_list: xtalk_dmatrans_list failed "
4232 "xtalk_alenlist=0x%x\n", xtalk_alenlist));
4236 alenlist_cursor_init(xtalk_alenlist, 0, NULL);
/* INPLACE rewrites the xtalk list; otherwise build a new output list */
4239 pciio_alenlist = xtalk_alenlist;
4241 pciio_alenlist = alenlist_create(al_flags);
4242 if (!pciio_alenlist) {
4243 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4244 "pcibr_dmatrans_list: alenlist_create failed with "
4245 " 0x%x\n", pciio_alenlist));
4250 while (ALENLIST_SUCCESS ==
4251 alenlist_get(xtalk_alenlist, NULL, 0,
4252 &xio_addr, &xio_size, al_flags)) {
4255 * find which XIO port this goes to.
4257 if (XIO_PACKED(xio_addr)) {
4258 if (xio_addr == XIO_NOWHERE) {
4259 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4260 "pcibr_dmatrans_list: xio_addr == XIO_NOWHERE\n"));
4263 xio_port = XIO_PORT(xio_addr);
4264 xio_addr = XIO_ADDR(xio_addr);
4266 xio_port = pcibr_soft->bs_mxid;
4269 * If this DMA comes back to us,
4270 * return the PCI MEM address on
4271 * which it would land, or NULL
4272 * if the target is something
4273 * on bridge other than PCI MEM.
4275 if (xio_port == pcibr_soft->bs_xid) {
4276 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, xio_size);
4277 if (pci_addr == (alenaddr_t)NULL) {
4278 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4279 "pcibr_dmatrans_list: pcibr_addr_xio_to_pci failed "
4280 "xio_addr=0x%x, xio_size=0x%x\n", xio_addr, xio_size));
4283 } else if (direct64) {
4284 ASSERT(xio_port != 0);
4285 pci_addr = pci_base | xio_addr
4286 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
4288 iopaddr_t offset = xio_addr - xio_base;
4289 iopaddr_t endoff = xio_size + offset;
/* direct32 blocks must fall wholly inside the window and target the
 * direct-map crosstalk port.
 */
4291 if ((xio_size > map_size) ||
4292 (xio_addr < xio_base) ||
4293 (xio_port != pcibr_soft->bs_dir_xport) ||
4294 (endoff > map_size)) {
4295 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4296 "pcibr_dmatrans_list: xio_size > map_size fail\n"
4297 "xio_addr=0x%x, xio_size=0x%x. map_size=0x%x, "
4298 "xio_port=0x%x, endoff=0x%x\n",
4299 xio_addr, xio_size, map_size, xio_port, endoff));
4303 pci_addr = pci_base + (xio_addr - xio_base);
4306 /* write the PCI DMA address
4307 * out to the scatter-gather list.
4310 if (ALENLIST_SUCCESS !=
4311 alenlist_replace(pciio_alenlist, NULL,
4312 &pci_addr, &xio_size, al_flags)) {
4313 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4314 "pcibr_dmatrans_list: alenlist_replace failed\n"));
4318 if (ALENLIST_SUCCESS !=
4319 alenlist_append(pciio_alenlist,
4320 pci_addr, xio_size, al_flags)) {
4321 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4322 "pcibr_dmatrans_list: alenlist_append failed\n"));
/* success: commit the (possibly new) per-slot base/flags cache */
4330 slotp->bss_d64_flags = flags;
4331 slotp->bss_d64_base = pci_base;
4333 slotp->bss_d32_flags = flags;
4334 slotp->bss_d32_base = pci_base;
4338 alenlist_done(xtalk_alenlist);
4340 /* Reset the internal cursor of the alenlist to be returned back
4343 alenlist_cursor_init(pciio_alenlist, 0, NULL);
4345 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
4346 "pcibr_dmatrans_list: pciio_alenlist=0x%x\n",
4349 return pciio_alenlist;
/* failure path: undo the Device(x) claim and drop any new output list */
4353 pcibr_release_device(pcibr_soft, pciio_slot, relbits);
4354 if (pciio_alenlist && !inplace)
4355 alenlist_destroy(pciio_alenlist);
/* pcibr_dmamap_drain: flush outstanding DMA for a map by delegating to
 * the underlying xtalk map (the bridge itself adds no extra work here).
 */
4360 pcibr_dmamap_drain(pcibr_dmamap_t map)
4362 xtalk_dmamap_drain(map->bd_xtalk);
/* pcibr_dmaaddr_drain: flush outstanding DMA to a physical address
 * range by delegating to the xtalk provider on the bridge's crosstalk
 * connection point.
 */
4366 pcibr_dmaaddr_drain(devfs_handle_t pconn_vhdl,
4370 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4371 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4372 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
4374 xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
/* pcibr_dmalist_drain: flush outstanding DMA for a scatter-gather list
 * by delegating to the xtalk provider on the bridge's crosstalk
 * connection point.
 */
4378 pcibr_dmalist_drain(devfs_handle_t pconn_vhdl,
4381 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4382 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4383 devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
4385 xtalk_dmalist_drain(xconn_vhdl, list);
4389 * Get the starting PCIbus address out of the given DMA map.
4390 * This function is supposed to be used by a close friend of PCI bridge
4391 * since it relies on the fact that the starting address of the map is fixed at
4392 * the allocation time in the current implementation of PCI bridge.
/* Simple accessor: the base PCI address was computed at map-alloc time
 * and cached in bd_pci_addr.
 */
4395 pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
4397 return (pcibr_dmamap->bd_pci_addr);
4400 /* =====================================================================
4401 * CONFIGURATION MANAGEMENT
/* Provider lifecycle hooks. Only the signatures are visible in this
 * excerpt; the bodies appear to be on lines not shown here (presumably
 * empty/no-op stubs — confirm against the full source).
 */
4405 pcibr_provider_startup(devfs_handle_t pcibr)
4411 pcibr_provider_shutdown(devfs_handle_t pcibr)
/*
 * pcibr_reset: reset the card in the given slot. Redirects guest slots
 * to their host slot, then (for slots within the resettable range)
 * saves each function's PCI command register, pulses the slot's reset —
 * either through the L1 iobrick controller or by toggling the
 * BRIDGE_CTRL_RST_PIN bit in the widget control register — and restores
 * the saved base-address registers and command registers afterward.
 * All of this happens under the bridge lock.
 * NOTE(review): fragmented excerpt (missing lines include the function
 * prologue, delay between reset pin toggles, and return paths); the
 * trailing PCIBR_DEBUG_ALWAYS/#endif suggests part of this is inside a
 * PIC_LATER conditional — confirm against the full source.
 */
4416 pcibr_reset(devfs_handle_t conn)
4419 pciio_info_t pciio_info = pciio_info_get(conn);
4420 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4421 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4422 bridge_t *bridge = pcibr_soft->bs_base;
4427 pcibr_info_h pcibr_infoh;
4428 pcibr_info_t pcibr_info;
4431 #endif /* PIC_LATER */
/* guest slots are reset via their host slot */
4435 if (pcibr_soft->bs_slot[pciio_slot].has_host) {
4436 pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
4437 pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
4440 if ((pciio_slot >= pcibr_soft->bs_first_slot) &&
4441 (pciio_slot <= pcibr_soft->bs_last_reset)) {
4442 s = pcibr_lock(pcibr_soft);
4443 nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
4444 pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
/* save each function's config command register before the reset */
4445 for (f = 0; f < nf; ++f)
4447 cfgctl[f] = pcibr_func_config_get(bridge, pciio_slot, f,
4450 error = iobrick_pci_slot_rst(pcibr_soft->bs_l1sc,
4451 pcibr_widget_to_bus(pcibr_soft->bs_vhdl),
4452 PCIBR_DEVICE_TO_SLOT(pcibr_soft,pciio_slot),
/* alternative reset: drop then raise the slot's RST pin in widget ctrl */
4455 ctlreg = bridge->b_wid_control;
4456 bridge->b_wid_control = ctlreg & ~BRIDGE_CTRL_RST_PIN(pciio_slot);
4458 bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST_PIN(pciio_slot);
/* restore saved BARs (windows with nonzero bases) for every function */
4461 for (f = 0; f < nf; ++f)
4462 if ((pcibr_info = pcibr_infoh[f]))
4463 for (win = 0; win < 6; ++win)
4464 if (pcibr_info->f_window[win].w_base != 0)
4465 pcibr_func_config_set(bridge, pciio_slot, f,
4466 PCI_CFG_BASE_ADDR(win) / 4,
4467 pcibr_info->f_window[win].w_base);
/* restore saved command registers */
4468 for (f = 0; f < nf; ++f)
4470 pcibr_func_config_set(bridge, pciio_slot, f,
4471 PCI_CFG_COMMAND / 4,
4473 pcibr_unlock(pcibr_soft, s);
4480 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, conn,
4481 "pcibr_reset unimplemented for slot %d\n", conn, pciio_slot));
4482 #endif /* PIC_LATER */
/*
 * pcibr_endian_set: program the bridge's per-slot byte-swap hardware so
 * the caller sees its desired endianness. If the device's endianness
 * differs from the desired one, the BRIDGE_DEV_SWAP_BITS are set in the
 * slot's Device(x) register, otherwise cleared. The register and its
 * software shadow (bss_device) are only written when the value actually
 * changes, under the bridge lock, followed by a b_wid_tflush read to
 * wait for the PIO write to complete (with a swapped 32-bit access
 * variant for non-PIC parts).
 * NOTE(review): fragmented excerpt; return value and some braces are on
 * lines not visible here.
 */
4487 pcibr_endian_set(devfs_handle_t pconn_vhdl,
4488 pciio_endian_t device_end,
4489 pciio_endian_t desired_end)
4491 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4492 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4493 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4498 * Bridge supports hardware swapping; so we can always
4499 * arrange for the caller's desired endianness.
4502 s = pcibr_lock(pcibr_soft);
4503 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
4504 if (device_end != desired_end)
4505 devreg |= BRIDGE_DEV_SWAP_BITS;
4507 devreg &= ~BRIDGE_DEV_SWAP_BITS;
4509 /* NOTE- if we ever put SWAP bits
4510 * onto the disabled list, we will
4511 * have to change the logic here.
4513 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
4514 bridge_t *bridge = pcibr_soft->bs_base;
4516 if ( IS_PIC_SOFT(pcibr_soft) ) {
4517 bridge->b_device[pciio_slot].reg = devreg;
4518 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4519 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4522 if (io_get_sh_swapper(NASID_GET(bridge))) {
4523 BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
4524 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4525 BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
4527 bridge->b_device[pciio_slot].reg = devreg;
4528 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4529 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4533 pcibr_unlock(pcibr_soft, s);
4536 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
4537 "pcibr_endian_set: Device(%d): %x\n",
4538 pciio_slot, devreg, device_bits));
4540 printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
4545 /* This (re)sets the GBR and REALTIME bits and also keeps track of how
4546 * many sets are outstanding. Reset succeeds only if the number of outstanding
/*
 * pcibr_priority_bits_set: reference-counted manipulation of the
 * per-slot GBR/REAL_TIME priority bits in Device(x). PCI_PRIO_HIGH
 * sets the bits on the first outstanding request (counter 0 -> 1);
 * PCI_PRIO_LOW clears them only when the last request is released
 * (counter -> 0). The RT bit is withheld on Rev B silicon (hardware
 * bug — see original comment below). The Device(x) register and its
 * bss_device shadow are rewritten only on change, under the bridge
 * lock, with the usual b_wid_tflush PIO-completion read. Returns a
 * PRIO_* status (rc initialized to PRIO_SUCCESS; failure paths are on
 * lines not visible in this fragmented excerpt).
 */
4550 pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
4551 pciio_slot_t pciio_slot,
4552 pciio_priority_t device_prio)
4556 bridgereg_t rtbits = 0;
4558 int rc = PRIO_SUCCESS;
4560 /* in dual-slot configurations, the host and the
4561 * guest have separate DMA resources, so they
4562 * have separate requirements for priority bits.
4565 counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
4568 * Bridge supports PCI notions of LOW and HIGH priority
4569 * arbitration rings via a "REAL_TIME" bit in the per-device
4570 * Bridge register. The "GBR" bit controls access to the GBR
4571 * ring on the xbow. These two bits are (re)set together.
4573 * XXX- Bug in Rev B Bridge Si:
4574 * Symptom: Prefetcher starts operating incorrectly. This happens
4575 * due to corruption of the address storage ram in the prefetcher
4576 * when a non-real time PCI request is pulled and a real-time one is
4577 * put in it's place. Workaround: Use only a single arbitration ring
4578 * on PCI bus. GBR and RR can still be uniquely used per
4579 * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
4582 if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
4583 rtbits |= BRIDGE_DEV_RT;
4585 /* NOTE- if we ever put DEV_RT or DEV_GBR on
4586 * the disabled list, we will have to take
4587 * it into account here.
4590 s = pcibr_lock(pcibr_soft);
4591 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
4592 if (device_prio == PCI_PRIO_HIGH) {
4593 if ((++*counter == 1)) {
4599 } else if (device_prio == PCI_PRIO_LOW) {
4602 else if (--*counter == 0)
4606 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
4607 bridge_t *bridge = pcibr_soft->bs_base;
4609 if ( IS_PIC_SOFT(pcibr_soft) ) {
4610 bridge->b_device[pciio_slot].reg = devreg;
4611 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4612 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4615 if (io_get_sh_swapper(NASID_GET(bridge))) {
4616 BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
4617 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4618 BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
4620 bridge->b_device[pciio_slot].reg = devreg;
4621 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4622 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4626 pcibr_unlock(pcibr_soft, s);
/* pcibr_priority_set: public wrapper — resolve the connection point to
 * its bridge soft state and slot, then delegate to
 * pcibr_priority_bits_set (status deliberately ignored).
 */
4632 pcibr_priority_set(devfs_handle_t pconn_vhdl,
4633 pciio_priority_t device_prio)
4635 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4636 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4637 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4639 (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
4645 * Interfaces to allow special (e.g. SGI) drivers to set/clear
4646 * Bridge-specific device flags. Many flags are modified through
4647 * PCI-generic interfaces; we don't allow them to be directly
4648 * manipulated here. Only flags that at this point seem pretty
4649 * Bridge-specific can be set through these special interfaces.
4650 * We may add more flags as the need arises, or remove flags and
4651 * create PCI-generic interfaces as the need arises.
4653 * Returns 0 on failure, 1 on success
/*
 * pcibr_device_flags_set: translate PCIBR_* device flags into set/clear
 * masks for the per-slot Bridge device register, then apply them under
 * the bridge lock. Returns 0 on failure, 1 on success (per the comment
 * above this function). Each PCIBR_X / PCIBR_NOX flag pair maps onto a
 * single BRIDGE_DEV_* register bit.
 */
4656 pcibr_device_flags_set(devfs_handle_t pconn_vhdl,
4657 pcibr_device_flags_t flags)
4659 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4660 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
4661 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4662 bridgereg_t set = 0;
4663 bridgereg_t clr = 0;
/* Only flags within PCIBR_DEVICE_FLAGS may be set through this interface. */
4665 ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
/* Write gathering is controlled by two bits (PMU and direct-map paths);
 * the same PCIBR_[NO]WRITE_GATHER flag drives both below. */
4667 if (flags & PCIBR_WRITE_GATHER)
4668 set |= BRIDGE_DEV_PMU_WRGA_EN;
4669 if (flags & PCIBR_NOWRITE_GATHER)
4670 clr |= BRIDGE_DEV_PMU_WRGA_EN;
4672 if (flags & PCIBR_WRITE_GATHER)
4673 set |= BRIDGE_DEV_DIR_WRGA_EN;
4674 if (flags & PCIBR_NOWRITE_GATHER)
4675 clr |= BRIDGE_DEV_DIR_WRGA_EN;
4677 if (flags & PCIBR_PREFETCH)
4678 set |= BRIDGE_DEV_PREF;
4679 if (flags & PCIBR_NOPREFETCH)
4680 clr |= BRIDGE_DEV_PREF;
4682 if (flags & PCIBR_PRECISE)
4683 set |= BRIDGE_DEV_PRECISE;
4684 if (flags & PCIBR_NOPRECISE)
4685 clr |= BRIDGE_DEV_PRECISE;
4687 if (flags & PCIBR_BARRIER)
4688 set |= BRIDGE_DEV_BARRIER;
4689 if (flags & PCIBR_NOBARRIER)
4690 clr |= BRIDGE_DEV_BARRIER;
4692 if (flags & PCIBR_64BIT)
4693 set |= BRIDGE_DEV_DEV_SIZE;
4694 if (flags & PCIBR_NO64BIT)
4695 clr |= BRIDGE_DEV_DEV_SIZE;
/* Apply the accumulated set/clear masks to the cached device register
 * value while holding the bridge lock; only touch hardware if the
 * resulting value actually differs from the cached copy. */
4701 s = pcibr_lock(pcibr_soft);
4702 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
4703 devreg = (devreg & ~clr) | set;
4704 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
4705 bridge_t *bridge = pcibr_soft->bs_base;
/* PIC bridges are written directly; other bridge types go through the
 * shub swapper check below and may need an explicit 32-bit byte swap. */
4707 if ( IS_PIC_SOFT(pcibr_soft) ) {
4708 bridge->b_device[pciio_slot].reg = devreg;
4709 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
/* Read-back of the target-flush register stalls until the PIO write
 * above has actually reached the bridge. */
4710 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4713 if (io_get_sh_swapper(NASID_GET(bridge))) {
4714 BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
4715 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4716 BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
4718 bridge->b_device[pciio_slot].reg = devreg;
4719 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
4720 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
4724 pcibr_unlock(pcibr_soft, s);
/* NOTE(review): this debug call passes 'device_bits', which is not
 * declared anywhere visible in this function — presumably a leftover
 * from an earlier version of the format string; verify against the
 * PCIBR_DEBUG_ALWAYS macro definition. The plain printk below looks
 * like the alternative branch of a preprocessor conditional. */
4726 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
4727 "pcibr_device_flags_set: Device(%d): %x\n",
4728 pciio_slot, devreg, device_bits));
4730 printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
4737 * PIC has 16 RBARs per bus; meaning it can have a total of 16 outstanding
4738 * split transactions. If the functions on the bus have requested a total
4739 * of 16 or less, then we can give them what they requested (ie. 100%).
4740 * Otherwise we have to make sure each function can get at least one buffer
4741 * and then divide the rest of the buffers up among the functions as ``A
4742 * PERCENTAGE OF WHAT THEY REQUESTED'' (i.e. 0% - 100% of a function's
4743 * pcix_type0_status.max_out_split). This percentage does not include the
4744 * one RBAR that all functions get by default.
/*
 * pcibr_pcix_rbars_calc: compute what percentage of each function's
 * requested PCI-X Read Buffer Attribute Registers (RBARs) can actually
 * be granted, beyond the one RBAR every function receives by default.
 * Returns that percentage (0..100).
 */
4747 pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
4749 /* 'percent_allowed' is the percentage of requested RBARs that functions
4750 * are allowed, ***less the 1 RBAR that all functions get by default***
4752 int percent_allowed;
4754 if (pcibr_soft->bs_pcix_num_funcs) {
/* More functions than RBARs: every function gets only its default
 * RBAR, so the additional-allocation percentage is zero. */
4755 if (pcibr_soft->bs_pcix_num_funcs > NUM_RBAR) {
4757 "%lx: Must oversubscribe Read Buffer Attribute Registers"
4758 "(RBAR). Bus has %d RBARs but %d funcs need them.\n",
4759 (unsigned long)pcibr_soft->bs_vhdl, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
4760 percent_allowed = 0;
/* Remaining RBARs (after one-per-function defaults) divided by the
 * total extra buffers requested across all functions, as a percent. */
4762 percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
4763 pcibr_soft->bs_pcix_split_tot);
4765 /* +1 to percentage to solve rounding errors that occur because
4766 * we're not doing fractional math. (ie. ((3 * 66%) / 100) = 1)
4767 * but should be "2" if doing true fractional math. NOTE: Since
4768 * the greatest number of outstanding transactions a function
4769 * can request is 32, this "+1" will always work (i.e. we won't
4770 * accidentally oversubscribe the RBARs because of this rounding
4771 * of the percentage).
4773 percent_allowed=(percent_allowed > 100) ? 100 : percent_allowed+1;
4779 return(percent_allowed);
/*
 * pcibr_provider: the pciio provider operations vector for this bridge
 * driver. The pciio layer dispatches generic PCI operations through
 * these function pointers; entries are grouped below by service area.
 * Entry order must match the pciio_provider_t declaration.
 */
4782 pciio_provider_t pcibr_provider =
/* PIO mapping services */
4784 (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
4785 (pciio_piomap_free_f *) pcibr_piomap_free,
4786 (pciio_piomap_addr_f *) pcibr_piomap_addr,
4787 (pciio_piomap_done_f *) pcibr_piomap_done,
4788 (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
4789 (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
4790 (pciio_piospace_free_f *) pcibr_piospace_free,
/* DMA mapping and drain services */
4792 (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
4793 (pciio_dmamap_free_f *) pcibr_dmamap_free,
4794 (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
4795 (pciio_dmamap_list_f *) pcibr_dmamap_list,
4796 (pciio_dmamap_done_f *) pcibr_dmamap_done,
4797 (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
4798 (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
4799 (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
4800 (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
4801 (pciio_dmalist_drain_f *) pcibr_dmalist_drain,
/* Interrupt services */
4803 (pciio_intr_alloc_f *) pcibr_intr_alloc,
4804 (pciio_intr_free_f *) pcibr_intr_free,
4805 (pciio_intr_connect_f *) pcibr_intr_connect,
4806 (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
4807 (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
/* Provider lifecycle and miscellaneous configuration services */
4809 (pciio_provider_startup_f *) pcibr_provider_startup,
4810 (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
4811 (pciio_reset_f *) pcibr_reset,
4812 (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
4813 (pciio_endian_set_f *) pcibr_endian_set,
4814 (pciio_priority_set_f *) pcibr_priority_set,
4815 (pciio_config_get_f *) pcibr_config_get,
4816 (pciio_config_set_f *) pcibr_config_set,
/* Error-handling / driver-callback slots. NOTE(review): the two groups
 * below (real handlers vs. NULL stubs ending at the PIC_LATER #endif)
 * appear to be the branches of a preprocessor conditional whose
 * #if/#else lines are not visible here — confirm which branch builds. */
4818 (pciio_error_devenable_f *) pcibr_error_devenable,
4819 (pciio_error_extract_f *) pcibr_error_extract,
4820 (pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
4821 (pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
4823 (pciio_error_devenable_f *) 0,
4824 (pciio_error_extract_f *) 0,
4825 (pciio_driver_reg_callback_f *) 0,
4826 (pciio_driver_unreg_callback_f *) 0,
4827 #endif /* PIC_LATER */
/* Device teardown and DMA-capability query */
4828 (pciio_device_unregister_f *) pcibr_device_unregister,
4829 (pciio_dma_enabled_f *) pcibr_dma_enabled,
/*
 * pcibr_dma_enabled: report whether DMA is enabled for the bridge that
 * owns this connection vertex, by querying the xtalk layer for the
 * bridge's crosstalk connection point.
 */
4833 pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
4835 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
4836 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
4839 return xtalk_dma_enabled(pcibr_soft->bs_conn);
4844 * pcibr_debug() is used to print pcibr debug messages to the console. A
4845 * user enables tracing by setting the following global variables:
4847 * pcibr_debug_mask -Bitmask of what to trace. see pcibr_private.h
4848 * pcibr_debug_module -Module to trace. 'all' means trace all modules
4849 * pcibr_debug_widget -Widget to trace. '-1' means trace all widgets
4850 * pcibr_debug_slot -Slot to trace. '-1' means trace all slots
4852 * 'type' is the type of debugging that the current PCIBR_DEBUG macro is
4853 * tracing. 'vhdl' (which can be NULL) is the vhdl associated with the
4854 * debug statement. If there is a 'vhdl' associated with this debug
4855 * statement, it is parsed to obtain the module, widget, and slot. If the
4856 * globals above match the PCIBR_DEBUG params, then the debug info in the
4857 * parameter 'format' is sent to the console.
4860 pcibr_debug(uint32_t type, devfs_handle_t vhdl, char *format, ...)
4862 char hwpath[MAXDEVNAME] = "\0";
4863 char copy_of_hwpath[MAXDEVNAME];
4864 char *module = "all";
4868 char *strtok_r(char *string, const char *sepset, char **lasts);
4870 if (pcibr_debug_mask & type) {
4872 if (!hwgraph_vertex_name_get(vhdl, hwpath, MAXDEVNAME)) {
4875 if (strcmp(module, pcibr_debug_module)) {
4876 /* strtok_r() wipes out string, use a copy */
4877 (void)strcpy(copy_of_hwpath, hwpath);
4878 cp = strstr(copy_of_hwpath, "/module/");
4881 cp += strlen("/module");
4882 module = strtok_r(cp, "/", &last);
4885 if (pcibr_debug_widget != -1) {
4886 cp = strstr(hwpath, "/xtalk/");
4888 cp += strlen("/xtalk/");
4892 if (pcibr_debug_slot != -1) {
4893 cp = strstr(hwpath, "/pci/");
4895 cp += strlen("/pci/");
4901 if ((vhdl == NULL) ||
4902 (!strcmp(module, pcibr_debug_module) &&
4903 (widget == pcibr_debug_widget) &&
4904 (slot == pcibr_debug_slot))) {
4906 printk("PCIBR_DEBUG<%d>\t: %s :", cpuid(), hwpath);
4908 printk("PCIBR_DEBUG\t: %s :", hwpath);
4911 * Kernel printk translates to this 3 line sequence.
4912 * Since we have a variable length argument list, we
4913 * need to call printk this way rather than directly
4915 va_start(ap, format);