3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
7 * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/interrupt.h>
15 #include <asm/sn/sgi.h>
16 #include <asm/sn/intr.h>
17 #include <asm/sn/sn2/sn_private.h>
18 #include <asm/sn/sn2/shubio.h>
19 #include <asm/sn/iograph.h>
20 #include <asm/sn/invent.h>
21 #include <asm/sn/hcl.h>
22 #include <asm/sn/labelcl.h>
23 #include <asm/sn/pci/bridge.h>
24 #include <asm/sn/xtalk/xtalk_private.h>
25 #include <asm/sn/simulator.h>
28 /* #define XBOW_DEBUG 1 */
29 /* #define DEBUG_ERROR 1 */
33 * Files needed to get the device driver entry points
36 #include <asm/sn/xtalk/xbow.h>
37 #include <asm/sn/xtalk/xtalk.h>
38 #include <asm/sn/xtalk/xswitch.h>
39 #include <asm/sn/xtalk/xwidget.h>
41 #include <asm/sn/prio.h>
42 #include <asm/sn/hcl_util.h>
45 #define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
46 #define DEL(ptr) (kfree(ptr))
49 * This file supports the Xbow chip. Main functions: initialization,
50 * error handling, and GBR.
54 * each vertex corresponding to an xbow chip
55 * has a "fastinfo" pointer pointing at one
58 typedef struct xbow_soft_s *xbow_soft_t;
61 vertex_hdl_t conn; /* our connection point */
62 vertex_hdl_t vhdl; /* xbow's private vertex */
63 vertex_hdl_t busv; /* the xswitch vertex */
64 xbow_t *base; /* PIO pointer to crossbow chip */
65 char *name; /* hwgraph name */
67 xbow_perf_t xbow_perfcnt[XBOW_PERF_COUNTERS];
68 xbow_perf_link_t xbow_perflink[MAX_XBOW_PORTS];
69 xbow_link_status_t xbow_link_status[MAX_XBOW_PORTS];
70 spinlock_t xbow_perf_lock;
72 widget_cfg_t *wpio[MAX_XBOW_PORTS]; /* cached PIO pointer */
74 /* Bandwidth allocation state. Bandwidth values are for the
75 * destination port since contention happens there.
76 * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
78 spinlock_t xbow_bw_alloc_lock; /* bw allocation lock */
79 unsigned long long bw_hiwm[MAX_XBOW_PORTS]; /* hiwater mark values */
80 unsigned long long bw_cur_used[MAX_XBOW_PORTS]; /* bw used currently */
83 #define xbow_soft_set(v,i) hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
84 #define xbow_soft_get(v) ((xbow_soft_t)hwgraph_fastinfo_get((v)))
87 * Function Table of Contents
90 void xbow_mlreset(xbow_t *);
91 int xbow_attach(vertex_hdl_t);
93 int xbow_widget_present(xbow_t *, int);
94 static int xbow_link_alive(xbow_t *, int);
95 vertex_hdl_t xbow_widget_lookup(vertex_hdl_t, int);
97 void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
101 void xbow_update_perf_counters(vertex_hdl_t);
102 xbow_perf_link_t *xbow_get_perf_counters(vertex_hdl_t);
103 int xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
104 xbow_link_status_t *xbow_get_llp_status(vertex_hdl_t);
105 void xbow_update_llp_status(vertex_hdl_t);
107 int xbow_disable_llp_monitor(vertex_hdl_t);
108 int xbow_enable_llp_monitor(vertex_hdl_t);
109 int xbow_prio_bw_alloc(vertex_hdl_t, xwidgetnum_t, xwidgetnum_t,
110 unsigned long long, unsigned long long);
111 static void xbow_setwidint(xtalk_intr_t);
113 xswitch_reset_link_f xbow_reset_link;
115 xswitch_provider_t xbow_provider =
122 xbow_mmap(struct file * file, struct vm_area_struct * vma)
124 unsigned long phys_addr;
127 phys_addr = (unsigned long)file->private_data & ~0xc000000000000000; /* Mask out the Uncache bits */
128 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
129 vma->vm_flags |= VM_RESERVED | VM_IO;
130 error = io_remap_page_range(vma, phys_addr, vma->vm_start,
131 vma->vm_end-vma->vm_start,
137 * This is the file operation table for the pcibr driver.
138 * As each of the functions are implemented, put the
139 * appropriate function name below.
141 struct file_operations xbow_fops = {
142 .owner = THIS_MODULE,
147 * xbow_mlreset: called at mlreset time if the
148 * platform specific code determines that there is
149 * a crossbow in a critical path that must be
150 * functional before the driver would normally get
151 * the device properly set up.
153 * what do we need to do, that the boot prom can
154 * not be counted on to have already done, that is
155 * generic across all platforms using crossbows?
159 xbow_mlreset(xbow_t * xbow)
163 #ifdef XBRIDGE_REGS_SIM
164 /* xbow_set_simulated_regs: sets xbow regs as needed
165 * for powering through the boot
168 xbow_set_simulated_regs(xbow_t *xbow, int port)
173 xbow->xb_link(port).link_status = (1<<31);
175 * and give it a live widget too
177 xbow->xb_link(port).link_aux_status = XB_AUX_STAT_PRESENT;
179 * zero the link control reg
181 xbow->xb_link(port).link_control = 0x0;
183 #endif /* XBRIDGE_REGS_SIM */
186 * xbow_attach: the crosstalk provider has
187 * determined that there is a crossbow widget
188 * present, and has handed us the connection
189 * point for that vertex.
191 * We not only add our own vertex, but add
192 * some "xtalk switch" data to the switch
193 * vertex (at the connect point's parent) if
194 * it does not have any.
199 xbow_attach(vertex_hdl_t conn)
208 xtalk_intr_t intr_hdl;
209 char devnm[MAXDEVNAME], *s;
214 static void xbow_errintr_handler(int, void *, struct pt_regs *);
217 #if DEBUG && ATTACH_DEBUG
218 #if defined(SUPPORT_PRINTING_V_FORMAT)
219 printk("%v: xbow_attach\n", conn);
221 printk("0x%x: xbow_attach\n", conn);
226 * Get a PIO pointer to the base of the crossbow
229 #ifdef XBRIDGE_REGS_SIM
230 printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
231 xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
233 * turn on ports e and f like in a real live ibrick
235 xbow_set_simulated_regs(xbow, 0xe);
236 xbow_set_simulated_regs(xbow, 0xf);
238 xbow = (xbow_t *) xtalk_piotrans_addr(conn, 0, 0, sizeof(xbow_t), 0);
239 #endif /* XBRIDGE_REGS_SIM */
242 * Locate the "switch" vertex: it is the parent
243 * of our connection point.
245 busv = hwgraph_connectpt_get(conn);
246 #if DEBUG && ATTACH_DEBUG
247 printk("xbow_attach: Bus Vertex 0x%p, conn 0x%p, xbow register 0x%p wid= 0x%x\n", busv, conn, xbow, *(volatile u32 *)xbow);
250 ASSERT(busv != GRAPH_VERTEX_NONE);
253 * Create our private vertex, and connect our
254 * driver information to it. This makes it possible
255 * for diagnostic drivers to open the crossbow
256 * vertex for access to registers.
260 * Register a xbow driver with devfs.
264 vhdl = hwgraph_register(conn, EDGE_LBL_XBOW, 0,
266 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
267 (struct file_operations *)&xbow_fops, (void *)xbow);
269 printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
274 * Allocate the soft state structure and attach
275 * it to the xbow's vertex
282 /* does the universe really need another macro? */
283 /* xbow_soft_set(vhdl, (arbitrary_info_t) soft); */
284 /* hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) soft); */
286 #define XBOW_NUM_SUFFIX_FORMAT "[xbow# %d]"
288 /* Add xbow number as a suffix to the hwgraph name of the xbow.
289 * This is helpful while looking at the error/warning messages.
294 * get the name of this xbow vertex and keep the info.
295 * This is needed during errors and interupts, but as
296 * long as we have it, we can use it elsewhere.
298 s = dev_to_name(vhdl, devnm, MAXDEVNAME);
299 soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1,
301 sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);
303 #ifdef XBRIDGE_REGS_SIM
304 /* my o200/ibrick has id=0x2d002049, but XXBOW_WIDGET_PART_NUM is defined
305 * as 0xd000, so I'm using that for the partnum bitfield.
307 printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: need xb_wid_id value!!\n");
310 id = xbow->xb_wid_id;
311 #endif /* XBRIDGE_REGS_SIM */
312 rev = XWIDGET_PART_REV_NUM(id);
314 mutex_spinlock_init(&soft->xbow_perf_lock);
315 soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
316 soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;
318 /* Initialization for GBR bw allocation */
319 mutex_spinlock_init(&soft->xbow_bw_alloc_lock);
321 #define XBOW_8_BIT_PORT_BW_MAX (400 * 1000 * 1000) /* 400 MB/s */
322 #define XBOW_16_BIT_PORT_BW_MAX (800 * 1000 * 1000) /* 800 MB/s */
324 /* Set bandwidth hiwatermark and current values */
325 for (i = 0; i < MAX_XBOW_PORTS; i++) {
326 soft->bw_hiwm[i] = XBOW_16_BIT_PORT_BW_MAX; /* for now */
327 soft->bw_cur_used[i] = 0;
331 * attach the crossbow error interrupt.
333 intr_hdl = xtalk_intr_alloc(conn, (device_desc_t)0, vhdl);
334 ASSERT(intr_hdl != NULL);
337 int irq = ((hub_intr_t)intr_hdl)->i_bit;
338 int cpu = ((hub_intr_t)intr_hdl)->i_cpuid;
340 intr_unreserve_level(cpu, irq);
341 ((hub_intr_t)intr_hdl)->i_bit = SGI_XBOW_ERROR;
344 xtalk_intr_connect(intr_hdl,
345 (intr_func_t) xbow_errintr_handler,
347 (xtalk_intr_setfunc_t) xbow_setwidint,
350 request_irq(SGI_XBOW_ERROR, (void *)xbow_errintr_handler, SA_SHIRQ, "XBOW error",
355 * Enable xbow error interrupts
357 xbow->xb_wid_control = (XB_WID_CTRL_REG_ACC_IE | XB_WID_CTRL_XTALK_IE);
360 * take a census of the widgets present,
361 * leaving notes at the switch vertex.
363 info = xswitch_info_new(busv);
365 for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
366 port < MAX_PORT_NUM; ++port) {
367 if (!xbow_link_alive(xbow, port)) {
368 #if DEBUG && XBOW_DEBUG
369 printk(KERN_INFO "0x%p link %d is not alive\n",
374 if (!xbow_widget_present(xbow, port)) {
375 #if DEBUG && XBOW_DEBUG
376 printk(KERN_INFO "0x%p link %d is alive but no widget is present\n", (void *)busv, port);
380 #if DEBUG && XBOW_DEBUG
381 printk(KERN_INFO "0x%p link %d has a widget\n",
385 xswitch_info_link_is_ok(info, port);
387 * Turn some error interrupts on
388 * and turn others off. The PROM has
389 * some things turned on we don't
390 * want to see (bandwidth allocation
391 * errors for instance); so if it
392 * is not listed here, it is not on.
394 xbow->xb_link(port).link_control =
395 ( (xbow->xb_link(port).link_control
397 * Turn off these bits; they are non-fatal,
398 * but we might want to save some statistics
399 * on the frequency of these errors.
402 & ~XB_CTRL_RCV_CNT_OFLOW_IE
403 & ~XB_CTRL_XMT_CNT_OFLOW_IE
404 & ~XB_CTRL_BNDWDTH_ALLOC_IE
407 * These are the ones we want to turn on.
409 | (XB_CTRL_ILLEGAL_DST_IE
410 | XB_CTRL_OALLOC_IBUF_IE
411 | XB_CTRL_XMT_MAX_RTRY_IE
412 | XB_CTRL_MAXREQ_TOUT_IE
413 | XB_CTRL_XMT_RTRY_IE
414 | XB_CTRL_SRC_TOUT_IE) );
417 xswitch_provider_register(busv, &xbow_provider);
419 return 0; /* attach successful */
422 /* This contains special-case code for grio. There are plans to make
423 * this general sometime in the future, but till then this should
427 xbow_widget_num_get(vertex_hdl_t dev)
430 char devname[MAXDEVNAME];
431 xwidget_info_t xwidget_info;
434 vertex_to_name(dev, devname, MAXDEVNAME);
436 /* If this is a pci controller vertex, traverse up using
437 * the ".." links to get to the widget.
439 if (strstr(devname, EDGE_LBL_PCI) &&
440 strstr(devname, EDGE_LBL_CONTROLLER)) {
442 for (i=0; i< 2; i++) {
443 if (hwgraph_edge_get(tdev,
444 HWGRAPH_EDGELBL_DOTDOT, &tdev) !=
449 if ((xwidget_info = xwidget_info_chk(tdev)) != NULL) {
450 return (xwidget_info_id_get(xwidget_info));
460 * xbow_widget_present: See if a device is present
461 * on the specified port of this crossbow.
464 xbow_widget_present(xbow_t *xbow, int port)
466 if ( IS_RUNNING_ON_SIMULATOR() ) {
467 if ( (port == 14) || (port == 15) ) {
475 /* WAR: port 0xf on PIC is missing present bit */
476 if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
477 IS_PIC_XBOW(xbow->xb_wid_id) && port==0xf) {
480 return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
485 xbow_link_alive(xbow_t * xbow, int port)
487 xbwX_stat_t xbow_linkstat;
489 xbow_linkstat.linkstatus = xbow->xb_link(port).link_status;
490 return (xbow_linkstat.link_alive);
495 * Lookup the edges connected to the xbow specified, and
496 * retrieve the handle corresponding to the widgetnum
498 * If not found, return 0.
501 xbow_widget_lookup(vertex_hdl_t vhdl,
504 xswitch_info_t xswitch_info;
507 xswitch_info = xswitch_info_get(vhdl);
508 conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
513 * xbow_setwidint: called when xtalk
514 * is establishing or migrating our
518 xbow_setwidint(xtalk_intr_t intr)
520 xwidgetnum_t targ = xtalk_intr_target_get(intr);
521 iopaddr_t addr = xtalk_intr_addr_get(intr);
522 xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
523 xbow_t *xbow = (xbow_t *) xtalk_intr_sfarg_get(intr);
525 xbow_intr_preset((void *) xbow, 0, targ, addr, vect);
529 * xbow_intr_preset: called during mlreset time
530 * if the platform specific code needs to route
531 * an xbow interrupt before the xtalk infrastructure
532 * is available for use.
534 * Also called from xbow_setwidint, so we don't
535 * replicate the guts of the routine.
537 * XXX- probably should be renamed xbow_wid_intr_set or
538 * something to reduce confusion.
542 xbow_intr_preset(void *which_widget,
543 int which_widget_intr,
546 xtalk_intr_vector_t vect)
548 xbow_t *xbow = (xbow_t *) which_widget;
550 xbow->xb_wid_int_upper = ((0xFF000000 & (vect << 24)) |
551 (0x000F0000 & (targ << 16)) |
552 XTALK_ADDR_TO_UPPER(addr));
553 xbow->xb_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
557 #define XEM_ADD_STR(s) printk("%s", (s))
558 #define XEM_ADD_NVAR(n,v) printk("\t%20s: 0x%llx\n", (n), ((unsigned long long)v))
559 #define XEM_ADD_VAR(v) XEM_ADD_NVAR(#v,(v))
560 #define XEM_ADD_IOEF(p,n) if (IOERROR_FIELDVALID(ioe,n)) { \
561 IOERROR_GETVALUE(p,ioe,n); \
562 XEM_ADD_NVAR("ioe." #n, p); \
565 int xbow_xmit_retry_errors;
568 xbow_xmit_retry_error(xbow_soft_t soft,
578 wid = soft->wpio[port - BASE_XBOW_PORT];
580 /* If we can't track down a PIO
581 * pointer to our widget yet,
582 * leave our caller knowing that
583 * we are interested in this
584 * interrupt if it occurs in
587 info = xswitch_info_get(soft->busv);
590 vhdl = xswitch_info_vhdl_get(info, port);
591 if (vhdl == GRAPH_VERTEX_NONE)
593 wid = (widget_cfg_t *) xtalk_piotrans_addr
594 (vhdl, 0, 0, sizeof *wid, 0);
597 soft->wpio[port - BASE_XBOW_PORT] = wid;
600 part = XWIDGET_PART_NUM(id);
601 mfgr = XWIDGET_MFG_NUM(id);
603 /* If this thing is not a Bridge,
604 * do not activate the WAR, and
605 * tell our caller we do not need
606 * to be called again.
608 if ((part != BRIDGE_WIDGET_PART_NUM) ||
609 (mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
610 /* FIXME: add Xbridge to the WAR.
611 * Shouldn't hurt anything. Later need to
612 * check if we can remove this.
614 if ((part != XBRIDGE_WIDGET_PART_NUM) ||
615 (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
619 /* count how many times we
620 * have picked up after
621 * LLP Transmit problems.
623 xbow_xmit_retry_errors++;
625 /* rewrite the control register
628 wid->w_control = wid->w_control;
635 * xbow_errintr_handler will be called if the xbow
636 * sends an interrupt request to report an error.
639 xbow_errintr_handler(int irq, void *arg, struct pt_regs *ep)
642 xbow_soft_t soft = (xbow_soft_t) arg;
643 xbow_t *xbow = soft->base;
644 xbowreg_t wid_control;
646 xbowreg_t wid_err_cmdword;
647 xbowreg_t wid_err_upper;
648 xbowreg_t wid_err_lower;
649 w_err_cmd_word_u wid_err;
650 unsigned long long wid_err_addr;
654 static int xbow_error_handler(void *, int, ioerror_mode_t, ioerror_t *);
656 wid_control = xbow->xb_wid_control;
657 wid_stat = xbow->xb_wid_stat_clr;
658 wid_err_cmdword = xbow->xb_wid_err_cmdword;
659 wid_err_upper = xbow->xb_wid_err_upper;
660 wid_err_lower = xbow->xb_wid_err_lower;
661 xbow->xb_wid_err_cmdword = 0;
663 wid_err_addr = wid_err_lower | (((iopaddr_t) wid_err_upper & WIDGET_ERR_UPPER_ADDR_ONLY) << 32);
665 if (wid_stat & XB_WID_STAT_LINK_INTR_MASK) {
668 wid_err.r = wid_err_cmdword;
670 for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
671 port < MAX_PORT_NUM; port++) {
672 if (wid_stat & XB_WID_STAT_LINK_INTR(port)) {
673 xb_linkregs_t *link = &(xbow->xb_link(port));
674 xbowreg_t link_control = link->link_control;
675 xbowreg_t link_status = link->link_status_clr;
676 xbowreg_t link_aux_status = link->link_aux_status;
679 link_pend = link_status & link_control &
680 (XB_STAT_ILLEGAL_DST_ERR
681 | XB_STAT_OALLOC_IBUF_ERR
682 | XB_STAT_RCV_CNT_OFLOW_ERR
683 | XB_STAT_XMT_CNT_OFLOW_ERR
684 | XB_STAT_XMT_MAX_RTRY_ERR
686 | XB_STAT_XMT_RTRY_ERR
687 | XB_STAT_MAXREQ_TOUT_ERR
688 | XB_STAT_SRC_TOUT_ERR
691 if (link_pend & XB_STAT_ILLEGAL_DST_ERR) {
692 if (wid_err.f.sidn == port) {
694 IOERROR_SETVALUE(ioe, widgetnum, port);
695 IOERROR_SETVALUE(ioe, xtalkaddr, wid_err_addr);
696 if (IOERROR_HANDLED ==
697 xbow_error_handler(soft,
701 link_pend &= ~XB_STAT_ILLEGAL_DST_ERR;
708 * if the bridge signals an LLP Transmitter Retry,
709 * rewrite its control register.
710 * If someone else triggers this interrupt,
711 * ignore (and disable) the interrupt.
713 if (link_pend & XB_STAT_XMT_RTRY_ERR) {
714 if (!xbow_xmit_retry_error(soft, port)) {
715 link_control &= ~XB_CTRL_XMT_RTRY_IE;
716 link->link_control = link_control;
717 link->link_control; /* stall until written */
719 link_pend &= ~XB_STAT_XMT_RTRY_ERR;
722 vertex_hdl_t xwidget_vhdl;
725 /* Get the widget name corresponding to the current
728 xwidget_vhdl = xbow_widget_lookup(soft->busv,port);
729 xwidget_name = xwidget_name_get(xwidget_vhdl);
731 printk("%s port %X[%s] XIO Bus Error",
732 soft->name, port, xwidget_name);
733 if (link_status & XB_STAT_MULTI_ERR)
734 XEM_ADD_STR("\tMultiple Errors\n");
735 if (link_status & XB_STAT_ILLEGAL_DST_ERR)
736 XEM_ADD_STR("\tInvalid Packet Destination\n");
737 if (link_status & XB_STAT_OALLOC_IBUF_ERR)
738 XEM_ADD_STR("\tInput Overallocation Error\n");
739 if (link_status & XB_STAT_RCV_CNT_OFLOW_ERR)
740 XEM_ADD_STR("\tLLP receive error counter overflow\n");
741 if (link_status & XB_STAT_XMT_CNT_OFLOW_ERR)
742 XEM_ADD_STR("\tLLP transmit retry counter overflow\n");
743 if (link_status & XB_STAT_XMT_MAX_RTRY_ERR)
744 XEM_ADD_STR("\tLLP Max Transmitter Retry\n");
745 if (link_status & XB_STAT_RCV_ERR)
746 XEM_ADD_STR("\tLLP Receiver error\n");
747 if (link_status & XB_STAT_XMT_RTRY_ERR)
748 XEM_ADD_STR("\tLLP Transmitter Retry\n");
749 if (link_status & XB_STAT_MAXREQ_TOUT_ERR)
750 XEM_ADD_STR("\tMaximum Request Timeout\n");
751 if (link_status & XB_STAT_SRC_TOUT_ERR)
752 XEM_ADD_STR("\tSource Timeout Error\n");
757 for (other_port = 8; other_port < 16; ++other_port) {
758 if (link_aux_status & (1 << other_port)) {
759 /* XXX- need to go to "other_port"
760 * and clean up after the timeout?
762 XEM_ADD_VAR(other_port);
770 XEM_ADD_VAR(link_control);
771 XEM_ADD_VAR(link_status);
772 XEM_ADD_VAR(link_aux_status);
782 if (wid_stat & wid_control & XB_WID_STAT_WIDGET0_INTR) {
783 /* we have a "widget zero" problem */
785 if (wid_stat & (XB_WID_STAT_MULTI_ERR
786 | XB_WID_STAT_XTALK_ERR
787 | XB_WID_STAT_REG_ACC_ERR)) {
789 printk("%s Port 0 XIO Bus Error",
791 if (wid_stat & XB_WID_STAT_MULTI_ERR)
792 XEM_ADD_STR("\tMultiple Error\n");
793 if (wid_stat & XB_WID_STAT_XTALK_ERR)
794 XEM_ADD_STR("\tXIO Error\n");
795 if (wid_stat & XB_WID_STAT_REG_ACC_ERR)
796 XEM_ADD_STR("\tRegister Access Error\n");
802 XEM_ADD_VAR(wid_stat);
803 XEM_ADD_VAR(wid_control);
804 XEM_ADD_VAR(wid_err_cmdword);
805 XEM_ADD_VAR(wid_err_upper);
806 XEM_ADD_VAR(wid_err_lower);
807 XEM_ADD_VAR(wid_err_addr);
808 PRINT_PANIC("XIO Bus Error");
813 * XBOW ERROR Handling routines.
814 * These get invoked as part of walking down the error handling path
815 * from hub/heart towards the I/O device that caused the error.
820 * XBow error handling dispatch routine.
821 * This is the primary interface used by external world to invoke
822 * in case of an error related to a xbow.
823 * Only functionality in this layer is to identify the widget handle
824 * given the widgetnum. Otherwise, xbow does not gathers any error
834 int retval = IOERROR_WIDGETLEVEL;
836 xbow_soft_t soft = (xbow_soft_t) einfo;
841 xbow_t *xbow = soft->base;
843 xbowreg_t wid_err_cmdword;
844 xbowreg_t wid_err_upper;
845 xbowreg_t wid_err_lower;
846 unsigned long long wid_err_addr;
849 xbowreg_t link_control;
850 xbowreg_t link_status;
851 xbowreg_t link_aux_status;
856 #if DEBUG && ERROR_DEBUG
857 printk("%s: xbow_error_handler\n", soft->name, busv);
860 IOERROR_GETVALUE(port, ioerror, widgetnum);
863 /* error during access to xbow:
864 * do NOT attempt to access xbow regs.
866 if (mode == MODE_DEVPROBE)
867 return IOERROR_HANDLED;
869 if (error_code & IOECODE_DMA) {
871 "DMA error blamed on Crossbow at %s\n"
872 "\tbut Crosbow never initiates DMA!",
875 if (error_code & IOECODE_PIO) {
877 IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
878 printk(KERN_ALERT "PIO Error on XIO Bus %s\n"
879 "\tattempting to access XIO controller\n"
880 "\twith offset 0x%lx",
883 /* caller will dump contents of ioerror
884 * in DEBUG and kdebug kernels.
890 * error not on port zero:
891 * safe to read xbow registers.
893 wid_stat = xbow->xb_wid_stat;
894 wid_err_cmdword = xbow->xb_wid_err_cmdword;
895 wid_err_upper = xbow->xb_wid_err_upper;
896 wid_err_lower = xbow->xb_wid_err_lower;
900 | (((iopaddr_t) wid_err_upper
901 & WIDGET_ERR_UPPER_ADDR_ONLY)
904 if ((port < BASE_XBOW_PORT) ||
905 (port >= MAX_PORT_NUM)) {
907 if (mode == MODE_DEVPROBE)
908 return IOERROR_HANDLED;
910 if (error_code & IOECODE_DMA) {
912 "DMA error blamed on XIO port at %s/%d\n"
913 "\tbut Crossbow does not support that port",
916 if (error_code & IOECODE_PIO) {
918 IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
920 "PIO Error on XIO Bus %s\n"
921 "\tattempting to access XIO port %d\n"
922 "\t(which Crossbow does not support)"
923 "\twith offset 0x%lx",
924 soft->name, port, tmp);
929 XEM_ADD_STR("Raw status values for Crossbow:\n");
930 XEM_ADD_VAR(wid_stat);
931 XEM_ADD_VAR(wid_err_cmdword);
932 XEM_ADD_VAR(wid_err_upper);
933 XEM_ADD_VAR(wid_err_lower);
934 XEM_ADD_VAR(wid_err_addr);
939 /* caller will dump contents of ioerror
940 * in DEBUG and kdebug kernels.
945 /* access to valid port:
946 * ok to check port status.
949 link = &(xbow->xb_link(port));
950 link_control = link->link_control;
951 link_status = link->link_status;
952 link_aux_status = link->link_aux_status;
954 /* Check that there is something present
957 /* WAR: PIC widget 0xf is missing prescense bit */
958 if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
959 IS_PIC_XBOW(xbow->xb_wid_id) && (port==0xf))
962 if (!(link_aux_status & XB_AUX_STAT_PRESENT)) {
963 /* nobody connected. */
964 if (mode == MODE_DEVPROBE)
965 return IOERROR_HANDLED;
967 if (error_code & IOECODE_DMA) {
969 "DMA error blamed on XIO port at %s/%d\n"
970 "\tbut there is no device connected there.",
973 if (error_code & IOECODE_PIO) {
975 IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
977 "PIO Error on XIO Bus %s\n"
978 "\tattempting to access XIO port %d\n"
979 "\t(which has no device connected)"
980 "\twith offset 0x%lx",
981 soft->name, port, tmp);
986 XEM_ADD_STR("Raw status values for Crossbow:\n");
987 XEM_ADD_VAR(wid_stat);
988 XEM_ADD_VAR(wid_err_cmdword);
989 XEM_ADD_VAR(wid_err_upper);
990 XEM_ADD_VAR(wid_err_lower);
991 XEM_ADD_VAR(wid_err_addr);
993 XEM_ADD_VAR(link_control);
994 XEM_ADD_VAR(link_status);
995 XEM_ADD_VAR(link_aux_status);
1002 /* Check that the link is alive.
1004 if (!(link_status & XB_STAT_LINKALIVE)) {
1006 /* nobody connected. */
1007 if (mode == MODE_DEVPROBE)
1008 return IOERROR_HANDLED;
1011 "%s%sError on XIO Bus %s port %d",
1012 (error_code & IOECODE_DMA) ? "DMA " : "",
1013 (error_code & IOECODE_PIO) ? "PIO " : "",
1016 IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
1017 if ((error_code & IOECODE_PIO) &&
1018 (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
1019 printk("\tAccess attempted to offset 0x%lx\n", tmp);
1021 if (link_aux_status & XB_AUX_LINKFAIL_RST_BAD)
1022 XEM_ADD_STR("\tLink never came out of reset\n");
1024 XEM_ADD_STR("\tLink failed while transferring data\n");
1027 /* get the connection point for the widget
1028 * involved in this error; if it exists and
1029 * is not our connectpoint, cycle back through
1030 * xtalk_error_handler to deliver control to
1031 * the proper handler (or to report a generic
1034 * If the downstream handler won't handle
1035 * the problem, we let our upstream caller
1036 * deal with it, after (in DEBUG and kdebug
1037 * kernels) dumping the xbow state for this
1040 conn = xbow_widget_lookup(busv, port);
1041 if ((conn != GRAPH_VERTEX_NONE) &&
1042 (conn != soft->conn)) {
1043 retval = xtalk_error_handler(conn, error_code, mode, ioerror);
1044 if (retval == IOERROR_HANDLED)
1045 return IOERROR_HANDLED;
1047 if (mode == MODE_DEVPROBE)
1048 return IOERROR_HANDLED;
1050 if (retval == IOERROR_UNHANDLED) {
1052 retval = IOERROR_PANIC;
1055 "%s%sError on XIO Bus %s port %d",
1056 (error_code & IOECODE_DMA) ? "DMA " : "",
1057 (error_code & IOECODE_PIO) ? "PIO " : "",
1060 IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
1061 if ((error_code & IOECODE_PIO) &&
1062 (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
1063 printk("\tAccess attempted to offset 0x%lx\n", tmp);
1070 XEM_ADD_STR("Raw status values for Crossbow:\n");
1071 XEM_ADD_VAR(wid_stat);
1072 XEM_ADD_VAR(wid_err_cmdword);
1073 XEM_ADD_VAR(wid_err_upper);
1074 XEM_ADD_VAR(wid_err_lower);
1075 XEM_ADD_VAR(wid_err_addr);
1077 XEM_ADD_VAR(link_control);
1078 XEM_ADD_VAR(link_status);
1079 XEM_ADD_VAR(link_aux_status);
1083 /* caller will dump raw ioerror data
1084 * in DEBUG and kdebug kernels.
1091 xbow_update_perf_counters(vertex_hdl_t vhdl)
1093 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1094 xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
1095 xbow_perf_link_t *xbow_plink = xbow_soft->xbow_perflink;
1096 xbow_perfcount_t perf_reg;
1100 for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
1101 if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
1104 s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
1106 perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
1108 link = perf_reg.xb_perf.link_select;
1110 (xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
1111 ((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
1112 xbow_perf->xp_current = perf_reg.xb_perf.count;
1114 mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
1119 xbow_get_perf_counters(vertex_hdl_t vhdl)
1121 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1122 xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;
1124 return xbow_perf_link;
1128 xbow_enable_perf_counter(vertex_hdl_t vhdl, int link, int mode, int counter)
1130 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1131 xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
1132 xbow_linkctrl_t xbow_link_ctrl;
1133 xbow_t *xbow = xbow_soft->base;
1134 xbow_perfcount_t perf_reg;
1138 link -= BASE_XBOW_PORT;
1139 if ((link < 0) || (link >= MAX_XBOW_PORTS))
1142 if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
1145 if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
1148 s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
1150 if ((xbow_perf + counter)->xp_mode && mode) {
1151 mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
1154 for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
1157 if (((xbow_perf + i)->xp_link == link) &&
1158 ((xbow_perf + i)->xp_mode)) {
1159 mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
1163 xbow_perf += counter;
1165 xbow_perf->xp_curlink = xbow_perf->xp_link = link;
1166 xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;
1168 xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
1169 xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
1170 xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;
1172 perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
1173 perf_reg.xb_perf.link_select = link;
1174 *(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
1175 xbow_perf->xp_current = perf_reg.xb_perf.count;
1177 mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
1181 xbow_link_status_t *
1182 xbow_get_llp_status(vertex_hdl_t vhdl)
1184 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1185 xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
1187 return xbow_llp_status;
1191 xbow_update_llp_status(vertex_hdl_t vhdl)
1193 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1194 xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
1196 xbwX_stat_t lnk_sts;
1197 xbow_aux_link_status_t aux_sts;
1199 vertex_hdl_t xwidget_vhdl;
1202 xbow = (xbow_t *) xbow_soft->base;
1203 for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
1204 /* Get the widget name corresponding the current link.
1205 * Note : 0 <= link < MAX_XBOW_PORTS(8).
1206 * BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
1208 xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
1209 xwidget_name = xwidget_name_get(xwidget_vhdl);
1210 aux_sts.aux_linkstatus
1211 = xbow->xb_link_raw[link].link_aux_status;
1212 lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;
1214 if (lnk_sts.link_alive == 0)
1217 xbow_llp_status->rx_err_count +=
1218 aux_sts.xb_aux_linkstatus.rx_err_cnt;
1220 xbow_llp_status->tx_retry_count +=
1221 aux_sts.xb_aux_linkstatus.tx_retry_cnt;
1223 if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
1225 printk(KERN_WARNING "link %d[%s]: bad status 0x%x\n",
1226 link, xwidget_name, lnk_sts.linkstatus);
1233 xbow_disable_llp_monitor(vertex_hdl_t vhdl)
1235 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1238 for (port = 0; port < MAX_XBOW_PORTS; port++) {
1239 xbow_soft->xbow_link_status[port].rx_err_count = 0;
1240 xbow_soft->xbow_link_status[port].tx_retry_count = 0;
1243 xbow_soft->link_monitor = 0;
1248 xbow_enable_llp_monitor(vertex_hdl_t vhdl)
1250 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1252 xbow_soft->link_monitor = 1;
1258 xbow_reset_link(vertex_hdl_t xconn_vhdl)
1260 xwidget_info_t widget_info;
1267 static int ticks_per_ms = 0;
1269 if (!ticks_per_ms) {
1270 itick = get_timestamp();
1272 ticks_per_ms = get_timestamp() - itick;
1274 widget_info = xwidget_info_get(xconn_vhdl);
1275 port = xwidget_info_id_get(widget_info);
1277 #ifdef XBOW_K1PTR /* defined if we only have one xbow ... */
1281 vertex_hdl_t xbow_vhdl;
1282 xbow_soft_t xbow_soft;
1284 hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
1285 xbow_soft = xbow_soft_get(xbow_vhdl);
1286 xbow = xbow_soft->base;
1291 * This requires three PIOs (reset the link, check for the
1292 * reset, restore the control register for the link) plus
1293 * 10us to wait for the reset. We allow up to 1ms for the
1294 * widget to come out of reset before giving up and
1295 * returning a failure.
1297 ctrl = xbow->xb_link(port).link_control;
1298 xbow->xb_link(port).link_reset = 0;
1299 itick = get_timestamp();
1301 stat.linkstatus = xbow->xb_link(port).link_status;
1302 if (stat.link_alive)
1304 dtick = get_timestamp() - itick;
1305 if (dtick > ticks_per_ms) {
1306 return -1; /* never came out of reset */
1308 DELAY(2); /* don't beat on link_status */
1310 xbow->xb_link(port).link_control = ctrl;
1314 #define XBOW_ARB_RELOAD_TICKS 25
1315 /* granularity: 4 MB/s, max: 124 MB/s */
1316 #define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
1318 #define XBOW_BYTES_TO_GBR(BYTES_per_s) (int) (BYTES_per_s / GRANULARITY)
1320 #define XBOW_GBR_TO_BYTES(cnt) (bandwidth_t) ((cnt) * GRANULARITY)
1322 #define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec) \
1323 ((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)
1325 #define XBOW_ARB_GBR_MAX 31
1327 #define ABS(x) ((x > 0) ? (x) : (-1 * x))
1328 /* absolute value */
1331 xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
1336 bandwidth_t new_total_bw;
1339 printk("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
1340 old_bytes_per_sec, bytes_per_sec);
1341 #endif /* GRIO_DEBUG */
1343 gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
1345 new_total_bw = old_bytes_per_sec + bytes_per_sec;
1346 new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
1349 change_gbr = new_total_gbr - gbr_granted;
1352 printk("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
1353 gbr_granted, new_total_gbr, change_gbr);
1354 #endif /* GRIO_DEBUG */
1356 return (change_gbr);
1359 /* Conversion from GBR to bytes */
1361 xbow_gbr_to_bytes(int gbr)
1363 return (XBOW_GBR_TO_BYTES(gbr));
1366 /* Given the vhdl for the desired xbow, the src and dest. widget ids
1367 * and the req_bw value, this xbow driver entry point accesses the
1368 * xbow registers and allocates the desired bandwidth if available.
1370 * If bandwidth allocation is successful, return success else return failure.
1373 xbow_prio_bw_alloc(vertex_hdl_t vhdl,
1374 xwidgetnum_t src_wid,
1375 xwidgetnum_t dest_wid,
1376 unsigned long long old_alloc_bw,
1377 unsigned long long req_bw)
1379 xbow_soft_t soft = xbow_soft_get(vhdl);
1380 volatile xbowreg_t *xreg;
1384 bandwidth_t old_bw_BYTES, req_bw_BYTES;
1386 int old_bw_GBR, req_bw_GBR, new_bw_GBR;
1389 printk("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
1390 (int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
1393 ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
1394 ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));
1396 s = mutex_spinlock(&soft->xbow_bw_alloc_lock);
1398 /* Get pointer to the correct register */
1399 xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);
1401 /* Get mask for GBR count value */
1402 mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);
1404 req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
1405 req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
1406 : xbow_gbr_to_bytes(req_bw_GBR);
1409 printk("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
1410 req_bw, req_bw_BYTES, req_bw_GBR);
1411 #endif /* GRIO_DEBUG */
1413 old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
1415 old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));
1418 ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
1420 printk("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
1422 printk("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
1423 req_bw_BYTES, old_bw_BYTES,
1424 soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);
1426 #endif /* GRIO_DEBUG */
1428 /* Accept the request only if we don't exceed the destination
1429 * port HIWATER_MARK *AND* the max. link GBR arbitration count
1431 if (((old_bw_BYTES + req_bw_BYTES) <=
1432 soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
1433 (req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {
1435 new_bw_GBR = (old_bw_GBR + req_bw_GBR);
1437 /* Set this in the xbow link register */
1438 *xreg = (old_xreg & ~mask) | \
1439 (new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);
1441 soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
1442 xbow_gbr_to_bytes(new_bw_GBR);
1447 mutex_spinunlock(&soft->xbow_bw_alloc_lock, s);