1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
7 *******************************************************************************
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The followings are the change log and history:
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
41 *******************************************************************************/
43 #include <linux/version.h>
44 #include <linux/module.h>
45 #include <linux/kernel.h>
47 #include <linux/pci.h>
48 #include <linux/errno.h>
49 #include <linux/atm.h>
50 #include <linux/atmdev.h>
51 #include <linux/sonet.h>
52 #include <linux/skbuff.h>
53 #include <linux/time.h>
54 #include <linux/delay.h>
55 #include <linux/uio.h>
56 #include <linux/init.h>
57 #include <asm/system.h>
59 #include <asm/atomic.h>
60 #include <asm/uaccess.h>
61 #include <asm/string.h>
62 #include <asm/byteorder.h>
63 #include <linux/vmalloc.h>
/* Byte-swap the two low bytes of a 16-bit value (endianness helper).
 * NOTE(review): evaluates `x` twice; callers must avoid side effects. */
66 #define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
/* NOTE(review): the four fields below look like members of a
 * `struct suni_priv` whose enclosing declaration is elided in this
 * excerpt — confirm against the full source. */
68 struct k_sonet_stats sonet_stats; /* link diagnostics */
69 unsigned char loop_mode; /* loopback mode */
70 struct atm_dev *dev; /* device back-pointer */
71 struct suni_priv *next; /* next SUNI */
/* Access the per-device SUNI private data hung off the ATM device. */
73 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
75 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
/* Up to 8 adapters supported; parallel arrays indexed by board number. */
77 static IADEV *ia_dev[8];
78 static struct atm_dev *_ia_dev[8];
79 static int iadev_count;
80 static void ia_led_timer(unsigned long arg);
81 static struct timer_list ia_timer = TIMER_INITIALIZER(ia_led_timer, 0, 0);
82 struct atm_vcc *vcc_close_que[100];
/* Buffer counts/sizes are module parameters; defaults may be scaled
 * down in ia_hw_type() for boards with less packet memory. */
83 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
84 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
85 static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
86 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
88 MODULE_PARM(IA_TX_BUF, "i");
89 MODULE_PARM(IA_TX_BUF_SZ, "i");
90 MODULE_PARM(IA_RX_BUF, "i");
91 MODULE_PARM(IA_RX_BUF_SZ, "i");
92 MODULE_PARM(IADebugFlag, "i");
94 MODULE_LICENSE("GPL");
/* Driver assumes 32-bit longs (pointer/u32 casts throughout). */
96 #if BITS_PER_LONG != 32
97 # error FIXME: this driver only works on 32-bit platforms
100 /**************************** IA_LIB **********************************/
/* Reset a software TX return queue to empty (body elided in this excerpt). */
102 static void ia_init_rtn_q (IARTN_Q *que)
/* Insert `data` at the head of the return queue.
 * NOTE(review): several lines are elided in this excerpt. */
108 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
111 if (que->next == NULL)
112 que->next = que->tail = data;
114 data->next = que->next;
/* Append a descriptor-table entry to the tail of the return queue.
 * Allocates the queue node with GFP_ATOMIC (callable from IRQ context);
 * returns -1 on allocation failure. Lines elided in this excerpt. */
120 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
121 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
122 if (!entry) return -1;
125 if (que->next == NULL)
126 que->next = que->tail = entry;
128 que->tail->next = entry;
129 que->tail = que->tail->next;
/* Pop the head entry of the return queue; presumably returns it
 * (return statements elided in this excerpt — confirm in full source). */
134 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
136 if (que->next == NULL)
139 if ( que->next == que->tail)
140 que->next = que->tail = NULL;
142 que->next = que->next->next;
/* Drain the hardware Transmit Complete Queue (TCQ): walk from the host's
 * cached write pointer to the chip's, releasing finished descriptors and
 * queueing completed skbs on tx_return_q for slow (rate-limited) VCs.
 * NOTE(review): many lines (declarations, braces, else arms) are elided
 * in this excerpt. */
146 static void ia_hack_tcq(IADEV *dev) {
150 struct ia_vcc *iavcc_r = NULL;
151 extern void desc_dbg(IADEV *iadev);
153 tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
154 while (dev->host_tcq_wr != tcq_wr) {
155 desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
/* Descriptor slot already recycled: clear the stale TCQ entry. */
157 else if (!dev->desc_tbl[desc1 -1].timestamp) {
158 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
159 *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
161 else if (dev->desc_tbl[desc1 -1].timestamp) {
162 if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
163 printk("IA: Fatal err in get_desc\n");
166 iavcc_r->vc_desc_cnt--;
167 dev->desc_tbl[desc1 -1].timestamp = 0;
168 IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
169 (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
/* Slow VC: defer skb release to ia_tx_poll via the return queue. */
170 if (iavcc_r->pcr < dev->rate_limit) {
171 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
172 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
173 printk("ia_hack_tcq: No memory available\n");
175 dev->desc_tbl[desc1 -1].iavcc = NULL;
176 dev->desc_tbl[desc1 -1].txskb = NULL;
/* TCQ entries are 2 bytes wide; wrap at the queue end. */
178 dev->host_tcq_wr += 2;
179 if (dev->host_tcq_wr > dev->ffL.tcq_ed)
180 dev->host_tcq_wr = dev->ffL.tcq_st;
/* Fetch the next free TX descriptor number from the TCQ, stamping it with
 * the current jiffies. Periodically (every ~50 jiffies, or when the TCQ
 * appears empty) runs a recovery pass that reclaims descriptors whose
 * per-VC timeout has expired. NOTE(review): lines elided in this excerpt;
 * the return path is not visible. */
184 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
187 struct ia_vcc *iavcc_r = NULL;
189 static unsigned long timer = 0;
191 extern void desc_dbg(IADEV *iadev);
194 if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){
197 while (i < dev->num_tx_desc) {
198 if (!dev->desc_tbl[i].timestamp) {
202 ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
203 delta = jiffies - dev->desc_tbl[i].timestamp;
/* Descriptor stuck past its VC timeout: force it back into the TCQ. */
204 if (delta >= ltimeout) {
205 IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
206 if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
207 dev->ffL.tcq_rd = dev->ffL.tcq_ed;
209 dev->ffL.tcq_rd -= 2;
/* Descriptor numbers are 1-based in the TCQ (i+1). */
210 *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
211 if (!(skb = dev->desc_tbl[i].txskb) ||
212 !(iavcc_r = dev->desc_tbl[i].iavcc))
213 printk("Fatal err, desc table vcc or skb is NULL\n");
215 iavcc_r->vc_desc_cnt--;
216 dev->desc_tbl[i].timestamp = 0;
217 dev->desc_tbl[i].iavcc = NULL;
218 dev->desc_tbl[i].txskb = NULL;
223 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
226 /* Get the next available descriptor number from TCQ */
227 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
/* Skip zero/in-use entries, wrapping the read pointer as needed. */
229 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
230 dev->ffL.tcq_rd += 2;
231 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
232 dev->ffL.tcq_rd = dev->ffL.tcq_st;
233 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
235 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
238 /* get system time */
239 dev->desc_tbl[desc_num -1].timestamp = jiffies;
/* Work around a hardware ABR scheduler lockup: detect a VC whose cell
 * slot/fraction stopped advancing while the segmentation state machine
 * sits in ABR_STATE, then reset the VC to idle and re-insert its VCI in
 * the ABR schedule table. NOTE(review): many lines are elided in this
 * excerpt; control flow between the detection and recovery halves is
 * not fully visible. */
243 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
245 vcstatus_t *vcstatus;
247 u_short tempCellSlot, tempFract;
248 struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
249 struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
252 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
253 vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
256 if( vcstatus->cnt == 0x05 ) {
259 if( eabr_vc->last_desc ) {
260 if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
261 /* Wait for 10 Micro sec */
/* Re-check after the delay to filter out transient states. */
263 if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
267 tempCellSlot = abr_vc->last_cell_slot;
268 tempFract = abr_vc->fraction;
/* Lockup criterion: schedule position unchanged since last poll. */
269 if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
270 && (tempFract == dev->testTable[vcc->vci]->fract))
272 dev->testTable[vcc->vci]->lastTime = tempCellSlot;
273 dev->testTable[vcc->vci]->fract = tempFract;
275 } /* last descriptor */
277 } /* vcstatus->cnt */
280 IF_ABR(printk("LOCK UP found\n");)
/* Halt the segmentation engine while we patch the VC state. */
281 writew(0xFFFD, dev->seg_reg+MODE_REG_0);
282 /* Wait for 10 Micro sec */
284 abr_vc->status &= 0xFFF8;
285 abr_vc->status |= 0x0001; /* state is idle */
/* Find a free slot in the ABR schedule table for this VCI. */
286 shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
287 for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
289 shd_tbl[i] = vcc->vci;
291 IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
292 writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
293 writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
294 writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
304 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
306 ** +----+----+------------------+-------------------------------+
307 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
308 ** +----+----+------------------+-------------------------------+
310 ** R = reserved (written as 0)
311 ** NZ = 0 if 0 cells/sec; 1 otherwise
313 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
/* Convert a 24-bit cell rate (cells/sec) to the chip's 16-bit floating
 * format described in the comment block above (1.mmmmmmmmm * 2^eeeee).
 * NOTE(review): return type, locals, and the exponent search loop are
 * elided in this excerpt; `i` presumably holds the exponent. */
316 cellrate_to_float(u32 cr)
320 #define M_BITS 9 /* Number of bits in mantissa */
321 #define E_BITS 5 /* Number of bits in exponent */
325 u32 tmp = cr & 0x00ffffff;
/* Exponent equals the mantissa width: no shift needed. */
334 flot = NZ | (i << M_BITS) | (cr & M_MASK);
336 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
338 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
344 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
/* Inverse of cellrate_to_float(): expand the 16-bit float format back to
 * a 24-bit cells/sec value. Returns 0 when the NZ bit is clear.
 * NOTE(review): some lines (return statements, braces) elided. */
347 float_to_cellrate(u16 rate)
349 u32 exp, mantissa, cps;
350 if ((rate & NZ) == 0)
352 exp = (rate >> M_BITS) & E_MASK;
353 mantissa = rate & M_MASK;
/* Restore the implicit leading 1 of the mantissa. */
356 cps = (1 << M_BITS) | mantissa;
359 else if (exp > M_BITS)
360 cps <<= (exp - M_BITS);
362 cps >>= (M_BITS - exp);
/* Fill an ABR service-class parameter block with driver defaults
 * (PCR = line rate; remaining fields elided in this excerpt). */
367 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
368 srv_p->class_type = ATM_ABR;
369 srv_p->pcr = dev->LineRate;
371 srv_p->icr = 0x055cb7;
372 srv_p->tbe = 0xffffff;
/* Program an ABR VC into the hardware. flag==1 initializes the FFRED
 * (segmentation/forward) VC table entry from the service parameters,
 * clamping/validating them first; flag==0 initializes the RFRED
 * (reassembly/reverse) side. NOTE(review): return type, switch framing,
 * validation `return` arms, and `break`s are elided in this excerpt. */
383 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
384 struct atm_vcc *vcc, u8 flag)
386 f_vc_abr_entry *f_abr_vc;
387 r_vc_abr_entry *r_abr_vc;
390 u16 adtf, air, *ptr16;
391 f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
392 f_abr_vc += vcc->vci;
394 case 1: /* FFRED initialization */
395 #if 0 /* sanity check */
/* Clamp PCR to the line rate and reject parameters out of range. */
398 if (srv_p->pcr > dev->LineRate)
399 srv_p->pcr = dev->LineRate;
400 if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
401 return MCR_UNAVAILABLE;
402 if (srv_p->mcr > srv_p->pcr)
405 srv_p->icr = srv_p->pcr;
406 if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
408 if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
410 if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
412 if (srv_p->nrm > MAX_NRM)
414 if (srv_p->trm > MAX_TRM)
416 if (srv_p->adtf > MAX_ADTF)
418 else if (srv_p->adtf == 0)
420 if (srv_p->cdf > MAX_CDF)
422 if (srv_p->rif > MAX_RIF)
424 if (srv_p->rdf > MAX_RDF)
427 memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
428 f_abr_vc->f_vc_type = ABR;
429 nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
430 /* i.e 2**n = 2 << (n-1) */
431 f_abr_vc->f_nrm = nrm << 8 | nrm;
432 trm = 100000/(2 << (16 - srv_p->trm));
433 if ( trm == 0) trm = 1;
434 f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
435 crm = srv_p->tbe / nrm;
436 if (crm == 0) crm = 1;
437 f_abr_vc->f_crm = crm & 0xff;
438 f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
/* ICR is further bounded by TBE/FRTT (cells per round-trip). */
439 icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
440 ((srv_p->tbe/srv_p->frtt)*1000000) :
441 (1000000/(srv_p->frtt/srv_p->tbe)));
442 f_abr_vc->f_icr = cellrate_to_float(icr);
443 adtf = (10000 * srv_p->adtf)/8192;
444 if (adtf == 0) adtf = 1;
445 f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
446 f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
447 f_abr_vc->f_acr = f_abr_vc->f_icr;
448 f_abr_vc->f_status = 0x0042;
450 case 0: /* RFRED initialization */
451 ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
452 *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
453 r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
454 r_abr_vc += vcc->vci;
455 r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
456 air = srv_p->pcr << (15 - srv_p->rif);
457 if (air == 0) air = 1;
458 r_abr_vc->r_air = cellrate_to_float(air);
459 dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
460 dev->sum_mcr += srv_p->mcr;
/* Reserve CBR bandwidth for a VC: compute the number of schedule-table
 * entries from the PCR and the table granularity, then spread those
 * entries as evenly as possible through the CBR schedule table, probing
 * outward (idealSlot +/- inc) from each ideal position until an empty
 * slot is found. Enables CBR in STPARMS on the first CBR VC.
 * NOTE(review): declarations, loop framing, and return statements are
 * elided in this excerpt. */
468 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
469 u32 rateLow=0, rateHigh, rate;
471 struct ia_vcc *ia_vcc;
473 int idealSlot =0, testSlot, toBeAssigned, inc;
475 u16 *SchedTbl, *TstSchedTbl;
481 /* IpAdjustTrafficParams */
482 if (vcc->qos.txtp.max_pcr <= 0) {
483 IF_ERR(printk("PCR for CBR not defined\n");)
486 rate = vcc->qos.txtp.max_pcr;
487 entries = rate / dev->Granularity;
488 IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
489 entries, rate, dev->Granularity);)
491 IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
/* Round up if the requested rate is closer to the next bucket. */
492 rateLow = entries * dev->Granularity;
493 rateHigh = (entries + 1) * dev->Granularity;
494 if (3*(rate - rateLow) > (rateHigh - rate))
496 if (entries > dev->CbrRemEntries) {
497 IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
498 IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
499 entries, dev->CbrRemEntries);)
503 ia_vcc = INPH_IA_VCC(vcc);
504 ia_vcc->NumCbrEntry = entries;
505 dev->sum_mcr += entries * dev->Granularity;
506 /* IaFFrednInsertCbrSched */
507 // Starting at an arbitrary location, place the entries into the table
508 // as smoothly as possible
510 spacing = dev->CbrTotEntries / entries;
511 sp_mod = dev->CbrTotEntries % entries; // get modulo
512 toBeAssigned = entries;
515 IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
518 // If this is the first time, start the table loading for this connection
519 // as close to entryPoint as possible.
520 if (toBeAssigned == entries)
522 idealSlot = dev->CbrEntryPt;
523 dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping
524 if (dev->CbrEntryPt >= dev->CbrTotEntries)
525 dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
527 idealSlot += (u32)(spacing + fracSlot); // Point to the next location
528 // in the table that would be smoothest
529 fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part
530 sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part
532 if (idealSlot >= (int)dev->CbrTotEntries)
533 idealSlot -= dev->CbrTotEntries;
534 // Continuously check around this ideal value until a null
535 // location is encountered.
536 SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
538 testSlot = idealSlot;
539 TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value
540 IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
541 testSlot, (u32)TstSchedTbl,toBeAssigned);)
542 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
543 while (cbrVC) // If another VC at this location, we have to keep looking
546 testSlot = idealSlot - inc;
547 if (testSlot < 0) { // Wrap if necessary
548 testSlot += dev->CbrTotEntries;
549 IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
550 (u32)SchedTbl,testSlot);)
552 TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index
553 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
556 testSlot = idealSlot + inc;
557 if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
558 testSlot -= dev->CbrTotEntries;
559 IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
560 IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
561 testSlot, toBeAssigned);)
563 // set table index and read in value
564 TstSchedTbl = (u16*)(SchedTbl + testSlot);
565 IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
566 (u32)TstSchedTbl,cbrVC,inc);)
567 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
569 // Move this VCI number into this location of the CBR Sched table.
/* NOTE(review): sizeof(TstSchedTbl) is the POINTER size here, not the
 * entry size — looks suspicious; confirm against the full source. */
570 memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
571 dev->CbrRemEntries--;
575 /* IaFFrednCbrEnable */
576 dev->NumEnabledCBR++;
577 if (dev->NumEnabledCBR == 1) {
578 writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
579 IF_CBR(printk("CBR is enabled\n");)
/* Release a CBR VC: scrub its VCI from the CBR schedule table (restoring
 * the free-entry count) and disable CBR in STPARMS when no CBR VCs
 * remain. NOTE(review): loop body lines elided in this excerpt. */
583 static void ia_cbrVc_close (struct atm_vcc *vcc) {
585 u16 *SchedTbl, NullVci = 0;
588 iadev = INPH_IA_DEV(vcc->dev);
589 iadev->NumEnabledCBR--;
590 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
591 if (iadev->NumEnabledCBR == 0) {
592 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
593 IF_CBR (printk("CBR support disabled\n");)
596 for (i=0; i < iadev->CbrTotEntries; i++)
598 if (*SchedTbl == vcc->vci) {
599 iadev->CbrRemEntries++;
605 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
/* Count free TX descriptors in the TCQ (2 bytes per entry), handling
 * the wrapped case where the read pointer is ahead of the write pointer.
 * NOTE(review): the return statement is elided in this excerpt. */
608 static int ia_avail_descs(IADEV *iadev) {
611 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
612 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
614 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
615 iadev->ffL.tcq_st) / 2;
/* Flush the TX backlog queue: while descriptors are available, dequeue
 * backlogged skbs and hand them to ia_pkt_tx(); skbs with a missing or
 * closed VCC are dropped, and a failed transmit is requeued at the head.
 * NOTE(review): some lines (continue/break, counter updates, return)
 * are elided in this excerpt. */
619 static int ia_que_tx (IADEV *iadev) {
623 struct ia_vcc *iavcc;
624 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
625 num_desc = ia_avail_descs(iadev);
627 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
628 if (!(vcc = ATM_SKB(skb)->vcc)) {
629 dev_kfree_skb_any(skb);
630 printk("ia_que_tx: Null vcc\n");
633 if (!test_bit(ATM_VF_READY,&vcc->flags)) {
634 dev_kfree_skb_any(skb);
635 printk("Free the SKB on closed vci %d \n", vcc->vci);
638 iavcc = INPH_IA_VCC(vcc);
639 if (ia_pkt_tx (vcc, skb)) {
640 skb_queue_head(&iadev->tx_backlog, skb);
/* Complete transmissions for rate-limited VCs: drain tx_return_q, match
 * each returned skb against the VC's in-flight (txing_skb) queue, free
 * everything up to and including the match, and invoke vcc->pop where
 * registered. An unmatched entry is pushed back on the return queue.
 * NOTE(review): braces and several statements elided in this excerpt. */
647 void ia_tx_poll (IADEV *iadev) {
648 struct atm_vcc *vcc = NULL;
649 struct sk_buff *skb = NULL, *skb1 = NULL;
650 struct ia_vcc *iavcc;
654 while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
655 skb = rtne->data.txskb;
657 printk("ia_tx_poll: skb is null\n");
660 vcc = ATM_SKB(skb)->vcc;
662 printk("ia_tx_poll: vcc is null\n");
663 dev_kfree_skb_any(skb);
667 iavcc = INPH_IA_VCC(vcc);
669 printk("ia_tx_poll: iavcc is null\n");
670 dev_kfree_skb_any(skb);
/* Release any earlier skbs on this VC that already completed. */
674 skb1 = skb_dequeue(&iavcc->txing_skb);
675 while (skb1 && (skb1 != skb)) {
676 if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
677 printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
679 IF_ERR(printk("Release the SKB not match\n");)
680 if ((vcc->pop) && (skb1->len != 0))
683 IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
687 dev_kfree_skb_any(skb1);
688 skb1 = skb_dequeue(&iavcc->txing_skb);
691 IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
692 ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
695 if ((vcc->pop) && (skb->len != 0))
698 IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
701 dev_kfree_skb_any(skb);
/* Write a 16-bit word to the serial NOVRAM/EEPROM at `addr`: enable
 * writes (EWEN), issue the write command, clock out the data MSB-first,
 * then disable writes (EWDS). NOTE(review): locals, the post-write poll
 * loop body, and braces are elided in this excerpt. */
709 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
714 * Issue a command to enable writes to the NOVRAM
716 NVRAM_CMD (EXTEND + EWEN);
719 * issue the write command
721 NVRAM_CMD(IAWRITE + addr);
723 * Send the data, starting with D15, then D14, and so on for 16 bits
725 for (i=15; i>=0; i--) {
726 NVRAM_CLKOUT (val & 0x8000);
731 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
733 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
737 * disable writes again
739 NVRAM_CMD(EXTEND + EWDS)
/* Read a 16-bit word from the serial EEPROM at `addr`, clocking bits in
 * MSB-first. NOTE(review): the bit-assembly loop body and return are
 * elided in this excerpt. */
745 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
751 * Read the first bit that was clocked with the falling edge of the
752 * the last command data clock
754 NVRAM_CMD(IAREAD + addr);
756 * Now read the rest of the bits, the next bit read is D14, then D13,
760 for (i=15; i>=0; i--) {
/* Probe the board variant from EEPROM word 25: size the TX/RX buffer
 * pools by packet-memory size (1M / 512K / smaller, scaling the defaults
 * down by 2 or 8 when the user did not override them), then decode the
 * front-end (PHY) type and derive the line rate in cells/sec.
 * NOTE(review): several else/brace lines are elided in this excerpt. */
769 static void ia_hw_type(IADEV *iadev) {
770 u_short memType = ia_eeprom_get(iadev, 25);
771 iadev->memType = memType;
772 if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
773 iadev->num_tx_desc = IA_TX_BUF;
774 iadev->tx_buf_sz = IA_TX_BUF_SZ;
775 iadev->num_rx_desc = IA_RX_BUF;
776 iadev->rx_buf_sz = IA_RX_BUF_SZ;
777 } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
/* Only scale down when the user left the module default in place. */
778 if (IA_TX_BUF == DFL_TX_BUFFERS)
779 iadev->num_tx_desc = IA_TX_BUF / 2;
781 iadev->num_tx_desc = IA_TX_BUF;
782 iadev->tx_buf_sz = IA_TX_BUF_SZ;
783 if (IA_RX_BUF == DFL_RX_BUFFERS)
784 iadev->num_rx_desc = IA_RX_BUF / 2;
786 iadev->num_rx_desc = IA_RX_BUF;
787 iadev->rx_buf_sz = IA_RX_BUF_SZ;
790 if (IA_TX_BUF == DFL_TX_BUFFERS)
791 iadev->num_tx_desc = IA_TX_BUF / 8;
793 iadev->num_tx_desc = IA_TX_BUF;
794 iadev->tx_buf_sz = IA_TX_BUF_SZ;
795 if (IA_RX_BUF == DFL_RX_BUFFERS)
796 iadev->num_rx_desc = IA_RX_BUF / 8;
798 iadev->num_rx_desc = IA_RX_BUF;
799 iadev->rx_buf_sz = IA_RX_BUF_SZ;
/* RX packet RAM starts right after the TX buffer region. */
801 iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
802 IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
803 iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
804 iadev->rx_buf_sz, iadev->rx_pkt_ram);)
807 if ((memType & FE_MASK) == FE_SINGLE_MODE) {
808 iadev->phy_type = PHY_OC3C_S;
809 else if ((memType & FE_MASK) == FE_UTP_OPTION)
810 iadev->phy_type = PHY_UTP155;
812 iadev->phy_type = PHY_OC3C_M;
815 iadev->phy_type = memType & FE_MASK;
816 IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
817 memType,iadev->phy_type);)
/* Line rates derate the raw bit rate by 8 bits/byte, 26/27 framing
 * overhead, and 53 bytes/cell to get cells per second. */
818 if (iadev->phy_type == FE_25MBIT_PHY)
819 iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
820 else if (iadev->phy_type == FE_DS3_PHY)
821 iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
822 else if (iadev->phy_type == FE_E3_PHY)
823 iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
825 iadev->LineRate = (u32)(ATM_OC3_PCR);
826 IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
/* Service a front-end (PHY) interrupt: read and clear the PHY-specific
 * interrupt status and update iadev->carrier_detect from the loss-of-
 * signal indication for MB25 (25Mbit), PM7345 (DS3/E3), or SUNI (OC3).
 * NOTE(review): some declarations and braces elided in this excerpt. */
830 static void IaFrontEndIntr(IADEV *iadev) {
831 volatile IA_SUNI *suni;
832 volatile ia_mb25_t *mb25;
833 volatile suni_pm7345_t *suni_pm7345;
837 if(iadev->phy_type & FE_25MBIT_PHY) {
838 mb25 = (ia_mb25_t*)iadev->phy;
839 iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
840 } else if (iadev->phy_type & FE_DS3_PHY) {
841 suni_pm7345 = (suni_pm7345_t *)iadev->phy;
842 /* clear FRMR interrupts */
843 frmr_intr = suni_pm7345->suni_ds3_frm_intr_stat;
844 iadev->carrier_detect =
845 Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
846 } else if (iadev->phy_type & FE_E3_PHY ) {
847 suni_pm7345 = (suni_pm7345_t *)iadev->phy;
848 frmr_intr = suni_pm7345->suni_e3_frm_maint_intr_ind;
849 iadev->carrier_detect =
850 Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
853 suni = (IA_SUNI *)iadev->phy;
854 intr_status = suni->suni_rsop_status & 0xff;
855 iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
857 if (iadev->carrier_detect)
858 printk("IA: SUNI carrier detected\n");
860 printk("IA: SUNI carrier lost signal\n");
/* Initialize the MB25 25-Mbit PHY: program the master control register
 * (one of the two writes is presumably conditional — the #if/#else
 * framing is elided in this excerpt), clear diagnostics, and seed the
 * carrier-detect state from the interrupt status register. */
864 void ia_mb25_init (IADEV *iadev)
866 volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy;
868 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
870 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
871 mb25->mb25_diag_control = 0;
873 * Initialize carrier detect state
875 iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
/* Initialize the PM7345 SUNI framer for DS3 or E3 operation: program the
 * framer/transmitter configuration for the detected PHY, seed carrier
 * detect from loss-of-signal, enable the RSOP LOS interrupt, clear the
 * error counters and test register, and set up the RXCP/TXCP cell
 * processors (idle-cell and cell-delineation patterns/masks).
 * NOTE(review): braces and some #ifdef framing elided in this excerpt. */
879 void ia_suni_pm7345_init (IADEV *iadev)
881 volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
882 if (iadev->phy_type & FE_DS3_PHY)
884 iadev->carrier_detect =
885 Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
886 suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
887 suni_pm7345->suni_ds3_frm_cfg = 1;
888 suni_pm7345->suni_ds3_tran_cfg = 1;
889 suni_pm7345->suni_config = 0;
890 suni_pm7345->suni_splr_cfg = 0;
891 suni_pm7345->suni_splt_cfg = 0;
895 iadev->carrier_detect =
896 Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
897 suni_pm7345->suni_e3_frm_fram_options = 0x4;
898 suni_pm7345->suni_e3_frm_maint_options = 0x20;
899 suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
900 suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
901 suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
902 suni_pm7345->suni_e3_tran_fram_options = 0x1;
903 suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
904 suni_pm7345->suni_splr_cfg = 0x41;
905 suni_pm7345->suni_splt_cfg = 0x41;
908 * Enable RSOP loss of signal interrupt.
910 suni_pm7345->suni_intr_enbl = 0x28;
913 * Clear error counters
915 suni_pm7345->suni_id_reset = 0;
918 * Clear "PMCTST" in master test register.
920 suni_pm7345->suni_master_test = 0;
922 suni_pm7345->suni_rxcp_ctrl = 0x2c;
923 suni_pm7345->suni_rxcp_fctrl = 0x81;
/* Idle cell: header 00 00 00 01, matched on all but the low bit. */
925 suni_pm7345->suni_rxcp_idle_pat_h1 =
926 suni_pm7345->suni_rxcp_idle_pat_h2 =
927 suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
928 suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
930 suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
931 suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
932 suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
933 suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
935 suni_pm7345->suni_rxcp_cell_pat_h1 =
936 suni_pm7345->suni_rxcp_cell_pat_h2 =
937 suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
938 suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
940 suni_pm7345->suni_rxcp_cell_mask_h1 =
941 suni_pm7345->suni_rxcp_cell_mask_h2 =
942 suni_pm7345->suni_rxcp_cell_mask_h3 =
943 suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
945 suni_pm7345->suni_txcp_ctrl = 0xa4;
946 suni_pm7345->suni_txcp_intr_en_sts = 0x10;
947 suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
/* Take the device out of any loopback modes left from reset. */
949 suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
954 suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
955 #endif /* __SNMP__ */
960 /***************************** IA_LIB END *****************************/
962 /* pwang_test debug utility */
/* Debug-only transmit/receive packet counters. */
963 int tcnter = 0, rcnter = 0;
/* Hex+ASCII dump of `length` bytes at `cp`, 16 bytes per line, each line
 * prefixed with `prefix`. Debug aid only. NOTE(review): buffer
 * declaration, printk of the line, and loop framing are elided in this
 * excerpt. */
964 void xdump( u_char* cp, int length, char* prefix )
968 u_char* pBuf = prntBuf;
970 while(count < length){
971 pBuf += sprintf( pBuf, "%s", prefix );
972 for(col = 0;count + col < length && col < 16; col++){
973 if (col != 0 && (col % 4) == 0)
974 pBuf += sprintf( pBuf, " " );
975 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
977 while(col++ < 16){ /* pad end of buffer with blanks */
979 sprintf( pBuf, " " );
980 pBuf += sprintf( pBuf, " " );
982 pBuf += sprintf( pBuf, " " );
/* ASCII column: printable bytes verbatim, everything else as '.'. */
983 for(col = 0;count + col < length && col < 16; col++){
984 if (isprint((int)cp[count + col]))
985 pBuf += sprintf( pBuf, "%c", cp[count + col] );
987 pBuf += sprintf( pBuf, "." );
989 sprintf( pBuf, "\n" );
996 } /* close xdump(... */
/* Head of the linked list of registered iphase boards. */
999 static struct atm_dev *ia_boards = NULL;
/* Scale the nominal (128K-based) control-RAM base addresses by the
 * actual memory size of the installed board. */
1001 #define ACTUAL_RAM_BASE \
1002 RAM_BASE*((iadev->mem)/(128 * 1024))
1003 #define ACTUAL_SEG_RAM_BASE \
1004 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1005 #define ACTUAL_REASS_RAM_BASE \
1006 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1009 /*-- some utilities and memory allocation stuff will come here -------------*/
/* Debug dump of the TX descriptor state: TCQ hardware pointers, the
 * host's cached pointers, every outstanding TCQ slot, and the full
 * descriptor timestamp table. NOTE(review): declarations and the loop
 * increment are elided in this excerpt. */
1011 void desc_dbg(IADEV *iadev) {
1013 u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1015 // regval = readl((u32)ia_cmds->maddr);
1016 tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
1017 printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1018 tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1019 readw(iadev->seg_ram+tcq_wr_ptr-2));
1020 printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
1022 tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
1023 tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
1024 printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1026 while (tcq_st_ptr != tcq_ed_ptr) {
1027 tmp = iadev->seg_ram+tcq_st_ptr;
1028 printk("TCQ slot %d desc = %d Addr = 0x%x\n", i++, readw(tmp), tmp);
1031 for(i=0; i <iadev->num_tx_desc; i++)
1032 printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1036 /*----------------------------- Receiving side stuff --------------------------*/
/* Drain the reassembly exception queue, logging each entry's VCI and
 * error code and advancing the read pointer with wraparound. The whole
 * body is compiled out (#if 0) — see the comment at line 1040.
 * NOTE(review): several lines elided in this excerpt. */
1038 static void rx_excp_rcvd(struct atm_dev *dev)
1040 #if 0 /* closing the receiving size will cause too many excp int */
1043 u_short excpq_rd_ptr;
1046 iadev = INPH_IA_DEV(dev);
1047 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1048 while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
1049 { printk("state = %x \n", state);
1050 excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
1051 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
1052 if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1053 IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1054 // TODO: update exception stat
1055 vci = readw(iadev->reass_ram+excpq_rd_ptr);
1056 error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
1059 if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
1060 excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1061 writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
1062 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
/* Return an RX descriptor to the hardware free-descriptor queue (FDQ):
 * write the descriptor number at the FDQ write pointer, advance it by 2
 * with wraparound, and publish the new pointer to the chip. */
1067 static void free_desc(struct atm_dev *dev, int desc)
1070 iadev = INPH_IA_DEV(dev);
1071 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1072 iadev->rfL.fdq_wr +=2;
1073 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1074 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1075 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
/* Process one entry of the reassembly Packet Complete Queue: pop the
 * descriptor number, validate it and its VC, check the status bits for
 * receive errors, allocate an skb for the PDU, and enqueue a DLE so the
 * DMA engine copies the data from adapter packet RAM into the skb.
 * Bad/orphaned descriptors are recycled via free_desc().
 * NOTE(review): declarations, returns, braces, and error-path framing
 * are elided in this excerpt. */
1079 static int rx_pkt(struct atm_dev *dev)
1082 struct atm_vcc *vcc;
1083 unsigned short status;
1084 struct rx_buf_desc *buf_desc_ptr;
1088 struct sk_buff *skb;
1089 u_int buf_addr, dma_addr;
1091 iadev = INPH_IA_DEV(dev);
1092 if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
1094 printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
1097 /* mask 1st 3 bits to get the actual descno. */
1098 desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
1099 IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1100 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1101 printk(" pcq_wr_ptr = 0x%x\n",
1102 readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1103 /* update the read pointer - maybe we shud do this in the end*/
1104 if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
1105 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
1107 iadev->rfL.pcq_rd += 2;
1108 writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
1110 /* get the buffer desc entry.
1111 update stuff. - doesn't seem to be any update necessary
1113 buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1114 /* make the ptr point to the corresponding buffer desc entry */
1115 buf_desc_ptr += desc;
/* Sanity-check descriptor number and VC index before trusting them. */
1116 if (!desc || (desc > iadev->num_rx_desc) ||
1117 ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
1118 free_desc(dev, desc);
1119 IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1122 vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
1125 free_desc(dev, desc);
1126 printk("IA: null vcc, drop PDU\n");
1131 /* might want to check the status bits for errors */
1132 status = (u_short) (buf_desc_ptr->desc_mode);
1133 if (status & (RX_CER | RX_PTE | RX_OFL))
1135 atomic_inc(&vcc->stats->rx_err);
1136 IF_ERR(printk("IA: bad packet, dropping it");)
1137 if (status & RX_CER) {
1138 IF_ERR(printk(" cause: packet CRC error\n");)
1140 else if (status & RX_PTE) {
1141 IF_ERR(printk(" cause: packet time out\n");)
1144 IF_ERR(printk(" cause: buffer over flow\n");)
/* PDU length = DMA end address minus buffer start address. */
1153 buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
1154 dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
1155 len = dma_addr - buf_addr;
1156 if (len > iadev->rx_buf_sz) {
1157 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1158 atomic_inc(&vcc->stats->rx_err);
1162 if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1164 printk("Drop control packets\n");
1169 ATM_SKB(skb)->vcc = vcc;
1170 ATM_DESC(skb) = desc;
1171 skb_queue_tail(&iadev->rx_dma_q, skb);
1173 /* Build the DLE structure */
1174 wr_ptr = iadev->rx_dle_q.write;
1175 wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1176 len, PCI_DMA_FROMDEVICE);
1177 wr_ptr->local_pkt_addr = buf_addr;
1178 wr_ptr->bytes = len; /* We don't know this do we ?? */
1179 wr_ptr->mode = DMA_INT_ENABLE;
1181 /* shud take care of wrap around here too. */
1182 if(++wr_ptr == iadev->rx_dle_q.end)
1183 wr_ptr = iadev->rx_dle_q.start;
1184 iadev->rx_dle_q.write = wr_ptr;
1186 /* Increment transaction counter */
1187 writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
1190 free_desc(dev, desc);
/*
 * rx_intr() - service the reassembly-side (receive) interrupt status register.
 * Reads REASS_INTR_STATUS_REG once and dispatches on the RX_* cause bits:
 * packet-complete, free-queue-empty, exception, and raw-cell indications.
 * Called from the top-level interrupt handler ia_int() with STAT_REASSINT set.
 * NOTE(review): this view of the file is line-sampled; some statements and
 * braces between the visible lines are elided.
 */
1194 static void rx_intr(struct atm_dev *dev)
1200 iadev = INPH_IA_DEV(dev);
/* Latch and mask the 16-bit reassembly interrupt status. */
1201 status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
1202 IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1203 if (status & RX_PKT_RCVD)
1206 /* Basically received an interrupt for receiving a packet.
1207 A descriptor would have been written to the packet complete
1208 queue. Get all the descriptors and set up DMA to move the
1209 packets till the packet complete queue is empty..
/* Drain the packet complete queue (PCQ) until the hardware reports empty. */
1211 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1212 IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
1213 while(!(state & PCQ_EMPTY))
1216 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
/* Free-buffer queue ran dry: snapshot the packet counter/jiffies so a
 * later pass can detect a stalled receiver (see the 50-jiffy check below). */
1220 if (status & RX_FREEQ_EMPT)
1223 iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1224 iadev->rx_tmp_jif = jiffies;
/* No packet progress for >50 jiffies while the free queue stayed empty:
 * recovery path — presumably re-queues all rx descriptors and masks the
 * FREEQ/EXCP interrupts. TODO confirm against the elided loop body. */
1227 else if (((jiffies - iadev->rx_tmp_jif) > 50) &&
1228 ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1229 for (i = 1; i <= iadev->num_rx_desc; i++)
1231 printk("Test logic RUN!!!!\n");
1232 writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1235 IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
1238 if (status & RX_EXCP_RCVD)
1240 /* probably need to handle the exception queue also. */
1241 IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
1246 if (status & RX_RAW_RCVD)
1248 /* need to handle the raw incoming cells. This depends on
1249 whether we have programmed to receive the raw cells or not.
1251 IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
/*
 * rx_dle_intr() - completion handler for receive DMA list entries (DLEs).
 * Walks the rx DLE ring from our software read pointer up to the hardware's
 * current position (derived from IPHASE5575_RX_LIST_ADDR), and for each
 * completed entry: dequeues the matching skb from rx_dma_q, frees its
 * on-card descriptor, validates the AAL5 CPCS trailer length, trims the skb
 * to the real PDU length and pushes it up to the ATM layer, updating stats.
 * Finally it re-enables rx interrupts if they were masked because the free
 * descriptor queue had been empty.
 * NOTE(review): this view is line-sampled; error-path braces/returns between
 * the visible lines are elided.
 */
1256 static void rx_dle_intr(struct atm_dev *dev)
1259 struct atm_vcc *vcc;
1260 struct sk_buff *skb;
1263 struct dle *dle, *cur_dle;
1266 iadev = INPH_IA_DEV(dev);
1268 /* free all the dles done, that is just update our own dle read pointer
1269 - do we really need to do this. Think not. */
1270 /* DMA is done, just get all the receive buffers from the rx dma queue
1271 and push them up to the higher layer protocol. Also free the desc
1272 associated with the buffer. */
1273 dle = iadev->rx_dle_q.read;
/* Hardware list pointer, wrapped to the ring size; >>4 converts the byte
 * offset into a DLE index (assumes sizeof(struct dle) == 16 — TODO confirm). */
1274 dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
1275 cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
1276 while(dle != cur_dle)
1278 /* free the DMAed skb */
1279 skb = skb_dequeue(&iadev->rx_dma_q);
/* Release the on-card buffer descriptor recorded in the skb's cb. */
1282 desc = ATM_DESC(skb);
1283 free_desc(dev, desc);
1285 if (!(len = skb->len))
1287 printk("rx_dle_intr: skb len 0\n");
1288 dev_kfree_skb_any(skb);
1292 struct cpcs_trailer *trailer;
1294 struct ia_vcc *ia_vcc;
/* NOTE(review): unmaps via rx_dle_q.write->sys_pkt_addr rather than the
 * DLE just completed — verify this matches how the mapping was created. */
1296 pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1297 len, PCI_DMA_FROMDEVICE);
1298 /* no VCC related housekeeping done as yet. lets see */
1299 vcc = ATM_SKB(skb)->vcc;
1301 printk("IA: null vcc\n");
1302 dev_kfree_skb_any(skb);
1305 ia_vcc = INPH_IA_VCC(vcc);
/* Error path: count the error and return the charged buffer space. */
1308 atomic_inc(&vcc->stats->rx_err);
1309 dev_kfree_skb_any(skb);
1310 atm_return(vcc, atm_guess_pdu2truesize(len));
1313 // get real pkt length pwang_test
/* The AAL5 CPCS trailer sits at the end of the DMAed buffer; its length
 * field gives the true PDU size (swap() fixes byte order). */
1314 trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1315 skb->len - sizeof(*trailer));
1316 length = swap(trailer->length);
/* Reject trailers claiming more data than the buffer can hold. */
1317 if ((length > iadev->rx_buf_sz) || (length >
1318 (skb->len - sizeof(struct cpcs_trailer))))
1320 atomic_inc(&vcc->stats->rx_err);
1321 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
1323 dev_kfree_skb_any(skb);
1324 atm_return(vcc, atm_guess_pdu2truesize(len));
/* Trim padding + trailer off, leaving only the PDU payload. */
1327 skb_trim(skb, length);
1329 /* Display the packet */
1330 IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
1331 xdump(skb->data, skb->len, "RX: ");
1334 IF_RX(printk("rx_dle_intr: skb push");)
1336 atomic_inc(&vcc->stats->rx);
1337 iadev->rx_pkt_cnt++;
/* Advance the software read pointer with ring wrap-around. */
1340 if (++dle == iadev->rx_dle_q.end)
1341 dle = iadev->rx_dle_q.start;
1343 iadev->rx_dle_q.read = dle;
1345 /* if the interrupts are masked because there were no free desc available,
/* Re-arm FREEQ/PKT_RCVD interrupts now that descriptors were freed above. */
1347 if (!iadev->rxing) {
1348 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1349 if (!(state & FREEQ_EMPTY)) {
1350 state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1351 writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1352 iadev->reass_reg+REASS_MASK_REG);
/*
 * open_rx() - enable reception on a VCC.
 * Validates the receive traffic class, marks the VCI valid in the on-card
 * RX VC lookup table, programs either the ABR VC parameters or the UBR
 * reassembly-table entry, and records the vcc in iadev->rx_open[] so
 * incoming PDUs can be matched back to it (see the rx completion path).
 * Returns 0 when rxtp is ATM_NONE (nothing to do).
 * NOTE(review): line-sampled view — error returns and braces are elided.
 */
1359 static int open_rx(struct atm_vcc *vcc)
1364 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1366 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1367 iadev = INPH_IA_DEV(vcc->dev);
/* The 25 Mbit front end has no ABR support; reject ABR rx here. */
1368 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1369 if (iadev->phy_type & FE_25MBIT_PHY) {
1370 printk("IA: ABR not support\n");
1374 /* Make only this VCI in the vc table valid and let all
1375 others be invalid entries */
1376 vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1377 vc_table += vcc->vci;
1378 /* mask the last 6 bits and OR it with 3 for 1K VCs */
/* Valid entry: vci in the high bits, low 6 bits clear (cf. the invalid
 * entries written as (i << 6) | 15 in rx_init()). */
1380 *vc_table = vcc->vci << 6;
1381 /* Also keep a list of open rx vcs so that we can attach them with
1382 incoming PDUs later. */
1383 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1384 (vcc->qos.txtp.traffic_class == ATM_ABR))
1386 srv_cls_param_t srv_p;
/* ABR: program default service-class parameters; last arg 0 = rx side. */
1387 init_abr_vc(iadev, &srv_p);
1388 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1390 else { /* for UBR later may need to add CBR logic */
/* UBR: seed the reassembly-table entry for this VCI. */
1391 reass_ptr = (u_short *)
1392 (iadev->reass_ram+REASS_TABLE*iadev->memSize);
1393 reass_ptr += vcc->vci;
1394 *reass_ptr = NO_AAL5_PKT;
1397 if (iadev->rx_open[vcc->vci])
1398 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1399 vcc->dev->number, vcc->vci);
1400 iadev->rx_open[vcc->vci] = vcc;
/*
 * rx_init() - one-time initialization of the receive (reassembly) side.
 * Allocates the rx DLE ring from consistent PCI memory, resets the
 * reassembly engine, lays out the receive-side control memory (buffer
 * descriptors, free queue, packet-complete queue, exception queue,
 * reassembly table, VC table, ABR VC table), programs the timeout and
 * filter registers, allocates the rx_open[] vcc lookup array, and finally
 * puts the reassembly engine online (R_ONLINE).
 * Error path at the bottom frees the DLE allocation.
 * NOTE(review): line-sampled view — some statements/braces are elided.
 */
1404 static int rx_init(struct atm_dev *dev)
1407 struct rx_buf_desc *buf_desc_ptr;
1408 unsigned long rx_pkt_start = 0;
1410 struct abr_vc_table *abr_vc_table;
1414 int i,j, vcsize_sel;
1415 u_short freeq_st_adr;
1416 u_short *freeq_start;
1418 iadev = INPH_IA_DEV(dev);
1419 // spin_lock_init(&iadev->rx_lock);
1421 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1422 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1423 &iadev->rx_dle_dma);
1425 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
/* Software ring pointers: read == write == start means "empty". */
1428 iadev->rx_dle_q.start = (struct dle*)dle_addr;
1429 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1430 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
/* NOTE(review): the (u32) cast truncates pointers on 64-bit kernels. */
1431 iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1432 /* the end of the dle q points to the entry after the last
1433 DLE that can be used. */
1435 /* write the upper 20 bits of the start address to rx list address register */
1436 writel(iadev->rx_dle_dma & 0xfffff000,
1437 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1438 IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
1439 (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
1440 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1441 printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
1442 (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
1443 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
/* Mask all reassembly interrupts, take the mode register to 0, and reset
 * the reassembly engine before programming it. */
1445 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1446 writew(0, iadev->reass_reg+MODE_REG);
1447 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1449 /* Receive side control memory map
1450 -------------------------------
1452 Buffer descr 0x0000 (736 - 23K)
1453 VP Table 0x5c00 (256 - 512)
1454 Except q 0x5e00 (128 - 512)
1455 Free buffer q 0x6000 (1K - 2K)
1456 Packet comp q 0x6800 (1K - 2K)
1457 Reass Table 0x7000 (1K - 2K)
1458 VC Table 0x7800 (1K - 2K)
1459 ABR VC Table 0x8000 (1K - 32K)
1462 /* Base address for Buffer Descriptor Table */
1463 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1464 /* Set the buffer size register */
1465 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1467 /* Initialize each entry in the Buffer Descriptor Table */
1468 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1469 buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1470 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1472 rx_pkt_start = iadev->rx_pkt_ram;
/* Descriptors are 1-based; each gets a slice of packet RAM rx_buf_sz long. */
1473 for(i=1; i<=iadev->num_rx_desc; i++)
1475 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1476 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1477 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1479 rx_pkt_start += iadev->rx_buf_sz;
1481 IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
/* Free buffer descriptor queue: start/end/read/write registers. */
1482 i = FREE_BUF_DESC_Q*iadev->memSize;
1483 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1484 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1485 writew(i+iadev->num_rx_desc*sizeof(u_short),
1486 iadev->reass_reg+FREEQ_ED_ADR);
1487 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1488 writew(i+iadev->num_rx_desc*sizeof(u_short),
1489 iadev->reass_reg+FREEQ_WR_PTR);
1490 /* Fill the FREEQ with all the free descriptors. */
1491 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1492 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1493 for(i=1; i<=iadev->num_rx_desc; i++)
1495 *freeq_start = (u_short)i;
1498 IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
1499 /* Packet Complete Queue */
1500 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1501 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1502 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1503 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1504 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1506 /* Exception Queue */
1507 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1508 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1509 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1510 iadev->reass_reg+EXCP_Q_ED_ADR);
1511 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1512 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1514 /* Load local copy of FREEQ and PCQ ptrs */
1515 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1516 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1517 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1518 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1519 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1520 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1521 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1522 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1524 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1525 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1526 iadev->rfL.pcq_wr);)
1527 /* just for check - no VP TBL */
1529 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1530 /* initialize VP Table for invalid VPIs
1531 - I guess we can write all 1s or 0x000f in the entire memory
1532 space or something similar.
1535 /* This seems to work and looks right to me too !!! */
/* Reassembly table: every entry starts as NO_AAL5_PKT (no PDU in progress). */
1536 i = REASS_TABLE * iadev->memSize;
1537 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1538 /* initialize Reassembly table to I don't know what ???? */
1539 reass_table = (u16 *)(iadev->reass_ram+i);
1540 j = REASS_TABLE_SZ * iadev->memSize;
1541 for(i=0; i < j; i++)
1542 *reass_table++ = NO_AAL5_PKT;
/* Derive vcsize_sel from num_vc (loop body elided in this view). */
1545 while (i != iadev->num_vc) {
1549 i = RX_VC_TABLE * iadev->memSize;
1550 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1551 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1552 j = RX_VC_TABLE_SZ * iadev->memSize;
1553 for(i = 0; i < j; i++)
1555 /* shift the reassembly pointer by 3 + lower 3 bits of
1556 vc_lkup_base register (=3 for 1K VCs) and the last byte
1557 is those low 3 bits.
1558 Shall program this later.
1560 *vc_table = (i << 6) | 15; /* for invalid VCI */
1564 i = ABR_VC_TABLE * iadev->memSize;
1565 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1567 i = ABR_VC_TABLE * iadev->memSize;
1568 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
/* NOTE(review): count uses REASS_TABLE_SZ here, not an ABR-table size —
 * verify this is intentional. */
1569 j = REASS_TABLE_SZ * iadev->memSize;
1570 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1571 for(i = 0; i < j; i++) {
1572 abr_vc_table->rdf = 0x0003;
1573 abr_vc_table->air = 0x5eb1;
1577 /* Initialize other registers */
1579 /* VP Filter Register set for VC Reassembly only */
1580 writew(0xff00, iadev->reass_reg+VP_FILTER);
1581 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1582 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1584 /* Packet Timeout Count related Registers :
1585 Set packet timeout to occur in about 3 seconds
1586 Set Packet Aging Interval count register to overflow in about 4 us
1588 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
/* Pack bits of ptr16 into the TMOUT_RANGE register layout. */
1590 i = ((u32)ptr16 >> 6) & 0xff;
1592 i |=(((u32)ptr16 << 2) & 0xff00);
1593 writew(i, iadev->reass_reg+TMOUT_RANGE);
1594 /* initialize the desc_tbl (tx descriptor timestamp table) */
1595 for(i=0; i<iadev->num_tx_desc;i++)
1596 iadev->desc_tbl[i].timestamp = 0;
1598 /* to clear the interrupt status register - read it */
1599 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1601 /* Mask Register - clear it */
1602 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1604 skb_queue_head_init(&iadev->rx_dma_q);
1605 iadev->rx_free_desc_qhead = NULL;
/* rx_open[] holds one vcc pointer per VC (indexed by VCI in open_rx()).
 * NOTE(review): "4*" hard-codes sizeof(pointer)==4 — wrong on 64-bit;
 * should be sizeof(struct atm_vcc *). */
1606 iadev->rx_open = kmalloc(4*iadev->num_vc,GFP_KERNEL);
1607 if (!iadev->rx_open)
1609 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1613 memset(iadev->rx_open, 0, 4*iadev->num_vc);
1615 iadev->rx_pkt_cnt = 0;
/* Bring the reassembly engine online. */
1617 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
/* Error path: release the consistent DLE allocation. */
1621 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1629 The memory map suggested in appendix A and the coding for it.
1630 Keeping it around just in case we change our mind later.
1632 Buffer descr 0x0000 (128 - 4K)
1633 UBR sched 0x1000 (1K - 4K)
1634 UBR Wait q 0x2000 (1K - 4K)
1635 Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1637 extended VC 0x4000 (1K - 8K)
1638 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1639 CBR sched 0x7000 (as needed)
1640 VC table 0x8000 (1K - 32K)
/*
 * tx_intr() - service the segmentation-side (transmit) interrupt register.
 * Handles TRANSMIT_DONE (ack the bit, wake anyone waiting in close) and
 * logs TCQ_NOT_EMPTY. Called from ia_int() when STAT_SEGINT is set.
 * NOTE(review): line-sampled view — work done under tx_lock is elided.
 */
1643 static void tx_intr(struct atm_dev *dev)
1646 unsigned short status;
1647 unsigned long flags;
1649 iadev = INPH_IA_DEV(dev);
1651 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1652 if (status & TRANSMIT_DONE){
/* NOTE(review): runtime string below misspells "Transmit". */
1654 IF_EVENT(printk("Tansmit Done Intr logic run\n");)
1655 spin_lock_irqsave(&iadev->tx_lock, flags);
1657 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/* Write-1-to-clear the TRANSMIT_DONE status bit. */
1658 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
/* A close may be blocked waiting for outstanding transmits to finish. */
1659 if (iadev->close_pending)
1660 wake_up(&iadev->close_wait);
1662 if (status & TCQ_NOT_EMPTY)
1664 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
/*
 * tx_dle_intr() - completion handler for transmit DMA list entries.
 * Under tx_lock, walks the tx DLE ring from the software read pointer to
 * the hardware's current position; for each completed skb it unmaps the
 * DMA buffer, then either completes the skb immediately (pcr >= rate
 * limit) or tags it IA_DLED and parks it on the per-vcc txing_skb queue
 * for rate-limited flow control.
 * NOTE(review): line-sampled view — null checks/continues are elided.
 */
1668 static void tx_dle_intr(struct atm_dev *dev)
1671 struct dle *dle, *cur_dle;
1672 struct sk_buff *skb;
1673 struct atm_vcc *vcc;
1674 struct ia_vcc *iavcc;
1676 unsigned long flags;
1678 iadev = INPH_IA_DEV(dev);
1679 spin_lock_irqsave(&iadev->tx_lock, flags);
1680 dle = iadev->tx_dle_q.read;
/* Hardware list pointer wrapped to the ring; >>4 turns bytes into a DLE
 * index (assumes 16-byte DLEs — TODO confirm). */
1681 dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
1682 (sizeof(struct dle)*DLE_ENTRIES - 1);
1683 cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1684 while (dle != cur_dle)
1686 /* free the DMAed skb */
1687 skb = skb_dequeue(&iadev->tx_dma_q);
1690 /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
/* Only every other DLE carries the skb payload mapping (the pair's second
 * entry is the CPCS trailer), so unmap on even pair boundaries only. */
1691 if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1692 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1695 vcc = ATM_SKB(skb)->vcc;
1697 printk("tx_dle_intr: vcc is null\n");
1698 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1699 dev_kfree_skb_any(skb);
1703 iavcc = INPH_IA_VCC(vcc);
1705 printk("tx_dle_intr: iavcc is null\n");
1706 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1707 dev_kfree_skb_any(skb);
/* Fast path: VC is not rate-limited — complete the skb now, via the
 * vcc's pop callback when present, else free it directly. */
1710 if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1711 if ((vcc->pop) && (skb->len != 0))
1716 dev_kfree_skb_any(skb);
1719 else { /* Hold the rate-limited skb for flow control */
1720 IA_SKB_STATE(skb) |= IA_DLED;
1721 skb_queue_tail(&iavcc->txing_skb, skb);
1723 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
/* Advance the software read pointer with wrap-around. */
1724 if (++dle == iadev->tx_dle_q.end)
1725 dle = iadev->tx_dle_q.start;
1727 iadev->tx_dle_q.read = dle;
1728 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/*
 * open_tx() - enable transmission on a VCC.
 * Validates the tx traffic class against the PHY's capabilities, clamps
 * and normalizes the requested PCR against the line rate, derives the
 * descriptor timeout (ltimeout) and socket send-buffer sizing for
 * rate-limited VCs, fills in the on-card main/extended VC table entries
 * (ATM header split across atm_hdr1/atm_hdr2), then programs the class-
 * specific scheduler state for UBR, ABR or CBR. Marks the VC active in
 * testTable on success.
 * NOTE(review): line-sampled view — error returns and braces are elided.
 */
1731 static int open_tx(struct atm_vcc *vcc)
1733 struct ia_vcc *ia_vcc;
1738 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1739 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1740 iadev = INPH_IA_DEV(vcc->dev);
/* The 25 Mbit front end supports neither ABR nor CBR transmit. */
1742 if (iadev->phy_type & FE_25MBIT_PHY) {
1743 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1744 printk("IA: ABR not support\n");
1747 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1748 printk("IA: CBR not support\n");
1752 ia_vcc = INPH_IA_VCC(vcc);
1753 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
/* A PDU plus its CPCS trailer must fit in one tx buffer. */
1754 if (vcc->qos.txtp.max_sdu >
1755 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1756 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1757 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1758 INPH_IA_VCC(vcc) = NULL;
1762 ia_vcc->vc_desc_cnt = 0;
/* Normalize pcr: unspecified or "max" requests fall back to the line
 * rate; max_pcr overrides a smaller pcr; never exceed the line rate. */
1766 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1767 vcc->qos.txtp.pcr = iadev->LineRate;
1768 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1769 vcc->qos.txtp.pcr = iadev->LineRate;
1770 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1771 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1772 if (vcc->qos.txtp.pcr > iadev->LineRate)
1773 vcc->qos.txtp.pcr = iadev->LineRate;
1774 ia_vcc->pcr = vcc->qos.txtp.pcr;
/* Pick a descriptor-hold timeout inversely proportional to the rate. */
1776 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1777 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1778 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1779 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
/* Rate-limited VCs hold completed skbs on txing_skb (see tx_dle_intr). */
1780 if (ia_vcc->pcr < iadev->rate_limit)
1781 skb_queue_head_init (&ia_vcc->txing_skb);
/* Size the socket send buffer down for slow VCs to bound queued data. */
1782 if (ia_vcc->pcr < iadev->rate_limit) {
1783 if (vcc->qos.txtp.max_sdu != 0) {
1784 if (ia_vcc->pcr > 60000)
1785 vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1786 else if (ia_vcc->pcr > 2000)
1787 vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1789 vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1792 vcc->sk->sk_sndbuf = 24576;
1795 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1796 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1799 memset((caddr_t)vc, 0, sizeof(*vc));
1800 memset((caddr_t)evc, 0, sizeof(*evc));
1802 /* store the most significant 4 bits of vci as the last 4 bits
1803 of first part of atm header.
1804 store the last 12 bits of vci as first 12 bits of the second
1805 part of the atm header.
1807 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1808 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1810 /* check the following for different traffic classes */
/* UBR: CRC append mode; ACR is the floating-point cell rate. */
1811 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1814 vc->status = CRC_APPEND;
1815 vc->acr = cellrate_to_float(iadev->LineRate);
1816 if (vcc->qos.txtp.pcr > 0)
1817 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1818 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1819 vcc->qos.txtp.max_pcr,vc->acr);)
/* ABR: start from defaults, then overlay every parameter the caller
 * supplied in txtp (mcr admission-checked against the line rate). */
1821 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1822 { srv_cls_param_t srv_p;
1823 IF_ABR(printk("Tx ABR VCC\n");)
1824 init_abr_vc(iadev, &srv_p);
1825 if (vcc->qos.txtp.pcr > 0)
1826 srv_p.pcr = vcc->qos.txtp.pcr;
1827 if (vcc->qos.txtp.min_pcr > 0) {
1828 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1829 if (tmpsum > iadev->LineRate)
1831 srv_p.mcr = vcc->qos.txtp.min_pcr;
1832 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1835 if (vcc->qos.txtp.icr)
1836 srv_p.icr = vcc->qos.txtp.icr;
1837 if (vcc->qos.txtp.tbe)
1838 srv_p.tbe = vcc->qos.txtp.tbe;
1839 if (vcc->qos.txtp.frtt)
1840 srv_p.frtt = vcc->qos.txtp.frtt;
1841 if (vcc->qos.txtp.rif)
1842 srv_p.rif = vcc->qos.txtp.rif;
1843 if (vcc->qos.txtp.rdf)
1844 srv_p.rdf = vcc->qos.txtp.rdf;
1845 if (vcc->qos.txtp.nrm_pres)
1846 srv_p.nrm = vcc->qos.txtp.nrm;
1847 if (vcc->qos.txtp.trm_pres)
1848 srv_p.trm = vcc->qos.txtp.trm;
1849 if (vcc->qos.txtp.adtf_pres)
1850 srv_p.adtf = vcc->qos.txtp.adtf;
1851 if (vcc->qos.txtp.cdf_pres)
1852 srv_p.cdf = vcc->qos.txtp.cdf;
/* Initial cell rate can never exceed the peak cell rate. */
1853 if (srv_p.icr > srv_p.pcr)
1854 srv_p.icr = srv_p.pcr;
1855 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1856 srv_p.pcr, srv_p.mcr);)
1857 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1858 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1859 if (iadev->phy_type & FE_25MBIT_PHY) {
1860 printk("IA: CBR not support\n");
1863 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1864 IF_CBR(printk("PCR is not availble\n");)
1868 vc->status = CRC_APPEND;
1869 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
/* NOTE(review): runtime string below is missing the backslash in "\n" —
 * it prints a literal trailing 'n' with no newline. */
1874 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1876 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1877 IF_EVENT(printk("ia open_tx returning \n");)
/*
 * tx_init() - one-time initialization of the transmit (segmentation) side.
 * Allocates the tx DLE ring, resets the segmentation engine, lays out the
 * transmit control memory (buffer descriptors, TCQ, PRQ, CBR/UBR/ABR
 * scheduler tables, main/extended VC tables), allocates per-descriptor
 * CPCS-trailer buffers (DMA-mapped), the descriptor timestamp table, and
 * the per-VC testTable, then programs rate/mode registers and brings the
 * segmentation engine online (T_ONLINE).
 * Error paths unwind in reverse order via the labels at the bottom.
 * NOTE(review): line-sampled view — some statements/braces are elided.
 */
1882 static int tx_init(struct atm_dev *dev)
1885 struct tx_buf_desc *buf_desc_ptr;
1886 unsigned int tx_pkt_start;
1898 iadev = INPH_IA_DEV(dev);
1899 spin_lock_init(&iadev->tx_lock);
1901 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1902 readw(iadev->seg_reg+SEG_MASK_REG));)
1904 /* Allocate 4k (boundary aligned) bytes */
1905 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1906 &iadev->tx_dle_dma);
1908 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
/* Ring pointers: read == write == start means "empty". */
1911 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1912 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1913 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
/* NOTE(review): the (u32) cast truncates pointers on 64-bit kernels. */
1914 iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1916 /* write the upper 20 bits of the start address to tx list address register */
1917 writel(iadev->tx_dle_dma & 0xfffff000,
1918 iadev->dma + IPHASE5575_TX_LIST_ADDR);
/* Mask all segmentation interrupts and reset the engine before setup. */
1919 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1920 writew(0, iadev->seg_reg+MODE_REG_0);
1921 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1922 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1923 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1924 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1927 Transmit side control memory map
1928 --------------------------------
1929 Buffer descr 0x0000 (128 - 4K)
1930 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1933 CBR Table 0x1800 (as needed) - 6K
1934 UBR Table 0x3000 (1K - 4K) - 12K
1935 UBR Wait queue 0x4000 (1K - 4K) - 16K
1936 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1937 ABR Tbl - 20K, ABR Wq - 22K
1938 extended VC 0x6000 (1K - 8K) - 24K
1939 VC Table 0x8000 (1K - 32K) - 32K
1941 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1942 and Wait q, which can be allotted later.
1945 /* Buffer Descriptor Table Base address */
1946 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1948 /* initialize each entry in the buffer descriptor table */
1949 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1950 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1952 tx_pkt_start = TX_PACKET_RAM;
/* Descriptors are 1-based; each maps a tx_buf_sz slice of packet RAM. */
1953 for(i=1; i<=iadev->num_tx_desc; i++)
1955 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1956 buf_desc_ptr->desc_mode = AAL5;
1957 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1958 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1960 tx_pkt_start += iadev->tx_buf_sz;
/* One cpcs_trailer_desc per tx descriptor, each with its own DMA-mapped
 * trailer buffer (GFP_DMA). */
1962 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1963 if (!iadev->tx_buf) {
1964 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1967 for (i= 0; i< iadev->num_tx_desc; i++)
1969 struct cpcs_trailer *cpcs;
1971 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1973 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1974 goto err_free_tx_bufs;
1976 iadev->tx_buf[i].cpcs = cpcs;
1977 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1978 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1980 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1981 sizeof(struct desc_tbl_t), GFP_KERNEL);
1982 if (!iadev->desc_tbl) {
1983 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1984 goto err_free_all_tx_bufs;
1987 /* Communication Queues base address */
1988 i = TX_COMP_Q * iadev->memSize;
1989 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
1991 /* Transmit Complete Queue */
1992 writew(i, iadev->seg_reg+TCQ_ST_ADR);
1993 writew(i, iadev->seg_reg+TCQ_RD_PTR);
1994 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
1995 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1996 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
1997 iadev->seg_reg+TCQ_ED_ADR);
1998 /* Fill the TCQ with all the free descriptors. */
1999 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
2000 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
2001 for(i=1; i<=iadev->num_tx_desc; i++)
2003 *tcq_start = (u_short)i;
2007 /* Packet Ready Queue */
2008 i = PKT_RDY_Q * iadev->memSize;
2009 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2010 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2011 iadev->seg_reg+PRQ_ED_ADR);
2012 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2013 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2015 /* Load local copy of PRQ and TCQ ptrs */
2016 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2017 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2018 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2020 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2021 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2022 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2024 /* Just for safety initializing the queue to have desc 1 always */
2025 /* Fill the PRQ with all the free descriptors. */
2026 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2027 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2028 for(i=1; i<=iadev->num_tx_desc; i++)
2030 *prq_start = (u_short)0; /* desc 1 in all entries */
2034 IF_INIT(printk("Start CBR Init\n");)
2035 #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2036 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2037 #else /* Charlie's logic is wrong ? */
2038 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2039 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2040 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2043 IF_INIT(printk("value in register = 0x%x\n",
2044 readw(iadev->seg_reg+CBR_PTR_BASE));)
/* CBR schedule table begin/end/pointer registers, in 16-bit words. */
2045 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2046 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2047 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2048 readw(iadev->seg_reg+CBR_TAB_BEG));)
2049 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2050 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2051 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2052 IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2053 (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2054 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2055 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2056 readw(iadev->seg_reg+CBR_TAB_END+1));)
2057 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
2059 /* Initialize the CBR Scheduling Table */
2060 memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize),
2061 0, iadev->num_vc*6);
/* 3 schedule entries per VC (each entry is 2 bytes of the 6 above). */
2062 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2063 iadev->CbrEntryPt = 0;
2064 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2065 iadev->NumEnabledCBR = 0;
2067 /* UBR scheduling Table and wait queue */
2068 /* initialize all bytes of UBR scheduler table and wait queue to 0
2069 - SCHEDSZ is 1K (# of entries).
2070 - UBR Table size is 4K
2071 - UBR wait queue is 4K
2072 since the table and wait queues are contiguous, all the bytes
2073 can be initialized by one memset.
/* Derive vcsize_sel from num_vc (loop body elided in this view). */
2078 while (i != iadev->num_vc) {
2083 i = MAIN_VC_TABLE * iadev->memSize;
2084 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2085 i = EXT_VC_TABLE * iadev->memSize;
2086 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2087 i = UBR_SCHED_TABLE * iadev->memSize;
2088 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2089 i = UBR_WAIT_Q * iadev->memSize;
2090 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2091 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2092 0, iadev->num_vc*8);
2093 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2094 /* initialize all bytes of ABR scheduler table and wait queue to 0
2095 - SCHEDSZ is 1K (# of entries).
2096 - ABR Table size is 2K
2097 - ABR wait queue is 2K
2098 since the table and wait queues are contiguous, all the bytes
2099 can be initialized by one memset.
2101 i = ABR_SCHED_TABLE * iadev->memSize;
2102 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2103 i = ABR_WAIT_Q * iadev->memSize;
2104 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2106 i = ABR_SCHED_TABLE*iadev->memSize;
2107 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2108 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2109 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
/* testTable: one pointer per VC.
 * NOTE(review): sized with sizeof(long), but it stores struct pointers —
 * should be sizeof(struct testTable_t *); happens to match on common
 * platforms, verify. */
2110 iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
2111 if (!iadev->testTable) {
2112 printk("Get freepage failed\n");
2113 goto err_free_desc_tbl;
/* Clear every on-card VC entry and allocate its host-side testTable slot. */
2115 for(i=0; i<iadev->num_vc; i++)
2117 memset((caddr_t)vc, 0, sizeof(*vc));
2118 memset((caddr_t)evc, 0, sizeof(*evc));
2119 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2121 if (!iadev->testTable[i])
2122 goto err_free_test_tables;
2123 iadev->testTable[i]->lastTime = 0;
2124 iadev->testTable[i]->fract = 0;
2125 iadev->testTable[i]->vc_status = VC_UBR;
2130 /* Other Initialization */
2132 /* Max Rate Register */
/* 25 Mbit PHY: fixed RATE25, UBR only; otherwise line-rate with ABR too. */
2133 if (iadev->phy_type & FE_25MBIT_PHY) {
2134 writew(RATE25, iadev->seg_reg+MAXRATE);
2135 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2138 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2139 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2141 /* Set Idle Header Registers to be sure */
2142 writew(0, iadev->seg_reg+IDLEHEADHI);
2143 writew(0, iadev->seg_reg+IDLEHEADLO);
2145 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2146 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2148 iadev->close_pending = 0;
2149 init_waitqueue_head(&iadev->close_wait);
2150 init_waitqueue_head(&iadev->timeout_wait);
2151 skb_queue_head_init(&iadev->tx_dma_q);
2152 ia_init_rtn_q(&iadev->tx_return_q);
2154 /* RM Cell Protocol ID and Message Type */
2155 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2156 skb_queue_head_init (&iadev->tx_backlog);
2158 /* Mode Register 1 */
2159 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2161 /* Mode Register 0 */
2162 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2164 /* Interrupt Status Register - read to clear */
2165 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2167 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2168 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2169 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2170 iadev->tx_pkt_cnt = 0;
/* Flow-control threshold used by open_tx()/tx_dle_intr(). */
2171 iadev->rate_limit = iadev->LineRate / 3;
/* Error unwinding: free in reverse order of allocation. */
2175 err_free_test_tables:
2177 kfree(iadev->testTable[i]);
2178 kfree(iadev->testTable);
2180 kfree(iadev->desc_tbl);
2181 err_free_all_tx_bufs:
2182 i = iadev->num_tx_desc;
2185 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2187 pci_unmap_single(iadev->pci, desc->dma_addr,
2188 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2191 kfree(iadev->tx_buf);
2193 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
/*
 * ia_int() - top-level PCI interrupt handler.
 * Loops while any of the low 7 bits of the bus status register are set and
 * dispatches to the rx/tx service routines: STAT_REASSINT -> rx_intr,
 * STAT_DLERINT -> rx DLE completion, STAT_SEGINT -> tx_intr,
 * STAT_DLETINT -> tx DLE completion, STAT_FEINT -> front-end handling.
 * DLE bits are acknowledged by writing 1 back to the status register.
 * Returns IRQ_HANDLED/IRQ_NONE via IRQ_RETVAL(handled).
 * NOTE(review): line-sampled view — the calls inside each branch are elided.
 */
2199 static irqreturn_t ia_int(int irq, void *dev_id, struct pt_regs *regs)
2201 struct atm_dev *dev;
2203 unsigned int status;
2207 iadev = INPH_IA_DEV(dev);
2208 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2211 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2212 if (status & STAT_REASSINT)
2215 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2218 if (status & STAT_DLERINT)
2220 /* Clear this bit by writing a 1 to it. */
/* NOTE(review): direct pointer store instead of writel() — bypasses the
 * MMIO accessor; verify on non-x86. */
2221 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2224 if (status & STAT_SEGINT)
2227 IF_EVENT(printk("IA: tx_intr \n");)
2230 if (status & STAT_DLETINT)
2232 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
2235 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2237 if (status & STAT_FEINT)
2238 IaFrontEndIntr(iadev);
2241 return IRQ_RETVAL(handled);
2246 /*----------------------------- entries --------------------------------*/
/*
 * get_esi() - read the adapter's End System Identifier (MAC address).
 * Reads the two MAC registers, converts them to big-endian byte order,
 * and unpacks them most-significant-byte-first into dev->esi[]
 * (MAC1_LEN bytes from mac1 followed by MAC2_LEN bytes from mac2).
 */
2247 static int get_esi(struct atm_dev *dev)
2254 iadev = INPH_IA_DEV(dev);
/* le32_to_cpu(readl(...)) then cpu_to_be32 yields a byte-swapped,
 * network-order value regardless of host endianness. */
2255 mac1 = cpu_to_be32(le32_to_cpu(readl(
2256 iadev->reg+IPHASE5575_MAC1)));
2257 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2258 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
/* Unpack MSB-first: shift selects one byte per iteration. */
2259 for (i=0; i<MAC1_LEN; i++)
2260 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2262 for (i=0; i<MAC2_LEN; i++)
2263 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
/*
 * reset_sar() - reset the SAR chip while preserving PCI config space:
 * saves the first 64 config dwords, pulses the external reset register,
 * then restores the saved config.  Returns a PCIBIOS error code on a
 * failed config read/write (exact return path elided in this view).
 */
2267 static int reset_sar(struct atm_dev *dev)
/* Scratch copy of the device's PCI configuration space (64 dwords). */
2271 unsigned int pci[64];
2273 iadev = INPH_IA_DEV(dev);
2275 if ((error = pci_read_config_dword(iadev->pci,
2276 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
/* Writing 0 to EXT_RESET triggers the board reset. */
2278 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2280 if ((error = pci_write_config_dword(iadev->pci,
2281 i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
/*
 * ia_init() - one-time per-device initialization.
 * Reads PCI config (command word, revision), determines the board
 * variant from the BAR0 mapping size (1MB => 4K VCs, 256KB => 1K VCs),
 * enables bus mastering, ioremaps the board, fills in the register and
 * RAM window pointers in the IADEV, reads the ESI and resets the SAR.
 * Returns 0 on success, negative errno on failure (some error-return
 * lines are elided in this extraction).
 */
2288 static int __init ia_init(struct atm_dev *dev)
2291 unsigned long real_base, base;
2292 unsigned short command;
2293 unsigned char revision;
2296 /* The device has been identified and registered. Now we read
2297 necessary configuration info like memory base address,
2298 interrupt number etc */
2300 IF_INIT(printk(">ia_init\n");)
2301 dev->ci_range.vpi_bits = 0;
2302 dev->ci_range.vci_bits = NR_VCI_LD;
2304 iadev = INPH_IA_DEV(dev);
2305 real_base = pci_resource_start (iadev->pci, 0);
2306 iadev->irq = iadev->pci->irq;
2308 if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))
2309 || (error = pci_read_config_byte(iadev->pci,
2310 PCI_REVISION_ID,&revision)))
2312 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2316 IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2317 dev->number, revision, real_base, iadev->irq);)
2319 /* find mapping size of board */
2321 iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
/* 1MB map => 4K-VC board; 256KB map => 1K-VC board; else unsupported. */
2323 if (iadev->pci_map_size == 0x100000){
2324 iadev->num_vc = 4096;
2325 dev->ci_range.vci_bits = NR_VCI_4K_LD;
2328 else if (iadev->pci_map_size == 0x40000) {
2329 iadev->num_vc = 1024;
2333 printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2336 IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2338 /* enable bus mastering */
2339 pci_set_master(iadev->pci);
2342 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2346 /* mapping the physical address to a virtual address in address space */
2347 base=(unsigned long)ioremap((unsigned long)real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */
2351 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2355 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",
2356 dev->number, revision, base, iadev->irq);)
2358 /* filling the iphase dev structure */
/* Half the BAR is control memory; the other half is packet memory. */
2359 iadev->mem = iadev->pci_map_size /2;
2360 iadev->base_diff = real_base - base;
2361 iadev->real_base = real_base;
2364 /* Bus Interface Control Registers */
2365 iadev->reg = (u32 *) (base + REG_BASE);
2366 /* Segmentation Control Registers */
2367 iadev->seg_reg = (u32 *) (base + SEG_BASE);
2368 /* Reassembly Control Registers */
2369 iadev->reass_reg = (u32 *) (base + REASS_BASE);
2370 /* Front end/ DMA control registers */
2371 iadev->phy = (u32 *) (base + PHY_BASE);
/* NOTE(review): dma deliberately(?) aliases the PHY window — both use
 * PHY_BASE here; confirm against the hardware manual. */
2372 iadev->dma = (u32 *) (base + PHY_BASE);
2373 /* RAM - Segmentation RAm and Reassembly RAM */
2374 iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);
2375 iadev->seg_ram = (base + ACTUAL_SEG_RAM_BASE);
2376 iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);
2378 /* lets print out the above */
2379 IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n",
2380 (u32)iadev->reg,(u32)iadev->seg_reg,(u32)iadev->reass_reg,
2381 (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram,
2382 (u32)iadev->reass_ram);)
2384 /* lets try reading the MAC address */
2385 error = get_esi(dev);
/* On ESI failure, undo the mapping before bailing out. */
2387 iounmap((void *) iadev->base);
2391 for (i=0; i < ESI_LEN; i++)
2392 printk("%s%02X",i ? "-" : "",dev->esi[i]);
2396 if (reset_sar(dev)) {
2397 iounmap((void *) iadev->base);
2398 printk("IA: reset SAR fail, please try again\n");
/*
 * ia_update_stats() - accumulate the board's 16-bit hardware counters
 * (rx/tx cells, dropped packets/cells) into the 32-bit software totals.
 * Reading the registers presumably also clears them (read-to-clear) —
 * TODO confirm against the chip manual.  Skipped when no carrier.
 */
2404 static void ia_update_stats(IADEV *iadev) {
2405 if (!iadev->carrier_detect)
/* Rx cell count is split across two 16-bit counter registers. */
2407 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2408 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2409 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2410 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
/* Tx cell count likewise split low/high. */
2411 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2412 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
/*
 * ia_led_timer() - periodic (HZ/4) timer callback shared by all boards.
 * Toggles each board's activity LED via the bus control register, and
 * on one phase of the blink also refreshes stats, wakes any close()
 * waiter and polls the transmit path.
 */
2416 static void ia_led_timer(unsigned long arg) {
2417 unsigned long flags;
/* Per-board blink phase; sized for the max of 8 boards. */
2418 static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2420 static u32 ctrl_reg;
2421 for (i = 0; i < iadev_count; i++) {
2423 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2424 if (blinking[i] == 0) {
/* LED off phase: also the cheap place to update counters. */
2426 ctrl_reg &= (~CTRL_LED);
2427 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2428 ia_update_stats(ia_dev[i]);
/* LED on phase: wake close() waiters and drain completed tx. */
2432 ctrl_reg |= CTRL_LED;
2433 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2434 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2435 if (ia_dev[i]->close_pending)
2436 wake_up(&ia_dev[i]->close_wait);
2437 ia_tx_poll(ia_dev[i]);
2438 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
/* Re-arm: fires 4 times per second. */
2442 mod_timer(&ia_timer, jiffies + HZ / 4);
/*
 * ia_phy_put() - write one byte-wide PHY register via the memory-mapped
 * PHY window (addr parameter line elided in this extraction).
 */
2446 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2449 writel(value, INPH_IA_DEV(dev)->phy+addr);
/*
 * ia_phy_get() - read one PHY register; the 32-bit readl() result is
 * implicitly truncated to the low byte by the return type.
 */
2452 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2454 return readl(INPH_IA_DEV(dev)->phy+addr);
/*
 * ia_free_tx() - release everything tx_init() allocated: the desc/vcc
 * mapping table, per-VC test tables, the CPCS trailer buffers (each
 * unmapped before the array is freed) and the transmit DLE queue.
 * Mirrors the err_free_* unwind path of tx_init().
 */
2457 static void ia_free_tx(IADEV *iadev)
2461 kfree(iadev->desc_tbl);
2462 for (i = 0; i < iadev->num_vc; i++)
2463 kfree(iadev->testTable[i]);
2464 kfree(iadev->testTable);
2465 for (i = 0; i < iadev->num_tx_desc; i++) {
2466 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
/* Undo the streaming mapping set up when the trailer was allocated. */
2468 pci_unmap_single(iadev->pci, desc->dma_addr,
2469 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2472 kfree(iadev->tx_buf);
2473 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
/*
 * ia_free_rx() - release receive-side resources: the rx_open VCC table
 * and the receive DLE queue's coherent DMA buffer.
 */
2477 static void ia_free_rx(IADEV *iadev)
2479 kfree(iadev->rx_open);
2480 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
/*
 * ia_start() - bring the adapter online: claim the (shared) IRQ, enable
 * PCI memory+master, program the bus control register, run tx_init()
 * and rx_init(), release the front-end reset, then initialize whichever
 * PHY the board carries (25 Mbit, DS3/E3 via PM7345, or SUNI for OC3)
 * and enable loss-of-signal interrupts.  Error unwind lines are partly
 * elided in this extraction.
 */
2484 static int __init ia_start(struct atm_dev *dev)
2490 IF_EVENT(printk(">ia_start\n");)
2491 iadev = INPH_IA_DEV(dev);
2492 if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {
2493 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2494 dev->number, iadev->irq);
/* NOTE(review): the comment below suggests the IRQ may leak on this
 * error path — the actual path is elided here; verify free_irq() runs. */
2498 /* @@@ should release IRQ on error */
2499 /* enabling memory + master */
2500 if ((error = pci_write_config_word(iadev->pci,
2502 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2504 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2505 "master (0x%x)\n",dev->number, error);
2511 /* Maybe we should reset the front end, initialize Bus Interface Control
2512 Registers and see. */
2514 IF_INIT(printk("Bus ctrl reg: %08x\n",
2515 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
/* Preserve only LED and front-end-reset bits; OR in the interrupt
 * enables (list partly elided). */
2516 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2517 ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2525 | CTRL_DLETMASK /* shud be removed l8r */
2532 writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2534 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2535 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2536 printk("Bus status reg after init: %08x\n",
2537 readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2540 error = tx_init(dev);
2543 error = rx_init(dev);
/* Take the front end out of reset once tx/rx are initialized. */
2547 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2548 writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2549 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2550 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2551 phy = 0; /* resolve compiler complaint */
/* PHY register 0 value 0x30 identifies a PM5346. */
2553 if ((phy=ia_phy_get(dev,0)) == 0x30)
2554 printk("IA: pm5346,rev.%d\n",phy&0x0f);
2556 printk("IA: utopia,rev.%0x\n",phy);)
2558 if (iadev->phy_type & FE_25MBIT_PHY)
2559 ia_mb25_init(iadev);
2560 else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2561 ia_suni_pm7345_init(iadev);
2563 error = suni_init(dev);
2567 * Enable interrupt on loss of signal
2568 * SUNI_RSOP_CIE - 0x10
2569 * SUNI_RSOP_CIE_LOSE - 0x04
2571 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2573 error = dev->phy->start(dev);
2577 /* Get iadev->carrier_detect status */
2578 IaFrontEndIntr(iadev);
2587 free_irq(iadev->irq, dev);
/*
 * ia_close() - tear down one VC.  Transmit side: mark the VC not-READY,
 * flush this VC's packets out of the shared tx backlog (requeueing
 * everyone else's), wait (bounded by a PCR-scaled timeout) for its
 * in-flight descriptors to complete, then undo ABR/CBR rate bookkeeping.
 * Receive side: poison the reassembly/VC tables, restore ABR table
 * defaults, drain pending DLEs and mark the VCI closed.
 * NOTE(review): sleep_on()/sleep_on_timeout() are the old racy wait
 * primitives (wakeup can be lost between the unlock and the sleep) —
 * modern code would use wait_event*(); flagged, not changed here.
 */
2592 static void ia_close(struct atm_vcc *vcc)
2596 struct ia_vcc *ia_vcc;
2597 struct sk_buff *skb = NULL;
2598 struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2599 unsigned long closetime, flags;
2602 iadev = INPH_IA_DEV(vcc->dev);
2603 ia_vcc = INPH_IA_VCC(vcc);
2604 if (!ia_vcc) return;
2606 IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
2607 ia_vcc->vc_desc_cnt,vcc->vci);)
2608 clear_bit(ATM_VF_READY,&vcc->flags);
2609 skb_queue_head_init (&tmp_tx_backlog);
2610 skb_queue_head_init (&tmp_vcc_backlog);
2611 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2612 iadev->close_pending++;
2613 sleep_on_timeout(&iadev->timeout_wait, 50);
2614 spin_lock_irqsave(&iadev->tx_lock, flags);
/* Drop this VC's queued skbs; park everyone else's on tmp, then
 * restore them so the backlog order is preserved. */
2615 while((skb = skb_dequeue(&iadev->tx_backlog))) {
2616 if (ATM_SKB(skb)->vcc == vcc){
2617 if (vcc->pop) vcc->pop(vcc, skb);
2618 else dev_kfree_skb_any(skb);
2621 skb_queue_tail(&tmp_tx_backlog, skb);
2623 while((skb = skb_dequeue(&tmp_tx_backlog)))
2624 skb_queue_tail(&iadev->tx_backlog, skb);
2625 IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2626 closetime = jiffies;
/* Timeout scales inversely with the VC's cell rate. */
2627 ctimeout = 300000 / ia_vcc->pcr;
2630 while (ia_vcc->vc_desc_cnt > 0){
2631 if ((jiffies - closetime) >= ctimeout)
2633 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2634 sleep_on(&iadev->close_wait);
2635 spin_lock_irqsave(&iadev->tx_lock, flags);
2637 iadev->close_pending--;
/* Reset this VCI's shaping state back to plain UBR. */
2638 iadev->testTable[vcc->vci]->lastTime = 0;
2639 iadev->testTable[vcc->vci]->fract = 0;
2640 iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2641 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2642 if (vcc->qos.txtp.min_pcr > 0)
2643 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2645 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2646 ia_vcc = INPH_IA_VCC(vcc);
2647 iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2648 ia_cbrVc_close (vcc);
2650 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2653 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2654 // reset reass table
2655 vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2656 vc_table += vcc->vci;
2657 *vc_table = NO_AAL5_PKT;
2659 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2660 vc_table += vcc->vci;
/* Encodes vci in the high bits with mode bits 15 in the low bits —
 * presumably "invalid/closed"; TODO confirm against the chip manual. */
2661 *vc_table = (vcc->vci << 6) | 15;
2662 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2663 struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
2664 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2665 abr_vc_table += vcc->vci;
/* Restore the power-on RDF/AIR defaults for this ABR VC. */
2666 abr_vc_table->rdf = 0x0003;
2667 abr_vc_table->air = 0x5eb1;
2669 // Drain the packets
2670 rx_dle_intr(vcc->dev);
2671 iadev->rx_open[vcc->vci] = 0;
2673 kfree(INPH_IA_VCC(vcc));
2675 INPH_IA_VCC(vcc) = NULL;
2676 clear_bit(ATM_VF_ADDR,&vcc->flags);
/*
 * ia_open() - open a VC on the adapter.  Resolves the channel id,
 * allocates the per-VC ia_vcc state, opens the receive then transmit
 * sides, and finally marks the VC ready.  Only AAL5 is supported.
 */
2680 static int ia_open(struct atm_vcc *vcc, short vpi, int vci)
2683 struct ia_vcc *ia_vcc;
2685 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2687 IF_EVENT(printk("ia: not partially allocated resources\n");)
2688 INPH_IA_VCC(vcc) = NULL;
2690 iadev = INPH_IA_DEV(vcc->dev);
2691 error = atm_find_ci(vcc, &vpi, &vci);
2694 printk("iadev: atm_find_ci returned error %d\n", error);
/* NOTE(review): vci is tested against ATM_VPI_UNSPEC and vpi against
 * ATM_VCI_UNSPEC — looks transposed; the constants may be equal so it
 * may be benign, but verify before relying on it. */
2699 if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
2701 IF_EVENT(printk("iphase open: unspec part\n");)
2702 set_bit(ATM_VF_ADDR,&vcc->flags);
/* Hardware only does AAL5; reject everything else. */
2704 if (vcc->qos.aal != ATM_AAL5)
2706 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2707 vcc->dev->number, vcc->vpi, vcc->vci);)
2709 /* Device dependent initialization */
2710 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2711 if (!ia_vcc) return -ENOMEM;
2712 INPH_IA_VCC(vcc) = ia_vcc;
2714 if ((error = open_rx(vcc)))
2716 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2721 if ((error = open_tx(vcc)))
2723 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2728 set_bit(ATM_VF_READY,&vcc->flags);
/* One-shot arming of the global LED/stats timer on the first open. */
2732 static u8 first = 1;
2734 ia_timer.expires = jiffies + 3*HZ;
2735 add_timer(&ia_timer);
2740 IF_EVENT(printk("ia open returning\n");)
/* ia_change_qos() - stub: QoS changes are not supported; logs and
 * returns (return value line elided in this view). */
2744 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2746 IF_EVENT(printk(">ia_change_qos\n");)
/*
 * ia_ioctl() - driver-private ioctl multiplexer (IA_CMD), forwarding
 * anything else to the PHY's ioctl.  Sub-commands dump device state,
 * segmentation/reassembly registers, SUNI stats, recycle rx buffers,
 * poke the front end, and set the debug flag.  Most paths require
 * CAP_NET_ADMIN.
 */
2750 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
2756 IF_EVENT(printk(">ia_ioctl\n");)
2757 if (cmd != IA_CMD) {
2758 if (!dev->phy->ioctl) return -EINVAL;
2759 return dev->phy->ioctl(dev,cmd,arg);
2761 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2762 board = ia_cmds.status;
/* NOTE(review): valid indices are 0..iadev_count-1, so the check
 * presumably should be board >= iadev_count — off-by-one; verify. */
2763 if ((board < 0) || (board > iadev_count))
2765 iadev = ia_dev[board];
2766 switch (ia_cmds.cmd) {
2769 switch (ia_cmds.sub_cmd) {
2771 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2772 if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2776 case MEMDUMP_SEGREG:
2777 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2778 tmps = (u16 *)ia_cmds.buf;
2779 for(i=0; i<0x80; i+=2, tmps++)
2780 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2784 case MEMDUMP_REASSREG:
2785 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2786 tmps = (u16 *)ia_cmds.buf;
2787 for(i=0; i<0x80; i+=2, tmps++)
2788 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2794 ia_regs_t *regs_local;
2798 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2799 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2800 if (!regs_local) return -ENOMEM;
/* NOTE(review): "®s_local" on the next two lines is mojibake for
 * "&regs_local" (an "&reg" sequence mangled into the (R) sign during
 * extraction) — restore "&regs_local->..." in the real file. */
2801 ffL = ®s_local->ffredn;
2802 rfL = ®s_local->rfredn;
2803 /* Copy real rfred registers into the local copy */
2804 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2805 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2806 /* Copy real ffred registers into the local copy */
2807 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2808 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2810 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2815 printk("Board %d registers dumped\n", board);
2821 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2829 printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2830 printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2835 struct k_sonet_stats *stats;
2836 stats = &PRIV(_ia_dev[board])->sonet_stats;
2837 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2838 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2839 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2840 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2841 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2842 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2843 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2844 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2845 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2850 if (!capable(CAP_NET_ADMIN)) return -EPERM;
/* Recycle every rx descriptor, then unmask rx interrupts. */
2851 for (i = 1; i <= iadev->num_rx_desc; i++)
2852 free_desc(_ia_dev[board], i);
2853 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2854 iadev->reass_reg+REASS_MASK_REG);
2861 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2862 IaFrontEndIntr(iadev);
2865 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2868 IADebugFlag = ia_cmds.maddr;
2869 printk("New debug option loaded\n");
/* ia_getsockopt() - stub: no driver-level socket options; logs and
 * returns (return value line elided in this view). */
2885 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2886 void *optval, int optlen)
2888 IF_EVENT(printk(">ia_getsockopt\n");)
/* ia_setsockopt() - stub: no driver-level socket options; logs and
 * returns (return value line elided in this view). */
2892 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2893 void *optval, int optlen)
2895 IF_EVENT(printk(">ia_setsockopt\n");)
/*
 * ia_pkt_tx() - queue one skb for transmission.  Validates the skb
 * (open VC, fits the tx buffer, 4-byte aligned data), pulls a free
 * descriptor from the TCQ, records the desc<->vcc/skb mapping, posts
 * the descriptor to the packet-ready queue, builds the buffer
 * descriptor plus a two-entry DLE chain (payload + CPCS trailer), and
 * kicks the DMA transaction counter.  Caller holds iadev->tx_lock.
 * Returns 0 on success/benign drop, non-zero only on paths elided here.
 */
2899 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2902 struct tx_buf_desc *buf_desc_ptr;
2906 struct cpcs_trailer *trailer;
2907 struct ia_vcc *iavcc;
2909 iadev = INPH_IA_DEV(vcc->dev);
2910 iavcc = INPH_IA_VCC(vcc);
2911 if (!iavcc->txing) {
2912 printk("discard packet on closed VC\n");
2916 dev_kfree_skb_any(skb);
/* Need room for the 8-byte CPCS trailer in the same buffer. */
2920 if (skb->len > iadev->tx_buf_sz - 8) {
2921 printk("Transmit size over tx buffer size\n");
2925 dev_kfree_skb_any(skb);
/* DMA engine requires 32-bit-aligned payload. */
2928 if ((u32)skb->data & 3) {
2929 printk("Misaligned SKB\n");
2933 dev_kfree_skb_any(skb);
2936 /* Get a descriptor number from our free descriptor queue
2937 We get the descr number from the TCQ now, since I am using
2938 the TCQ as a free buffer queue. Initially TCQ will be
2939 initialized with all the descriptors and is hence, full.
2941 desc = get_desc (iadev, iavcc);
/* High 3 bits of the TCQ entry carry a completion code. */
2944 comp_code = desc >> 13;
2947 if ((desc == 0) || (desc > iadev->num_tx_desc))
2949 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
/* NOTE(review): tx counter is bumped even though the packet is being
 * dropped here — looks intentional (pretend-success) but verify. */
2950 atomic_inc(&vcc->stats->tx);
2954 dev_kfree_skb_any(skb);
2955 return 0; /* return SUCCESS */
2960 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2964 /* remember the desc and vcc mapping */
2965 iavcc->vc_desc_cnt++;
2966 iadev->desc_tbl[desc-1].iavcc = iavcc;
2967 iadev->desc_tbl[desc-1].txskb = skb;
2968 IA_SKB_STATE(skb) = 0;
/* Advance the TCQ read pointer (ring; wraps at tcq_ed). */
2970 iadev->ffL.tcq_rd += 2;
2971 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2972 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2973 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2975 /* Put the descriptor number in the packet ready queue
2976 and put the updated write pointer in the DLE field
2978 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2980 iadev->ffL.prq_wr += 2;
2981 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2982 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2984 /* Figure out the exact length of the packet and padding required to
2985 make it aligned on a 48 byte boundary. */
/* Round payload + trailer up to a whole number of 48-byte cells. */
2986 total_len = skb->len + sizeof(struct cpcs_trailer);
2987 total_len = ((total_len + 47) / 48) * 48;
2988 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2990 /* Put the packet in a tx buffer */
2991 trailer = iadev->tx_buf[desc-1].cpcs;
2992 IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
2993 (u32)skb, (u32)skb->data, skb->len, desc);)
2994 trailer->control = 0;
/* Trailer length field is big-endian: swap the two bytes of skb->len. */
2996 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2997 trailer->crc32 = 0; /* not needed - dummy bytes */
2999 /* Display the packet */
3000 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
3001 skb->len, tcnter++);
3002 xdump(skb->data, skb->len, "TX: ");
3005 /* Build the buffer descriptor */
3006 buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
3007 buf_desc_ptr += desc; /* points to the corresponding entry */
3008 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
3009 /* Huh ? p.115 of users guide describes this as a read-only register */
3010 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3011 buf_desc_ptr->vc_index = vcc->vci;
3012 buf_desc_ptr->bytes = total_len;
3014 if (vcc->qos.txtp.traffic_class == ATM_ABR)
3015 clear_lockup (vcc, iadev);
3017 /* Build the DLE structure */
3018 wr_ptr = iadev->tx_dle_q.write;
3019 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3020 wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3021 skb->len, PCI_DMA_TODEVICE);
3022 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3023 buf_desc_ptr->buf_start_lo;
3024 /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */
3025 wr_ptr->bytes = skb->len;
3027 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3028 if ((wr_ptr->bytes >> 2) == 0xb)
3029 wr_ptr->bytes = 0x30;
3031 wr_ptr->mode = TX_DLE_PSI;
3032 wr_ptr->prq_wr_ptr_data = 0;
3034 /* end is not to be used for the DLE q */
3035 if (++wr_ptr == iadev->tx_dle_q.end)
3036 wr_ptr = iadev->tx_dle_q.start;
3038 /* Build trailer dle */
3039 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3040 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3041 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3043 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3044 wr_ptr->mode = DMA_INT_ENABLE;
3045 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3047 /* end is not to be used for the DLE q */
3048 if (++wr_ptr == iadev->tx_dle_q.end)
3049 wr_ptr = iadev->tx_dle_q.start;
3051 iadev->tx_dle_q.write = wr_ptr;
3052 ATM_DESC(skb) = vcc->vci;
3053 skb_queue_tail(&iadev->tx_dma_q, skb);
3055 atomic_inc(&vcc->stats->tx);
3056 iadev->tx_pkt_cnt++;
3057 /* Increment transaction counter */
/* Two DLEs were queued, so bump the counter by 2 to start DMA. */
3058 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3061 /* add flow control logic */
/* Every 20th packet: shrink the quota when many descriptors are
 * outstanding; note the restore branch is commented out. */
3062 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3063 if (iavcc->vc_desc_cnt > 10) {
3064 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3065 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3066 iavcc->flow_inc = -1;
3067 iavcc->saved_tx_quota = vcc->tx_quota;
3068 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3069 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3070 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3071 iavcc->flow_inc = 0;
3075 IF_TX(printk("ia send done\n");)
/*
 * ia_send() - atmdev_ops send entry point.  Validates the skb and the
 * VC state under tx_lock, then either appends to the backlog (to keep
 * ordering if a backlog already exists, or if ia_pkt_tx() fails) or
 * transmits immediately via ia_pkt_tx().
 */
3079 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3082 struct ia_vcc *iavcc;
3083 unsigned long flags;
3085 iadev = INPH_IA_DEV(vcc->dev);
3086 iavcc = INPH_IA_VCC(vcc);
/* Reject NULL skbs and payloads that cannot fit with the trailer. */
3087 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3090 printk(KERN_CRIT "null skb in ia_send\n");
3091 else dev_kfree_skb_any(skb);
3094 spin_lock_irqsave(&iadev->tx_lock, flags);
3095 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3096 dev_kfree_skb_any(skb);
3097 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3100 ATM_SKB(skb)->vcc = vcc;
/* Preserve FIFO order: if anything is already backlogged, queue
 * behind it rather than transmitting out of order. */
3102 if (skb_peek(&iadev->tx_backlog)) {
3103 skb_queue_tail(&iadev->tx_backlog, skb);
3106 if (ia_pkt_tx (vcc, skb)) {
3107 skb_queue_tail(&iadev->tx_backlog, skb);
3110 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/* ia_sg_send() - stub: scatter/gather send not implemented; logs and
 * returns (return value line elided in this view). */
3115 static int ia_sg_send(struct atm_vcc *vcc, unsigned long start,
3118 IF_EVENT(printk(">ia_sg_send\n");)
/*
 * ia_proc_read() - /proc/atm read callback.  Emits, per *pos step, the
 * board-type line (PHY variant + VC count + memory size) and then a
 * statistics summary.  tmpPtr positioning lines are elided in this
 * extraction — presumably tmpPtr = page + n; verify in the full file.
 */
3123 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3127 IADEV *iadev = INPH_IA_DEV(dev);
3129 if (iadev->phy_type == FE_25MBIT_PHY) {
3130 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3133 if (iadev->phy_type == FE_DS3_PHY)
3134 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3135 else if (iadev->phy_type == FE_E3_PHY)
3136 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3137 else if (iadev->phy_type == FE_UTP_OPTION)
3138 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3140 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
/* 256KB map == 1K VCs, otherwise the 4K-VC board. */
3142 if (iadev->pci_map_size == 0x40000)
3143 n += sprintf(tmpPtr, "-1KVC-");
3145 n += sprintf(tmpPtr, "-4KVC-");
3147 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3148 n += sprintf(tmpPtr, "1M \n");
3149 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3150 n += sprintf(tmpPtr, "512K\n");
3152 n += sprintf(tmpPtr, "128K\n");
3156 return sprintf(page, " Number of Tx Buffer: %u\n"
3157 " Size of Tx Buffer : %u\n"
3158 " Number of Rx Buffer: %u\n"
3159 " Size of Rx Buffer : %u\n"
3160 " Packets Receiverd : %u\n"
3161 " Packets Transmitted: %u\n"
3162 " Cells Received : %u\n"
3163 " Cells Transmitted : %u\n"
3164 " Board Dropped Cells: %u\n"
3165 " Board Dropped Pkts : %u\n",
3166 iadev->num_tx_desc, iadev->tx_buf_sz,
3167 iadev->num_rx_desc, iadev->rx_buf_sz,
3168 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3169 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3170 iadev->drop_rxcell, iadev->drop_rxpkt);
/* atmdev_ops vtable wiring this driver into the ATM core (open/close/
 * send entries are on lines elided in this extraction). */
3175 static const struct atmdev_ops ops = {
3179 .getsockopt = ia_getsockopt,
3180 .setsockopt = ia_setsockopt,
3182 .sg_send = ia_sg_send,
3183 .phy_put = ia_phy_put,
3184 .phy_get = ia_phy_get,
3185 .change_qos = ia_change_qos,
3186 .proc_read = ia_proc_read,
3187 .owner = THIS_MODULE,
/*
 * ia_init_one() - PCI probe callback.  Allocates and zeroes the IADEV,
 * enables the PCI device, registers the ATM device, records the board
 * in the global ia_dev/_ia_dev tables, then runs ia_init()+ia_start()
 * under misc_lock.  Unwinds registration/enable on failure.
 */
3190 static int __devinit ia_init_one(struct pci_dev *pdev,
3191 const struct pci_device_id *ent)
3193 struct atm_dev *dev;
3195 unsigned long flags;
3198 iadev = kmalloc(sizeof(*iadev), GFP_KERNEL);
3203 memset(iadev, 0, sizeof(*iadev));
3206 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3207 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3208 if (pci_enable_device(pdev)) {
3210 goto err_out_free_iadev;
3212 dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3215 goto err_out_disable_dev;
3217 INPH_IA_DEV(dev) = iadev;
3218 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3219 IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
/* Track the board in the module-global tables used by the LED timer
 * and the ioctl path. */
3222 ia_dev[iadev_count] = iadev;
3223 _ia_dev[iadev_count] = dev;
3225 spin_lock_init(&iadev->misc_lock);
3226 /* First fixes first. I don't want to think about this now. */
3227 spin_lock_irqsave(&iadev->misc_lock, flags);
3228 if (ia_init(dev) || ia_start(dev)) {
3229 IF_INIT(printk("IA register failed!\n");)
3231 ia_dev[iadev_count] = NULL;
3232 _ia_dev[iadev_count] = NULL;
3233 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3235 goto err_out_deregister_dev;
3237 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3238 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
/* Link onto the simple singly-linked list of boards. */
3240 iadev->next_board = ia_boards;
3243 pci_set_drvdata(pdev, dev);
3247 err_out_deregister_dev:
3248 atm_dev_deregister(dev);
3249 err_out_disable_dev:
3250 pci_disable_device(pdev);
/*
 * ia_remove_one() - PCI remove callback: disable the PHY's
 * loss-of-signal interrupt (clear bit 0x04 in reg 0x10, mirroring
 * ia_start()), free the IRQ, clear the global board-table slots,
 * deregister from the ATM core, unmap the BAR and disable the device.
 */
3257 static void __devexit ia_remove_one(struct pci_dev *pdev)
3259 struct atm_dev *dev = pci_get_drvdata(pdev);
3260 IADEV *iadev = INPH_IA_DEV(dev);
3262 ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10);
3265 /* De-register device */
3266 free_irq(iadev->irq, dev);
3268 ia_dev[iadev_count] = NULL;
3269 _ia_dev[iadev_count] = NULL;
3270 atm_dev_deregister(dev);
3271 IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)
3273 iounmap((void *) iadev->base);
3274 pci_disable_device(pdev);
/* PCI IDs this driver binds to: Interphase devices 0x0008 and 0x0009
 * (terminator entry is on a line elided in this extraction). */
3282 static struct pci_device_id ia_pci_tbl[] = {
3283 { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3284 { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3287 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
/* pci_driver glue: binds ia_pci_tbl devices to probe/remove above. */
3289 static struct pci_driver ia_driver = {
3291 .id_table = ia_pci_tbl,
3292 .probe = ia_init_one,
3293 .remove = __devexit_p(ia_remove_one),
/*
 * ia_module_init() - module entry: register the PCI driver; on success
 * arm the global LED/stats timer, otherwise report no adapter found.
 */
3296 static int __init ia_module_init(void)
3300 ret = pci_module_init(&ia_driver);
3302 ia_timer.expires = jiffies + 3*HZ;
3303 add_timer(&ia_timer);
3305 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
/* ia_module_exit() - module teardown: unregister the PCI driver (which
 * removes each board) and stop the global timer. */
3309 static void __exit ia_module_exit(void)
3311 pci_unregister_driver(&ia_driver);
3313 del_timer(&ia_timer);
/* Module entry/exit registration. */
3316 module_init(ia_module_init);
3317 module_exit(ia_module_exit);