1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and DMA logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to boards with 512K control memory.
34           Add support for all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/version.h>
44 #include <linux/module.h>  
45 #include <linux/kernel.h>  
46 #include <linux/mm.h>  
47 #include <linux/pci.h>  
48 #include <linux/errno.h>  
49 #include <linux/atm.h>  
50 #include <linux/atmdev.h>  
51 #include <linux/sonet.h>  
52 #include <linux/skbuff.h>  
53 #include <linux/time.h>  
54 #include <linux/delay.h>  
55 #include <linux/uio.h>  
56 #include <linux/init.h>  
57 #include <asm/system.h>  
58 #include <asm/io.h>  
59 #include <asm/atomic.h>  
60 #include <asm/uaccess.h>  
61 #include <asm/string.h>  
62 #include <asm/byteorder.h>  
63 #include <linux/vmalloc.h>  
64 #include "iphase.h"               
65 #include "suni.h"                 
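/* byte-swap a 16-bit value (used below to fix the byte order of the AAL5 CPCS trailer length) */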
66 #define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))  
67 struct suni_priv {
68         struct k_sonet_stats sonet_stats; /* link diagnostics */
69         unsigned char loop_mode;        /* loopback mode */
70         struct atm_dev *dev;            /* device back-pointer */
71         struct suni_priv *next;         /* next SUNI */
72 }; 
73 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
74
75 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
76
77 static IADEV *ia_dev[8];
78 static struct atm_dev *_ia_dev[8];
79 static int iadev_count;
80 static void ia_led_timer(unsigned long arg);
81 static struct timer_list ia_timer = TIMER_INITIALIZER(ia_led_timer, 0, 0);
82 struct atm_vcc *vcc_close_que[100];
83 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
84 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
85 static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
86             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
87
88 MODULE_PARM(IA_TX_BUF, "i");
89 MODULE_PARM(IA_TX_BUF_SZ, "i");
90 MODULE_PARM(IA_RX_BUF, "i");
91 MODULE_PARM(IA_RX_BUF_SZ, "i");
92 MODULE_PARM(IADebugFlag, "i");
93
94 MODULE_LICENSE("GPL");
95
96 #if BITS_PER_LONG != 32
97 #  error FIXME: this driver only works on 32-bit platforms
98 #endif
99
100 /**************************** IA_LIB **********************************/
101
102 static void ia_init_rtn_q (IARTN_Q *que) 
103 {
104    que->next = NULL; 
105    que->tail = NULL; 
106 }
107
108 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
109 {
110    data->next = NULL;
111    if (que->next == NULL) 
112       que->next = que->tail = data;
113    else {
114       data->next = que->next;
115       que->next = data;
116    } 
117    return;
118 }
119
120 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
121    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
122    if (!entry) return -1;
123    entry->data = data;
124    entry->next = NULL;
125    if (que->next == NULL) 
126       que->next = que->tail = entry;
127    else {
128       que->tail->next = entry;
129       que->tail = que->tail->next;
130    }      
131    return 1;
132 }
133
134 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
135    IARTN_Q *tmpdata;
136    if (que->next == NULL)
137       return NULL;
138    tmpdata = que->next;
139    if ( que->next == que->tail)  
140       que->next = que->tail = NULL;
141    else 
142       que->next = que->next->next;
143    return tmpdata;
144 }
145
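/*
 * Scan the Transmit Complete Queue (TCQ) and reclaim descriptors the
 * adapter has finished with; skbs for VCs below the rate limit are
 * queued on tx_return_q so ia_tx_poll() can complete them.
 */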
146 static void ia_hack_tcq(IADEV *dev) {
147
148   u_short               desc1;
149   u_short               tcq_wr;
150   struct ia_vcc         *iavcc_r = NULL; 
151   extern void desc_dbg(IADEV *iadev);
152
153   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
154   while (dev->host_tcq_wr != tcq_wr) {
155      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
156      if (!desc1) ;
157      else if (!dev->desc_tbl[desc1 -1].timestamp) {
158         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
159         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
160      }                                 
161      else if (dev->desc_tbl[desc1 -1].timestamp) {
162         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
163            printk("IA: Fatal err in get_desc\n");
164            continue;
165         }
166         iavcc_r->vc_desc_cnt--;
167         dev->desc_tbl[desc1 -1].timestamp = 0;
168         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
169                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
170         if (iavcc_r->pcr < dev->rate_limit) {
171            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
172            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
173               printk("ia_hack_tcq: No memory available\n");
174         } 
175         dev->desc_tbl[desc1 -1].iavcc = NULL;
176         dev->desc_tbl[desc1 -1].txskb = NULL;
177      }
178      dev->host_tcq_wr += 2;
179      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
180         dev->host_tcq_wr = dev->ffL.tcq_st;
181   }
182 } /* ia_hack_tcq */
183
184 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
185   u_short               desc_num, i;
186   struct sk_buff        *skb;
187   struct ia_vcc         *iavcc_r = NULL; 
188   unsigned long delta;
189   static unsigned long timer = 0;
190   int ltimeout;
191   extern void desc_dbg(IADEV *iadev);
192
193   ia_hack_tcq (dev);
194   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){      
195      timer = jiffies; 
196      i=0;
197      while (i < dev->num_tx_desc) {
198         if (!dev->desc_tbl[i].timestamp) {
199            i++;
200            continue;
201         }
202         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
203         delta = jiffies - dev->desc_tbl[i].timestamp;
204         if (delta >= ltimeout) {
205            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
206            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
207               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
208            else 
209               dev->ffL.tcq_rd -= 2;
210            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
211            if (!(skb = dev->desc_tbl[i].txskb) || 
212                           !(iavcc_r = dev->desc_tbl[i].iavcc))
213               printk("Fatal err, desc table vcc or skb is NULL\n");
214            else 
215               iavcc_r->vc_desc_cnt--;
216            dev->desc_tbl[i].timestamp = 0;
217            dev->desc_tbl[i].iavcc = NULL;
218            dev->desc_tbl[i].txskb = NULL;
219         }
220         i++;
221      } /* while */
222   }
223   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
224      return 0xFFFF;
225     
226   /* Get the next available descriptor number from TCQ */
227   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
228
229   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
230      dev->ffL.tcq_rd += 2;
231      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
232      dev->ffL.tcq_rd = dev->ffL.tcq_st;
233      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
234         return 0xFFFF; 
235      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
236   }
237
238   /* get system time */
239   dev->desc_tbl[desc_num -1].timestamp = jiffies;
240   return desc_num;
241 }
242
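/*
 * ABR lockup workaround: every few calls, check whether an ABR VC with an
 * outstanding descriptor appears stalled (scheduler state stuck in
 * ABR_STATE, or cell slot/fraction unchanged since the last check); if so,
 * force the VC back to the idle state, re-insert its VCI in the ABR
 * schedule table and re-enable the transmitter.
 */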
243 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
244   u_char                foundLockUp;
245   vcstatus_t            *vcstatus;
246   u_short               *shd_tbl;
247   u_short               tempCellSlot, tempFract;
248   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
249   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
250   u_int  i;
251
252   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
253      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
254      vcstatus->cnt++;
255      foundLockUp = 0;
256      if( vcstatus->cnt == 0x05 ) {
257         abr_vc += vcc->vci;
258         eabr_vc += vcc->vci;
259         if( eabr_vc->last_desc ) {
260            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
261               /* Wait for 10 Micro sec */
262               udelay(10);
263               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
264                  foundLockUp = 1;
265            }
266            else {
267               tempCellSlot = abr_vc->last_cell_slot;
268               tempFract    = abr_vc->fraction;
269               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
270                          && (tempFract == dev->testTable[vcc->vci]->fract))
271                  foundLockUp = 1;                   
272               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
273               dev->testTable[vcc->vci]->fract = tempFract; 
274            }        
275         } /* last descriptor */            
276         vcstatus->cnt = 0;      
277      } /* vcstatus->cnt */
278         
279      if (foundLockUp) {
280         IF_ABR(printk("LOCK UP found\n");) 
281         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
282         /* Wait for 10 Micro sec */
283         udelay(10); 
284         abr_vc->status &= 0xFFF8;
285         abr_vc->status |= 0x0001;  /* state is idle */
286         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
287         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
288         if (i < dev->num_vc)
289            shd_tbl[i] = vcc->vci;
290         else
291            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
292         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
293         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
294         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
295         vcstatus->cnt = 0;
296      } /* foundLockUp */
297
298   } /* if an ABR VC */
299
300
301 }
302  
303 /*
304 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
305 **
306 **  +----+----+------------------+-------------------------------+
307 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
308 **  +----+----+------------------+-------------------------------+
309 ** 
310 **    R = reserved (written as 0)
311 **    NZ = 0 if 0 cells/sec; 1 otherwise
312 **
313 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
314 */
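/*
** Worked example (illustrative): cr = 1000 cells/sec.  The highest set
** bit of 1000 (0x3E8) is bit 9, so eeeee = 9 and the 9-bit mantissa is
** 1000 & 0x1ff = 0x1E8.  The encoding is NZ | (9 << 9) | 0x1E8 = 0x53E8,
** i.e. 1.953125 x 2^9 = 1000 cells/sec.
*/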
315 static u16
316 cellrate_to_float(u32 cr)
317 {
318
319 #define NZ              0x4000
320 #define M_BITS          9               /* Number of bits in mantissa */
321 #define E_BITS          5               /* Number of bits in exponent */
322 #define M_MASK          0x1ff           
323 #define E_MASK          0x1f
324   u16   flot;
325   u32   tmp = cr & 0x00ffffff;
326   int   i   = 0;
327   if (cr == 0)
328      return 0;
329   while (tmp != 1) {
330      tmp >>= 1;
331      i++;
332   }
333   if (i == M_BITS)
334      flot = NZ | (i << M_BITS) | (cr & M_MASK);
335   else if (i < M_BITS)
336      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
337   else
338      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
339   return flot;
340 }
341
342 #if 0
343 /*
344 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
345 */
346 static u32
347 float_to_cellrate(u16 rate)
348 {
349   u32   exp, mantissa, cps;
350   if ((rate & NZ) == 0)
351      return 0;
352   exp = (rate >> M_BITS) & E_MASK;
353   mantissa = rate & M_MASK;
354   if (exp == 0)
355      return 1;
356   cps = (1 << M_BITS) | mantissa;
357   if (exp == M_BITS)
358      cps = cps;
359   else if (exp > M_BITS)
360      cps <<= (exp - M_BITS);
361   else
362      cps >>= (M_BITS - exp);
363   return cps;
364 }
365 #endif 
366
367 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
368   srv_p->class_type = ATM_ABR;
369   srv_p->pcr        = dev->LineRate;
370   srv_p->mcr        = 0;
371   srv_p->icr        = 0x055cb7;
372   srv_p->tbe        = 0xffffff;
373   srv_p->frtt       = 0x3a;
374   srv_p->rif        = 0xf;
375   srv_p->rdf        = 0xb;
376   srv_p->nrm        = 0x4;
377   srv_p->trm        = 0x7;
378   srv_p->cdf        = 0x3;
379   srv_p->adtf       = 50;
380 }
381
382 static int
383 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
384                                                 struct atm_vcc *vcc, u8 flag)
385 {
386   f_vc_abr_entry  *f_abr_vc;
387   r_vc_abr_entry  *r_abr_vc;
388   u32           icr;
389   u8            trm, nrm, crm;
390   u16           adtf, air, *ptr16;      
391   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
392   f_abr_vc += vcc->vci;       
393   switch (flag) {
394      case 1: /* FFRED initialization */
395 #if 0  /* sanity check */
396        if (srv_p->pcr == 0)
397           return INVALID_PCR;
398        if (srv_p->pcr > dev->LineRate)
399           srv_p->pcr = dev->LineRate;
400        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
401           return MCR_UNAVAILABLE;
402        if (srv_p->mcr > srv_p->pcr)
403           return INVALID_MCR;
404        if (!(srv_p->icr))
405           srv_p->icr = srv_p->pcr;
406        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
407           return INVALID_ICR;
408        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
409           return INVALID_TBE;
410        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
411           return INVALID_FRTT;
412        if (srv_p->nrm > MAX_NRM)
413           return INVALID_NRM;
414        if (srv_p->trm > MAX_TRM)
415           return INVALID_TRM;
416        if (srv_p->adtf > MAX_ADTF)
417           return INVALID_ADTF;
418        else if (srv_p->adtf == 0)
419           srv_p->adtf = 1;
420        if (srv_p->cdf > MAX_CDF)
421           return INVALID_CDF;
422        if (srv_p->rif > MAX_RIF)
423           return INVALID_RIF;
424        if (srv_p->rdf > MAX_RDF)
425           return INVALID_RDF;
426 #endif
427        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
428        f_abr_vc->f_vc_type = ABR;
429        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
430                                   /* i.e. 2**n = 2 << (n-1) */
431        f_abr_vc->f_nrm = nrm << 8 | nrm;
432        trm = 100000/(2 << (16 - srv_p->trm));
433        if ( trm == 0) trm = 1;
434        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
435        crm = srv_p->tbe / nrm;
436        if (crm == 0) crm = 1;
437        f_abr_vc->f_crm = crm & 0xff;
438        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
439        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
440                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
441                                 (1000000/(srv_p->frtt/srv_p->tbe)));
442        f_abr_vc->f_icr = cellrate_to_float(icr);
443        adtf = (10000 * srv_p->adtf)/8192;
444        if (adtf == 0) adtf = 1; 
445        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
446        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
447        f_abr_vc->f_acr = f_abr_vc->f_icr;
448        f_abr_vc->f_status = 0x0042;
449        break;
450     case 0: /* RFRED initialization */  
451        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
452        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
453        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
454        r_abr_vc += vcc->vci;
455        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
456        air = srv_p->pcr << (15 - srv_p->rif);
457        if (air == 0) air = 1;
458        r_abr_vc->r_air = cellrate_to_float(air);
459        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
460        dev->sum_mcr        += srv_p->mcr;
461        dev->n_abr++;
462        break;
463     default:
464        break;
465   }
466   return        0;
467 }
468 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
469    u32 rateLow=0, rateHigh, rate;
470    int entries;
471    struct ia_vcc *ia_vcc;
472
473    int   idealSlot =0, testSlot, toBeAssigned, inc;
474    u32   spacing;
475    u16  *SchedTbl, *TstSchedTbl;
476    u16  cbrVC, vcIndex;
477    u32   fracSlot    = 0;
478    u32   sp_mod      = 0;
479    u32   sp_mod2     = 0;
480
481    /* IpAdjustTrafficParams */
482    if (vcc->qos.txtp.max_pcr <= 0) {
483       IF_ERR(printk("PCR for CBR not defined\n");)
484       return -1;
485    }
486    rate = vcc->qos.txtp.max_pcr;
487    entries = rate / dev->Granularity;
488    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
489                                 entries, rate, dev->Granularity);)
490    if (entries < 1)
491       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
492    rateLow  =  entries * dev->Granularity;
493    rateHigh = (entries + 1) * dev->Granularity;
494    if (3*(rate - rateLow) > (rateHigh - rate))
495       entries++;
496    if (entries > dev->CbrRemEntries) {
497       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
498       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
499                                        entries, dev->CbrRemEntries);)
500       return -EBUSY;
501    }   
502
503    ia_vcc = INPH_IA_VCC(vcc);
504    ia_vcc->NumCbrEntry = entries; 
505    dev->sum_mcr += entries * dev->Granularity; 
506    /* IaFFrednInsertCbrSched */
507    // Starting at an arbitrary location, place the entries into the table
508    // as smoothly as possible
509    cbrVC   = 0;
510    spacing = dev->CbrTotEntries / entries;
511    sp_mod  = dev->CbrTotEntries % entries; // get modulo
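   // Illustrative example: with CbrTotEntries = 512 and entries = 3,
   // spacing = 170 and sp_mod = 2, so the fractional remainder carried in
   // sp_mod2 makes successive slots land ~170.67 entries apart on average.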
512    toBeAssigned = entries;
513    fracSlot = 0;
514    vcIndex  = vcc->vci;
515    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
516    while (toBeAssigned)
517    {
518       // If this is the first time, start the table loading for this connection
519       // as close to entryPoint as possible.
520       if (toBeAssigned == entries)
521       {
522          idealSlot = dev->CbrEntryPt;
523          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
524          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
525             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
526       } else {
527          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
528          // in the table that would be  smoothest
529          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
530          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
531       }
532       if (idealSlot >= (int)dev->CbrTotEntries) 
533          idealSlot -= dev->CbrTotEntries;  
534       // Continuously check around this ideal value until a null
535       // location is encountered.
536       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
537       inc = 0;
538       testSlot = idealSlot;
539       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
540       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
541                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
542       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
543       while (cbrVC)  // If another VC at this location, we have to keep looking
544       {
545           inc++;
546           testSlot = idealSlot - inc;
547           if (testSlot < 0) { // Wrap if necessary
548              testSlot += dev->CbrTotEntries;
549              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
550                                                        (u32)SchedTbl,testSlot);)
551           }
552           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
553           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
554           if (!cbrVC)
555              break;
556           testSlot = idealSlot + inc;
557           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
558              testSlot -= dev->CbrTotEntries;
559              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
560              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
561                                             testSlot, toBeAssigned);)
562           } 
563           // set table index and read in value
564           TstSchedTbl = (u16*)(SchedTbl + testSlot);
565           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
566                           (u32)TstSchedTbl,cbrVC,inc);) 
567           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
568        } /* while */
569        // Move this VCI number into this location of the CBR Sched table.
570        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
571        dev->CbrRemEntries--;
572        toBeAssigned--;
573    } /* while */ 
574
575    /* IaFFrednCbrEnable */
576    dev->NumEnabledCBR++;
577    if (dev->NumEnabledCBR == 1) {
578        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
579        IF_CBR(printk("CBR is enabled\n");)
580    }
581    return 0;
582 }
583 static void ia_cbrVc_close (struct atm_vcc *vcc) {
584    IADEV *iadev;
585    u16 *SchedTbl, NullVci = 0;
586    u32 i, NumFound;
587
588    iadev = INPH_IA_DEV(vcc->dev);
589    iadev->NumEnabledCBR--;
590    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
591    if (iadev->NumEnabledCBR == 0) {
592       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
593       IF_CBR (printk("CBR support disabled\n");)
594    }
595    NumFound = 0;
596    for (i=0; i < iadev->CbrTotEntries; i++)
597    {
598       if (*SchedTbl == vcc->vci) {
599          iadev->CbrRemEntries++;
600          *SchedTbl = NullVci;
601          IF_CBR(NumFound++;)
602       }
603       SchedTbl++;   
604    } 
605    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
606 }
607
608 static int ia_avail_descs(IADEV *iadev) {
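   /* TCQ entries are 16-bit descriptor numbers, so the byte distance
      between the write and read pointers is halved to count the
      descriptors currently available. */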
609    int tmp = 0;
610    ia_hack_tcq(iadev);
611    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
612       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
613    else
614       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
615                    iadev->ffL.tcq_st) / 2;
616    return tmp;
617 }    
618
619 static int ia_que_tx (IADEV *iadev) { 
620    struct sk_buff *skb;
621    int num_desc;
622    struct atm_vcc *vcc;
623    struct ia_vcc *iavcc;
624    static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
625    num_desc = ia_avail_descs(iadev);
626
627    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
628       if (!(vcc = ATM_SKB(skb)->vcc)) {
629          dev_kfree_skb_any(skb);
630          printk("ia_que_tx: Null vcc\n");
631          break;
632       }
633       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
634          dev_kfree_skb_any(skb);
635          printk("Free the SKB on closed vci %d \n", vcc->vci);
636          break;
637       }
638       iavcc = INPH_IA_VCC(vcc);
639       if (ia_pkt_tx (vcc, skb)) {
640          skb_queue_head(&iadev->tx_backlog, skb);
641       }
642       num_desc--;
643    }
644    return 0;
645 }
646
647 void ia_tx_poll (IADEV *iadev) {
648    struct atm_vcc *vcc = NULL;
649    struct sk_buff *skb = NULL, *skb1 = NULL;
650    struct ia_vcc *iavcc;
651    IARTN_Q *  rtne;
652
653    ia_hack_tcq(iadev);
654    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
655        skb = rtne->data.txskb;
656        if (!skb) {
657            printk("ia_tx_poll: skb is null\n");
658            goto out;
659        }
660        vcc = ATM_SKB(skb)->vcc;
661        if (!vcc) {
662            printk("ia_tx_poll: vcc is null\n");
663            dev_kfree_skb_any(skb);
664            goto out;
665        }
666
667        iavcc = INPH_IA_VCC(vcc);
668        if (!iavcc) {
669            printk("ia_tx_poll: iavcc is null\n");
670            dev_kfree_skb_any(skb);
671            goto out;
672        }
673
674        skb1 = skb_dequeue(&iavcc->txing_skb);
675        while (skb1 && (skb1 != skb)) {
676           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
677              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
678           }
679           IF_ERR(printk("Release the SKB not match\n");)
680           if ((vcc->pop) && (skb1->len != 0))
681           {
682              vcc->pop(vcc, skb1);
683              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
684                                                           (long)skb1);)
685           }
686           else 
687              dev_kfree_skb_any(skb1);
688           skb1 = skb_dequeue(&iavcc->txing_skb);
689        }                                                        
690        if (!skb1) {
691           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
692           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
693           break;
694        }
695        if ((vcc->pop) && (skb->len != 0))
696        {
697           vcc->pop(vcc, skb);
698           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
699        }
700        else 
701           dev_kfree_skb_any(skb);
702        kfree(rtne);
703     }
704     ia_que_tx(iadev);
705 out:
706     return;
707 }
708 #if 0
709 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
710 {
711         u32     t;
712         int     i;
713         /*
714          * Issue a command to enable writes to the NOVRAM
715          */
716         NVRAM_CMD (EXTEND + EWEN);
717         NVRAM_CLR_CE;
718         /*
719          * issue the write command
720          */
721         NVRAM_CMD(IAWRITE + addr);
722         /* 
723          * Send the data, starting with D15, then D14, and so on for 16 bits
724          */
725         for (i=15; i>=0; i--) {
726                 NVRAM_CLKOUT (val & 0x8000);
727                 val <<= 1;
728         }
729         NVRAM_CLR_CE;
730         CFG_OR(NVCE);
731         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
732         while (!(t & NVDO))
733                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
734
735         NVRAM_CLR_CE;
736         /*
737          * disable writes again
738          */
739         NVRAM_CMD(EXTEND + EWDS)
740         NVRAM_CLR_CE;
741         CFG_AND(~NVDI);
742 }
743 #endif
744
745 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
746 {
747         u_short val;
748         u32     t;
749         int     i;
750         /*
751          * Read the first bit that was clocked with the falling edge of the
752          * last command data clock
753          */
754         NVRAM_CMD(IAREAD + addr);
755         /*
756          * Now read the rest of the bits, the next bit read is D14, then D13,
757          * and so on.
758          */
759         val = 0;
760         for (i=15; i>=0; i--) {
761                 NVRAM_CLKIN(t);
762                 val |= (t << i);
763         }
764         NVRAM_CLR_CE;
765         CFG_AND(~NVDI);
766         return val;
767 }
768
769 static void ia_hw_type(IADEV *iadev) {
770    u_short memType = ia_eeprom_get(iadev, 25);   
771    iadev->memType = memType;
772    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
773       iadev->num_tx_desc = IA_TX_BUF;
774       iadev->tx_buf_sz = IA_TX_BUF_SZ;
775       iadev->num_rx_desc = IA_RX_BUF;
776       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
777    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
778       if (IA_TX_BUF == DFL_TX_BUFFERS)
779         iadev->num_tx_desc = IA_TX_BUF / 2;
780       else 
781         iadev->num_tx_desc = IA_TX_BUF;
782       iadev->tx_buf_sz = IA_TX_BUF_SZ;
783       if (IA_RX_BUF == DFL_RX_BUFFERS)
784         iadev->num_rx_desc = IA_RX_BUF / 2;
785       else
786         iadev->num_rx_desc = IA_RX_BUF;
787       iadev->rx_buf_sz = IA_RX_BUF_SZ;
788    }
789    else {
790       if (IA_TX_BUF == DFL_TX_BUFFERS) 
791         iadev->num_tx_desc = IA_TX_BUF / 8;
792       else
793         iadev->num_tx_desc = IA_TX_BUF;
794       iadev->tx_buf_sz = IA_TX_BUF_SZ;
795       if (IA_RX_BUF == DFL_RX_BUFFERS)
796         iadev->num_rx_desc = IA_RX_BUF / 8;
797       else
798         iadev->num_rx_desc = IA_RX_BUF;
799       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
800    } 
801    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
802    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
803          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
804          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
805
806 #if 0
807    if ((memType & FE_MASK) == FE_SINGLE_MODE) {
808       iadev->phy_type = PHY_OC3C_S;
809    else if ((memType & FE_MASK) == FE_UTP_OPTION)
810       iadev->phy_type = PHY_UTP155;
811    else
812      iadev->phy_type = PHY_OC3C_M;
813 #endif
814    
815    iadev->phy_type = memType & FE_MASK;
816    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
817                                          memType,iadev->phy_type);)
818    if (iadev->phy_type == FE_25MBIT_PHY) 
819       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
820    else if (iadev->phy_type == FE_DS3_PHY)
821       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
822    else if (iadev->phy_type == FE_E3_PHY) 
823       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
824    else
825        iadev->LineRate = (u32)(ATM_OC3_PCR);
826    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
827
828 }
829
830 static void IaFrontEndIntr(IADEV *iadev) {
831   volatile IA_SUNI *suni;
832   volatile ia_mb25_t *mb25;
833   volatile suni_pm7345_t *suni_pm7345;
834   u32 intr_status;
835   u_int frmr_intr;
836
837   if(iadev->phy_type & FE_25MBIT_PHY) {
838      mb25 = (ia_mb25_t*)iadev->phy;
839      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
840   } else if (iadev->phy_type & FE_DS3_PHY) {
841      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
842      /* clear FRMR interrupts */
843      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
844      iadev->carrier_detect =  
845            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
846   } else if (iadev->phy_type & FE_E3_PHY ) {
847      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
848      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
849      iadev->carrier_detect =
850            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
851   }
852   else { 
853      suni = (IA_SUNI *)iadev->phy;
854      intr_status = suni->suni_rsop_status & 0xff;
855      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
856   }
857   if (iadev->carrier_detect)
858     printk("IA: SUNI carrier detected\n");
859   else
860     printk("IA: SUNI carrier lost signal\n"); 
861   return;
862 }
863
864 void ia_mb25_init (IADEV *iadev)
865 {
866    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
867 #if 0
868    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
869 #endif
870    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
871    mb25->mb25_diag_control = 0;
872    /*
873     * Initialize carrier detect state
874     */
875    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
876    return;
877 }                   
878
879 void ia_suni_pm7345_init (IADEV *iadev)
880 {
881    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
882    if (iadev->phy_type & FE_DS3_PHY)
883    {
884       iadev->carrier_detect = 
885           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
886       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
887       suni_pm7345->suni_ds3_frm_cfg = 1;
888       suni_pm7345->suni_ds3_tran_cfg = 1;
889       suni_pm7345->suni_config = 0;
890       suni_pm7345->suni_splr_cfg = 0;
891       suni_pm7345->suni_splt_cfg = 0;
892    }
893    else 
894    {
895       iadev->carrier_detect = 
896           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
897       suni_pm7345->suni_e3_frm_fram_options = 0x4;
898       suni_pm7345->suni_e3_frm_maint_options = 0x20;
899       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
900       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
901       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
902       suni_pm7345->suni_e3_tran_fram_options = 0x1;
903       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
904       suni_pm7345->suni_splr_cfg = 0x41;
905       suni_pm7345->suni_splt_cfg = 0x41;
906    } 
907    /*
908     * Enable RSOP loss of signal interrupt.
909     */
910    suni_pm7345->suni_intr_enbl = 0x28;
911  
912    /*
913     * Clear error counters
914     */
915    suni_pm7345->suni_id_reset = 0;
916
917    /*
918     * Clear "PMCTST" in master test register.
919     */
920    suni_pm7345->suni_master_test = 0;
921
922    suni_pm7345->suni_rxcp_ctrl = 0x2c;
923    suni_pm7345->suni_rxcp_fctrl = 0x81;
924  
925    suni_pm7345->suni_rxcp_idle_pat_h1 =
926         suni_pm7345->suni_rxcp_idle_pat_h2 =
927         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
928    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
929  
930    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
931    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
932    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
933    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
934  
935    suni_pm7345->suni_rxcp_cell_pat_h1 =
936         suni_pm7345->suni_rxcp_cell_pat_h2 =
937         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
938    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
939  
940    suni_pm7345->suni_rxcp_cell_mask_h1 =
941         suni_pm7345->suni_rxcp_cell_mask_h2 =
942         suni_pm7345->suni_rxcp_cell_mask_h3 =
943         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
944  
945    suni_pm7345->suni_txcp_ctrl = 0xa4;
946    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
947    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
948  
949    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
950                                  SUNI_PM7345_CLB |
951                                  SUNI_PM7345_DLB |
952                                   SUNI_PM7345_PLB);
953 #ifdef __SNMP__
954    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
955 #endif /* __SNMP__ */
956    return;
957 }
958
959
960 /***************************** IA_LIB END *****************************/
961     
962 /* pwang_test debug utility */
963 int tcnter = 0, rcnter = 0;
964 void xdump( u_char*  cp, int  length, char*  prefix )
965 {
966     int col, count;
967     u_char prntBuf[120];
968     u_char*  pBuf = prntBuf;
969     count = 0;
970     while(count < length){
971         pBuf += sprintf( pBuf, "%s", prefix );
972         for(col = 0;count + col < length && col < 16; col++){
973             if (col != 0 && (col % 4) == 0)
974                 pBuf += sprintf( pBuf, " " );
975             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
976         }
977         while(col++ < 16){      /* pad end of buffer with blanks */
978             if ((col % 4) == 0)
979                 sprintf( pBuf, " " );
980             pBuf += sprintf( pBuf, "   " );
981         }
982         pBuf += sprintf( pBuf, "  " );
983         for(col = 0;count + col < length && col < 16; col++){
984             if (isprint((int)cp[count + col]))
985                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
986             else
987                 pBuf += sprintf( pBuf, "." );
988                 }
989         sprintf( pBuf, "\n" );
990         // SPrint(prntBuf);
991         printk("%s", prntBuf);
992         count += col;
993         pBuf = prntBuf;
994     }
995
996 }  /* close xdump(... */
997
998   
999 static struct atm_dev *ia_boards = NULL;  
1000   
1001 #define ACTUAL_RAM_BASE \
1002         RAM_BASE*((iadev->mem)/(128 * 1024))  
1003 #define ACTUAL_SEG_RAM_BASE \
1004         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1005 #define ACTUAL_REASS_RAM_BASE \
1006         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1007   
1008   
1009 /*-- some utilities and memory allocation stuff will come here -------------*/  
1010   
1011 void desc_dbg(IADEV *iadev) {
1012
1013   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1014   u32 tmp, i;
1015   // regval = readl((u32)ia_cmds->maddr);
1016   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1017   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1018                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1019                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1020   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1021                    iadev->ffL.tcq_rd);
1022   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1023   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1024   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1025   i = 0;
1026   while (tcq_st_ptr != tcq_ed_ptr) {
1027       tmp = iadev->seg_ram+tcq_st_ptr;
1028       printk("TCQ slot %d desc = %d  Addr = 0x%x\n", i++, readw(tmp), tmp);
1029       tcq_st_ptr += 2;
1030   }
1031   for(i=0; i <iadev->num_tx_desc; i++)
1032       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1033 }
1034   
1035   
1036 /*----------------------------- Receiving side stuff --------------------------*/  
1037  
1038 static void rx_excp_rcvd(struct atm_dev *dev)  
1039 {  
1040 #if 0 /* closing the receiving side will cause too many excp int */  
1041   IADEV *iadev;  
1042   u_short state;  
1043   u_short excpq_rd_ptr;  
1044   //u_short *ptr;  
1045   int vci, error = 1;  
1046   iadev = INPH_IA_DEV(dev);  
1047   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1048   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1049   { printk("state = %x \n", state); 
1050         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1051  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1052         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1053             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1054         // TODO: update exception stat
1055         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1056         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1057         // pwang_test
1058         excpq_rd_ptr += 4;  
1059         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1060             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1061         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1062         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1063   }  
1064 #endif
1065 }  
1066   
1067 static void free_desc(struct atm_dev *dev, int desc)  
1068 {  
1069         IADEV *iadev;  
1070         iadev = INPH_IA_DEV(dev);  
1071         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1072         iadev->rfL.fdq_wr +=2;
1073         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1074                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1075         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1076 }  
1077   
1078   
1079 static int rx_pkt(struct atm_dev *dev)  
1080 {  
1081         IADEV *iadev;  
1082         struct atm_vcc *vcc;  
1083         unsigned short status;  
1084         struct rx_buf_desc *buf_desc_ptr;  
1085         int desc;   
1086         struct dle* wr_ptr;  
1087         int len;  
1088         struct sk_buff *skb;  
1089         u_int buf_addr, dma_addr;  
1090
1091         iadev = INPH_IA_DEV(dev);  
1092         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1093         {  
1094             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1095             return -EINVAL;  
1096         }  
1097         /* mask 1st 3 bits to get the actual descno. */  
1098         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1099         IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1100                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1101               printk(" pcq_wr_ptr = 0x%x\n",
1102                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1103         /* update the read pointer - maybe we should do this at the end */  
1104         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1105                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1106         else  
1107                 iadev->rfL.pcq_rd += 2;
1108         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1109   
1110         /* get the buffer desc entry.  
1111                 update stuff. - doesn't seem to be any update necessary  
1112         */  
1113         buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1114         /* make the ptr point to the corresponding buffer desc entry */  
1115         buf_desc_ptr += desc;     
1116         if (!desc || (desc > iadev->num_rx_desc) || 
1117                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1118             free_desc(dev, desc);
1119             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1120             return -1;
1121         }
1122         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1123         if (!vcc)  
1124         {      
1125                 free_desc(dev, desc); 
1126                 printk("IA: null vcc, drop PDU\n");  
1127                 return -1;  
1128         }  
1129           
1130   
1131         /* might want to check the status bits for errors */  
1132         status = (u_short) (buf_desc_ptr->desc_mode);  
1133         if (status & (RX_CER | RX_PTE | RX_OFL))  
1134         {  
1135                 atomic_inc(&vcc->stats->rx_err);
1136                 IF_ERR(printk("IA: bad packet, dropping it");)  
1137                 if (status & RX_CER) { 
1138                     IF_ERR(printk(" cause: packet CRC error\n");)
1139                 }
1140                 else if (status & RX_PTE) {
1141                     IF_ERR(printk(" cause: packet time out\n");)
1142                 }
1143                 else {
1144                     IF_ERR(printk(" cause: buffer overflow\n");)
1145                 }
1146                 goto out_free_desc;
1147         }  
1148   
1149         /*  
1150                 build DLE.        
1151         */  
1152   
1153         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1154         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
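        /* the reassembly engine leaves dma_start pointing just past the
           received data, so the distance from buf_start is the PDU length */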
1155         len = dma_addr - buf_addr;  
1156         if (len > iadev->rx_buf_sz) {
1157            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1158            atomic_inc(&vcc->stats->rx_err);
1159            goto out_free_desc;
1160         }
1161                   
1162         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1163            if (vcc->vci < 32)
1164               printk("Drop control packets\n");
1165            goto out_free_desc;
1166         }
1167         skb_put(skb,len);  
1168         // pwang_test
1169         ATM_SKB(skb)->vcc = vcc;
1170         ATM_DESC(skb) = desc;        
1171         skb_queue_tail(&iadev->rx_dma_q, skb);  
1172
1173         /* Build the DLE structure */  
1174         wr_ptr = iadev->rx_dle_q.write;  
1175         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1176                 len, PCI_DMA_FROMDEVICE);
1177         wr_ptr->local_pkt_addr = buf_addr;  
1178         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1179         wr_ptr->mode = DMA_INT_ENABLE;  
1180   
1181         /* should take care of wrap-around here too. */  
1182         if(++wr_ptr == iadev->rx_dle_q.end)
1183              wr_ptr = iadev->rx_dle_q.start;
1184         iadev->rx_dle_q.write = wr_ptr;  
1185         udelay(1);  
1186         /* Increment transaction counter */  
1187         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1188 out:    return 0;  
1189 out_free_desc:
1190         free_desc(dev, desc);
1191         goto out;
1192 }  
1193   
1194 static void rx_intr(struct atm_dev *dev)  
1195 {  
1196   IADEV *iadev;  
1197   u_short status;  
1198   u_short state, i;  
1199   
1200   iadev = INPH_IA_DEV(dev);  
1201   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1202   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1203   if (status & RX_PKT_RCVD)  
1204   {  
1205         /* do something */  
1206         /* Basically received an interrupt for receiving a packet.  
1207         A descriptor would have been written to the packet complete   
1208         queue. Get all the descriptors and set up DMA to move the   
1209         packets till the packet complete queue is empty.  
1210         */  
1211         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1212         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1213         while(!(state & PCQ_EMPTY))  
1214         {  
1215              rx_pkt(dev);  
1216              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1217         }  
1218         iadev->rxing = 1;
1219   }  
1220   if (status & RX_FREEQ_EMPT)  
1221   {   
1222      if (iadev->rxing) {
1223         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1224         iadev->rx_tmp_jif = jiffies; 
1225         iadev->rxing = 0;
1226      } 
1227      else if (((jiffies - iadev->rx_tmp_jif) > 50) && 
1228                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1229         for (i = 1; i <= iadev->num_rx_desc; i++)
1230                free_desc(dev, i);
1231 printk("Test logic RUN!!!!\n");
1232         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1233         iadev->rxing = 1;
1234      }
1235      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1236   }  
1237
1238   if (status & RX_EXCP_RCVD)  
1239   {  
1240         /* probably need to handle the exception queue also. */  
1241         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1242         rx_excp_rcvd(dev);  
1243   }  
1244
1245
1246   if (status & RX_RAW_RCVD)  
1247   {  
1248         /* need to handle the raw incoming cells. This depends on   
1249         whether we have programmed the adapter to receive the raw cells or not.  
1250         Else ignore. */  
1251         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1252   }  
1253 }  
1254   
1255   
1256 static void rx_dle_intr(struct atm_dev *dev)  
1257 {  
1258   IADEV *iadev;  
1259   struct atm_vcc *vcc;   
1260   struct sk_buff *skb;  
1261   int desc;  
1262   u_short state;   
1263   struct dle *dle, *cur_dle;  
1264   u_int dle_lp;  
1265   int len;
1266   iadev = INPH_IA_DEV(dev);  
1267  
1268   /* free all the dles done, that is just update our own dle read pointer   
1269         - do we really need to do this. Think not. */  
1270   /* DMA is done, just get all the receive buffers from the rx dma queue  
1271         and push them up to the higher layer protocol. Also free the desc  
1272         associated with the buffer. */  
1273   dle = iadev->rx_dle_q.read;  
1274   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
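  /* each DLE is 16 bytes, so shifting the byte offset right by 4 yields
     the adapter's current index in the DLE ring */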
1275   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1276   while(dle != cur_dle)  
1277   {  
1278       /* free the DMAed skb */  
1279       skb = skb_dequeue(&iadev->rx_dma_q);  
1280       if (!skb)  
1281          goto INCR_DLE;
1282       desc = ATM_DESC(skb);
1283       free_desc(dev, desc);  
1284                
1285       if (!(len = skb->len))
1286       {  
1287           printk("rx_dle_intr: skb len 0\n");  
1288           dev_kfree_skb_any(skb);  
1289       }  
1290       else  
1291       {  
1292           struct cpcs_trailer *trailer;
1293           u_short length;
1294           struct ia_vcc *ia_vcc;
1295
1296           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1297                 len, PCI_DMA_FROMDEVICE);
1298           /* no VCC related housekeeping done as yet. lets see */  
1299           vcc = ATM_SKB(skb)->vcc;
1300           if (!vcc) {
1301               printk("IA: null vcc\n");  
1302               dev_kfree_skb_any(skb);
1303               goto INCR_DLE;
1304           }
1305           ia_vcc = INPH_IA_VCC(vcc);
1306           if (ia_vcc == NULL)
1307           {
1308              atomic_inc(&vcc->stats->rx_err);
1309              dev_kfree_skb_any(skb);
1310              atm_return(vcc, atm_guess_pdu2truesize(len));
1311              goto INCR_DLE;
1312            }
1313           // get real pkt length  pwang_test
1314           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1315                                  skb->len - sizeof(*trailer));
1316           length =  swap(trailer->length);
1317           if ((length > iadev->rx_buf_sz) || (length > 
1318                               (skb->len - sizeof(struct cpcs_trailer))))
1319           {
1320              atomic_inc(&vcc->stats->rx_err);
1321              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1322                                                             length, skb->len);)
1323              dev_kfree_skb_any(skb);
1324              atm_return(vcc, atm_guess_pdu2truesize(len));
1325              goto INCR_DLE;
1326           }
1327           skb_trim(skb, length);
1328           
1329           /* Display the packet */  
1330           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1331           xdump(skb->data, skb->len, "RX: ");
1332           printk("\n");)
1333
1334           IF_RX(printk("rx_dle_intr: skb push");)  
1335           vcc->push(vcc,skb);  
1336           atomic_inc(&vcc->stats->rx);
1337           iadev->rx_pkt_cnt++;
1338       }  
1339 INCR_DLE:
1340       if (++dle == iadev->rx_dle_q.end)  
1341           dle = iadev->rx_dle_q.start;  
1342   }  
1343   iadev->rx_dle_q.read = dle;  
1344   
1345   /* if the interrupts are masked because there were no free desc available,  
1346                 unmask them now. */ 
1347   if (!iadev->rxing) {
1348      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1349      if (!(state & FREEQ_EMPTY)) {
1350         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1351         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1352                                       iadev->reass_reg+REASS_MASK_REG);
1353         iadev->rxing++; 
1354      }
1355   }
1356 }  
1357   
1358   
1359 static int open_rx(struct atm_vcc *vcc)  
1360 {  
1361         IADEV *iadev;  
1362         u_short *vc_table;  
1363         u_short *reass_ptr;  
1364         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1365
1366         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1367         iadev = INPH_IA_DEV(vcc->dev);  
1368         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1369            if (iadev->phy_type & FE_25MBIT_PHY) {
1370                printk("IA: ABR not supported\n");
1371                return -EINVAL; 
1372            }
1373         }
1374         /* Make only this VCI in the vc table valid and let all   
1375                 others be invalid entries */  
1376         vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1377         vc_table += vcc->vci;  
1378         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1379
1380         *vc_table = vcc->vci << 6;
1381         /* Also keep a list of open rx vcs so that we can attach them with  
1382                 incoming PDUs later. */  
1383         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1384                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1385         {  
1386                 srv_cls_param_t srv_p;
1387                 init_abr_vc(iadev, &srv_p);
1388                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1389         } 
1390         else {  /* for UBR  later may need to add CBR logic */
1391                 reass_ptr = (u_short *)
1392                            (iadev->reass_ram+REASS_TABLE*iadev->memSize);
1393                 reass_ptr += vcc->vci;  
1394                 *reass_ptr = NO_AAL5_PKT;
1395         }
1396         
1397         if (iadev->rx_open[vcc->vci])  
1398                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1399                         vcc->dev->number, vcc->vci);  
1400         iadev->rx_open[vcc->vci] = vcc;  
1401         return 0;  
1402 }  
1403   
1404 static int rx_init(struct atm_dev *dev)  
1405 {  
1406         IADEV *iadev;  
1407         struct rx_buf_desc *buf_desc_ptr;  
1408         unsigned long rx_pkt_start = 0;  
1409         void *dle_addr;  
1410         struct abr_vc_table  *abr_vc_table; 
1411         u16 *vc_table;  
1412         u16 *reass_table;  
1413         u16 *ptr16;
1414         int i,j, vcsize_sel;  
1415         u_short freeq_st_adr;  
1416         u_short *freeq_start;  
1417   
1418         iadev = INPH_IA_DEV(dev);  
1419   //    spin_lock_init(&iadev->rx_lock); 
1420   
1421         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1422         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1423                                         &iadev->rx_dle_dma);  
1424         if (!dle_addr)  {  
1425                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1426                 goto err_out;
1427         }
1428         iadev->rx_dle_q.start = (struct dle*)dle_addr;  
1429         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1430         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1431         iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1432         /* the end of the dle q points to the entry after the last  
1433         DLE that can be used. */  
1434   
1435         /* write the upper 20 bits of the start address to rx list address register */  
1436         writel(iadev->rx_dle_dma & 0xfffff000,
1437                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1438         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1439                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1440                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1441         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1442                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1443                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1444   
1445         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1446         writew(0, iadev->reass_reg+MODE_REG);  
1447         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1448   
1449         /* Receive side control memory map  
1450            -------------------------------  
1451   
1452                 Buffer descr    0x0000 (736 - 23K)  
1453                 VP Table        0x5c00 (256 - 512)  
1454                 Except q        0x5e00 (128 - 512)  
1455                 Free buffer q   0x6000 (1K - 2K)  
1456                 Packet comp q   0x6800 (1K - 2K)  
1457                 Reass Table     0x7000 (1K - 2K)  
1458                 VC Table        0x7800 (1K - 2K)  
1459                 ABR VC Table    0x8000 (1K - 32K)  
1460         */  
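        /* Note (inferred from the code, not from a hardware manual): the
           offsets in the map above describe the 1K-VC layout.  Each base
           used below is scaled by iadev->memSize (1 for the 256KB/1K-VC
           boards, 4 for the 1MB/4K-VC boards, see ia_init()), so e.g. the
           RX VC table actually starts at RX_VC_TABLE*iadev->memSize into
           reass_ram. */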
1461           
1462         /* Base address for Buffer Descriptor Table */  
1463         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1464         /* Set the buffer size register */  
1465         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1466   
1467         /* Initialize each entry in the Buffer Descriptor Table */  
1468         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1469         buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1470         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1471         buf_desc_ptr++;  
1472         rx_pkt_start = iadev->rx_pkt_ram;  
1473         for(i=1; i<=iadev->num_rx_desc; i++)  
1474         {  
1475                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1476                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1477                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1478                 buf_desc_ptr++;           
1479                 rx_pkt_start += iadev->rx_buf_sz;  
1480         }  
1481         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1482         i = FREE_BUF_DESC_Q*iadev->memSize; 
1483         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1484         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1485         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1486                                          iadev->reass_reg+FREEQ_ED_ADR);
1487         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1488         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1489                                         iadev->reass_reg+FREEQ_WR_PTR);    
1490         /* Fill the FREEQ with all the free descriptors. */  
1491         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1492         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1493         for(i=1; i<=iadev->num_rx_desc; i++)  
1494         {  
1495                 *freeq_start = (u_short)i;  
1496                 freeq_start++;  
1497         }  
1498         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1499         /* Packet Complete Queue */
1500         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1501         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1502         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1503         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1504         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1505
1506         /* Exception Queue */
1507         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1508         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1509         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1510                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1511         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1512         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1513  
1514         /* Load local copy of FREEQ and PCQ ptrs */
1515         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1516         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1517         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1518         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1519         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1520         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1521         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1522         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1523         
1524         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1525               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1526               iadev->rfL.pcq_wr);)                
1527         /* just for check - no VP TBL */  
1528         /* VP Table */  
1529         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1530         /* initialize VP Table for invalid VPIs  
1531                 - I guess we can write all 1s or 0x000f in the entire memory  
1532                   space or something similar.  
1533         */  
1534   
1535         /* This seems to work and looks right to me too !!! */  
1536         i =  REASS_TABLE * iadev->memSize;
1537         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1538         /* initialize each Reassembly table entry to NO_AAL5_PKT (no packet in progress) */  
1539         reass_table = (u16 *)(iadev->reass_ram+i);  
1540         j = REASS_TABLE_SZ * iadev->memSize;
1541         for(i=0; i < j; i++)  
1542                 *reass_table++ = NO_AAL5_PKT;  
1543        i = 8*1024;
1544        vcsize_sel =  0;
1545        while (i != iadev->num_vc) {
1546           i /= 2;
1547           vcsize_sel++;
1548        }
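       /* Worked example for the loop above: halving 8K until it equals
          num_vc gives vcsize_sel = 3 on a 1K-VC board
          (8192 -> 4096 -> 2048 -> 1024) and vcsize_sel = 1 on a 4K-VC
          board; these low bits are OR-ed into VC_LKUP_BASE just below. */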
1549        i = RX_VC_TABLE * iadev->memSize;
1550        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1551        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1552         j = RX_VC_TABLE_SZ * iadev->memSize;
1553         for(i = 0; i < j; i++)  
1554         {  
1555                /* The reassembly pointer is shifted by 3 plus the lower
1556                3 bits of the VC_LKUP_BASE register (3 for 1K VCs), and
1557                the last byte holds those low 3 bits.
1558                This is programmed later.
1559                */  
1560                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1561                 vc_table++;  
1562         }  
1563         /* ABR VC table */
1564         i =  ABR_VC_TABLE * iadev->memSize;
1565         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1566                    
1567         i = ABR_VC_TABLE * iadev->memSize;
1568         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1569         j = REASS_TABLE_SZ * iadev->memSize;
1570         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1571         for(i = 0; i < j; i++) {                
1572                 abr_vc_table->rdf = 0x0003;
1573                 abr_vc_table->air = 0x5eb1;
1574                 abr_vc_table++;         
1575         }  
1576
1577         /* Initialize other registers */  
1578   
1579         /* VP Filter Register set for VC Reassembly only */  
1580         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1581         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1582         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1583
1584         /* Packet Timeout Count  related Registers : 
1585            Set packet timeout to occur in about 3 seconds
1586            Set Packet Aging Interval count register to overflow in about 4 us
1587         */  
1588         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1589         i = (j >> 6) & 0xff;
1590         j += 2 * (j - 1);
1591         i |= ((j << 2) & 0xff00);
1592         writew(i, iadev->reass_reg+TMOUT_RANGE);
1594         /* initialize the desc_tbl */
1595         for(i=0; i<iadev->num_tx_desc;i++)
1596             iadev->desc_tbl[i].timestamp = 0;
1597
1598         /* to clear the interrupt status register - read it */  
1599         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1600   
1601         /* Mask Register - clear it */  
1602         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1603   
1604         skb_queue_head_init(&iadev->rx_dma_q);  
1605         iadev->rx_free_desc_qhead = NULL;   
1606         iadev->rx_open = kmalloc(sizeof(struct atm_vcc *) * iadev->num_vc, GFP_KERNEL);
1607         if (!iadev->rx_open)  
1608         {  
1609                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1610                 dev->number);  
1611                 goto err_free_dle;
1612         }  
1613         memset(iadev->rx_open, 0, sizeof(struct atm_vcc *) * iadev->num_vc);  
1614         iadev->rxing = 1;
1615         iadev->rx_pkt_cnt = 0;
1616         /* Mode Register */  
1617         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1618         return 0;  
1619
1620 err_free_dle:
1621         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1622                             iadev->rx_dle_dma);  
1623 err_out:
1624         return -ENOMEM;
1625 }  
1626   
1627
1628 /*  
1629         The memory map suggested in appendix A and the coding for it.   
1630         Keeping it around just in case we change our mind later.  
1631   
1632                 Buffer descr    0x0000 (128 - 4K)  
1633                 UBR sched       0x1000 (1K - 4K)  
1634                 UBR Wait q      0x2000 (1K - 4K)  
1635                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1636                                         (128 - 256) each  
1637                 extended VC     0x4000 (1K - 8K)  
1638                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1639                 CBR sched       0x7000 (as needed)  
1640                 VC table        0x8000 (1K - 32K)  
1641 */  
1642   
1643 static void tx_intr(struct atm_dev *dev)  
1644 {  
1645         IADEV *iadev;  
1646         unsigned short status;  
1647         unsigned long flags;
1648
1649         iadev = INPH_IA_DEV(dev);  
1650   
1651         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1652         if (status & TRANSMIT_DONE){
1653
1654            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1655            spin_lock_irqsave(&iadev->tx_lock, flags);
1656            ia_tx_poll(iadev);
1657            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1658            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1659            if (iadev->close_pending)  
1660                wake_up(&iadev->close_wait);
1661         }         
1662         if (status & TCQ_NOT_EMPTY)  
1663         {  
1664             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1665         }  
1666 }  
1667   
1668 static void tx_dle_intr(struct atm_dev *dev)
1669 {
1670         IADEV *iadev;
1671         struct dle *dle, *cur_dle; 
1672         struct sk_buff *skb;
1673         struct atm_vcc *vcc;
1674         struct ia_vcc  *iavcc;
1675         u_int dle_lp;
1676         unsigned long flags;
1677
1678         iadev = INPH_IA_DEV(dev);
1679         spin_lock_irqsave(&iadev->tx_lock, flags);   
1680         dle = iadev->tx_dle_q.read;
1681         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1682                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1683         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
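        /*
         * Rough reading of the two lines above (an inference, not taken
         * from the databook): the hardware's TX list address is masked
         * down to its byte offset within the DLE ring, and ">> 4" turns
         * that offset into a DLE index, which assumes each struct dle is
         * 16 bytes.  The loop below reclaims every DLE up to that point.
         */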
1684         while (dle != cur_dle)
1685         {
1686             /* free the DMAed skb */ 
1687             skb = skb_dequeue(&iadev->tx_dma_q); 
1688             if (!skb) break;
1689
1690             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1691             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1692                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1693                                  PCI_DMA_TODEVICE);
1694             }
1695             vcc = ATM_SKB(skb)->vcc;
1696             if (!vcc) {
1697                   printk("tx_dle_intr: vcc is null\n");
1698                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1699                   dev_kfree_skb_any(skb);
1700
1701                   return;
1702             }
1703             iavcc = INPH_IA_VCC(vcc);
1704             if (!iavcc) {
1705                   printk("tx_dle_intr: iavcc is null\n");
1706                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1707                   dev_kfree_skb_any(skb);
1708                   return;
1709             }
1710             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1711                if ((vcc->pop) && (skb->len != 0))
1712                {     
1713                  vcc->pop(vcc, skb);
1714                } 
1715                else {
1716                  dev_kfree_skb_any(skb);
1717                }
1718             }
1719             else { /* Hold the rate-limited skb for flow control */
1720                IA_SKB_STATE(skb) |= IA_DLED;
1721                skb_queue_tail(&iavcc->txing_skb, skb);
1722             }
1723             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
1724             if (++dle == iadev->tx_dle_q.end)
1725                  dle = iadev->tx_dle_q.start;
1726         }
1727         iadev->tx_dle_q.read = dle;
1728         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1729 }
1730   
1731 static int open_tx(struct atm_vcc *vcc)  
1732 {  
1733         struct ia_vcc *ia_vcc;  
1734         IADEV *iadev;  
1735         struct main_vc *vc;  
1736         struct ext_vc *evc;  
1737         int ret;
1738         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1739         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1740         iadev = INPH_IA_DEV(vcc->dev);  
1741         
1742         if (iadev->phy_type & FE_25MBIT_PHY) {
1743            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1744                printk("IA:  ABR not supported\n");
1745                return -EINVAL; 
1746            }
1747           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1748                printk("IA:  CBR not supported\n");
1749                return -EINVAL; 
1750           }
1751         }
1752         ia_vcc =  INPH_IA_VCC(vcc);
1753         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1754         if (vcc->qos.txtp.max_sdu > 
1755                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1756            printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1757                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1758            INPH_IA_VCC(vcc) = NULL;  
1759            kfree(ia_vcc);
1760            return -EINVAL; 
1761         }
1762         ia_vcc->vc_desc_cnt = 0;
1763         ia_vcc->txing = 1;
1764
1765         /* find pcr */
1766         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1767            vcc->qos.txtp.pcr = iadev->LineRate;
1768         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1769            vcc->qos.txtp.pcr = iadev->LineRate;
1770         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1771            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1772         if (vcc->qos.txtp.pcr > iadev->LineRate)
1773              vcc->qos.txtp.pcr = iadev->LineRate;
1774         ia_vcc->pcr = vcc->qos.txtp.pcr;
1775
1776         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1777         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1778         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1779         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1780         if (ia_vcc->pcr < iadev->rate_limit)
1781            skb_queue_head_init (&ia_vcc->txing_skb);
1782         if (ia_vcc->pcr < iadev->rate_limit) {
1783            if (vcc->qos.txtp.max_sdu != 0) {
1784                if (ia_vcc->pcr > 60000)
1785                   vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1786                else if (ia_vcc->pcr > 2000)
1787                   vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1788                else
1789                  vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1790            }
1791            else
1792              vcc->sk->sk_sndbuf = 24576;
1793         }
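        /* Summary of the block above: rate-limited VCs get a send buffer
           scaled with their cell rate -- roughly 5 SDUs above 60000 c/s,
           4 above 2000 c/s, 3 below that, or a fixed 24576 bytes when no
           max_sdu is given.  E.g. a 3000 c/s VC with max_sdu 1500 would
           end up with sk_sndbuf = 6000 (illustrative numbers only). */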
1794            
1795         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1796         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1797         vc += vcc->vci;  
1798         evc += vcc->vci;  
1799         memset((caddr_t)vc, 0, sizeof(*vc));  
1800         memset((caddr_t)evc, 0, sizeof(*evc));  
1801           
1802         /* store the most significant 4 bits of vci as the last 4 bits   
1803                 of first part of atm header.  
1804            store the last 12 bits of vci as first 12 bits of the second  
1805                 part of the atm header.  
1806         */  
1807         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1808         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
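        /* Worked example (hypothetical VCI): for VCI 0x1234 the two
           assignments above give atm_hdr1 = 0x0001 (the top 4 VCI bits)
           and atm_hdr2 = 0x2340 (the low 12 bits shifted up by 4). */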
1809  
1810         /* check the following for different traffic classes */  
1811         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1812         {  
1813                 vc->type = UBR;  
1814                 vc->status = CRC_APPEND;
1815                 vc->acr = cellrate_to_float(iadev->LineRate);  
1816                 if (vcc->qos.txtp.pcr > 0) 
1817                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1818                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1819                                              vcc->qos.txtp.max_pcr,vc->acr);)
1820         }  
1821         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1822         {       srv_cls_param_t srv_p;
1823                 IF_ABR(printk("Tx ABR VCC\n");)  
1824                 init_abr_vc(iadev, &srv_p);
1825                 if (vcc->qos.txtp.pcr > 0) 
1826                    srv_p.pcr = vcc->qos.txtp.pcr;
1827                 if (vcc->qos.txtp.min_pcr > 0) {
1828                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1829                    if (tmpsum > iadev->LineRate)
1830                        return -EBUSY;
1831                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1832                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1833                 } 
1834                 else srv_p.mcr = 0;
1835                 if (vcc->qos.txtp.icr)
1836                    srv_p.icr = vcc->qos.txtp.icr;
1837                 if (vcc->qos.txtp.tbe)
1838                    srv_p.tbe = vcc->qos.txtp.tbe;
1839                 if (vcc->qos.txtp.frtt)
1840                    srv_p.frtt = vcc->qos.txtp.frtt;
1841                 if (vcc->qos.txtp.rif)
1842                    srv_p.rif = vcc->qos.txtp.rif;
1843                 if (vcc->qos.txtp.rdf)
1844                    srv_p.rdf = vcc->qos.txtp.rdf;
1845                 if (vcc->qos.txtp.nrm_pres)
1846                    srv_p.nrm = vcc->qos.txtp.nrm;
1847                 if (vcc->qos.txtp.trm_pres)
1848                    srv_p.trm = vcc->qos.txtp.trm;
1849                 if (vcc->qos.txtp.adtf_pres)
1850                    srv_p.adtf = vcc->qos.txtp.adtf;
1851                 if (vcc->qos.txtp.cdf_pres)
1852                    srv_p.cdf = vcc->qos.txtp.cdf;    
1853                 if (srv_p.icr > srv_p.pcr)
1854                    srv_p.icr = srv_p.pcr;    
1855                IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1856                                                      srv_p.pcr, srv_p.mcr);)
1857                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1858         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1859                 if (iadev->phy_type & FE_25MBIT_PHY) {
1860                     printk("IA:  CBR not supported\n");
1861                     return -EINVAL; 
1862                 }
1863                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1864                    IF_CBR(printk("PCR is not available\n");)
1865                    return -1;
1866                 }
1867                 vc->type = CBR;
1868                 vc->status = CRC_APPEND;
1869                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1870                     return ret;
1871                 }
1872        } 
1873         else  
1874            printk("iadev:  Traffic type other than UBR, ABR or CBR is not supported\n"); 
1875         
1876         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1877         IF_EVENT(printk("ia open_tx returning \n");)  
1878         return 0;  
1879 }  
1880   
1881   
1882 static int tx_init(struct atm_dev *dev)  
1883 {  
1884         IADEV *iadev;  
1885         struct tx_buf_desc *buf_desc_ptr;
1886         unsigned int tx_pkt_start;  
1887         void *dle_addr;  
1888         int i;  
1889         u_short tcq_st_adr;  
1890         u_short *tcq_start;  
1891         u_short prq_st_adr;  
1892         u_short *prq_start;  
1893         struct main_vc *vc;  
1894         struct ext_vc *evc;   
1895         u_short tmp16;
1896         u32 vcsize_sel;
1897  
1898         iadev = INPH_IA_DEV(dev);  
1899         spin_lock_init(&iadev->tx_lock);
1900  
1901         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1902                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1903
1904         /* Allocate 4k (boundary aligned) bytes */
1905         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1906                                         &iadev->tx_dle_dma);  
1907         if (!dle_addr)  {
1908                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1909                 goto err_out;
1910         }
1911         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1912         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1913         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1914         iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1915
1916         /* write the upper 20 bits of the start address to tx list address register */  
1917         writel(iadev->tx_dle_dma & 0xfffff000,
1918                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1919         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1920         writew(0, iadev->seg_reg+MODE_REG_0);  
1921         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1922         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1923         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1924         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1925   
1926         /*  
1927            Transmit side control memory map  
1928            --------------------------------    
1929          Buffer descr   0x0000 (128 - 4K)  
1930          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1931                                         (512 - 1K) each  
1932                                         TCQ - 4K, PRQ - 5K  
1933          CBR Table      0x1800 (as needed) - 6K  
1934          UBR Table      0x3000 (1K - 4K) - 12K  
1935          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1936          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1937                                 ABR Tbl - 20K, ABR Wq - 22K   
1938          extended VC    0x6000 (1K - 8K) - 24K  
1939          VC Table       0x8000 (1K - 32K) - 32K  
1940           
1941         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1942         and Wait q, which can be allotted later.  
1943         */  
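        /* Note (inferred from the code): as on the receive side, the
           offsets above describe the 1K-VC layout and every base computed
           below is scaled by iadev->memSize, which stretches the same map
           to cover the 4K-VC boards. */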
1944      
1945         /* Buffer Descriptor Table Base address */  
1946         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1947   
1948         /* initialize each entry in the buffer descriptor table */  
1949         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1950         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1951         buf_desc_ptr++;  
1952         tx_pkt_start = TX_PACKET_RAM;  
1953         for(i=1; i<=iadev->num_tx_desc; i++)  
1954         {  
1955                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1956                 buf_desc_ptr->desc_mode = AAL5;  
1957                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1958                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1959                 buf_desc_ptr++;           
1960                 tx_pkt_start += iadev->tx_buf_sz;  
1961         }  
1962         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1963         if (!iadev->tx_buf) {
1964             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1965             goto err_free_dle;
1966         }
1967         for (i= 0; i< iadev->num_tx_desc; i++)
1968         {
1969             struct cpcs_trailer *cpcs;
1970  
1971             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1972             if(!cpcs) {                
1973                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1974                 goto err_free_tx_bufs;
1975             }
1976             iadev->tx_buf[i].cpcs = cpcs;
1977             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1978                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1979         }
1980         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1981                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1982         if (!iadev->desc_tbl) {
1983                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1984                 goto err_free_all_tx_bufs;
1985         }
1986   
1987         /* Communication Queues base address */  
1988         i = TX_COMP_Q * iadev->memSize;
1989         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1990   
1991         /* Transmit Complete Queue */  
1992         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1993         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1994         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1995         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1996         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1997                                               iadev->seg_reg+TCQ_ED_ADR); 
1998         /* Fill the TCQ with all the free descriptors. */  
1999         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2000         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2001         for(i=1; i<=iadev->num_tx_desc; i++)  
2002         {  
2003                 *tcq_start = (u_short)i;  
2004                 tcq_start++;  
2005         }  
2006   
2007         /* Packet Ready Queue */  
2008         i = PKT_RDY_Q * iadev->memSize; 
2009         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2010         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2011                                               iadev->seg_reg+PRQ_ED_ADR);
2012         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2013         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2014          
2015         /* Load local copy of PRQ and TCQ ptrs */
2016         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2017         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2018         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2019
2020         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2021         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2022         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2023
2024         /* Just for safety initializing the queue to have desc 1 always */  
2025         /* Fill the PRQ with all the free descriptors. */  
2026         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2027         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2028         for(i=1; i<=iadev->num_tx_desc; i++)  
2029         {  
2030                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2031                 prq_start++;  
2032         }  
2033         /* CBR Table */  
2034         IF_INIT(printk("Start CBR Init\n");)
2035 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2036         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2037 #else /* Charlie's logic is wrong ? */
2038         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2039         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2040         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2041 #endif
2042
2043         IF_INIT(printk("value in register = 0x%x\n",
2044                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2045         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2046         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2047         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2048                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2049         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2050         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2051         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2052         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2053                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2054         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2055           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2056           readw(iadev->seg_reg+CBR_TAB_END+1));)
2057         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
2058
2059         /* Initialize the CBR Scheduling Table */
2060         memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize), 
2061                                                           0, iadev->num_vc*6); 
2062         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2063         iadev->CbrEntryPt = 0;
2064         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2065         iadev->NumEnabledCBR = 0;
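        /* Rough arithmetic for the lines above, assuming MAX_ATM_155 is
           the nominal OC-3 cell rate of roughly 353,000 cells/s: a 1K-VC
           board gets CbrTotEntries = 3072 schedule slots, so Granularity
           works out to about 114 cells/s per slot -- the smallest CBR
           rate step the scheduler can represent. */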
2066
2067         /* UBR scheduling Table and wait queue */  
2068         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2069                 - SCHEDSZ is 1K (# of entries).  
2070                 - UBR Table size is 4K  
2071                 - UBR wait queue is 4K  
2072            since the table and wait queues are contiguous, all the bytes   
2073            can be initialized by one memset.  
2074         */  
2075         
2076         vcsize_sel = 0;
2077         i = 8*1024;
2078         while (i != iadev->num_vc) {
2079           i /= 2;
2080           vcsize_sel++;
2081         }
2082  
2083         i = MAIN_VC_TABLE * iadev->memSize;
2084         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2085         i =  EXT_VC_TABLE * iadev->memSize;
2086         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2087         i = UBR_SCHED_TABLE * iadev->memSize;
2088         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2089         i = UBR_WAIT_Q * iadev->memSize; 
2090         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2091         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2092                                                        0, iadev->num_vc*8);
2093         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2094         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2095                 - SCHEDSZ is 1K (# of entries).  
2096                 - ABR Table size is 2K  
2097                 - ABR wait queue is 2K  
2098            since the table and wait queues are contiguous, all the bytes   
2099            can be initialized by one memset.  
2100         */  
2101         i = ABR_SCHED_TABLE * iadev->memSize;
2102         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2103         i = ABR_WAIT_Q * iadev->memSize;
2104         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2105  
2106         i = ABR_SCHED_TABLE*iadev->memSize;
2107         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2108         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2109         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2110         iadev->testTable = kmalloc(sizeof(struct testTable_t *) * iadev->num_vc, GFP_KERNEL); 
2111         if (!iadev->testTable) {
2112            printk(KERN_ERR DEV_LABEL " couldn't get mem for testTable\n");
2113            goto err_free_desc_tbl;
2114         }
2115         for(i=0; i<iadev->num_vc; i++)  
2116         {  
2117                 memset((caddr_t)vc, 0, sizeof(*vc));  
2118                 memset((caddr_t)evc, 0, sizeof(*evc));  
2119                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2120                                                 GFP_KERNEL);
2121                 if (!iadev->testTable[i])
2122                         goto err_free_test_tables;
2123                 iadev->testTable[i]->lastTime = 0;
2124                 iadev->testTable[i]->fract = 0;
2125                 iadev->testTable[i]->vc_status = VC_UBR;
2126                 vc++;  
2127                 evc++;  
2128         }  
2129   
2130         /* Other Initialization */  
2131           
2132         /* Max Rate Register */  
2133         if (iadev->phy_type & FE_25MBIT_PHY) {
2134            writew(RATE25, iadev->seg_reg+MAXRATE);  
2135            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2136         }
2137         else {
2138            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2139            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2140         }
2141         /* Set Idle Header Registers to be sure */  
2142         writew(0, iadev->seg_reg+IDLEHEADHI);  
2143         writew(0, iadev->seg_reg+IDLEHEADLO);  
2144   
2145         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2146         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2147
2148         iadev->close_pending = 0;
2149         init_waitqueue_head(&iadev->close_wait);
2150         init_waitqueue_head(&iadev->timeout_wait);
2151         skb_queue_head_init(&iadev->tx_dma_q);  
2152         ia_init_rtn_q(&iadev->tx_return_q);  
2153
2154         /* RM Cell Protocol ID and Message Type */  
2155         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2156         skb_queue_head_init (&iadev->tx_backlog);
2157   
2158         /* Mode Register 1 */  
2159         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2160   
2161         /* Mode Register 0 */  
2162         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2163   
2164         /* Interrupt Status Register - read to clear */  
2165         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2166   
2167         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2168         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2169         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2170         iadev->tx_pkt_cnt = 0;
2171         iadev->rate_limit = iadev->LineRate / 3;
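        /* Note: rate_limit is one third of the line rate; VCs with a PCR
           below it take the flow-controlled path in tx_dle_intr() (their
           skbs are held on txing_skb rather than popped immediately).
           For an OC-3 line rate of roughly 353,000 cells/s the threshold
           is about 117,000 cells/s (illustrative figure). */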
2172   
2173         return 0;
2174
2175 err_free_test_tables:
2176         while (--i >= 0)
2177                 kfree(iadev->testTable[i]);
2178         kfree(iadev->testTable);
2179 err_free_desc_tbl:
2180         kfree(iadev->desc_tbl);
2181 err_free_all_tx_bufs:
2182         i = iadev->num_tx_desc;
2183 err_free_tx_bufs:
2184         while (--i >= 0) {
2185                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2186
2187                 pci_unmap_single(iadev->pci, desc->dma_addr,
2188                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2189                 kfree(desc->cpcs);
2190         }
2191         kfree(iadev->tx_buf);
2192 err_free_dle:
2193         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2194                             iadev->tx_dle_dma);  
2195 err_out:
2196         return -ENOMEM;
2197 }   
2198    
2199 static irqreturn_t ia_int(int irq, void *dev_id, struct pt_regs *regs)  
2200 {  
2201    struct atm_dev *dev;  
2202    IADEV *iadev;  
2203    unsigned int status;  
2204    int handled = 0;
2205
2206    dev = dev_id;  
2207    iadev = INPH_IA_DEV(dev);  
2208    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2209    { 
2210         handled = 1;
2211         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2212         if (status & STAT_REASSINT)  
2213         {  
2214            /* reassembly interrupt */  
2215            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2216            rx_intr(dev);  
2217         }  
2218         if (status & STAT_DLERINT)  
2219         {  
2220            /* Clear this bit by writing a 1 to it. */  
2221            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2222            rx_dle_intr(dev);  
2223         }  
2224         if (status & STAT_SEGINT)  
2225         {  
2226            /* segmentation (transmit) interrupt */ 
2227            IF_EVENT(printk("IA: tx_intr \n");) 
2228            tx_intr(dev);  
2229         }  
2230         if (status & STAT_DLETINT)  
2231         {  
2232            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2233            tx_dle_intr(dev);  
2234         }  
2235         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2236         {  
2237            if (status & STAT_FEINT) 
2238                IaFrontEndIntr(iadev);
2239         }  
2240    }
2241    return IRQ_RETVAL(handled);
2242 }  
2243           
2244           
2245           
2246 /*----------------------------- entries --------------------------------*/  
2247 static int get_esi(struct atm_dev *dev)  
2248 {  
2249         IADEV *iadev;  
2250         int i;  
2251         u32 mac1;  
2252         u16 mac2;  
2253           
2254         iadev = INPH_IA_DEV(dev);  
2255         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2256                                 iadev->reg+IPHASE5575_MAC1)));  
2257         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2258         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2259         for (i=0; i<MAC1_LEN; i++)  
2260                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2261           
2262         for (i=0; i<MAC2_LEN; i++)  
2263                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
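        /* Worked example with made-up register values, assuming MAC1_LEN
           is 4 and MAC2_LEN is 2: mac1 = 0x00204888 and mac2 = 0x1234
           would yield the ESI 00-20-48-88-12-34, i.e. MAC1 supplies the
           first four bytes and MAC2 the last two. */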
2264         return 0;  
2265 }  
2266           
2267 static int reset_sar(struct atm_dev *dev)  
2268 {  
2269         IADEV *iadev;  
2270         int i, error = 1;  
2271         unsigned int pci[64];  
2272           
2273         iadev = INPH_IA_DEV(dev);  
2274         for(i=0; i<64; i++)  
2275           if ((error = pci_read_config_dword(iadev->pci,  
2276                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2277               return error;  
2278         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2279         for(i=0; i<64; i++)  
2280           if ((error = pci_write_config_dword(iadev->pci,  
2281                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2282             return error;  
2283         udelay(5);  
2284         return 0;  
2285 }  
2286           
2287           
2288 static int __init ia_init(struct atm_dev *dev)
2289 {  
2290         IADEV *iadev;  
2291         unsigned long real_base, base;  
2292         unsigned short command;  
2293         unsigned char revision;  
2294         int error, i; 
2295           
2296         /* The device has been identified and registered. Now we read   
2297            necessary configuration info like memory base address,   
2298            interrupt number etc */  
2299           
2300         IF_INIT(printk(">ia_init\n");)  
2301         dev->ci_range.vpi_bits = 0;  
2302         dev->ci_range.vci_bits = NR_VCI_LD;  
2303
2304         iadev = INPH_IA_DEV(dev);  
2305         real_base = pci_resource_start (iadev->pci, 0);
2306         iadev->irq = iadev->pci->irq;
2307                   
2308         if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))   
2309                     || (error = pci_read_config_byte(iadev->pci,   
2310                                 PCI_REVISION_ID,&revision)))   
2311         {  
2312                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2313                                 dev->number,error);  
2314                 return -EINVAL;  
2315         }  
2316         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2317                         dev->number, revision, real_base, iadev->irq);)  
2318           
2319         /* find mapping size of board */  
2320           
2321         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2322
2323         if (iadev->pci_map_size == 0x100000){
2324           iadev->num_vc = 4096;
2325           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2326           iadev->memSize = 4;
2327         }
2328         else if (iadev->pci_map_size == 0x40000) {
2329           iadev->num_vc = 1024;
2330           iadev->memSize = 1;
2331         }
2332         else {
2333            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2334            return -EINVAL;
2335         }
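        /* Summary of the branch above: a 1MB register window means a
           4K-VC board (memSize = 4, 4096 VCs, wider VCI range), a 256KB
           window a 1K-VC board (memSize = 1, 1024 VCs); memSize is then
           used throughout tx_init() and rx_init() to scale the
           control-memory offsets. */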
2336         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2337           
2338         /* enable bus mastering */
2339         pci_set_master(iadev->pci);
2340
2341         /*  
2342          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2343          */  
2344         udelay(10);  
2345           
2346         /* mapping the physical address to a virtual address in address space */  
2347         base = (unsigned long)ioremap((unsigned long)real_base, iadev->pci_map_size);  
2348           
2349         if (!base)  
2350         {  
2351                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2352                             dev->number);  
2353                 return -ENOMEM;  
2354         }  
2355         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",  
2356                         dev->number, revision, base, iadev->irq);)  
2357           
2358         /* filling the iphase dev structure */  
2359         iadev->mem = iadev->pci_map_size /2;  
2360         iadev->base_diff = real_base - base;  
2361         iadev->real_base = real_base;  
2362         iadev->base = base;  
2363                   
2364         /* Bus Interface Control Registers */  
2365         iadev->reg = (u32 *) (base + REG_BASE);  
2366         /* Segmentation Control Registers */  
2367         iadev->seg_reg = (u32 *) (base + SEG_BASE);  
2368         /* Reassembly Control Registers */  
2369         iadev->reass_reg = (u32 *) (base + REASS_BASE);  
2370         /* Front end/ DMA control registers */  
2371         iadev->phy = (u32 *) (base + PHY_BASE);  
2372         iadev->dma = (u32 *) (base + PHY_BASE);  
2373         /* RAM - Segmentation RAM and Reassembly RAM */  
2374         iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);  
2375         iadev->seg_ram =  (base + ACTUAL_SEG_RAM_BASE);  
2376         iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);  
2377   
2378         /* lets print out the above */  
2379         IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n", 
2380           (u32)iadev->reg,(u32)iadev->seg_reg,(u32)iadev->reass_reg, 
2381           (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram, 
2382           (u32)iadev->reass_ram);) 
2383           
2384         /* lets try reading the MAC address */  
2385         error = get_esi(dev);  
2386         if (error) {
2387           iounmap((void *) iadev->base);
2388           return error;  
2389         }
2390         printk("IA: ");
2391         for (i=0; i < ESI_LEN; i++)  
2392                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2393         printk("\n");  
2394   
2395         /* reset SAR */  
2396         if (reset_sar(dev)) {
2397            iounmap((void *) iadev->base);
2398            printk("IA: reset SAR failed, please try again\n");
2399            return 1;
2400         }
2401         return 0;  
2402 }  
2403
2404 static void ia_update_stats(IADEV *iadev) {
2405     if (!iadev->carrier_detect)
2406         return;
2407     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2408     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2409     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2410     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2411     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2412     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2413     return;
2414 }
2415   
2416 static void ia_led_timer(unsigned long arg) {
2417         unsigned long flags;
2418         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2419         u_char i;
2420         static u32 ctrl_reg; 
2421         for (i = 0; i < iadev_count; i++) {
2422            if (ia_dev[i]) {
2423               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2424               if (blinking[i] == 0) {
2425                  blinking[i]++;
2426                  ctrl_reg &= (~CTRL_LED);
2427                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2428                  ia_update_stats(ia_dev[i]);
2429               }
2430               else {
2431                  blinking[i] = 0;
2432                  ctrl_reg |= CTRL_LED;
2433                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2434                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2435                  if (ia_dev[i]->close_pending)  
2436                     wake_up(&ia_dev[i]->close_wait);
2437                  ia_tx_poll(ia_dev[i]);
2438                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2439               }
2440            }
2441         }
2442         mod_timer(&ia_timer, jiffies + HZ / 4);
2443         return;
2444 }
2445
2446 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2447         unsigned long addr)  
2448 {  
2449         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2450 }  
2451   
2452 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2453 {  
2454         return readl(INPH_IA_DEV(dev)->phy+addr);  
2455 }  
2456
2457 static void ia_free_tx(IADEV *iadev)
2458 {
2459         int i;
2460
2461         kfree(iadev->desc_tbl);
2462         for (i = 0; i < iadev->num_vc; i++)
2463                 kfree(iadev->testTable[i]);
2464         kfree(iadev->testTable);
2465         for (i = 0; i < iadev->num_tx_desc; i++) {
2466                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2467
2468                 pci_unmap_single(iadev->pci, desc->dma_addr,
2469                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2470                 kfree(desc->cpcs);
2471         }
2472         kfree(iadev->tx_buf);
2473         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2474                             iadev->tx_dle_dma);  
2475 }
2476
2477 static void ia_free_rx(IADEV *iadev)
2478 {
2479         kfree(iadev->rx_open);
2480         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2481                           iadev->rx_dle_dma);  
2482 }
2483
2484 static int __init ia_start(struct atm_dev *dev)
2485 {  
2486         IADEV *iadev;  
2487         int error;  
2488         unsigned char phy;  
2489         u32 ctrl_reg;  
2490         IF_EVENT(printk(">ia_start\n");)  
2491         iadev = INPH_IA_DEV(dev);  
2492         if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {  
2493                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2494                     dev->number, iadev->irq);  
2495                 error = -EAGAIN;
2496                 goto err_out;
2497         }  
2498         /* @@@ should release IRQ on error */  
2499         /* enabling memory + master */  
2500         if ((error = pci_write_config_word(iadev->pci,   
2501                                 PCI_COMMAND,   
2502                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2503         {  
2504                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2505                     "master (0x%x)\n",dev->number, error);  
2506                 error = -EIO;  
2507                 goto err_free_irq;
2508         }  
2509         udelay(10);  
2510   
2511         /* Maybe we should reset the front end, initialize Bus Interface Control   
2512                 Registers and see. */  
2513   
2514         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2515                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2516         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2517         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2518                         | CTRL_B8  
2519                         | CTRL_B16  
2520                         | CTRL_B32  
2521                         | CTRL_B48  
2522                         | CTRL_B64  
2523                         | CTRL_B128  
2524                         | CTRL_ERRMASK  
2525                        | CTRL_DLETMASK         /* should be removed later */  
2526                         | CTRL_DLERMASK  
2527                         | CTRL_SEGMASK  
2528                         | CTRL_REASSMASK          
2529                         | CTRL_FEMASK  
2530                         | CTRL_CSPREEMPT;  
2531   
2532        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2533   
2534         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2535                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2536            printk("Bus status reg after init: %08x\n", 
2537                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2538     
2539         ia_hw_type(iadev); 
2540         error = tx_init(dev);  
2541         if (error)
2542                 goto err_free_irq;
2543         error = rx_init(dev);  
2544         if (error)
2545                 goto err_free_tx;
2546   
2547         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2548         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2549         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2550                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2551         phy = 0; /* resolve compiler complaint */
2552         IF_INIT ( 
2553         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2554                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2555         else  
2556                 printk("IA: utopia,rev.%0x\n",phy);) 
2557
2558         if (iadev->phy_type &  FE_25MBIT_PHY)
2559            ia_mb25_init(iadev);
2560         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2561            ia_suni_pm7345_init(iadev);
2562         else {
2563                 error = suni_init(dev);
2564                 if (error)
2565                         goto err_free_rx;
2566                 /* 
2567                  * Enable interrupt on loss of signal
2568                  * SUNI_RSOP_CIE - 0x10
2569                  * SUNI_RSOP_CIE_LOSE - 0x04
2570                  */
2571                 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2572 #ifndef MODULE
2573                 error = dev->phy->start(dev);
2574                 if (error)
2575                         goto err_free_rx;
2576 #endif
2577                 /* Get iadev->carrier_detect status */
2578                 IaFrontEndIntr(iadev);
2579         }
2580         return 0;
2581
2582 err_free_rx:
2583         ia_free_rx(iadev);
2584 err_free_tx:
2585         ia_free_tx(iadev);
2586 err_free_irq:
2587         free_irq(iadev->irq, dev);  
2588 err_out:
2589         return error;
2590 }  
2591   
2592 static void ia_close(struct atm_vcc *vcc)  
2593 {  
2594         u16 *vc_table;
2595         IADEV *iadev;
2596         struct ia_vcc *ia_vcc;
2597         struct sk_buff *skb = NULL;
2598         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2599         unsigned long closetime, flags;
2600         int ctimeout;
2601
2602         iadev = INPH_IA_DEV(vcc->dev);
2603         ia_vcc = INPH_IA_VCC(vcc);
2604         if (!ia_vcc) return;  
2605
2606         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2607                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2608         clear_bit(ATM_VF_READY,&vcc->flags);
2609         skb_queue_head_init (&tmp_tx_backlog);
2610         skb_queue_head_init (&tmp_vcc_backlog); 
2611         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2612            iadev->close_pending++;
2613            sleep_on_timeout(&iadev->timeout_wait, 50);
2614            spin_lock_irqsave(&iadev->tx_lock, flags); 
2615            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2616               if (ATM_SKB(skb)->vcc == vcc){ 
2617                  if (vcc->pop) vcc->pop(vcc, skb);
2618                  else dev_kfree_skb_any(skb);
2619               }
2620               else 
2621                  skb_queue_tail(&tmp_tx_backlog, skb);
2622            } 
2623            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2624              skb_queue_tail(&iadev->tx_backlog, skb);
2625            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2626            closetime = jiffies;
2627            ctimeout = 300000 / ia_vcc->pcr;
2628            if (ctimeout == 0)
2629               ctimeout = 1;
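           /* Rough sense of the timeout above: ctimeout = 300000 / PCR
              jiffies, clamped to at least 1 -- e.g. a 10,000 cells/s VC
              waits up to 30 jiffies for its outstanding descriptors
              before the close path gives up (illustrative figure). */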
2630            while (ia_vcc->vc_desc_cnt > 0){
2631               if ((jiffies - closetime) >= ctimeout) 
2632                  break;
2633               spin_unlock_irqrestore(&iadev->tx_lock, flags);
2634               sleep_on(&iadev->close_wait);
2635               spin_lock_irqsave(&iadev->tx_lock, flags);
2636            }    
2637            iadev->close_pending--;
2638            iadev->testTable[vcc->vci]->lastTime = 0;
2639            iadev->testTable[vcc->vci]->fract = 0; 
2640            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2641            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2642               if (vcc->qos.txtp.min_pcr > 0)
2643                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2644            }
2645            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2646               ia_vcc = INPH_IA_VCC(vcc); 
2647               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2648               ia_cbrVc_close (vcc);
2649            }
2650            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2651         }
2652         
2653         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2654            // reset reass table
2655            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2656            vc_table += vcc->vci; 
2657            *vc_table = NO_AAL5_PKT;
2658            // reset vc table
2659            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2660            vc_table += vcc->vci;
2661            *vc_table = (vcc->vci << 6) | 15;
2662            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2663               struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
2664                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2665               abr_vc_table +=  vcc->vci;
2666               abr_vc_table->rdf = 0x0003;
2667               abr_vc_table->air = 0x5eb1;
2668            }                                 
2669            // Drain the packets
2670            rx_dle_intr(vcc->dev); 
2671            iadev->rx_open[vcc->vci] = 0;
2672         }
2673         kfree(INPH_IA_VCC(vcc));  
2674         ia_vcc = NULL;
2675         INPH_IA_VCC(vcc) = NULL;  
2676         clear_bit(ATM_VF_ADDR,&vcc->flags);
2677         return;        
2678 }  
2679   
2680 static int ia_open(struct atm_vcc *vcc, short vpi, int vci)  
2681 {  
2682         IADEV *iadev;  
2683         struct ia_vcc *ia_vcc;  
2684         int error;  
2685         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2686         {  
2687                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2688                 INPH_IA_VCC(vcc) = NULL;  
2689         }  
2690         iadev = INPH_IA_DEV(vcc->dev);  
2691         error = atm_find_ci(vcc, &vpi, &vci);  
2692         if (error)   
2693         {  
2694             printk("iadev: atm_find_ci returned error %d\n", error);  
2695             return error;  
2696         }  
2697         vcc->vpi = vpi;  
2698         vcc->vci = vci;  
2699         if (vci != ATM_VCI_UNSPEC && vpi != ATM_VPI_UNSPEC)  
2700         {  
2701                 IF_EVENT(printk("iphase open: unspec part\n");)  
2702                 set_bit(ATM_VF_ADDR,&vcc->flags);
2703         }  
2704         if (vcc->qos.aal != ATM_AAL5)  
2705                 return -EINVAL;  
2706         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2707                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2708   
2709         /* Device dependent initialization */  
2710         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2711         if (!ia_vcc) return -ENOMEM;  
2712         INPH_IA_VCC(vcc) = ia_vcc;  
2713   
2714         if ((error = open_rx(vcc)))  
2715         {  
2716                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2717                 ia_close(vcc);  
2718                 return error;  
2719         }  
2720   
2721         if ((error = open_tx(vcc)))  
2722         {  
2723                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2724                 ia_close(vcc);  
2725                 return error;  
2726         }  
2727   
2728         set_bit(ATM_VF_READY,&vcc->flags);
2729
2730 #if 0
2731         {
2732            static u8 first = 1; 
2733            if (first) {
2734               ia_timer.expires = jiffies + 3*HZ;
2735               add_timer(&ia_timer);
2736               first = 0;
2737            }           
2738         }
2739 #endif
2740         IF_EVENT(printk("ia open returning\n");)  
2741         return 0;  
2742 }  
2743   
2744 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2745 {  
2746         IF_EVENT(printk(">ia_change_qos\n");)  
2747         return 0;  
2748 }  
2749   
2750 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)  
2751 {  
2752    IA_CMDBUF ia_cmds;
2753    IADEV *iadev;
2754    int i, board;
2755    u16 *tmps;
2756    IF_EVENT(printk(">ia_ioctl\n");)  
2757    if (cmd != IA_CMD) {
2758       if (!dev->phy->ioctl) return -EINVAL;
2759       return dev->phy->ioctl(dev,cmd,arg);
2760    }
2761    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2762    board = ia_cmds.status;
2763    if ((board < 0) || (board >= iadev_count))
2764          board = 0;    
2765    iadev = ia_dev[board];
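   /* For IA_CMD ioctls the board index is passed in the status field of the
      command buffer; out-of-range values fall back to board 0. */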
2766    switch (ia_cmds.cmd) {
2767    case MEMDUMP:
2768    {
2769         switch (ia_cmds.sub_cmd) {
2770           case MEMDUMP_DEV:     
2771              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2772              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2773                 return -EFAULT;
2774              ia_cmds.status = 0;
2775              break;
2776           case MEMDUMP_SEGREG:
2777              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2778              tmps = (u16 *)ia_cmds.buf;
2779              for(i=0; i<0x80; i+=2, tmps++)
2780                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2781              ia_cmds.status = 0;
2782              ia_cmds.len = 0x80;
2783              break;
2784           case MEMDUMP_REASSREG:
2785              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2786              tmps = (u16 *)ia_cmds.buf;
2787              for(i=0; i<0x80; i+=2, tmps++)
2788                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2789              ia_cmds.status = 0;
2790              ia_cmds.len = 0x80;
2791              break;
2792           case MEMDUMP_FFL:
2793           {  
2794              ia_regs_t       *regs_local;
2795              ffredn_t        *ffL;
2796              rfredn_t        *rfL;
2797                      
2798              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2799              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2800              if (!regs_local) return -ENOMEM;
2801              ffL = &regs_local->ffredn;
2802              rfL = &regs_local->rfredn;
2803              /* Copy real rfred registers into the local copy */
2804              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2805                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2806              /* Copy real ffred registers into the local copy */
2807              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2808                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2809
2810              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2811                 kfree(regs_local);
2812                 return -EFAULT;
2813              }
2814              kfree(regs_local);
2815              printk("Board %d registers dumped\n", board);
2816              ia_cmds.status = 0;                  
2817          }      
2818              break;        
2819          case READ_REG:
2820          {  
2821              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2822              desc_dbg(iadev); 
2823              ia_cmds.status = 0; 
2824          }
2825              break;
2826          case 0x6:
2827          {  
2828              ia_cmds.status = 0; 
2829              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2830              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2831          }
2832              break;
2833          case 0x8:
2834          {
2835              struct k_sonet_stats *stats;
2836              stats = &PRIV(_ia_dev[board])->sonet_stats;
2837              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2838              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2839              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2840              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2841              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2842              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2843              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2844              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2845              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2846          }
2847             ia_cmds.status = 0;
2848             break;
2849          case 0x9:
2850             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2851             for (i = 1; i <= iadev->num_rx_desc; i++)
2852                free_desc(_ia_dev[board], i);
2853             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2854                                             iadev->reass_reg+REASS_MASK_REG);
2855             iadev->rxing = 1;
2856             
2857             ia_cmds.status = 0;
2858             break;
2859
2860          case 0xb:
2861             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2862             IaFrontEndIntr(iadev);
2863             break;
2864          case 0xa:
2865             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2866          {  
2867              ia_cmds.status = 0; 
2868              IADebugFlag = ia_cmds.maddr;
2869              printk("New debug option loaded\n");
2870          }
2871              break;
2872          default:
2873              ia_cmds.status = 0;
2874              break;
2875       } 
2876    }
2877       break;
2878    default:
2879       break;
2880
2881    }    
2882    return 0;  
2883 }  
2884   
2885 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2886         void *optval, int optlen)  
2887 {  
2888         IF_EVENT(printk(">ia_getsockopt\n");)  
2889         return -EINVAL;  
2890 }  
2891   
2892 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2893         void *optval, int optlen)  
2894 {  
2895         IF_EVENT(printk(">ia_setsockopt\n");)  
2896         return -EINVAL;  
2897 }  
2898   
2899 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2900         IADEV *iadev;
2901         struct dle *wr_ptr;
2902         struct tx_buf_desc *buf_desc_ptr;
2903         int desc;
2904         int comp_code;
2905         int total_len;
2906         struct cpcs_trailer *trailer;
2907         struct ia_vcc *iavcc;
2908
2909         iadev = INPH_IA_DEV(vcc->dev);  
2910         iavcc = INPH_IA_VCC(vcc);
2911         if (!iavcc->txing) {
2912            printk("discard packet on closed VC\n");
2913            if (vcc->pop)
2914                 vcc->pop(vcc, skb);
2915            else
2916                 dev_kfree_skb_any(skb);
2917            return 0;
2918         }
2919
2920         if (skb->len > iadev->tx_buf_sz - 8) {
2921            printk("Transmit size over tx buffer size\n");
2922            if (vcc->pop)
2923                  vcc->pop(vcc, skb);
2924            else
2925                  dev_kfree_skb_any(skb);
2926           return 0;
2927         }
2928         if ((unsigned long)skb->data & 3) {
2929            printk("Misaligned SKB\n");
2930            if (vcc->pop)
2931                  vcc->pop(vcc, skb);
2932            else
2933                  dev_kfree_skb_any(skb);
2934            return 0;
2935         }       
2936         /* Get a descriptor number from our free descriptor queue  
2937            We get the descr number from the TCQ now, since I am using  
2938            the TCQ as a free buffer queue. Initially TCQ will be   
2939            initialized with all the descriptors and is hence, full.  
2940         */
2941         desc = get_desc (iadev, iavcc);
2942         if (desc == 0xffff) 
2943             return 1;
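        /* A nonzero return tells ia_send() that no free descriptor was
           available, so the skb is placed on tx_backlog and retried later. */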
2944         comp_code = desc >> 13;  
2945         desc &= 0x1fff;  
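        /* Each 16-bit TCQ entry packs a completion code in bits 15:13 above
           the 13-bit descriptor number extracted here. */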
2946   
2947         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2948         {  
2949                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2950                 atomic_inc(&vcc->stats->tx);
2951                 if (vcc->pop)   
2952                     vcc->pop(vcc, skb);   
2953                 else  
2954                     dev_kfree_skb_any(skb);
2955                 return 0;   /* return SUCCESS */
2956         }  
2957   
2958         if (comp_code)  
2959         {  
2960             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2961                                                             desc, comp_code);)  
2962         }  
2963        
2964         /* remember the desc and vcc mapping */
2965         iavcc->vc_desc_cnt++;
2966         iadev->desc_tbl[desc-1].iavcc = iavcc;
2967         iadev->desc_tbl[desc-1].txskb = skb;
2968         IA_SKB_STATE(skb) = 0;
2969
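        /* Consume the TCQ entry just read: advance the shadow read pointer by
           one 16-bit slot, wrap from tcq_ed back to tcq_st, and publish it to
           the TCQ read-pointer register so the slot can be reused. */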
2970         iadev->ffL.tcq_rd += 2;
2971         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2972                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2973         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2974   
2975         /* Put the descriptor number in the packet ready queue  
2976                 and put the updated write pointer in the DLE field   
2977         */   
2978         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2979
2980         iadev->ffL.prq_wr += 2;
2981         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2982                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2983           
2984         /* Figure out the exact length of the packet and padding required to 
2985            make it  aligned on a 48 byte boundary.  */
2986         total_len = skb->len + sizeof(struct cpcs_trailer);  
2987         total_len = ((total_len + 47) / 48) * 48;
2988         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
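        /* Example: a 100-byte SDU plus the 8-byte CPCS trailer gives 108
           bytes, which rounds up to 144 (three full 48-byte cell payloads). */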
2989  
2990         /* Put the packet in a tx buffer */   
2991         trailer = iadev->tx_buf[desc-1].cpcs;
2992         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2993                   skb, skb->data, skb->len, desc);)
2994         trailer->control = 0; 
2995         /*big endian*/ 
2996         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
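        /* Byte-swapped by hand (e.g. 0x0064 is stored as 0x6400), presumably
           because the SAR expects the CPCS trailer length in big-endian order,
           as the comment above notes. */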
2997         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2998
2999         /* Display the packet */  
3000         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
3001                                                         skb->len, tcnter++);  
3002         xdump(skb->data, skb->len, "TX: ");
3003         printk("\n");)
3004
3005         /* Build the buffer descriptor */  
3006         buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
3007         buf_desc_ptr += desc;   /* points to the corresponding entry */  
3008         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3009         /* Huh ? p.115 of users guide describes this as a read-only register */
3010         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3011         buf_desc_ptr->vc_index = vcc->vci;
3012         buf_desc_ptr->bytes = total_len;  
3013
3014         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3015            clear_lockup (vcc, iadev);
3016
3017         /* Build the DLE structure */  
3018         wr_ptr = iadev->tx_dle_q.write;  
3019         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3020         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3021                 skb->len, PCI_DMA_TODEVICE);
3022         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3023                                                   buf_desc_ptr->buf_start_lo;  
3024         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
3025         wr_ptr->bytes = skb->len;  
3026
3027         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3028         if ((wr_ptr->bytes >> 2) == 0xb)
3029            wr_ptr->bytes = 0x30;
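        /* (bytes >> 2) == 0xb matches lengths 0x2c-0x2f; padding the DLE up
           to 0x30 bytes steers clear of the 0x2d-0x2f values named above. */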
3030
3031         wr_ptr->mode = TX_DLE_PSI; 
3032         wr_ptr->prq_wr_ptr_data = 0;
3033   
3034         /* end is not to be used for the DLE q */  
3035         if (++wr_ptr == iadev->tx_dle_q.end)  
3036                 wr_ptr = iadev->tx_dle_q.start;  
3037         
3038         /* Build trailer dle */
3039         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3040         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3041           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3042
3043         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3044         wr_ptr->mode = DMA_INT_ENABLE; 
3045         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3046         
3047         /* end is not to be used for the DLE q */
3048         if (++wr_ptr == iadev->tx_dle_q.end)  
3049                 wr_ptr = iadev->tx_dle_q.start;
3050
3051         iadev->tx_dle_q.write = wr_ptr;  
3052         ATM_DESC(skb) = vcc->vci;
3053         skb_queue_tail(&iadev->tx_dma_q, skb);
3054
3055         atomic_inc(&vcc->stats->tx);
3056         iadev->tx_pkt_cnt++;
3057         /* Increment transaction counter */  
3058         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
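        /* Writing 2 here appears to credit the DMA engine with the two DLEs
           (payload and trailer) just queued on tx_dle_q. */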
3059         
3060 #if 0        
3061         /* add flow control logic */ 
3062         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3063           if (iavcc->vc_desc_cnt > 10) {
3064              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3065             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3066               iavcc->flow_inc = -1;
3067               iavcc->saved_tx_quota = vcc->tx_quota;
3068            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3069              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3070              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3071               iavcc->flow_inc = 0;
3072            }
3073         }
3074 #endif
3075         IF_TX(printk("ia send done\n");)  
3076         return 0;  
3077 }  
3078
3079 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3080 {
3081         IADEV *iadev; 
3082         struct ia_vcc *iavcc;
3083         unsigned long flags;
3084
3085         iadev = INPH_IA_DEV(vcc->dev);
3086         iavcc = INPH_IA_VCC(vcc); 
3087         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3088         {
3089             if (!skb)
3090                 printk(KERN_CRIT "null skb in ia_send\n");
3091             else dev_kfree_skb_any(skb);
3092             return -EINVAL;
3093         }                         
3094         spin_lock_irqsave(&iadev->tx_lock, flags); 
3095         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3096             dev_kfree_skb_any(skb);
3097             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3098             return -EINVAL; 
3099         }
3100         ATM_SKB(skb)->vcc = vcc;
3101  
3102         if (skb_peek(&iadev->tx_backlog)) {
3103            skb_queue_tail(&iadev->tx_backlog, skb);
3104         }
3105         else {
3106            if (ia_pkt_tx (vcc, skb)) {
3107               skb_queue_tail(&iadev->tx_backlog, skb);
3108            }
3109         }
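        /* Queue behind any existing backlog to preserve ordering; a nonzero
           return from ia_pkt_tx (no free descriptor) also lands the skb on
           the backlog to be retried later. */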
3110         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3111         return 0;
3112
3113 }
3114
3115 static int ia_sg_send(struct atm_vcc *vcc, unsigned long start,   
3116         unsigned long size)  
3117 {  
3118         IF_EVENT(printk(">ia_sg_send\n");)  
3119         return 0;  
3120 }  
3121   
3122   
3123 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3124 {
3125   int   left = *pos, n;   
3126   char  *tmpPtr;
3127   IADEV *iadev = INPH_IA_DEV(dev);
3128   if(!left--) {
3129      if (iadev->phy_type == FE_25MBIT_PHY) {
3130        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3131        return n;
3132      }
3133      if (iadev->phy_type == FE_DS3_PHY)
3134         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3135      else if (iadev->phy_type == FE_E3_PHY)
3136         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3137      else if (iadev->phy_type == FE_UTP_OPTION)
3138          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3139      else
3140         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3141      tmpPtr = page + n;
3142      if (iadev->pci_map_size == 0x40000)
3143         n += sprintf(tmpPtr, "-1KVC-");
3144      else
3145         n += sprintf(tmpPtr, "-4KVC-");  
3146      tmpPtr = page + n; 
3147      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3148         n += sprintf(tmpPtr, "1M  \n");
3149      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3150         n += sprintf(tmpPtr, "512K\n");
3151      else
3152        n += sprintf(tmpPtr, "128K\n");
3153      return n;
3154   }
3155   if (!left) {
3156      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3157                            "  Size of Tx Buffer  :  %u\n"
3158                            "  Number of Rx Buffer:  %u\n"
3159                            "  Size of Rx Buffer  :  %u\n"
3160                            "  Packets Received   :  %u\n"
3161                            "  Packets Transmitted:  %u\n"
3162                            "  Cells Received     :  %u\n"
3163                            "  Cells Transmitted  :  %u\n"
3164                            "  Board Dropped Cells:  %u\n"
3165                            "  Board Dropped Pkts :  %u\n",
3166                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3167                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3168                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3169                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3170                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3171   }
3172   return 0;
3173 }
3174   
3175 static const struct atmdev_ops ops = {  
3176         .open           = ia_open,  
3177         .close          = ia_close,  
3178         .ioctl          = ia_ioctl,  
3179         .getsockopt     = ia_getsockopt,  
3180         .setsockopt     = ia_setsockopt,  
3181         .send           = ia_send,  
3182         .sg_send        = ia_sg_send,  
3183         .phy_put        = ia_phy_put,  
3184         .phy_get        = ia_phy_get,  
3185         .change_qos     = ia_change_qos,  
3186         .proc_read      = ia_proc_read,
3187         .owner          = THIS_MODULE,
3188 };  
3189           
3190 static int __devinit ia_init_one(struct pci_dev *pdev,
3191                                  const struct pci_device_id *ent)
3192 {  
3193         struct atm_dev *dev;  
3194         IADEV *iadev;  
3195         unsigned long flags;
3196         int ret;
3197
3198         iadev = kmalloc(sizeof(*iadev), GFP_KERNEL); 
3199         if (!iadev) {
3200                 ret = -ENOMEM;
3201                 goto err_out;
3202         }
3203         memset(iadev, 0, sizeof(*iadev));
3204         iadev->pci = pdev;
3205
3206         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3207                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3208         if (pci_enable_device(pdev)) {
3209                 ret = -ENODEV;
3210                 goto err_out_free_iadev;
3211         }
3212         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3213         if (!dev) {
3214                 ret = -ENOMEM;
3215                 goto err_out_disable_dev;
3216         }
3217         INPH_IA_DEV(dev) = iadev; 
3218         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3219         IF_INIT(printk("dev_id = %p iadev->LineRate = %d \n", dev,
3220                 iadev->LineRate);)
3221
3222         ia_dev[iadev_count] = iadev;
3223         _ia_dev[iadev_count] = dev;
3224         iadev_count++;
3225         spin_lock_init(&iadev->misc_lock);
3226         /* First fixes first. I don't want to think about this now. */
3227         spin_lock_irqsave(&iadev->misc_lock, flags); 
3228         if (ia_init(dev) || ia_start(dev)) {  
3229                 IF_INIT(printk("IA register failed!\n");)
3230                 iadev_count--;
3231                 ia_dev[iadev_count] = NULL;
3232                 _ia_dev[iadev_count] = NULL;
3233                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3234                 ret = -EINVAL;
3235                 goto err_out_deregister_dev;
3236         }
3237         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3238         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3239
3240         iadev->next_board = ia_boards;  
3241         ia_boards = dev;  
3242
3243         pci_set_drvdata(pdev, dev);
3244
3245         return 0;
3246
3247 err_out_deregister_dev:
3248         atm_dev_deregister(dev);  
3249 err_out_disable_dev:
3250         pci_disable_device(pdev);
3251 err_out_free_iadev:
3252         kfree(iadev);
3253 err_out:
3254         return ret;
3255 }
3256
3257 static void __devexit ia_remove_one(struct pci_dev *pdev)
3258 {
3259         struct atm_dev *dev = pci_get_drvdata(pdev);
3260         IADEV *iadev = INPH_IA_DEV(dev);
3261
3262         ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10); 
3263         udelay(1);
3264
3265         /* De-register device */  
3266         free_irq(iadev->irq, dev);
3267         iadev_count--;
3268         ia_dev[iadev_count] = NULL;
3269         _ia_dev[iadev_count] = NULL;
3270         atm_dev_deregister(dev);
3271         IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)
3272
3273         iounmap((void *) iadev->base);  
3274         pci_disable_device(pdev);
3275
3276         ia_free_rx(iadev);
3277         ia_free_tx(iadev);
3278
3279         kfree(iadev);
3280 }
3281
3282 static struct pci_device_id ia_pci_tbl[] = {
3283         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3284         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3285         { 0,}
3286 };
3287 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3288
3289 static struct pci_driver ia_driver = {
3290         .name =         DEV_LABEL,
3291         .id_table =     ia_pci_tbl,
3292         .probe =        ia_init_one,
3293         .remove =       __devexit_p(ia_remove_one),
3294 };
3295
3296 static int __init ia_module_init(void)
3297 {
3298         int ret;
3299
3300         ret = pci_module_init(&ia_driver);
3301         if (ret >= 0) {
3302                 ia_timer.expires = jiffies + 3*HZ;
3303                 add_timer(&ia_timer); 
3304         } else
3305                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3306         return ret;
3307 }
3308
3309 static void __exit ia_module_exit(void)
3310 {
3311         pci_unregister_driver(&ia_driver);
3312
3313         del_timer(&ia_timer);
3314 }
3315
3316 module_init(ia_module_init);
3317 module_exit(ia_module_exit);