1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with PCI chip/adapter(s):
5  *          LSIFC9xx/LSI409xx Fibre Channel
6  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7  *
8  *  Credits:
9  *      This driver would not exist if not for Alan Cox's development
10  *      of the linux i2o driver.
11  *
12  *      Special thanks goes to the I2O LAN driver people at the
13  *      University of Helsinki, who, unbeknownst to them, provided
14  *      the inspiration and initial structure for this driver.
15  *
16  *      A huge debt of gratitude is owed to David S. Miller (DaveM)
17  *      for fixing much of the stupid and broken stuff in the early
18  *      driver while porting to sparc64 platform.  THANK YOU!
19  *
20  *      A really huge debt of gratitude is owed to Eddie C. Dost
21  *      for gobs of hard work fixing and optimizing LAN code.
22  *      THANK YOU!
23  *
24  *      (see also mptbase.c)
25  *
26  *  Copyright (c) 2000-2003 LSI Logic Corporation
27  *  Originally By: Noah Romer
28  *  (mailto:mpt_linux_developer@lsil.com)
29  *
30  *  $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
31  */
32 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
33 /*
34     This program is free software; you can redistribute it and/or modify
35     it under the terms of the GNU General Public License as published by
36     the Free Software Foundation; version 2 of the License.
37
38     This program is distributed in the hope that it will be useful,
39     but WITHOUT ANY WARRANTY; without even the implied warranty of
40     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41     GNU General Public License for more details.
42
43     NO WARRANTY
44     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48     solely responsible for determining the appropriateness of using and
49     distributing the Program and assumes all risks associated with its
50     exercise of rights under this Agreement, including but not limited to
51     the risks and costs of program errors, damage to or loss of data,
52     programs or equipment, and unavailability or interruption of operations.
53
54     DISCLAIMER OF LIABILITY
55     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
62
63     You should have received a copy of the GNU General Public License
64     along with this program; if not, write to the Free Software
65     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
66 */
67
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69 /*
70  * Define statements used for debugging
71  */
72 //#define MPT_LAN_IO_DEBUG
73
74 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
75
76 #include "mptlan.h"
77 #include <linux/init.h>
78 #include <linux/module.h>
79 #include <linux/fs.h>
80
81 #define MYNAM           "mptlan"
82
83 MODULE_LICENSE("GPL");
84
85 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
86 /*
87  * MPT LAN message sizes without variable part.
88  */
89 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
90         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
91
92 #define MPT_LAN_TRANSACTION32_SIZE \
93         (sizeof(SGETransaction32_t) - sizeof(u32))
94
95 /*
96  *  Fusion MPT LAN private structures
97  */
98
99 struct NAA_Hosed {
100         u16 NAA;
101         u8 ieee[FC_ALEN];
102         struct NAA_Hosed *next;
103 };
104
105 struct BufferControl {
106         struct sk_buff  *skb;
107         dma_addr_t      dma;
108         unsigned int    len;
109 };
110
111 struct mpt_lan_priv {
112         MPT_ADAPTER *mpt_dev;
113         u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
114
115         atomic_t buckets_out;           /* number of unused buckets on IOC */
116         int bucketthresh;               /* Send more when this many left */
117
118         int *mpt_txfidx; /* Free Tx Context list */
119         int mpt_txfidx_tail;
120         spinlock_t txfidx_lock;
121
122         int *mpt_rxfidx; /* Free Rx Context list */
123         int mpt_rxfidx_tail;
124         spinlock_t rxfidx_lock;
125
126         struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
127         struct BufferControl *SendCtl;  /* Send BufferControl structs */
128
129         int max_buckets_out;            /* Max buckets to send to IOC */
130         int tx_max_out;                 /* IOC's Tx queue len */
131
132         u32 total_posted;
133         u32 total_received;
134         struct net_device_stats stats;  /* Per device statistics */
135
136         struct mpt_work_struct post_buckets_task;
137         unsigned long post_buckets_active;
138 };
139
140 struct mpt_lan_ohdr {
141         u16     dtype;
142         u8      daddr[FC_ALEN];
143         u16     stype;
144         u8      saddr[FC_ALEN];
145 };
146
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
148
149 /*
150  *  Forward protos...
151  */
152 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153                        MPT_FRAME_HDR *reply);
154 static int  mpt_lan_open(struct net_device *dev);
155 static int  mpt_lan_reset(struct net_device *dev);
156 static int  mpt_lan_close(struct net_device *dev);
157 static void mpt_lan_post_receive_buckets(void *dev_id);
158 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
159                                            int priority);
160 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161 static int  mpt_lan_receive_post_reply(struct net_device *dev,
162                                        LANReceivePostReply_t *pRecvRep);
163 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164 static int  mpt_lan_send_reply(struct net_device *dev,
165                                LANSendReply_t *pSendRep);
166 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169                                          struct net_device *dev);
170
171 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
172 /*
173  *  Fusion MPT LAN private data
174  */
175 static int LanCtx = -1;
176
177 static u32 max_buckets_out = 127;
178 static u32 tx_max_out_p = 127 - 16;
179
180 static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
181
182 #ifdef QLOGIC_NAA_WORKAROUND
183 static struct NAA_Hosed *mpt_bad_naa = NULL;
184 rwlock_t bad_naa_lock;
185 #endif
186
187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
188 /*
189  * Fusion MPT LAN external data
190  */
191 extern int mpt_lan_index;
192
193 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
194 /**
195  *      lan_reply - Handle all data sent from the hardware.
196  *      @ioc: Pointer to MPT_ADAPTER structure
197  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
198  *      @reply: Pointer to MPT reply frame
199  *
200  *      Returns 1 indicating original alloc'd request frame ptr
201  *      should be freed, or 0 if it shouldn't.
202  */
203 static int
204 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
205 {
206         struct net_device *dev = mpt_landev[ioc->id];
207         int FreeReqFrame = 0;
208
209         dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
210                   IOC_AND_NETDEV_NAMES_s_s(dev)));
211
212 //      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
213 //                      mf, reply));
214
215         if (mf == NULL) {
216                 u32 tmsg = CAST_PTR_TO_U32(reply);
217
218                 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
219                                 IOC_AND_NETDEV_NAMES_s_s(dev),
220                                 tmsg));
221
222                 switch (GET_LAN_FORM(tmsg)) {
223
224                 // NOTE!  (Optimization) First case here is now caught in
225                 //  mptbase.c::mpt_interrupt() routine and callback here
226                 //  is now skipped for this case!  20001218 -sralston
227 #if 0
228                 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
229 //                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
230 //                                "MessageContext turbo reply received\n"));
231                         FreeReqFrame = 1;
232                         break;
233 #endif
234
235                 case LAN_REPLY_FORM_SEND_SINGLE:
236 //                      dioprintk((MYNAM "/lan_reply: "
237 //                                "calling mpt_lan_send_reply (turbo)\n"));
238
239                         // Potential BUG here?  -sralston
240                         //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
241                         //  If/when mpt_lan_send_turbo would return 1 here,
242                         //  calling routine (mptbase.c|mpt_interrupt)
243                         //  would Oops because mf has already been set
244                         //  to NULL.  So after return from this func,
245                         //  mpt_interrupt() will attempt to put (NULL) mf ptr
246                         //  item back onto its adapter FreeQ - Oops!:-(
247                         //  It's Ok, since mpt_lan_send_turbo() *currently*
248                         //  always returns 0, but..., just in case:
249
250                         (void) mpt_lan_send_turbo(dev, tmsg);
251                         FreeReqFrame = 0;
252
253                         break;
254
255                 case LAN_REPLY_FORM_RECEIVE_SINGLE:
256 //                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
257 //                                "rcv-Turbo = %08x\n", tmsg));
258                         mpt_lan_receive_post_turbo(dev, tmsg);
259                         break;
260
261                 default:
262                         printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
263                                 "that I don't know what to do with\n");
264
265                         /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
266
267                         break;
268                 }
269
270                 return FreeReqFrame;
271         }
272
273 //      msg = (u32 *) reply;
274 //      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
275 //                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
276 //                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
277 //      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
278 //                reply->u.hdr.Function));
279
280         switch (reply->u.hdr.Function) {
281
282         case MPI_FUNCTION_LAN_SEND:
283         {
284                 LANSendReply_t *pSendRep;
285
286                 pSendRep = (LANSendReply_t *) reply;
287                 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
288                 break;
289         }
290
291         case MPI_FUNCTION_LAN_RECEIVE:
292         {
293                 LANReceivePostReply_t *pRecvRep;
294
295                 pRecvRep = (LANReceivePostReply_t *) reply;
296                 if (pRecvRep->NumberOfContexts) {
297                         mpt_lan_receive_post_reply(dev, pRecvRep);
298                         if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
299                                 FreeReqFrame = 1;
300                 } else
301                         dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
302                                   "ReceivePostReply received.\n"));
303                 break;
304         }
305
306         case MPI_FUNCTION_LAN_RESET:
307                 /* Just a default reply. Might want to check it to
308                  * make sure that everything went ok.
309                  */
310                 FreeReqFrame = 1;
311                 break;
312
313         case MPI_FUNCTION_EVENT_NOTIFICATION:
314         case MPI_FUNCTION_EVENT_ACK:
315                 /* UPDATE!  20010120 -sralston
316                  *  _EVENT_NOTIFICATION should NOT come down this path any more.
317                  *  Should be routed to mpt_lan_event_process(), but just in case...
318                  */
319                 FreeReqFrame = 1;
320                 break;
321
322         default:
323                 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
324                         "reply that I don't know what to do with\n");
325
326                 /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
327                 FreeReqFrame = 1;
328
329                 break;
330         }
331
332         return FreeReqFrame;
333 }
334
335 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
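/**
 *      mpt_lan_ioc_reset - IOC reset notification handler
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @reset_phase: MPT_IOC_PRE_RESET or the post-reset phase
 *
 *      On pre-reset: stops the Tx queue, zeroes the outstanding-bucket
 *      count and rebuilds the Rx free-context list.  On any other phase:
 *      reposts receive buckets and wakes the Tx queue.  Always returns 1.
 */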
336 static int
337 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
338 {
339         struct net_device *dev = mpt_landev[ioc->id];
340         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
341
342         dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
343                         reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"));
344
345         if (priv->mpt_rxfidx == NULL)
346                 return (1);
347
348         if (reset_phase == MPT_IOC_PRE_RESET) {
349                 int i;
350                 unsigned long flags;
351
352                 netif_stop_queue(dev);
353
354                 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
355
356                 atomic_set(&priv->buckets_out, 0);
357
358                 /* Reset Rx Free Tail index and re-populate the queue. */
359                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
360                 priv->mpt_rxfidx_tail = -1;
361                 for (i = 0; i < priv->max_buckets_out; i++)
362                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
363                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
364         } else {
365                 mpt_lan_post_receive_buckets(dev);
366                 netif_wake_queue(dev);
367         }
368
369         return 1;
370 }
371
372 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
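/**
 *      mpt_lan_event_process - MPT event notification handler
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @pEvReply: Pointer to the event notification reply frame
 *
 *      No event currently needs LAN-specific handling; the switch below is
 *      a placeholder.  AckRequired handling is done in mptbase.c (see the
 *      note at the end of this routine).  Returns 1.
 */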
373 static int
374 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
375 {
376         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
377
378         switch (le32_to_cpu(pEvReply->Event)) {
379         case MPI_EVENT_NONE:                            /* 00 */
380         case MPI_EVENT_LOG_DATA:                        /* 01 */
381         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
382         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
383         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
384         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
385         case MPI_EVENT_RESCAN:                          /* 06 */
386                 /* Ok, do we need to do anything here? As far as
387                    I can tell, this is when a new device gets added
388                    to the loop. */
389         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
390         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
391         case MPI_EVENT_LOGOUT:                          /* 09 */
392         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
393         default:
394                 break;
395         }
396
397         /*
398          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
399          *  Do NOT do it here now!
400          */
401
402         return 1;
403 }
404
405 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
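/**
 *      mpt_lan_open - net_device open handler
 *      @dev: Pointer to the net_device
 *
 *      Resets the LAN port on the IOC, allocates the Tx/Rx free-context
 *      lists and BufferControl arrays, posts the initial receive buckets,
 *      registers for MPT event notifications and starts the Tx queue.
 *      Returns 0 on success or -ENOMEM if an allocation fails.
 */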
406 static int
407 mpt_lan_open(struct net_device *dev)
408 {
409         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
410         int i;
411
412         if (mpt_lan_reset(dev) != 0) {
413                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
414
415                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed. ");
416
417                 if (mpt_dev->active)
418                         printk ("The ioc is active. Perhaps it needs to be"
419                                 " reset?\n");
420                 else
421                         printk ("The ioc is inactive, most likely in the "
422                                 "process of being reset. Please try again in "
423                                 "a moment.\n");
424         }
425
426         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
427         if (priv->mpt_txfidx == NULL)
428                 goto out;
429         priv->mpt_txfidx_tail = -1;
430
431         priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
432                                 GFP_KERNEL);
433         if (priv->SendCtl == NULL)
434                 goto out_mpt_txfidx;
435         for (i = 0; i < priv->tx_max_out; i++) {
436                 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
437                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
438         }
439
440         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
441
442         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
443                                    GFP_KERNEL);
444         if (priv->mpt_rxfidx == NULL)
445                 goto out_SendCtl;
446         priv->mpt_rxfidx_tail = -1;
447
448         priv->RcvCtl = kmalloc(priv->max_buckets_out *
449                                                 sizeof(struct BufferControl),
450                                GFP_KERNEL);
451         if (priv->RcvCtl == NULL)
452                 goto out_mpt_rxfidx;
453         for (i = 0; i < priv->max_buckets_out; i++) {
454                 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
455                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
456         }
457
458 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
459 /**/    for (i = 0; i < priv->tx_max_out; i++)
460 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
461 /**/    dlprintk(("\n"));
462
463         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
464
465         mpt_lan_post_receive_buckets(dev);
466         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
467                         IOC_AND_NETDEV_NAMES_s_s(dev));
468
469         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
470                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
471                         " Notifications. This is a bad thing! We're not going "
472                         "to go ahead, but I'd be leery of system stability at "
473                         "this point.\n");
474         }
475
476         netif_start_queue(dev);
477         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
478
479         return 0;
480 out_mpt_rxfidx:
481         kfree(priv->mpt_rxfidx);
482         priv->mpt_rxfidx = NULL;
483 out_SendCtl:
484         kfree(priv->SendCtl);
485         priv->SendCtl = NULL;
486 out_mpt_txfidx:
487         kfree(priv->mpt_txfidx);
488         priv->mpt_txfidx = NULL;
489 out:    return -ENOMEM;
490 }
491
492 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
493 /* Send a LanReset message to the FW. This should result in the FW returning
494    any buckets it still has. */
495 static int
496 mpt_lan_reset(struct net_device *dev)
497 {
498         MPT_FRAME_HDR *mf;
499         LANResetRequest_t *pResetReq;
500         struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
501
502         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id);
503
504         if (mf == NULL) {
505 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
506                 "Unable to allocate a request frame.\n"));
507 */
508                 return -1;
509         }
510
511         pResetReq = (LANResetRequest_t *) mf;
512
513         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
514         pResetReq->ChainOffset  = 0;
515         pResetReq->Reserved     = 0;
516         pResetReq->PortNumber   = priv->pnum;
517         pResetReq->MsgFlags     = 0;
518         pResetReq->Reserved2    = 0;
519
520         mpt_put_msg_frame(LanCtx, priv->mpt_dev->id, mf);
521
522         return 0;
523 }
524
525 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
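/**
 *      mpt_lan_close - net_device stop handler
 *      @dev: Pointer to the net_device
 *
 *      Deregisters the event handler, stops the Tx queue, resets the LAN
 *      port and briefly waits for outstanding buckets to be returned, then
 *      unmaps and frees all remaining Rx and Tx buffers and their
 *      bookkeeping arrays.  Returns 0.
 */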
526 static int
527 mpt_lan_close(struct net_device *dev)
528 {
529         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
530         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
531         unsigned int timeout;
532         int i;
533
534         dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
535
536         mpt_event_deregister(LanCtx);
537
538         dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
539                   "since driver was loaded, %d still out\n",
540                   priv->total_posted,atomic_read(&priv->buckets_out)));
541
542         netif_stop_queue(dev);
543
544         mpt_lan_reset(dev);
545
546         timeout = 2 * HZ;
547         while (atomic_read(&priv->buckets_out) && --timeout) {
548                 set_current_state(TASK_INTERRUPTIBLE);
549                 schedule_timeout(1);
550         }
551
552         for (i = 0; i < priv->max_buckets_out; i++) {
553                 if (priv->RcvCtl[i].skb != NULL) {
554 /**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
555 /**/                              "is still out\n", i));
556                         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
557                                          priv->RcvCtl[i].len,
558                                          PCI_DMA_FROMDEVICE);
559                         dev_kfree_skb(priv->RcvCtl[i].skb);
560                 }
561         }
562
563         kfree (priv->RcvCtl);
564         kfree (priv->mpt_rxfidx);
565
566         for (i = 0; i < priv->tx_max_out; i++) {
567                 if (priv->SendCtl[i].skb != NULL) {
568                         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
569                                          priv->SendCtl[i].len,
570                                          PCI_DMA_TODEVICE);
571                         dev_kfree_skb(priv->SendCtl[i].skb);
572                 }
573         }
574
575         kfree(priv->SendCtl);
576         kfree(priv->mpt_txfidx);
577
578         atomic_set(&priv->buckets_out, 0);
579
580         printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
581                         IOC_AND_NETDEV_NAMES_s_s(dev));
582
583         return 0;
584 }
585
586 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
587 static struct net_device_stats *
588 mpt_lan_get_stats(struct net_device *dev)
589 {
590         struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
591
592         return (struct net_device_stats *) &priv->stats;
593 }
594
595 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
596 static int
597 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
598 {
599         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
600                 return -EINVAL;
601         dev->mtu = new_mtu;
602         return 0;
603 }
604
605 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
606 /* Tx timeout handler. */
607 static void
608 mpt_lan_tx_timeout(struct net_device *dev)
609 {
610         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
611         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
612
613         if (mpt_dev->active) {
614                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
615                 netif_wake_queue(dev);
616         }
617 }
618
619 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
620 //static inline int
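/**
 *      mpt_lan_send_turbo - handle a turbo-form LAN send completion
 *      @dev: Pointer to the net_device
 *      @tmsg: Turbo reply containing the Tx buffer context
 *
 *      Updates Tx statistics, unmaps and frees the completed skb, returns
 *      its context to the Tx free list and wakes the queue.  Always
 *      returns 0 (no request frame to free).
 */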
621 static int
622 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
623 {
624         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
625         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
626         struct sk_buff *sent;
627         unsigned long flags;
628         u32 ctx;
629
630         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
631         sent = priv->SendCtl[ctx].skb;
632
633         priv->stats.tx_packets++;
634         priv->stats.tx_bytes += sent->len;
635
636         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
637                         IOC_AND_NETDEV_NAMES_s_s(dev),
638                         __FUNCTION__, sent));
639
640         priv->SendCtl[ctx].skb = NULL;
641         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
642                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
643         dev_kfree_skb_irq(sent);
644
645         spin_lock_irqsave(&priv->txfidx_lock, flags);
646         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
647         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
648
649         netif_wake_queue(dev);
650         return 0;
651 }
652
653 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
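/**
 *      mpt_lan_send_reply - handle a non-turbo LAN send reply
 *      @dev: Pointer to the net_device
 *      @pSendRep: Pointer to the LANSendReply frame
 *
 *      Checks IOCStatus and updates Tx statistics, then unmaps and frees
 *      each completed skb and returns its context to the Tx free list.
 *      Returns 1 if the request frame should be freed (i.e. this is not a
 *      continuation reply), otherwise 0.
 */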
654 static int
655 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
656 {
657         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
658         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
659         struct sk_buff *sent;
660         unsigned long flags;
661         int FreeReqFrame = 0;
662         u32 *pContext;
663         u32 ctx;
664         u8 count;
665
666         count = pSendRep->NumberOfContexts;
667
668         dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
669                  le16_to_cpu(pSendRep->IOCStatus)));
670
671         /* Add check for Loginfo Flag in IOCStatus */
672
673         switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
674         case MPI_IOCSTATUS_SUCCESS:
675                 priv->stats.tx_packets += count;
676                 break;
677
678         case MPI_IOCSTATUS_LAN_CANCELED:
679         case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
680                 break;
681
682         case MPI_IOCSTATUS_INVALID_SGL:
683                 priv->stats.tx_errors += count;
684                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
685                                 IOC_AND_NETDEV_NAMES_s_s(dev));
686                 goto out;
687
688         default:
689                 priv->stats.tx_errors += count;
690                 break;
691         }
692
693         pContext = &pSendRep->BufferContext;
694
695         spin_lock_irqsave(&priv->txfidx_lock, flags);
696         while (count > 0) {
697                 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
698
699                 sent = priv->SendCtl[ctx].skb;
700                 priv->stats.tx_bytes += sent->len;
701
702                 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
703                                 IOC_AND_NETDEV_NAMES_s_s(dev),
704                                 __FUNCTION__, sent));
705
706                 priv->SendCtl[ctx].skb = NULL;
707                 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
708                                  priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
709                 dev_kfree_skb_irq(sent);
710
711                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
712
713                 pContext++;
714                 count--;
715         }
716         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
717
718 out:
719         if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
720                 FreeReqFrame = 1;
721
722         netif_wake_queue(dev);
723         return FreeReqFrame;
724 }
725
726 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
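/**
 *      mpt_lan_sdu_send - hard_start_xmit handler
 *      @skb: Packet to transmit
 *      @dev: Pointer to the net_device
 *
 *      Grabs a free Tx context and a request frame, maps the packet for
 *      DMA, and builds a LANSend request: a transaction element carrying
 *      the NAA and destination MAC followed by a single 64-bit simple SGE.
 *      Returns 0 on success, or 1 (with the queue stopped) if no context
 *      or request frame is available.
 */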
727 static int
728 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
729 {
730         struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
731         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
732         MPT_FRAME_HDR *mf;
733         LANSendRequest_t *pSendReq;
734         SGETransaction32_t *pTrans;
735         SGESimple64_t *pSimple;
736         dma_addr_t dma;
737         unsigned long flags;
738         int ctx;
739         u16 cur_naa = 0x1000;
740
741         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
742                         __FUNCTION__, skb));
743
744         spin_lock_irqsave(&priv->txfidx_lock, flags);
745         if (priv->mpt_txfidx_tail < 0) {
746                 netif_stop_queue(dev);
747                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
748
749                 printk (KERN_ERR "%s: no tx context available: %u\n",
750                         __FUNCTION__, priv->mpt_txfidx_tail);
751                 return 1;
752         }
753
754         mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
755         if (mf == NULL) {
756                 netif_stop_queue(dev);
757                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
758
759                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
760                         __FUNCTION__);
761                 return 1;
762         }
763
764         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
765         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
766
767 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
768 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
769
770         pSendReq = (LANSendRequest_t *) mf;
771
772         /* Set the mac.raw pointer, since this apparently isn't getting
773          * done before we get the skb. Pull the data pointer past the mac data.
774          */
775         skb->mac.raw = skb->data;
776         skb_pull(skb, 12);
777
778         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
779                              PCI_DMA_TODEVICE);
780
781         priv->SendCtl[ctx].skb = skb;
782         priv->SendCtl[ctx].dma = dma;
783         priv->SendCtl[ctx].len = skb->len;
784
785         /* Message Header */
786         pSendReq->Reserved    = 0;
787         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
788         pSendReq->ChainOffset = 0;
789         pSendReq->Reserved2   = 0;
790         pSendReq->MsgFlags    = 0;
791         pSendReq->PortNumber  = priv->pnum;
792
793         /* Transaction Context Element */
794         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
795
796         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
797         pTrans->ContextSize   = sizeof(u32);
798         pTrans->DetailsLength = 2 * sizeof(u32);
799         pTrans->Flags         = 0;
800         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
801
802 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
803 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
804 //                      ctx, skb, skb->data));
805
806 #ifdef QLOGIC_NAA_WORKAROUND
807 {
808         struct NAA_Hosed *nh;
809
810         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
811            RFC 2625. The longer I look at this, the more my opinion of Qlogic
812            drops. */
813         read_lock_irq(&bad_naa_lock);
814         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
815                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
816                     (nh->ieee[1] == skb->mac.raw[1]) &&
817                     (nh->ieee[2] == skb->mac.raw[2]) &&
818                     (nh->ieee[3] == skb->mac.raw[3]) &&
819                     (nh->ieee[4] == skb->mac.raw[4]) &&
820                     (nh->ieee[5] == skb->mac.raw[5])) {
821                         cur_naa = nh->NAA;
822                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
823                                   "= %04x.\n", cur_naa));
824                         break;
825                 }
826         }
827         read_unlock_irq(&bad_naa_lock);
828 }
829 #endif
830
831         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
832                                                     (skb->mac.raw[0] <<  8) |
833                                                     (skb->mac.raw[1] <<  0));
834         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
835                                                     (skb->mac.raw[3] << 16) |
836                                                     (skb->mac.raw[4] <<  8) |
837                                                     (skb->mac.raw[5] <<  0));
838
839         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
840
841         /* If we ever decide to send more than one Simple SGE per LANSend, then
842            we will need to make sure that LAST_ELEMENT only gets set on the
843            last one. Otherwise, bad voodoo and evil funkiness will commence. */
844         pSimple->FlagsLength = cpu_to_le32(
845                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
846                           MPI_SGE_FLAGS_END_OF_BUFFER |
847                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
848                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
849                           MPI_SGE_FLAGS_HOST_TO_IOC |
850                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
851                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
852                         skb->len);
853         pSimple->Address.Low = cpu_to_le32((u32) dma);
854         if (sizeof(dma_addr_t) > sizeof(u32))
855                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
856         else
857                 pSimple->Address.High = 0;
858
859         mpt_put_msg_frame (LanCtx, mpt_dev->id, mf);
860         dev->trans_start = jiffies;
861
862         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
863                         IOC_AND_NETDEV_NAMES_s_s(dev),
864                         le32_to_cpu(pSimple->FlagsLength)));
865
866         return 0;
867 }
868
869 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
870 /*
871  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
872  */
873 static inline void
874 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
875 {
876         struct mpt_lan_priv *priv = dev->priv;
877         
878         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
879                 if (priority) {
880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
881                         schedule_work(&priv->post_buckets_task);
882 #else
883                         queue_task(&priv->post_buckets_task, &tq_immediate);
884                         mark_bh(IMMEDIATE_BH);
885 #endif
886                 } else {
887 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
888                         schedule_delayed_work(&priv->post_buckets_task, 1);
889 #else
890                         queue_task(&priv->post_buckets_task, &tq_timer);
891 #endif
892                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
893                                    "timer.\n"));
894                 }
895                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
896                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
897         }
898 }
899
900 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
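/**
 *      mpt_lan_receive_skb - hand a received packet to the network stack
 *      @dev: Pointer to the net_device
 *      @skb: Received packet
 *
 *      Sets the protocol via mpt_lan_type_trans(), updates Rx statistics
 *      and passes the skb to netif_rx().  If the number of outstanding
 *      buckets has fallen below the threshold, the post_buckets task is
 *      scheduled.  Returns 0.
 */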
901 static inline int
902 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
903 {
904         struct mpt_lan_priv *priv = dev->priv;
905
906         skb->protocol = mpt_lan_type_trans(skb, dev);
907
908         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
909                  "delivered to upper level.\n",
910                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
911
912         priv->stats.rx_bytes += skb->len;
913         priv->stats.rx_packets++;
914
915         skb->dev = dev;
916         netif_rx(skb);
917
918         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
919                  atomic_read(&priv->buckets_out)));
920
921         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
922                 mpt_lan_wake_post_buckets_task(dev, 1);
923
924         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
925                   "remaining, %d received back since sod\n",
926                   atomic_read(&priv->buckets_out), priv->total_received));
927
928         return 0;
929 }
930
931 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
932 //static inline int
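/**
 *      mpt_lan_receive_post_turbo - handle a turbo-form receive completion
 *      @dev: Pointer to the net_device
 *      @tmsg: Turbo reply containing the bucket context and packet length
 *
 *      Short packets (below MPT_LAN_RX_COPYBREAK) are copied into a new
 *      skb so the bucket stays mapped for reuse; larger packets hand the
 *      bucket's skb up directly and unmap it.  The context is returned to
 *      the Rx free list either way.
 */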
933 static int
934 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
935 {
936         struct mpt_lan_priv *priv = dev->priv;
937         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
938         struct sk_buff *skb, *old_skb;
939         unsigned long flags;
940         u32 ctx, len;
941
942         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
943         skb = priv->RcvCtl[ctx].skb;
944
945         len = GET_LAN_PACKET_LENGTH(tmsg);
946
947         if (len < MPT_LAN_RX_COPYBREAK) {
948                 old_skb = skb;
949
950                 skb = (struct sk_buff *)dev_alloc_skb(len);
951                 if (!skb) {
952                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
953                                         IOC_AND_NETDEV_NAMES_s_s(dev),
954                                         __FILE__, __LINE__);
955                         return -ENOMEM;
956                 }
957
958                 pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
959                                     priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
960
961                 memcpy(skb_put(skb, len), old_skb->data, len);
962
963                 goto out;
964         }
965
966         skb_put(skb, len);
967
968         priv->RcvCtl[ctx].skb = NULL;
969
970         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
971                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
972
973 out:
974         spin_lock_irqsave(&priv->rxfidx_lock, flags);
975         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
976         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
977
978         atomic_dec(&priv->buckets_out);
979         priv->total_received++;
980
981         return mpt_lan_receive_skb(dev, skb);
982 }
983
984 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
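/**
 *      mpt_lan_receive_post_free - free buckets returned by a LanReset
 *      @dev: Pointer to the net_device
 *      @pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *      Called when the IOC cancels posted buckets (IOCStatus LAN_CANCELED):
 *      each returned bucket is unmapped, its skb freed and its context put
 *      back on the Rx free list.  Returns 0.
 */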
985 static int
986 mpt_lan_receive_post_free(struct net_device *dev,
987                           LANReceivePostReply_t *pRecvRep)
988 {
989         struct mpt_lan_priv *priv = dev->priv;
990         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
991         unsigned long flags;
992         struct sk_buff *skb;
993         u32 ctx;
994         int count;
995         int i;
996
997         count = pRecvRep->NumberOfContexts;
998
999 /**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
1000                   "IOC returned %d buckets, freeing them...\n", count));
1001
1002         spin_lock_irqsave(&priv->rxfidx_lock, flags);
1003         for (i = 0; i < count; i++) {
1004                 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1005
1006                 skb = priv->RcvCtl[ctx].skb;
1007
1008 //              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
1009 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1010 //              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
1011 //                              priv, &(priv->buckets_out)));
1012 //              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
1013
1014                 priv->RcvCtl[ctx].skb = NULL;
1015                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1016                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1017                 dev_kfree_skb_any(skb);
1018
1019                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1020         }
1021         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1022
1023         atomic_sub(count, &priv->buckets_out);
1024
1025 //      for (i = 0; i < priv->max_buckets_out; i++)
1026 //              if (priv->RcvCtl[i].skb != NULL)
1027 //                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1028 //                                "is still out\n", i));
1029
1030 /*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1031                   count));
1032 */
1033 /**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1034 /**/              "remaining, %d received back since sod.\n",
1035 /**/              atomic_read(&priv->buckets_out), priv->total_received));
1036         return 0;
1037 }
1038
1039 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
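/**
 *      mpt_lan_receive_post_reply - handle a non-turbo receive reply
 *      @dev: Pointer to the net_device
 *      @pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *      Reassembles a packet that may span several buckets (or copies small
 *      packets), returns the used contexts to the Rx free list, checks the
 *      driver's bucket accounting against the IOC's BucketsRemaining and
 *      issues a LanReset if they diverge too far, then passes the skb to
 *      mpt_lan_receive_skb().
 */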
1040 static int
1041 mpt_lan_receive_post_reply(struct net_device *dev,
1042                            LANReceivePostReply_t *pRecvRep)
1043 {
1044         struct mpt_lan_priv *priv = dev->priv;
1045         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1046         struct sk_buff *skb, *old_skb;
1047         unsigned long flags;
1048         u32 len, ctx, offset;
1049         u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1050         int count;
1051         int i, l;
1052
1053         dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1054         dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1055                  le16_to_cpu(pRecvRep->IOCStatus)));
1056
1057         if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1058                                                 MPI_IOCSTATUS_LAN_CANCELED)
1059                 return mpt_lan_receive_post_free(dev, pRecvRep);
1060
1061         len = le32_to_cpu(pRecvRep->PacketLength);
1062         if (len == 0) {
1063                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1064                         "ReceivePostReply w/ PacketLength zero!\n",
1065                                 IOC_AND_NETDEV_NAMES_s_s(dev));
1066                 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1067                                 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1068                 return -1;
1069         }
1070
1071         ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1072         count  = pRecvRep->NumberOfContexts;
1073         skb    = priv->RcvCtl[ctx].skb;
1074
1075         offset = le32_to_cpu(pRecvRep->PacketOffset);
1076 //      if (offset != 0) {
1077 //              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1078 //                      "w/ PacketOffset %u\n",
1079 //                              IOC_AND_NETDEV_NAMES_s_s(dev),
1080 //                              offset);
1081 //      }
1082
1083         dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1084                         IOC_AND_NETDEV_NAMES_s_s(dev),
1085                         offset, len));
1086
1087         if (count > 1) {
1088                 int szrem = len;
1089
1090 //              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1091 //                      "for single packet, concatenating...\n",
1092 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1093
1094                 skb = (struct sk_buff *)dev_alloc_skb(len);
1095                 if (!skb) {
1096                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1097                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1098                                         __FILE__, __LINE__);
1099                         return -ENOMEM;
1100                 }
1101
1102                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1103                 for (i = 0; i < count; i++) {
1104
1105                         ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1106                         old_skb = priv->RcvCtl[ctx].skb;
1107
1108                         l = priv->RcvCtl[ctx].len;
1109                         if (szrem < l)
1110                                 l = szrem;
1111
1112 //                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1113 //                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1114 //                                      i, l));
1115
1116                         pci_dma_sync_single(mpt_dev->pcidev,
1117                                             priv->RcvCtl[ctx].dma,
1118                                             priv->RcvCtl[ctx].len,
1119                                             PCI_DMA_FROMDEVICE);
1120                         memcpy(skb_put(skb, l), old_skb->data, l);
1121
1122                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1123                         szrem -= l;
1124                 }
1125                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1126
1127         } else if (len < MPT_LAN_RX_COPYBREAK) {
1128
1129                 old_skb = skb;
1130
1131                 skb = (struct sk_buff *)dev_alloc_skb(len);
1132                 if (!skb) {
1133                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1134                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1135                                         __FILE__, __LINE__);
1136                         return -ENOMEM;
1137                 }
1138
1139                 pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1140                                     priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1141
1142                 memcpy(skb_put(skb, len), old_skb->data, len);
1143
1144                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1145                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1146                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1147
1148         } else {
1149                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1150
1151                 priv->RcvCtl[ctx].skb = NULL;
1152
1153                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1154                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1155                 priv->RcvCtl[ctx].dma = 0;
1156
1157                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1158                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1159
1160                 skb_put(skb,len);
1161         }
1162
1163         atomic_sub(count, &priv->buckets_out);
1164         priv->total_received += count;
1165
1166         if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1167                 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1168                         "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1169                                 IOC_AND_NETDEV_NAMES_s_s(dev),
1170                                 priv->mpt_rxfidx_tail,
1171                                 MPT_LAN_MAX_BUCKETS_OUT);
1172
1173                 panic("Damn it Jim! I'm a doctor, not a programmer! "
1174                                 "Oh, wait a sec, I am a programmer. "
1175                                 "And, who's Jim?!?!\n"
1176                                 "Arrgghh! We've done it again!\n");
1177         }
1178
1179         if (remaining == 0)
1180                 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1181                         "(priv->buckets_out = %d)\n",
1182                         IOC_AND_NETDEV_NAMES_s_s(dev),
1183                         atomic_read(&priv->buckets_out));
1184         else if (remaining < 10)
1185                 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1186                         "(priv->buckets_out = %d)\n",
1187                         IOC_AND_NETDEV_NAMES_s_s(dev),
1188                         remaining, atomic_read(&priv->buckets_out));
1189         
1190         if ((remaining < priv->bucketthresh) &&
1191             ((atomic_read(&priv->buckets_out) - remaining) >
1192              MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1193                 
1194                 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1195                         "buckets_out count and fw's BucketsRemaining "
1196                         "count has crossed the threshold, issuing a "
1197                         "LanReset to clear the fw's hashtable. You may "
1198                         "want to check your /var/log/messages for \"CRC "
1199                         "error\" event notifications.\n");
1200                 
1201                 mpt_lan_reset(dev);
1202                 mpt_lan_wake_post_buckets_task(dev, 0);
1203         }
1204         
1205         return mpt_lan_receive_skb(dev, skb);
1206 }
1207
1208 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1209 /* Simple SGE's only at the moment */
1210
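/*
 *      mpt_lan_post_receive_buckets - replenish the IOC's receive buckets
 *
 *      Posts LANReceivePost requests until max_buckets_out buckets are
 *      outstanding or no request frame, Rx context or skb can be obtained.
 *      A bucket is reused if an skb of the right length is already mapped;
 *      otherwise a new skb is allocated and DMA-mapped.
 */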
1211 static void
1212 mpt_lan_post_receive_buckets(void *dev_id)
1213 {
1214         struct net_device *dev = dev_id;
1215         struct mpt_lan_priv *priv = dev->priv;
1216         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1217         MPT_FRAME_HDR *mf;
1218         LANReceivePostRequest_t *pRecvReq;
1219         SGETransaction32_t *pTrans;
1220         SGESimple64_t *pSimple;
1221         struct sk_buff *skb;
1222         dma_addr_t dma;
1223         u32 curr, buckets, count, max;
1224         u32 len = (dev->mtu + dev->hard_header_len + 4);
1225         unsigned long flags;
1226         int i;
1227
1228         curr = atomic_read(&priv->buckets_out);
1229         buckets = (priv->max_buckets_out - curr);
1230
1231         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1232                         IOC_AND_NETDEV_NAMES_s_s(dev),
1233                         __FUNCTION__, buckets, curr));
1234
1235         max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1236                         (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1237
1238         while (buckets) {
1239                 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
1240                 if (mf == NULL) {
1241                         printk (KERN_ERR "%s: Unable to alloc request frame\n",
1242                                 __FUNCTION__);
1243                         dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1244                                  __FUNCTION__, buckets));
1245                         goto out;
1246                 }
1247                 pRecvReq = (LANReceivePostRequest_t *) mf;
1248
1249                 count = buckets;
1250                 if (count > max)
1251                         count = max;
1252
1253                 pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1254                 pRecvReq->ChainOffset = 0;
1255                 pRecvReq->MsgFlags    = 0;
1256                 pRecvReq->PortNumber  = priv->pnum;
1257
1258                 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1259                 pSimple = NULL;
1260
1261                 for (i = 0; i < count; i++) {
1262                         int ctx;
1263
1264                         spin_lock_irqsave(&priv->rxfidx_lock, flags);
1265                         if (priv->mpt_rxfidx_tail < 0) {
1266                                 printk (KERN_ERR "%s: Can't alloc context\n",
1267                                         __FUNCTION__);
1268                                 spin_unlock_irqrestore(&priv->rxfidx_lock,
1269                                                        flags);
1270                                 break;
1271                         }
1272
1273                         ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1274
1275                         skb = priv->RcvCtl[ctx].skb;
1276                         if (skb && (priv->RcvCtl[ctx].len != len)) {
1277                                 pci_unmap_single(mpt_dev->pcidev,
1278                                                  priv->RcvCtl[ctx].dma,
1279                                                  priv->RcvCtl[ctx].len,
1280                                                  PCI_DMA_FROMDEVICE);
1281                                 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1282                                 skb = priv->RcvCtl[ctx].skb = NULL;
1283                         }
1284
1285                         if (skb == NULL) {
1286                                 skb = dev_alloc_skb(len);
1287                                 if (skb == NULL) {
1288                                         printk (KERN_WARNING
1289                                                 MYNAM "/%s: Can't alloc skb\n",
1290                                                 __FUNCTION__);
1291                                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1292                                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1293                                         break;
1294                                 }
1295
1296                                 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1297                                                      len, PCI_DMA_FROMDEVICE);
1298
1299                                 priv->RcvCtl[ctx].skb = skb;
1300                                 priv->RcvCtl[ctx].dma = dma;
1301                                 priv->RcvCtl[ctx].len = len;
1302                         }
1303
1304                         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1305
1306                         pTrans->ContextSize   = sizeof(u32);
1307                         pTrans->DetailsLength = 0;
1308                         pTrans->Flags         = 0;
1309                         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1310
1311                         pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1312
1313                         pSimple->FlagsLength = cpu_to_le32(
1314                                 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1315                                   MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1316                                   MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1317                         pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1318                         if (sizeof(dma_addr_t) > sizeof(u32))
1319                                 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1320                         else
1321                                 pSimple->Address.High = 0;
1322
1323                         pTrans = (SGETransaction32_t *) (pSimple + 1);
1324                 }
1325
1326                 if (pSimple == NULL) {
1327 /**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1328 /**/                            __FUNCTION__);
1329                         mpt_free_msg_frame(LanCtx, mpt_dev->id, mf);
1330                         goto out;
1331                 }
1332
1333                 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1334
1335                 pRecvReq->BucketCount = cpu_to_le32(i);
1336
1337 /*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
1338  *      for (i = 0; i < j + 2; i ++)
1339  *          printk (" %08x", le32_to_cpu(msg[i]));
1340  *      printk ("\n");
1341  */
1342
1343                 mpt_put_msg_frame(LanCtx, mpt_dev->id, mf);
1344
1345                 priv->total_posted += i;
1346                 buckets -= i;
1347                 atomic_add(i, &priv->buckets_out);
1348         }
1349
1350 out:
1351         dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1352                   __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1353         dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1354                   __FUNCTION__, priv->total_posted, priv->total_received));
1355
1356         clear_bit(0, &priv->post_buckets_active);
1357 }
1358
1359 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1360 static struct net_device *
1361 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1362 {
1363         struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1364         struct mpt_lan_priv *priv = NULL;
1365         u8 HWaddr[FC_ALEN], *a;
1366
1367         if (!dev)
1368                 return NULL;
1369
1370         dev->mtu = MPT_LAN_MTU;
1371
1372         priv = (struct mpt_lan_priv *) dev->priv;
1373
1374         priv->mpt_dev = mpt_dev;
1375         priv->pnum = pnum;
1376
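             /* Reposting of receive buckets is deferred to this work item;
              * the receive path only schedules it, and the post_buckets_active
              * bit keeps multiple schedules from stacking up.
              */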
1377         memset(&priv->post_buckets_task, 0, sizeof(struct mpt_work_struct));
1378         MPT_INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1379         priv->post_buckets_active = 0;
1380
1381         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1382                         __LINE__, dev->mtu + dev->hard_header_len + 4));
1383
1384         atomic_set(&priv->buckets_out, 0);
1385         priv->total_posted = 0;
1386         priv->total_received = 0;
1387         priv->max_buckets_out = max_buckets_out;
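             /* Never post more buckets than the port reported it can accept
              * in its port facts.
              */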
1388         if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1389                 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1390
1391         dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1392                         __LINE__,
1393                         mpt_dev->pfacts[0].MaxLanBuckets,
1394                         max_buckets_out,
1395                         priv->max_buckets_out));
1396
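             /* Low-water mark: start reposting once fewer than about two
              * thirds of the allowed buckets are still outstanding.
              */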
1397         priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1398         spin_lock_init(&priv->txfidx_lock);
1399         spin_lock_init(&priv->rxfidx_lock);
1400
1401         memset(&priv->stats, 0, sizeof(priv->stats));
1402
1403         /*  Grab pre-fetched LANPage1 stuff. :-) */
1404         a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1405
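             /* LANPage1 stores the port address low byte first; reverse the
              * bytes to get the address in the order dev_addr expects.
              */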
1406         HWaddr[0] = a[5];
1407         HWaddr[1] = a[4];
1408         HWaddr[2] = a[3];
1409         HWaddr[3] = a[2];
1410         HWaddr[4] = a[1];
1411         HWaddr[5] = a[0];
1412
1413         dev->addr_len = FC_ALEN;
1414         memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1415         memset(dev->broadcast, 0xff, FC_ALEN);
1416
1417         /* The Tx queue is 127 deep on the 909.
1418          * Give ourselves some breathing room.
1419          */
1420         priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1421                             tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1422
1423         dev->open = mpt_lan_open;
1424         dev->stop = mpt_lan_close;
1425         dev->get_stats = mpt_lan_get_stats;
1426         dev->set_multicast_list = NULL;
1427         dev->change_mtu = mpt_lan_change_mtu;
1428         dev->hard_start_xmit = mpt_lan_sdu_send;
1429
1430 /* Not in 2.3.42. Need 2.3.45+ */
1431         dev->tx_timeout = mpt_lan_tx_timeout;
1432         dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1433
1434         dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1435                 "and setting initial values\n"));
1436
1437         SET_MODULE_OWNER(dev);
1438
1439         if (register_netdev(dev) != 0) {
1440                 free_netdev(dev);
1441                 dev = NULL;
1442         }
1443         return dev;
1444 }
1445
1446 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1447 static int __init mpt_lan_init (void)
1448 {
1449         struct net_device *dev;
1450         MPT_ADAPTER *p;
1451         int i, j;
1452
1453         show_mptmod_ver(LANAME, LANVER);
1454
1455 #ifdef QLOGIC_NAA_WORKAROUND
1456         /* Init the global r/w lock for the bad_naa list.  We want to do this
1457            before any boards are initialized and might start using it. */
1458         rwlock_init(&bad_naa_lock);
1459 #endif
1460
1461         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1462                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1463                 return -EBUSY;
1464         }
1465
1466         /* Set the callback index to be used by driver core for turbo replies */
1467         mpt_lan_index = LanCtx;
1468
1469         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1470
1471         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1472                 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1473         } else {
1474                 printk(KERN_ERR MYNAM ": Eieee! Unable to register a reset "
1475                        "handler with mptbase! The world is at an end! "
1476                        "Everything is fading to black! Goodbye.\n");
                     mpt_deregister(LanCtx);
                     LanCtx = -1;
                     mpt_lan_index = 0;
1477                 return -EBUSY;
1478         }
1479
1480         for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1481                 mpt_landev[j] = NULL;
1482         }
1483
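             /* Walk every IOC the base driver has found and register a LAN
              * device on each port that advertises the LAN protocol.
              */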
1484         for (p = mpt_adapter_find_first(); p; p = mpt_adapter_find_next(p)) {
1485                 for (i = 0; i < p->facts.NumberOfPorts; i++) {
1486                         printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1487                                         p->name,
1488                                         p->pfacts[i].PortNumber,
1489                                         p->pfacts[i].ProtocolFlags,
1490                                         MPT_PROTOCOL_FLAGS_c_c_c_c(p->pfacts[i].ProtocolFlags));
1491
1492                         if (!(p->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
1493                                 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1494                                                 p->name);
1495                                 continue;
1496                         }
1497
1498                         dev = mpt_register_lan_device (p, i);
1499                         if (!dev) {
1500                                 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1501                                                 p->name,
1502                                                 p->pfacts[i].PortNumber);
                                     continue;
1503                         }
1504                         printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1505                                         p->name, dev->name);
1506                         printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1507                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1508                                         dev->dev_addr[0], dev->dev_addr[1],
1509                                         dev->dev_addr[2], dev->dev_addr[3],
1510                                         dev->dev_addr[4], dev->dev_addr[5]);
1511 //                                      printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1512 //                                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1513 //                                                      NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
1514                         j = p->id;
1515                         mpt_landev[j] = dev;
1516                         dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1517                                         dev, j,  mpt_landev[j]));
1518
1519                 }
1520         }
1521
1522         return 0;
1523 }
1524
1525 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1526 static void __exit mpt_lan_exit(void)
1527 {
1528         int i;
1529
1530         mpt_reset_deregister(LanCtx);
1531
1532         for (i = 0; i < MPT_MAX_ADAPTERS; i++) {
1533                 struct net_device *dev = mpt_landev[i];
1534
                     if (dev == NULL)
                             continue;

1535                 printk (KERN_INFO MYNAM ": %s/%s: Fusion MPT LAN device unregistered\n",
1536                                IOC_AND_NETDEV_NAMES_s_s(dev));
1537                 unregister_netdev(dev);
1538                 free_netdev(dev);
1539                 mpt_landev[i] = NULL;
1540         }
1541
1542         if (LanCtx >= 0) {
1543                 mpt_deregister(LanCtx);
1544                 LanCtx = -1;
1545                 mpt_lan_index = 0;
1546         }
1547
1548         /* deregister any send/receive handler structs. I2Oism? */
1549 }
1550
1551 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1552 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,59)
1553 MODULE_PARM(tx_max_out_p, "i");
1554 MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME!
1555 #endif
1556
1557 module_init(mpt_lan_init);
1558 module_exit(mpt_lan_exit);
1559
1560 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1561 static unsigned short
1562 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1563 {
1564         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1565         struct fcllc *fcllc;
1566
1567         skb->mac.raw = skb->data;
1568         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1569
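             /* Some peer firmware sends this header with each 32-bit word
              * byte-swapped; a dtype of 0xffff gives it away, so swab the
              * first four words back before looking at the addresses.
              */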
1570         if (fch->dtype == htons(0xffff)) {
1571                 u32 *p = (u32 *) fch;
1572
1573                 swab32s(p + 0);
1574                 swab32s(p + 1);
1575                 swab32s(p + 2);
1576                 swab32s(p + 3);
1577
1578                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1579                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1580                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1581                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1582                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1583         }
1584
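             /* Classify the frame much as eth_type_trans() would: a set
              * group bit in the destination address means broadcast or
              * multicast, otherwise it is for this host or another one.
              */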
1585         if (*fch->daddr & 1) {
1586                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1587                         skb->pkt_type = PACKET_BROADCAST;
1588                 } else {
1589                         skb->pkt_type = PACKET_MULTICAST;
1590                 }
1591         } else {
1592                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1593                         skb->pkt_type = PACKET_OTHERHOST;
1594                 } else {
1595                         skb->pkt_type = PACKET_HOST;
1596                 }
1597         }
1598
1599         fcllc = (struct fcllc *)skb->data;
1600
1601 #ifdef QLOGIC_NAA_WORKAROUND
1602 {
1603         u16 source_naa = fch->stype, found = 0;
1604
1605         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1606            value. */
1607
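             /* Remember stations that send a non-RFC 2625 NAA so the
              * transmit side of this workaround can reuse the value they
              * expect when replying.
              */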
1608         if ((source_naa & 0xF000) == 0)
1609                 source_naa = swab16(source_naa);
1610
1611         if (fcllc->ethertype == htons(ETH_P_ARP))
1612             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1613                       "%04x.\n", source_naa));
1614
1615         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1616             ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)) {
1617                 struct NAA_Hosed *nh, *prevnh;
1618                 int i;
1619
1620                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1621                           "system with non-RFC 2625 NAA value (%04x).\n",
1622                           source_naa));
1623
1624                 write_lock_irq(&bad_naa_lock);
1625                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1626                      prevnh=nh, nh=nh->next) {
1627                         if ((nh->ieee[0] == fch->saddr[0]) &&
1628                             (nh->ieee[1] == fch->saddr[1]) &&
1629                             (nh->ieee[2] == fch->saddr[2]) &&
1630                             (nh->ieee[3] == fch->saddr[3]) &&
1631                             (nh->ieee[4] == fch->saddr[4]) &&
1632                             (nh->ieee[5] == fch->saddr[5])) {
1633                                 found = 1;
1634                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1635                                          "q/Rep w/ bad NAA from system already"
1636                                          " in DB.\n"));
1637                                 break;
1638                         }
1639                 }
1640
1641                 if ((!found) && (nh == NULL)) {
1642
                             /* bad_naa_lock is held with interrupts off, so the
                              * allocation must not sleep.
                              */
1643                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_ATOMIC);
1644                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1645                                  " bad NAA from system not yet in DB.\n"));
1646
1647                         if (nh != NULL) {
1648                                 nh->next = NULL;
1649                                 if (!mpt_bad_naa)
1650                                         mpt_bad_naa = nh;
1651                                 if (prevnh)
1652                                         prevnh->next = nh;
1653
1654                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1655                                 for (i = 0; i < FC_ALEN; i++)
1656                                         nh->ieee[i] = fch->saddr[i];
1657                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1658                                           "%02x:%02x with non-compliant S_NAA value.\n",
1659                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1660                                           fch->saddr[3], fch->saddr[4], fch->saddr[5]));
1661                         } else {
1662                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1663                                         " kmalloc a NAA_Hosed struct.\n");
1664                         }
1665                 } else if (!found) {
1666                         printk (KERN_ERR "mptlan/type_trans: found not"
1667                                 " set, but nh isn't null. Evil "
1668                                 "funkiness abounds.\n");
1669                 }
1670                 write_unlock_irq(&bad_naa_lock);
1671         }
1672 }
1673 #endif
1674
1675         /* Strip the SNAP header from ARP packets since we don't
1676          * pass them through to the 802.2/SNAP layers.
1677          */
1678         if (fcllc->dsap == EXTENDED_SAP &&
1679                 (fcllc->ethertype == htons(ETH_P_IP) ||
1680                  fcllc->ethertype == htons(ETH_P_ARP))) {
1681                 skb_pull(skb, sizeof(struct fcllc));
1682                 return fcllc->ethertype;
1683         }
1684
1685         return htons(ETH_P_802_2);
1686 }
1687
1688 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/