2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
12 * Special thanks goes to the I2O LAN driver people at the
13 * University of Helsinki, who, unbeknownst to them, provided
14 * the inspiration and initial structure for this driver.
16 * A huge debt of gratitude is owed to David S. Miller (DaveM)
17 * for fixing much of the stupid and broken stuff in the early
18 * driver while porting to sparc64 platform. THANK YOU!
20 * A really huge debt of gratitude is owed to Eddie C. Dost
21 * for gobs of hard work fixing and optimizing LAN code.
24 * (see also mptbase.c)
26 * Copyright (c) 2000-2003 LSI Logic Corporation
27 * Originally By: Noah Romer
28 * (mailto:mpt_linux_developer@lsil.com)
30 * $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
32 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
34 This program is free software; you can redistribute it and/or modify
35 it under the terms of the GNU General Public License as published by
36 the Free Software Foundation; version 2 of the License.
38 This program is distributed in the hope that it will be useful,
39 but WITHOUT ANY WARRANTY; without even the implied warranty of
40 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 GNU General Public License for more details.
44 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48 solely responsible for determining the appropriateness of using and
49 distributing the Program and assumes all risks associated with its
50 exercise of rights under this Agreement, including but not limited to
51 the risks and costs of program errors, damage to or loss of data,
52 programs or equipment, and unavailability or interruption of operations.
54 DISCLAIMER OF LIABILITY
55 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
63 You should have received a copy of the GNU General Public License
64 along with this program; if not, write to the Free Software
65 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
70 * Define statements used for debugging
72 //#define MPT_LAN_IO_DEBUG
74 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
77 #include <linux/init.h>
78 #include <linux/module.h>
81 #define MYNAM "mptlan"
83 MODULE_LICENSE("GPL");
85 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
87 * MPT LAN message sizes without variable part.
89 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
90 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
92 #define MPT_LAN_TRANSACTION32_SIZE \
93 (sizeof(SGETransaction32_t) - sizeof(u32))
96 * Fusion MPT LAN private structures
/* Node of the singly-linked mpt_bad_naa list used by the
 * QLOGIC_NAA_WORKAROUND code in mpt_lan_sdu_send(); remaining fields
 * (the ieee[] address and NAA value it stores) are elided in this listing. */
102 struct NAA_Hosed *next;
/* Per-buffer bookkeeping (skb pointer, DMA handle, mapped length);
 * the member declarations are elided in this listing — see the
 * .skb/.dma/.len uses throughout the Tx/Rx paths below. */
105 struct BufferControl {
/* Per-netdevice private state, hung off dev->priv. */
111 struct mpt_lan_priv {
112 MPT_ADAPTER *mpt_dev;
113 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
115 atomic_t buckets_out; /* number of unused buckets on IOC */
116 int bucketthresh; /* Send more when this many left */
118 int *mpt_txfidx; /* Free Tx Context list */
120 spinlock_t txfidx_lock; /* protects mpt_txfidx / mpt_txfidx_tail */
122 int *mpt_rxfidx; /* Free Rx Context list */
124 spinlock_t rxfidx_lock; /* protects mpt_rxfidx / mpt_rxfidx_tail */
126 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
127 struct BufferControl *SendCtl; /* Send BufferControl structs */
129 int max_buckets_out; /* Max buckets to send to IOC */
130 int tx_max_out; /* IOC's Tx queue len */
134 struct net_device_stats stats; /* Per device statistics */
136 struct mpt_work_struct post_buckets_task; /* deferred bucket reposting */
137 unsigned long post_buckets_active; /* bit 0 set while task is queued */
/* On-the-wire header used by mpt_lan_type_trans(); field declarations
 * are elided in this listing. */
140 struct mpt_lan_ohdr {
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Forward declarations for the driver's internal entry points
 * (MPT callbacks, netdev ops, and Tx/Rx reply handlers defined below). */
152 static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153 MPT_FRAME_HDR *reply);
154 static int mpt_lan_open(struct net_device *dev);
155 static int mpt_lan_reset(struct net_device *dev);
156 static int mpt_lan_close(struct net_device *dev);
157 static void mpt_lan_post_receive_buckets(void *dev_id);
158 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
160 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161 static int mpt_lan_receive_post_reply(struct net_device *dev,
162 LANReceivePostReply_t *pRecvRep);
163 static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164 static int mpt_lan_send_reply(struct net_device *dev,
165 LANSendReply_t *pSendRep);
166 static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167 static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169 struct net_device *dev);
171 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
173 * Fusion MPT LAN private data
/* Driver-wide state. LanCtx is the context handle returned by the MPT
 * base driver at registration time; -1 means "not registered". */
175 static int LanCtx = -1;
/* Default queue depths; tx leaves 16 frames of headroom below 127. */
177 static u32 max_buckets_out = 127;
178 static u32 tx_max_out_p = 127 - 16;
/* Per-IOC net_device lookup table, indexed by ioc->id in lan_reply(). */
180 static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
182 #ifdef QLOGIC_NAA_WORKAROUND
/* List of peers with broken NAA values, guarded by bad_naa_lock. */
183 static struct NAA_Hosed *mpt_bad_naa = NULL;
184 rwlock_t bad_naa_lock;
187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
189 * Fusion MPT LAN external data
/* Exported by the MPT base driver; identifies the LAN protocol slot. */
191 extern int mpt_lan_index;
193 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
195 * lan_reply - Handle all data sent from the hardware.
196 * @ioc: Pointer to MPT_ADAPTER structure
197 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
198 * @reply: Pointer to MPT reply frame
200 * Returns 1 indicating original alloc'd request frame ptr
201 * should be freed, or 0 if it shouldn't.
204 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
206 struct net_device *dev = mpt_landev[ioc->id];
207 int FreeReqFrame = 0;
209 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
210 IOC_AND_NETDEV_NAMES_s_s(dev)));
212 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
/* Turbo-reply path: the "reply" pointer is really a 32-bit token
 * (presumably guarded by an mf == NULL check elided from this listing),
 * decoded by form via GET_LAN_FORM(). */
216 u32 tmsg = CAST_PTR_TO_U32(reply);
218 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
219 IOC_AND_NETDEV_NAMES_s_s(dev),
222 switch (GET_LAN_FORM(tmsg)) {
224 // NOTE! (Optimization) First case here is now caught in
225 // mptbase.c::mpt_interrupt() routine and callcack here
226 // is now skipped for this case! 20001218 -sralston
228 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
229 // dioprintk((KERN_INFO MYNAM "/lan_reply: "
230 // "MessageContext turbo reply received\n"));
235 case LAN_REPLY_FORM_SEND_SINGLE:
236 // dioprintk((MYNAM "/lan_reply: "
237 // "calling mpt_lan_send_reply (turbo)\n"));
239 // Potential BUG here? -sralston
240 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
241 // If/when mpt_lan_send_turbo would return 1 here,
242 // calling routine (mptbase.c|mpt_interrupt)
243 // would Oops because mf has already been set
244 // to NULL. So after return from this func,
245 // mpt_interrupt() will attempt to put (NULL) mf ptr
246 // item back onto its adapter FreeQ - Oops!:-(
247 // It's Ok, since mpt_lan_send_turbo() *currently*
248 // always returns 0, but..., just in case:
/* Return value deliberately discarded — see the caution above. */
250 (void) mpt_lan_send_turbo(dev, tmsg);
255 case LAN_REPLY_FORM_RECEIVE_SINGLE:
256 // dioprintk((KERN_INFO MYNAM "@lan_reply: "
257 // "rcv-Turbo = %08x\n", tmsg));
258 mpt_lan_receive_post_turbo(dev, tmsg);
262 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
263 "that I don't know what to do with\n");
265 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
273 // msg = (u32 *) reply;
274 // dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
275 // le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
276 // le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
277 // dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
278 // reply->u.hdr.Function));
/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
280 switch (reply->u.hdr.Function) {
282 case MPI_FUNCTION_LAN_SEND:
284 LANSendReply_t *pSendRep;
286 pSendRep = (LANSendReply_t *) reply;
287 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
291 case MPI_FUNCTION_LAN_RECEIVE:
293 LANReceivePostReply_t *pRecvRep;
295 pRecvRep = (LANReceivePostReply_t *) reply;
296 if (pRecvRep->NumberOfContexts) {
297 mpt_lan_receive_post_reply(dev, pRecvRep);
/* A continuation reply means more context lists follow; only the
 * final (non-continuation) reply lets the request frame be freed. */
298 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
301 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
302 "ReceivePostReply received.\n"));
306 case MPI_FUNCTION_LAN_RESET:
307 /* Just a default reply. Might want to check it to
308 * make sure that everything went ok.
313 case MPI_FUNCTION_EVENT_NOTIFICATION:
314 case MPI_FUNCTION_EVENT_ACK:
315 /* UPDATE! 20010120 -sralston
316 * _EVENT_NOTIFICATION should NOT come down this path any more.
317 * Should be routed to mpt_lan_event_process(), but just in case...
323 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
324 "reply that I don't know what to do with\n");
326 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
335 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT base callback invoked around an IOC hard reset.  On the pre-reset
 * pass we just quiesce the Tx queue; on the post-reset pass the IOC has
 * forgotten all posted buckets, so zero the outstanding count, rebuild
 * the Rx free-context stack, repost buckets, and restart Tx.
 * Bails out early if the interface was never opened (mpt_rxfidx NULL). */
337 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
339 struct net_device *dev = mpt_landev[ioc->id];
340 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
342 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
343 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"));
345 if (priv->mpt_rxfidx == NULL)
348 if (reset_phase == MPT_IOC_PRE_RESET) {
352 netif_stop_queue(dev);
354 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name))
356 atomic_set(&priv->buckets_out, 0);
358 /* Reset Rx Free Tail index and re-populate the queue. */
359 spin_lock_irqsave(&priv->rxfidx_lock, flags);
360 priv->mpt_rxfidx_tail = -1;
361 for (i = 0; i < priv->max_buckets_out; i++)
362 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
363 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
365 mpt_lan_post_receive_buckets(dev);
366 netif_wake_queue(dev);
372 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT base callback for asynchronous IOC event notifications routed to
 * the LAN driver.  Currently every known event falls through with no
 * action; the switch exists as a hook for future handling.  Event ACKs
 * are handled by mptbase.c (see NOTE below). */
374 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
376 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
378 switch (le32_to_cpu(pEvReply->Event)) {
379 case MPI_EVENT_NONE: /* 00 */
380 case MPI_EVENT_LOG_DATA: /* 01 */
381 case MPI_EVENT_STATE_CHANGE: /* 02 */
382 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
383 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
384 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
385 case MPI_EVENT_RESCAN: /* 06 */
386 /* Ok, do we need to do anything here? As far as
387 I can tell, this is when a new device gets added
389 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
390 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
391 case MPI_EVENT_LOGOUT: /* 09 */
392 case MPI_EVENT_EVENT_CHANGE: /* 0A */
398 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
399 * Do NOT do it here now!
405 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device open handler: resets the FW LAN port, allocates the Tx/Rx
 * free-context stacks and BufferControl arrays, posts the initial set of
 * receive buckets, registers for MPT events, and starts the Tx queue.
 * Failure paths unwind allocations in reverse order (labels elided in
 * this listing). */
407 mpt_lan_open(struct net_device *dev)
409 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
412 if (mpt_lan_reset(dev) != 0) {
413 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
415 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
418 printk ("The ioc is active. Perhaps it needs to be"
/* NOTE(review): "The ioc in inactive" below looks like a typo for
 * "is inactive" — runtime string left untouched here. */
421 printk ("The ioc in inactive, most likely in the "
422 "process of being reset. Please try again in "
/* Tx side: free-context stack, tail = -1 means "stack empty". */
426 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
427 if (priv->mpt_txfidx == NULL)
429 priv->mpt_txfidx_tail = -1;
431 priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
433 if (priv->SendCtl == NULL)
435 for (i = 0; i < priv->tx_max_out; i++) {
436 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
437 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
440 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
/* Rx side: same pattern, sized by max_buckets_out. */
442 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
444 if (priv->mpt_rxfidx == NULL)
446 priv->mpt_rxfidx_tail = -1;
448 priv->RcvCtl = kmalloc(priv->max_buckets_out *
449 sizeof(struct BufferControl),
451 if (priv->RcvCtl == NULL)
453 for (i = 0; i < priv->max_buckets_out; i++) {
454 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
455 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
458 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
459 /**/ for (i = 0; i < priv->tx_max_out; i++)
460 /**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
461 /**/ dlprintk(("\n"));
463 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
465 mpt_lan_post_receive_buckets(dev);
466 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
467 IOC_AND_NETDEV_NAMES_s_s(dev));
469 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
470 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
471 " Notifications. This is a bad thing! We're not going "
472 "to go ahead, but I'd be leery of system stability at "
476 netif_start_queue(dev);
477 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
/* Error unwind: free whatever was allocated, newest first. */
481 kfree(priv->mpt_rxfidx);
482 priv->mpt_rxfidx = NULL;
484 kfree(priv->SendCtl);
485 priv->SendCtl = NULL;
487 kfree(priv->mpt_txfidx);
488 priv->mpt_txfidx = NULL;
492 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
493 /* Send a LanReset message to the FW. This should result in the FW returning
494 any buckets it still has. */
496 mpt_lan_reset(struct net_device *dev)
499 LANResetRequest_t *pResetReq;
500 struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
/* Grab a request frame from the MPT base; NULL-check and its error
 * return are elided in this listing (see the dlprintk below). */
502 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id);
505 /* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
506 "Unable to allocate a request frame.\n"));
/* Build the LAN_RESET request for this port and fire it off;
 * completion arrives via lan_reply (MPI_FUNCTION_LAN_RESET case). */
511 pResetReq = (LANResetRequest_t *) mf;
513 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
514 pResetReq->ChainOffset = 0;
515 pResetReq->Reserved = 0;
516 pResetReq->PortNumber = priv->pnum;
517 pResetReq->MsgFlags = 0;
518 pResetReq->Reserved2 = 0;
520 mpt_put_msg_frame(LanCtx, priv->mpt_dev->id, mf);
525 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device stop handler: deregisters from MPT events, stops Tx,
 * waits (bounded by `timeout`) for outstanding Rx buckets to drain,
 * then unmaps and frees every skb still held in RcvCtl/SendCtl along
 * with the context arrays themselves. */
527 mpt_lan_close(struct net_device *dev)
529 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
530 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
531 unsigned int timeout;
534 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
536 mpt_event_deregister(LanCtx);
538 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
539 "since driver was loaded, %d still out\n",
540 priv->total_posted,atomic_read(&priv->buckets_out)));
542 netif_stop_queue(dev);
/* Best-effort drain: sleep-poll until the IOC returns its buckets or
 * the timeout expires (sleep call elided in this listing). */
547 while (atomic_read(&priv->buckets_out) && --timeout) {
548 set_current_state(TASK_INTERRUPTIBLE);
/* Reclaim any Rx buckets the IOC never returned. */
552 for (i = 0; i < priv->max_buckets_out; i++) {
553 if (priv->RcvCtl[i].skb != NULL) {
554 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
555 /**/ "is still out\n", i));
556 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
559 dev_kfree_skb(priv->RcvCtl[i].skb);
563 kfree (priv->RcvCtl);
564 kfree (priv->mpt_rxfidx);
/* Same for in-flight Tx buffers. */
566 for (i = 0; i < priv->tx_max_out; i++) {
567 if (priv->SendCtl[i].skb != NULL) {
568 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
569 priv->SendCtl[i].len,
571 dev_kfree_skb(priv->SendCtl[i].skb);
575 kfree(priv->SendCtl);
576 kfree(priv->mpt_txfidx);
578 atomic_set(&priv->buckets_out, 0);
580 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
581 IOC_AND_NETDEV_NAMES_s_s(dev));
586 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* netdev get_stats op: hand back the per-device counters kept in priv. */
587 static struct net_device_stats *
588 mpt_lan_get_stats(struct net_device *dev)
590 struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
592 return (struct net_device_stats *) &priv->stats;
595 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* netdev change_mtu op: reject values outside the FC LAN limits
 * (the reject/accept return statements are elided in this listing). */
597 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
599 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
605 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
606 /* Tx timeout handler. */
/* If the IOC is still alive, just kick the queue and let the stack
 * retransmit; a dead IOC is left stopped. */
608 mpt_lan_tx_timeout(struct net_device *dev)
610 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
611 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
613 if (mpt_dev->active) {
614 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
615 netif_wake_queue(dev);
619 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Completion of a single Tx via turbo reply: the 32-bit token carries
 * the buffer context.  Bump stats, unmap the DMA buffer, free the skb,
 * push the context back on the Tx free stack, and wake the queue.
 * Always returns 0 — see the caution in lan_reply(). */
622 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
624 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
625 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
626 struct sk_buff *sent;
630 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
631 sent = priv->SendCtl[ctx].skb;
633 priv->stats.tx_packets++;
634 priv->stats.tx_bytes += sent->len;
636 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
637 IOC_AND_NETDEV_NAMES_s_s(dev),
638 __FUNCTION__, sent));
640 priv->SendCtl[ctx].skb = NULL;
641 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
642 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
/* _irq variant: we are in interrupt context here. */
643 dev_kfree_skb_irq(sent);
645 spin_lock_irqsave(&priv->txfidx_lock, flags);
646 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
647 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
649 netif_wake_queue(dev);
653 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Completion of a full (non-turbo) LAN_SEND reply, possibly covering
 * several buffer contexts.  Classifies the IOCStatus for stats, then
 * for each returned context: unmaps the DMA buffer, frees the skb, and
 * returns the context to the Tx free stack.  Returns nonzero when the
 * request frame may be freed (non-continuation reply). */
655 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
657 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
658 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
659 struct sk_buff *sent;
661 int FreeReqFrame = 0;
666 count = pSendRep->NumberOfContexts;
668 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
669 le16_to_cpu(pSendRep->IOCStatus)));
671 /* Add check for Loginfo Flag in IOCStatus */
673 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
674 case MPI_IOCSTATUS_SUCCESS:
675 priv->stats.tx_packets += count;
678 case MPI_IOCSTATUS_LAN_CANCELED:
679 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
682 case MPI_IOCSTATUS_INVALID_SGL:
683 priv->stats.tx_errors += count;
684 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
685 IOC_AND_NETDEV_NAMES_s_s(dev));
689 priv->stats.tx_errors += count;
/* Walk the returned context list; loop head elided in this listing
 * (pContext presumably advances once per context). */
693 pContext = &pSendRep->BufferContext;
695 spin_lock_irqsave(&priv->txfidx_lock, flags);
697 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
699 sent = priv->SendCtl[ctx].skb;
700 priv->stats.tx_bytes += sent->len;
702 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
703 IOC_AND_NETDEV_NAMES_s_s(dev),
704 __FUNCTION__, sent));
706 priv->SendCtl[ctx].skb = NULL;
707 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
708 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
709 dev_kfree_skb_irq(sent);
711 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
716 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
719 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
722 netif_wake_queue(dev);
726 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* netdev hard_start_xmit: maps the skb for DMA, builds a LAN_SEND
 * request (transaction context element carrying the NAA + destination
 * MAC, plus one 64-bit Simple SGE for the payload) and posts it to the
 * IOC.  A Tx context is popped from the free stack under txfidx_lock;
 * if none is available or no request frame can be had, the queue is
 * stopped and the error paths (elided in this listing) bail out. */
728 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
730 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
731 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
733 LANSendRequest_t *pSendReq;
734 SGETransaction32_t *pTrans;
735 SGESimple64_t *pSimple;
739 u16 cur_naa = 0x1000;
741 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
744 spin_lock_irqsave(&priv->txfidx_lock, flags);
745 if (priv->mpt_txfidx_tail < 0) {
746 netif_stop_queue(dev);
747 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
749 printk (KERN_ERR "%s: no tx context available: %u\n",
750 __FUNCTION__, priv->mpt_txfidx_tail);
754 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
756 netif_stop_queue(dev);
757 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
759 printk (KERN_ERR "%s: Unable to alloc request frame\n",
764 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
765 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
767 // dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
768 // IOC_AND_NETDEV_NAMES_s_s(dev)));
770 pSendReq = (LANSendRequest_t *) mf;
772 /* Set the mac.raw pointer, since this apparently isn't getting
773 * done before we get the skb. Pull the data pointer past the mac data.
775 skb->mac.raw = skb->data;
778 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
/* Record the mapping so the completion path can unmap/free. */
781 priv->SendCtl[ctx].skb = skb;
782 priv->SendCtl[ctx].dma = dma;
783 priv->SendCtl[ctx].len = skb->len;
/* Message Header */
786 pSendReq->Reserved = 0;
787 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
788 pSendReq->ChainOffset = 0;
789 pSendReq->Reserved2 = 0;
790 pSendReq->MsgFlags = 0;
791 pSendReq->PortNumber = priv->pnum;
793 /* Transaction Context Element */
794 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
796 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
797 pTrans->ContextSize = sizeof(u32);
798 pTrans->DetailsLength = 2 * sizeof(u32);
800 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
802 // dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
803 // IOC_AND_NETDEV_NAMES_s_s(dev),
804 // ctx, skb, skb->data));
806 #ifdef QLOGIC_NAA_WORKAROUND
808 struct NAA_Hosed *nh;
810 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
811 RFC 2625. The longer I look at this, the more my opinion of Qlogic
813 read_lock_irq(&bad_naa_lock);
814 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
815 if ((nh->ieee[0] == skb->mac.raw[0]) &&
816 (nh->ieee[1] == skb->mac.raw[1]) &&
817 (nh->ieee[2] == skb->mac.raw[2]) &&
818 (nh->ieee[3] == skb->mac.raw[3]) &&
819 (nh->ieee[4] == skb->mac.raw[4]) &&
820 (nh->ieee[5] == skb->mac.raw[5])) {
822 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
823 "= %04x.\n", cur_naa));
827 read_unlock_irq(&bad_naa_lock);
/* Pack NAA + 6-byte destination MAC into the two detail words. */
831 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
832 (skb->mac.raw[0] << 8) |
833 (skb->mac.raw[1] << 0));
834 pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
835 (skb->mac.raw[3] << 16) |
836 (skb->mac.raw[4] << 8) |
837 (skb->mac.raw[5] << 0));
839 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
841 /* If we ever decide to send more than one Simple SGE per LANSend, then
842 we will need to make sure that LAST_ELEMENT only gets set on the
843 last one. Otherwise, bad voodoo and evil funkiness will commence. */
844 pSimple->FlagsLength = cpu_to_le32(
845 ((MPI_SGE_FLAGS_LAST_ELEMENT |
846 MPI_SGE_FLAGS_END_OF_BUFFER |
847 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
848 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
849 MPI_SGE_FLAGS_HOST_TO_IOC |
850 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
851 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
853 pSimple->Address.Low = cpu_to_le32((u32) dma);
854 if (sizeof(dma_addr_t) > sizeof(u32))
855 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
857 pSimple->Address.High = 0;
859 mpt_put_msg_frame (LanCtx, mpt_dev->id, mf);
860 dev->trans_start = jiffies;
862 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
863 IOC_AND_NETDEV_NAMES_s_s(dev),
864 le32_to_cpu(pSimple->FlagsLength)));
869 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Schedules post_buckets_task to replenish the IOC's receive buckets.
 * post_buckets_active bit 0 guards against double-queueing; priority 1
 * runs the task immediately, 0 defers it (timer/delayed work).  The
 * #if branches keep both the 2.5 workqueue API and the old 2.4
 * task-queue API working. */
871 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
873 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
876 struct mpt_lan_priv *priv = dev->priv;
878 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
881 schedule_work(&priv->post_buckets_task);
883 queue_task(&priv->post_buckets_task, &tq_immediate);
884 mark_bh(IMMEDIATE_BH);
887 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
888 schedule_delayed_work(&priv->post_buckets_task, 1);
890 queue_task(&priv->post_buckets_task, &tq_timer);
892 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
895 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
896 IOC_AND_NETDEV_NAMES_s_s(dev) ));
900 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Common tail of every Rx path: stamp the protocol, bump stats, hand
 * the skb to the network stack (netif_rx call elided in this listing),
 * and top up the IOC's buckets if we've dropped below the threshold. */
902 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
904 struct mpt_lan_priv *priv = dev->priv;
906 skb->protocol = mpt_lan_type_trans(skb, dev);
908 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
909 "delivered to upper level.\n",
910 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
912 priv->stats.rx_bytes += skb->len;
913 priv->stats.rx_packets++;
918 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
919 atomic_read(&priv->buckets_out)));
/* Replenish eagerly (priority 1) when running low on buckets. */
921 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
922 mpt_lan_wake_post_buckets_task(dev, 1);
924 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
925 "remaining, %d received back since sod\n",
926 atomic_read(&priv->buckets_out), priv->total_received));
931 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Single-bucket Rx via turbo reply: the token encodes bucket context
 * and packet length.  Small packets (< MPT_LAN_RX_COPYBREAK) are copied
 * into a fresh skb so the original bucket can be reposted; larger ones
 * surrender the bucket skb itself (unmap path below).  The context is
 * returned to the Rx free stack either way. */
934 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
936 struct mpt_lan_priv *priv = dev->priv;
937 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
938 struct sk_buff *skb, *old_skb;
942 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
943 skb = priv->RcvCtl[ctx].skb;
945 len = GET_LAN_PACKET_LENGTH(tmsg);
947 if (len < MPT_LAN_RX_COPYBREAK) {
950 skb = (struct sk_buff *)dev_alloc_skb(len);
952 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
953 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Sync the bucket for CPU access before copying out of it. */
958 pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
959 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
961 memcpy(skb_put(skb, len), old_skb->data, len);
/* Large-packet path: give the bucket skb to the stack and unmap it. */
968 priv->RcvCtl[ctx].skb = NULL;
970 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
971 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
974 spin_lock_irqsave(&priv->rxfidx_lock, flags);
975 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
976 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
978 atomic_dec(&priv->buckets_out);
979 priv->total_received++;
981 return mpt_lan_receive_skb(dev, skb);
984 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Handles a canceled ReceivePost (e.g. after a LanReset): the IOC is
 * returning buckets unused.  For each context, unmap and free the skb
 * and push the context back on the Rx free stack, then subtract the
 * whole batch from buckets_out. */
986 mpt_lan_receive_post_free(struct net_device *dev,
987 LANReceivePostReply_t *pRecvRep)
989 struct mpt_lan_priv *priv = dev->priv;
990 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
997 count = pRecvRep->NumberOfContexts;
999 /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
1000 "IOC returned %d buckets, freeing them...\n", count));
1002 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1003 for (i = 0; i < count; i++) {
1004 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1006 skb = priv->RcvCtl[ctx].skb;
1008 // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
1009 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1010 // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
1011 // priv, &(priv->buckets_out)));
1012 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
1014 priv->RcvCtl[ctx].skb = NULL;
1015 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1016 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1017 dev_kfree_skb_any(skb);
1019 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1021 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1023 atomic_sub(count, &priv->buckets_out);
1025 // for (i = 0; i < priv->max_buckets_out; i++)
1026 // if (priv->RcvCtl[i].skb != NULL)
1027 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1028 // "is still out\n", i));
1030 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1033 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1034 /**/ "remaining, %d received back since sod.\n",
1035 /**/ atomic_read(&priv->buckets_out), priv->total_received));
1039 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Full (non-turbo) ReceivePost reply.  Three packet-assembly cases:
 *   1. multiple buckets for one packet -> allocate a fresh skb and
 *      concatenate the bucket contents into it;
 *   2. single small bucket (< MPT_LAN_RX_COPYBREAK) -> copy into a
 *      fresh skb so the bucket can be reused;
 *   3. single large bucket -> surrender the bucket skb to the stack.
 * Canceled replies are diverted to mpt_lan_receive_post_free().  After
 * assembly, sanity-check the free stack, warn on bucket starvation or
 * a driver/firmware bucket-count mismatch (issuing a LanReset in the
 * latter case — call elided in this listing), and deliver the skb. */
1041 mpt_lan_receive_post_reply(struct net_device *dev,
1042 LANReceivePostReply_t *pRecvRep)
1044 struct mpt_lan_priv *priv = dev->priv;
1045 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1046 struct sk_buff *skb, *old_skb;
1047 unsigned long flags;
1048 u32 len, ctx, offset;
1049 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1053 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1054 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1055 le16_to_cpu(pRecvRep->IOCStatus)));
1057 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1058 MPI_IOCSTATUS_LAN_CANCELED)
1059 return mpt_lan_receive_post_free(dev, pRecvRep);
1061 len = le32_to_cpu(pRecvRep->PacketLength);
1063 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1064 "ReceivePostReply w/ PacketLength zero!\n",
1065 IOC_AND_NETDEV_NAMES_s_s(dev));
1066 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1067 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1071 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1072 count = pRecvRep->NumberOfContexts;
1073 skb = priv->RcvCtl[ctx].skb;
1075 offset = le32_to_cpu(pRecvRep->PacketOffset);
1076 // if (offset != 0) {
1077 // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1078 // "w/ PacketOffset %u\n",
1079 // IOC_AND_NETDEV_NAMES_s_s(dev),
1083 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1084 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Case 1: packet spans several buckets — gather-copy into one skb. */
1090 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1091 // "for single packet, concatenating...\n",
1092 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1094 skb = (struct sk_buff *)dev_alloc_skb(len);
1096 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1097 IOC_AND_NETDEV_NAMES_s_s(dev),
1098 __FILE__, __LINE__);
1102 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1103 for (i = 0; i < count; i++) {
1105 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1106 old_skb = priv->RcvCtl[ctx].skb;
1108 l = priv->RcvCtl[ctx].len;
1112 // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1113 // IOC_AND_NETDEV_NAMES_s_s(dev),
1116 pci_dma_sync_single(mpt_dev->pcidev,
1117 priv->RcvCtl[ctx].dma,
1118 priv->RcvCtl[ctx].len,
1119 PCI_DMA_FROMDEVICE);
1120 memcpy(skb_put(skb, l), old_skb->data, l);
1122 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1125 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Case 2: small single-bucket packet — copy and recycle the bucket. */
1127 } else if (len < MPT_LAN_RX_COPYBREAK) {
1131 skb = (struct sk_buff *)dev_alloc_skb(len);
1133 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1134 IOC_AND_NETDEV_NAMES_s_s(dev),
1135 __FILE__, __LINE__);
1139 pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1140 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1142 memcpy(skb_put(skb, len), old_skb->data, len);
1144 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1145 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1146 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Case 3: large single-bucket packet — hand the bucket skb up. */
1149 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1151 priv->RcvCtl[ctx].skb = NULL;
1153 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1154 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1155 priv->RcvCtl[ctx].dma = 0;
1157 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1158 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1163 atomic_sub(count, &priv->buckets_out);
1164 priv->total_received += count;
/* Free-stack overflow would mean duplicate/garbage contexts from FW. */
1166 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1167 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1168 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1169 IOC_AND_NETDEV_NAMES_s_s(dev),
1170 priv->mpt_rxfidx_tail,
1171 MPT_LAN_MAX_BUCKETS_OUT);
1173 panic("Damn it Jim! I'm a doctor, not a programmer! "
1174 "Oh, wait a sec, I am a programmer. "
1175 "And, who's Jim?!?!\n"
1176 "Arrgghh! We've done it again!\n");
1180 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1181 "(priv->buckets_out = %d)\n",
1182 IOC_AND_NETDEV_NAMES_s_s(dev),
1183 atomic_read(&priv->buckets_out));
1184 else if (remaining < 10)
1185 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1186 "(priv->buckets_out = %d)\n",
1187 IOC_AND_NETDEV_NAMES_s_s(dev),
1188 remaining, atomic_read(&priv->buckets_out));
1190 if ((remaining < priv->bucketthresh) &&
1191 ((atomic_read(&priv->buckets_out) - remaining) >
1192 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1194 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1195 "buckets_out count and fw's BucketsRemaining "
1196 "count has crossed the threshold, issuing a "
1197 "LanReset to clear the fw's hashtable. You may "
1198 "want to check your /var/log/messages for \"CRC "
1199 "error\" event notifications.\n");
/* Deferred (priority 0) repost after a reply-driven replenish. */
1202 mpt_lan_wake_post_buckets_task(dev, 0);
1205 return mpt_lan_receive_skb(dev, skb);
1208 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1209 /* Simple SGE's only at the moment */
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's receive-buffer pool
 * ("buckets") by building and posting one LANReceivePost message frame
 * containing a simple 64-bit SGE per DMA-mapped skb.
 *
 * @dev_id: opaque work-task argument; actually the struct net_device *.
 *
 * NOTE(review): the original source here is elided (embedded line numbers
 * are non-contiguous), so some statements — including most closing braces
 * and several if-guards — are not visible. Comments below describe only
 * what the visible code demonstrates.
 */
1212 mpt_lan_post_receive_buckets(void *dev_id)
1214 struct net_device *dev = dev_id;
1215 struct mpt_lan_priv *priv = dev->priv;
1216 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1218 LANReceivePostRequest_t *pRecvReq;
1219 SGETransaction32_t *pTrans;
1220 SGESimple64_t *pSimple;
1221 struct sk_buff *skb;
1223 u32 curr, buckets, count, max;
/* Bucket payload size: MTU + link header + 4 (presumably FC trailer/pad —
 * TODO confirm against the receive path). */
1224 u32 len = (dev->mtu + dev->hard_header_len + 4);
1225 unsigned long flags;
/* How many buckets we still owe the IOC to reach max_buckets_out. */
1228 curr = atomic_read(&priv->buckets_out);
1229 buckets = (priv->max_buckets_out - curr);
1231 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1232 IOC_AND_NETDEV_NAMES_s_s(dev),
1233 __FUNCTION__, buckets, curr));
/* Max SGEs that fit in one request frame after the fixed request header. */
1235 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1236 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1239 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
1241 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1243 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1244 __FUNCTION__, buckets));
/* Build the LANReceivePost request in the freshly obtained frame. */
1247 pRecvReq = (LANReceivePostRequest_t *) mf;
1253 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1254 pRecvReq->ChainOffset = 0;
1255 pRecvReq->MsgFlags = 0;
1256 pRecvReq->PortNumber = priv->pnum;
1258 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
/* One transaction + SGE per bucket being posted this round. */
1261 for (i = 0; i < count; i++) {
/* rxfidx is a stack of free receive-context indices; tail < 0 == empty. */
1264 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1265 if (priv->mpt_rxfidx_tail < 0) {
1266 printk (KERN_ERR "%s: Can't alloc context\n",
1268 spin_unlock_irqrestore(&priv->rxfidx_lock,
1273 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1275 skb = priv->RcvCtl[ctx].skb;
/* A cached skb whose mapped length no longer matches the current bucket
 * size (e.g. after an MTU change) must be unmapped and freed. */
1276 if (skb && (priv->RcvCtl[ctx].len != len)) {
1277 pci_unmap_single(mpt_dev->pcidev,
1278 priv->RcvCtl[ctx].dma,
1279 priv->RcvCtl[ctx].len,
1280 PCI_DMA_FROMDEVICE);
1281 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1282 skb = priv->RcvCtl[ctx].skb = NULL;
/* No reusable skb: allocate and DMA-map a fresh one; on failure the
 * context index is pushed back on the free stack. */
1286 skb = dev_alloc_skb(len);
1288 printk (KERN_WARNING
1289 MYNAM "/%s: Can't alloc skb\n",
1291 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1292 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1296 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1297 len, PCI_DMA_FROMDEVICE);
1299 priv->RcvCtl[ctx].skb = skb;
1300 priv->RcvCtl[ctx].dma = dma;
1301 priv->RcvCtl[ctx].len = len;
1304 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Fill in the 32-bit transaction context (our ctx index, little-endian)
 * followed by a single 64-bit simple SGE for the mapped buffer. */
1306 pTrans->ContextSize = sizeof(u32);
1307 pTrans->DetailsLength = 0;
1309 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1311 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1313 pSimple->FlagsLength = cpu_to_le32(
1314 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1315 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1316 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1317 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
/* Only emit the high dword when dma_addr_t is wider than 32 bits. */
1318 if (sizeof(dma_addr_t) > sizeof(u32))
1319 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1321 pSimple->Address.High = 0;
1323 pTrans = (SGETransaction32_t *) (pSimple + 1);
/* pSimple still NULL here means the loop never built an SGE: nothing to
 * post, so return the unused message frame to the pool. */
1326 if (pSimple == NULL) {
1327 /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1329 mpt_free_msg_frame(LanCtx, mpt_dev->id, mf);
/* Mark the final SGE as end-of-list before handing the frame to the IOC. */
1333 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1335 pRecvReq->BucketCount = cpu_to_le32(i);
1337 /* printk(KERN_INFO MYNAM ": posting buckets\n ");
1338 * for (i = 0; i < j + 2; i ++)
1339 * printk (" %08x", le32_to_cpu(msg[i]));
1343 mpt_put_msg_frame(LanCtx, mpt_dev->id, mf);
/* Account for the i buckets actually posted this pass. */
1345 priv->total_posted += i;
1347 atomic_add(i, &priv->buckets_out);
1351 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1352 __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1353 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1354 __FUNCTION__, priv->total_posted, priv->total_received));
/* Allow the wake-task helper to schedule this work again. */
1356 clear_bit(0, &priv->post_buckets_active);
1359 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register one IP-over-FC net_device
 * for port @pnum of adapter @mpt_dev.
 *
 * Sets up the private state (bucket accounting, work task, spinlocks),
 * copies the hardware address out of the pre-fetched LANPage1 config page,
 * wires up the net_device method pointers, and calls register_netdev().
 *
 * Returns the new net_device on success (error/cleanup path is elided from
 * this view of the source).
 *
 * NOTE(review): interior lines are elided here; comments describe only the
 * visible statements.
 */
1360 static struct net_device *
1361 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1363 struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1364 struct mpt_lan_priv *priv = NULL;
1365 u8 HWaddr[FC_ALEN], *a;
1370 dev->mtu = MPT_LAN_MTU;
1372 priv = (struct mpt_lan_priv *) dev->priv;
1374 priv->mpt_dev = mpt_dev;
/* Deferred work that re-posts receive buckets to the IOC. */
1377 memset(&priv->post_buckets_task, 0, sizeof(struct mpt_work_struct));
1378 MPT_INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1379 priv->post_buckets_active = 0;
1381 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1382 __LINE__, dev->mtu + dev->hard_header_len + 4));
/* Bucket accounting starts at zero; the cap is the module parameter
 * clamped to what the firmware's port facts say it can handle. */
1384 atomic_set(&priv->buckets_out, 0);
1385 priv->total_posted = 0;
1386 priv->total_received = 0;
1387 priv->max_buckets_out = max_buckets_out;
1388 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1389 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1391 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1393 mpt_dev->pfacts[0].MaxLanBuckets,
1395 priv->max_buckets_out));
/* Refill threshold: re-post once outstanding buckets drop below 2/3 of max. */
1397 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1398 priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
1399 priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;
1401 memset(&priv->stats, 0, sizeof(priv->stats));
1403 /* Grab pre-fetched LANPage1 stuff. :-) */
1404 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
/* Publish the FC hardware address (FC_ALEN bytes) and an all-ones
 * broadcast address on the net_device. */
1413 dev->addr_len = FC_ALEN;
1414 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1415 memset(dev->broadcast, 0xff, FC_ALEN);
1417 /* The Tx queue is 127 deep on the 909.
1418 * Give ourselves some breathing room.
1420 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1421 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
/* Pre-netdev_ops era: method pointers live directly on net_device. */
1423 dev->open = mpt_lan_open;
1424 dev->stop = mpt_lan_close;
1425 dev->get_stats = mpt_lan_get_stats;
1426 dev->set_multicast_list = NULL;
1427 dev->change_mtu = mpt_lan_change_mtu;
1428 dev->hard_start_xmit = mpt_lan_sdu_send;
1430 /* Not in 2.3.42. Need 2.3.45+ */
1431 dev->tx_timeout = mpt_lan_tx_timeout;
1432 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1434 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1435 "and setting initial values\n"));
1437 SET_MODULE_OWNER(dev);
/* Failure branch body is elided from this view. */
1439 if (register_netdev(dev) != 0) {
1446 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_init - module init: register with the MPT base driver, hook IOC
 * reset notifications, then walk every adapter/port and register a LAN
 * net_device for each port that advertises the LAN protocol.
 *
 * NOTE(review): return statements and several closing braces are elided
 * from this view; comments describe only the visible statements.
 */
1447 static int __init mpt_lan_init (void)
1449 struct net_device *dev;
1453 show_mptmod_ver(LANAME, LANVER);
1455 #ifdef QLOGIC_NAA_WORKAROUND
1456 /* Init the global r/w lock for the bad_naa list. We want to do this
1457 before any boards are initialized and may be used. */
1458 rwlock_init(&bad_naa_lock);
/* LanCtx is our callback context handle from the MPT base driver;
 * <= 0 means registration failed. */
1461 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1462 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1466 /* Set the callback index to be used by driver core for turbo replies */
1467 mpt_lan_index = LanCtx;
1469 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1471 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1472 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1474 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1475 "handler with mptbase! The world is at an end! "
1476 "Everything is fading to black! Goodbye.\n");
/* Clear the device table before probing adapters. */
1480 for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1481 mpt_landev[j] = NULL;
/* Iterate every adapter and every port on it. */
1484 for (p = mpt_adapter_find_first(); p; p = mpt_adapter_find_next(p)) {
1485 for (i = 0; i < p->facts.NumberOfPorts; i++) {
1486 printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1488 p->pfacts[i].PortNumber,
1489 p->pfacts[i].ProtocolFlags,
1490 MPT_PROTOCOL_FLAGS_c_c_c_c(p->pfacts[i].ProtocolFlags));
/* Skip ports that do not advertise the LAN protocol. */
1492 if (!(p->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
1493 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1498 dev = mpt_register_lan_device (p, i);
1500 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1502 p->pfacts[i].PortNumber);
1504 printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1505 p->name, dev->name);
1506 printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1507 IOC_AND_NETDEV_NAMES_s_s(dev),
1508 dev->dev_addr[0], dev->dev_addr[1],
1509 dev->dev_addr[2], dev->dev_addr[3],
1510 dev->dev_addr[4], dev->dev_addr[5]);
1511 // printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1512 // IOC_AND_NETDEV_NAMES_s_s(dev),
1513 // NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
/* Record the new device in the global table (j presumably advances per
 * registered device — increment is elided from this view). */
1515 mpt_landev[j] = dev;
1516 dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1517 dev, j, mpt_landev[j]));
1525 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_exit - module teardown: deregister the IOC-reset handler,
 * unregister every net_device recorded in mpt_landev[] (the table is
 * NULL-terminated by construction in mpt_lan_init), then release our
 * MPT base-driver context.
 */
1526 static void __exit mpt_lan_exit(void)
1530 mpt_reset_deregister(LanCtx);
1532 for (i = 0; mpt_landev[i] != NULL; i++) {
1533 struct net_device *dev = mpt_landev[i];
1535 printk (KERN_INFO ": %s/%s: Fusion MPT LAN device unregistered\n",
1536 IOC_AND_NETDEV_NAMES_s_s(dev));
1537 unregister_netdev(dev);
/* Clear the slot so a stale pointer can't be reused. */
1539 mpt_landev[i] = NULL;
1543 mpt_deregister(LanCtx);
1551 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Module parameters (old-style MODULE_PARM, only for kernels before the
 * 2.5.59 module_param() API) and the init/exit entry points. */
1552 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,59)
1553 MODULE_PARM(tx_max_out_p, "i");
1554 MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME!
1557 module_init(mpt_lan_init);
1558 module_exit(mpt_lan_exit);
1560 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - determine the protocol type of a received frame
 * (analogue of eth_type_trans for this IP-over-FC encapsulation).
 *
 * Strips the mpt_lan_ohdr, classifies the packet (broadcast / multicast /
 * otherhost / host) from the destination address, and returns the
 * network-byte-order ethertype — either the one carried in the LLC/SNAP
 * header (for IP/ARP) or ETH_P_802_2.
 *
 * NOTE(review): interior lines are elided here (notably parts of the
 * broadcast-swap fixup and the QLogic NAA workaround control flow);
 * comments describe only the visible statements.
 */
1561 static unsigned short
1562 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1564 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1565 struct fcllc *fcllc;
1567 skb->mac.raw = skb->data;
1568 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
/* dtype == 0xffff marks a frame hit by a firmware byte-swap bug on
 * broadcasts; the repair code is elided from this view. */
1570 if (fch->dtype == htons(0xffff)) {
1571 u32 *p = (u32 *) fch;
1578 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1579 NETDEV_PTR_TO_IOC_NAME_s(dev));
1580 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1581 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1582 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
/* Low bit of the first dest byte set => group (broadcast/multicast). */
1585 if (*fch->daddr & 1) {
1586 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1587 skb->pkt_type = PACKET_BROADCAST;
1589 skb->pkt_type = PACKET_MULTICAST;
1592 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1593 skb->pkt_type = PACKET_OTHERHOST;
1595 skb->pkt_type = PACKET_HOST;
1599 fcllc = (struct fcllc *)skb->data;
1601 #ifdef QLOGIC_NAA_WORKAROUND
1603 u16 source_naa = fch->stype, found = 0;
1605 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
/* An all-zero high nibble suggests the NAA arrived byte-swapped. */
1608 if ((source_naa & 0xF000) == 0)
1609 source_naa = swab16(source_naa);
1611 if (fcllc->ethertype == htons(ETH_P_ARP))
1612 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1613 "%04x.\n", source_naa));
/* ARP from a sender whose NAA nibble is not the RFC 2625 value: record
 * the sender in the global bad_naa list under the writer lock. */
1615 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1616 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1617 struct NAA_Hosed *nh, *prevnh;
1620 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1621 "system with non-RFC 2625 NAA value (%04x).\n",
1624 write_lock_irq(&bad_naa_lock);
/* Linear search for an existing entry matching the sender MAC. */
1625 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1626 prevnh=nh, nh=nh->next) {
1627 if ((nh->ieee[0] == fch->saddr[0]) &&
1628 (nh->ieee[1] == fch->saddr[1]) &&
1629 (nh->ieee[2] == fch->saddr[2]) &&
1630 (nh->ieee[3] == fch->saddr[3]) &&
1631 (nh->ieee[4] == fch->saddr[4]) &&
1632 (nh->ieee[5] == fch->saddr[5])) {
1634 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1635 "q/Rep w/ bad NAA from system already"
/* Not found and list exhausted: allocate and fill a new entry.
 * NOTE(review): GFP_KERNEL under write_lock_irq is visible here —
 * sleeping allocation inside an IRQ-disabled write lock looks unsafe;
 * flagging, not changing (historical code). */
1641 if ((!found) && (nh == NULL)) {
1643 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1644 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1645 " bad NAA from system not yet in DB.\n"));
1654 nh->NAA = source_naa; /* Set the S_NAA value. */
1655 for (i = 0; i < FC_ALEN; i++)
1656 nh->ieee[i] = fch->saddr[i];
1657 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1658 "%02x:%02x with non-compliant S_NAA value.\n",
1659 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1660 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1662 printk (KERN_ERR "mptlan/type_trans: Unable to"
1663 " kmalloc a NAA_Hosed struct.\n");
1665 } else if (!found) {
1666 printk (KERN_ERR "mptlan/type_trans: found not"
1667 " set, but nh isn't null. Evil "
1668 "funkiness abounds.\n");
1670 write_unlock_irq(&bad_naa_lock);
1675 /* Strip the SNAP header from ARP packets since we don't
1676 * pass them through to the 802.2/SNAP layers.
/* For SNAP-encapsulated IP/ARP, drop the LLC/SNAP header and return the
 * real ethertype; everything else is handed up as raw 802.2. */
1678 if (fcllc->dsap == EXTENDED_SAP &&
1679 (fcllc->ethertype == htons(ETH_P_IP) ||
1680 fcllc->ethertype == htons(ETH_P_ARP))) {
1681 skb_pull(skb, sizeof(struct fcllc));
1682 return fcllc->ethertype;
1685 return htons(ETH_P_802_2);
1688 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/