/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force to invalidate ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}

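/*
 * Usage sketch (illustrative, not part of this driver): the FCP layer of
 * libfc reaches ixgbe_fcoe_ddp_put() through net_device_ops once an
 * exchange completes, roughly as:
 *
 *	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
 *		ddped_len = netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
 *
 * where ddped_len is the number of bytes the hardware placed directly.
 */
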
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up DDP in target mode, 0 for initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;
	struct pci_pool *pool;
	unsigned int cpu;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* setup dma from scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from per cpu ddp pool */
	cpu = get_cpu();
	pool = *per_cpu_ptr(fcoe->pool, cpu);
	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, then add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/* turn on last frame indication for target mode, as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	pci_pool_free(pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	put_cpu();
	return 0;
}

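/*
 * Worked example (illustrative, assuming bufflen = IXGBE_FCBUFF_MIN = 4096):
 * a DMA run starting at 0x10000100 with length 0x2000 is split above as
 *
 *	udl[0] = 0x10000000;	// thisoff = 0x100, thislen = 0xf00
 *	udl[1] = 0x10001000;	// thisoff = 0,     thislen = 0x1000
 *	udl[2] = 0x10002000;	// thisoff = 0,     thislen = 0x100
 *
 * Only udl[0] may start at a non-zero offset (recorded in firstoff) and
 * only the final chunk may end short of bufflen (recorded via lastsize).
 */
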
/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}

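/*
 * Wiring sketch (illustrative): the DDP entry points above are exported
 * through net_device_ops (guarded by #ifdef IXGBE_FCOE in ixgbe_main.c),
 * roughly as:
 *
 *	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
 *	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
 *	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
 */
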
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		goto ddp_out;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		pci_unmap_sg(adapter->pdev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed, but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
ddp_out:
	return rc;
}

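/*
 * Note (illustrative): struct fcoe_crc_eof from <scsi/fc/fc_fcoe.h> is the
 * 8-byte FCoE trailer (4-byte CRC, 1-byte EOF, 3 reserved bytes), so the
 * skb_put() above appends exactly one trailer:
 *
 *	BUILD_BUG_ON(sizeof(struct fcoe_crc_eof) != 8);	// would hold here
 */
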
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point at fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);

	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_FSO;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE;

	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);

	return 0;
}

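/*
 * Worked example (illustrative numbers): for an FSO skb with
 * skb_transport_offset() = 28 (14-byte MAC + 14-byte FCoE header), the
 * replicated header is 28 + 24 (FC header) + 8 (CRC/EOF trailer) = 60
 * bytes. With gso_size = 2048 and 5000 bytes of FC payload:
 *
 *	gso_segs  = DIV_ROUND_UP(5000, 2048) = 3;
 *	bytecount += (3 - 1) * 60;	// headers replicated per extra frame
 */
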
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
{
	unsigned int cpu;
	struct pci_pool **pool;

	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(fcoe->pool, cpu);
		if (*pool)
			pci_pool_destroy(*pool);
	}
	free_percpu(fcoe->pool);
	fcoe->pool = NULL;
}

static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	unsigned int cpu;
	struct pci_pool **pool;
	char pool_name[32];

	fcoe->pool = alloc_percpu(struct pci_pool *);
	if (!fcoe->pool)
		return;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
		pool = per_cpu_ptr(fcoe->pool, cpu);
		*pool = pci_pool_create(pool_name,
					adapter->pdev, IXGBE_FCPTR_MAX,
					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
		if (!*pool) {
			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
			ixgbe_fcoe_ddp_pools_free(fcoe);
			return;
		}
	}
}

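/*
 * Design note (sketch): fcoe->pool is a percpu pointer, so each CPU owns a
 * private pci_pool and DDP allocations in ixgbe_fcoe_ddp_setup() avoid
 * cross-CPU contention on a shared allocator. The lookup pattern there is:
 *
 *	cpu = get_cpu();
 *	pool = *per_cpu_ptr(fcoe->pool, cpu);
 *	...
 *	put_cpu();
 */
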
/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	unsigned int cpu;

	if (!fcoe->pool) {
		spin_lock_init(&fcoe->lock);

		ixgbe_fcoe_ddp_pools_alloc(adapter);
		if (!fcoe->pool) {
			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
			return;
		}

		/* Extra buffer to be shared by all DDPs for HW work around */
		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
		if (fcoe->extra_ddp_buffer == NULL) {
			e_err(drv, "failed to allocate extra DDP buffer\n");
			goto out_ddp_pools;
		}

		fcoe->extra_ddp_buffer_dma =
			dma_map_single(&adapter->pdev->dev,
				       fcoe->extra_ddp_buffer,
				       IXGBE_FCBUFF_MIN,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev,
				      fcoe->extra_ddp_buffer_dma)) {
			e_err(drv, "failed to map extra DDP buffer\n");
			goto out_extra_ddp_buffer;
		}

		/* Alloc per cpu mem to count the ddp alloc failure number */
		fcoe->pcpu_noddp = alloc_percpu(u64);
		if (!fcoe->pcpu_noddp) {
			e_err(drv, "failed to alloc noddp counter\n");
			goto out_pcpu_noddp_alloc_fail;
		}

		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
		if (!fcoe->pcpu_noddp_ext_buff) {
			e_err(drv, "failed to alloc noddp extra buff cnt\n");
			goto out_pcpu_noddp_extra_buff_alloc_fail;
		}

		for_each_possible_cpu(cpu) {
			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
		}
	}

	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
	return;

out_pcpu_noddp_extra_buff_alloc_fail:
	free_percpu(fcoe->pcpu_noddp);
out_pcpu_noddp_alloc_fail:
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
out_extra_ddp_buffer:
	kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

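/*
 * Worked example (illustrative): with f->indices = 8 FCoE rx queues, the
 * redirection table loop above maps entry i to ring f->mask + (i % 8), so
 * incoming FCoE exchanges are spread across all eight rings rather than
 * landing on a single queue.
 */
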
/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter : ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!fcoe->pool)
		return;

	/* release the ddp context for each outstanding xid */
	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	free_percpu(fcoe->pcpu_noddp);
	free_percpu(fcoe->pcpu_noddp_ext_buff);
	kfree(fcoe->extra_ddp_buffer);
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	atomic_inc(&fcoe->refcnt);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}

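/*
 * Wiring sketch (illustrative): the fcoe transport toggles these through
 * net_device_ops when an FCoE instance is created or destroyed on the port:
 *
 *	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
 *	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
 *
 * The refcnt taken in ixgbe_fcoe_enable() keeps the offload configured
 * until the last user calls ixgbe_fcoe_disable().
 */
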
/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name. If both the prefix and the SAN
 * MAC address are valid, the wwn is formed based on the NAA-2 IEEE Extended
 * name identifier (ref. T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8)  |
		       ((u64) mac->san_addr[5]);
		rc = 0;
	}
	return rc;
}

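/*
 * Worked example (illustrative values): with prefix = 0x2000 and a SAN MAC
 * of 00:1b:21:aa:bb:cc, the composition above yields
 *
 *	*wwn = 0x2000001b21aabbcc;
 *
 * i.e. the 16-bit NAA-2 prefix followed by the 48-bit MAC address.
 */
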
/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev: the corresponding net_device
 * @info: HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, pos;
	u8 buf[8];

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540)
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
	if (pos) {
		pos += 4;
		for (i = 0; i < 8; i++)
			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);

		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%02X%02X%02X%02X%02X%02X%02X%02X",
			 buf[7], buf[6], buf[5], buf[4],
			 buf[3], buf[2], buf[1], buf[0]);
	} else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 ixgbe_driver_version);
	/* Firmware Version */
	snprintf(info->firmware_version,
		 sizeof(info->firmware_version),
		 "0x%08x",
		 (adapter->eeprom_verh << 16) |
		  adapter->eeprom_verl);

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}

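/*
 * Note (illustrative): the PCIe DSN capability stores the serial number
 * least-significant byte first, so printing buf[7]..buf[0] above yields the
 * conventional most-significant-first string; e.g. raw bytes
 * cc bb aa ff ff 21 1b 00 read back as "001B21FFFFAABBCC".
 */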