pch_gbe: fix transmit races
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
1 /*
2  * Copyright (C) 1999 - 2010 Intel Corporation.
3  * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
4  *
5  * This code was derived from the Intel e1000e Linux driver.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; version 2 of the License.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
19  */
20
21 #include "pch_gbe.h"
22 #include "pch_gbe_api.h"
23 #include <linux/module.h>
24 #ifdef CONFIG_PCH_PTP
25 #include <linux/net_tstamp.h>
26 #include <linux/ptp_classify.h>
27 #endif
28
29 #define DRV_VERSION     "1.00"
30 const char pch_driver_version[] = DRV_VERSION;
31
32 #define PCI_DEVICE_ID_INTEL_IOH1_GBE    0x8802          /* Pci device ID */
33 #define PCH_GBE_MAR_ENTRIES             16
34 #define PCH_GBE_SHORT_PKT               64
35 #define DSC_INIT16                      0xC000
36 #define PCH_GBE_DMA_ALIGN               0
37 #define PCH_GBE_DMA_PADDING             2
38 #define PCH_GBE_WATCHDOG_PERIOD         (1 * HZ)        /* watchdog time */
39 #define PCH_GBE_COPYBREAK_DEFAULT       256
40 #define PCH_GBE_PCI_BAR                 1
41 #define PCH_GBE_RESERVE_MEMORY          0x200000        /* 2MB */
42
43 /* Macros for ML7223 */
44 #define PCI_VENDOR_ID_ROHM                      0x10db
45 #define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013
46
47 /* Macros for ML7831 */
48 #define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802
49
50 #define PCH_GBE_TX_WEIGHT         64
51 #define PCH_GBE_RX_WEIGHT         64
52 #define PCH_GBE_RX_BUFFER_WRITE   16
53
54 /* Initialize the wake-on-LAN settings */
55 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
56
57 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
58         PCH_GBE_CHIP_TYPE_INTERNAL | \
59         PCH_GBE_RGMII_MODE_RGMII     \
60         )
61
62 /* Buffer and frame size values */
63 #define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
64 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
65 #define PCH_GBE_FRAME_SIZE_2048         2048
66 #define PCH_GBE_FRAME_SIZE_4096         4096
67 #define PCH_GBE_FRAME_SIZE_8192         8192
68
69 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
70 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
71 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
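/* Number of unused descriptors in a ring: the distance from next_to_use
 * forward to next_to_clean, minus one so the two indices never meet,
 * with wrap-around handled by adding the ring size when needed.
 */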
72 #define PCH_GBE_DESC_UNUSED(R) \
73         ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
74         (R)->next_to_clean - (R)->next_to_use - 1)
75
76 /* Pause packet value */
77 #define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
78 #define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
79 #define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
80 #define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
81
82 #define PCH_GBE_ETH_ALEN            6
83
84 /* This defines the bits that are set in the interrupt enable
85  * register (INT_EN).  Each bit is documented below:
86  *   o RX_DMA_CMPLT = Receive DMA Transfer Complete
87  *   o RX_DSC_EMP   = Receive Descriptor Empty
88  *   o RX_FIFO_ERR  = Receive FIFO Error
89  *   o WOL_DET      = Wake-On-LAN Event Detect
90  *   o TX_CMPLT     = Transmit Complete
91  */
92 #define PCH_GBE_INT_ENABLE_MASK ( \
93         PCH_GBE_INT_RX_DMA_CMPLT |    \
94         PCH_GBE_INT_RX_DSC_EMP   |    \
95         PCH_GBE_INT_RX_FIFO_ERR  |    \
96         PCH_GBE_INT_WOL_DET      |    \
97         PCH_GBE_INT_TX_CMPLT          \
98         )
99
100 #define PCH_GBE_INT_DISABLE_ALL         0
101
102 #ifdef CONFIG_PCH_PTP
103 /* Macros for ieee1588 */
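/* Time stamp snapshots are converted to nanoseconds by shifting left
 * 5 bits, i.e. one snapshot tick corresponds to 2^5 = 32 ns.
 */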
104 #define TICKS_NS_SHIFT  5
105
106 /* 0x40 Time Synchronization Channel Control Register Bits */
107 #define MASTER_MODE   (1<<0)
108 #define SLAVE_MODE    (0<<0)
109 #define V2_MODE       (1<<31)
110 #define CAP_MODE0     (0<<16)
111 #define CAP_MODE2     (1<<17)
112
113 /* 0x44 Time Synchronization Channel Event Register Bits */
114 #define TX_SNAPSHOT_LOCKED (1<<0)
115 #define RX_SNAPSHOT_LOCKED (1<<1)
116 #endif
117
118 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
119
120 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
121 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122                                int data);
123
124 #ifdef CONFIG_PCH_PTP
125 static struct sock_filter ptp_filter[] = {
126         PTP_FILTER
127 };
128
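/* Check whether the given sk_buff is the PTP event message whose source
 * UUID and sequence id the IEEE 1588 hardware has snapshotted.
 */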
129 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
130 {
131         u8 *data = skb->data;
132         unsigned int offset;
133         u16 *hi, *id;
134         u32 lo;
135
136         if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
137                 (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
138                 return 0;
139         }
140
141         offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
142
143         if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
144                 return 0;
145
146         hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
147         id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
148
149         memcpy(&lo, &hi[1], sizeof(lo));
150
151         return (uid_hi == *hi &&
152                 uid_lo == lo &&
153                 seqid  == *id);
154 }
155
156 static void pch_rx_timestamp(
157                         struct pch_gbe_adapter *adapter, struct sk_buff *skb)
158 {
159         struct skb_shared_hwtstamps *shhwtstamps;
160         struct pci_dev *pdev;
161         u64 ns;
162         u32 hi, lo, val;
163         u16 uid, seq;
164
165         if (!adapter->hwts_rx_en)
166                 return;
167
168         /* Get ieee1588's dev information */
169         pdev = adapter->ptp_pdev;
170
171         val = pch_ch_event_read(pdev);
172
173         if (!(val & RX_SNAPSHOT_LOCKED))
174                 return;
175
176         lo = pch_src_uuid_lo_read(pdev);
177         hi = pch_src_uuid_hi_read(pdev);
178
179         uid = hi & 0xffff;
180         seq = (hi >> 16) & 0xffff;
181
182         if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
183                 goto out;
184
185         ns = pch_rx_snap_read(pdev);
186         ns <<= TICKS_NS_SHIFT;
187
188         shhwtstamps = skb_hwtstamps(skb);
189         memset(shhwtstamps, 0, sizeof(*shhwtstamps));
190         shhwtstamps->hwtstamp = ns_to_ktime(ns);
191 out:
192         pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
193 }
194
195 static void pch_tx_timestamp(
196                         struct pch_gbe_adapter *adapter, struct sk_buff *skb)
197 {
198         struct skb_shared_hwtstamps shhwtstamps;
199         struct pci_dev *pdev;
200         struct skb_shared_info *shtx;
201         u64 ns;
202         u32 cnt, val;
203
204         shtx = skb_shinfo(skb);
205         if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
206                 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207         else
208                 return;
209
210         /* Get ieee1588's dev information */
211         pdev = adapter->ptp_pdev;
212
213         /*
214          * This really stinks, but we have to poll for the Tx time stamp.
215          * Usually, the time stamp is ready after 4 to 6 microseconds.
216          */
217         for (cnt = 0; cnt < 100; cnt++) {
218                 val = pch_ch_event_read(pdev);
219                 if (val & TX_SNAPSHOT_LOCKED)
220                         break;
221                 udelay(1);
222         }
223         if (!(val & TX_SNAPSHOT_LOCKED)) {
224                 shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
225                 return;
226         }
227
228         ns = pch_tx_snap_read(pdev);
229         ns <<= TICKS_NS_SHIFT;
230
231         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
232         shhwtstamps.hwtstamp = ns_to_ktime(ns);
233         skb_tstamp_tx(skb, &shhwtstamps);
234
235         pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
236 }
237
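/* Configure hardware time stamping (Tx on/off, Rx filter mode) on the
 * companion IEEE 1588 device in response to the hwtstamp ioctl.
 */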
238 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
239 {
240         struct hwtstamp_config cfg;
241         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
242         struct pci_dev *pdev;
243
244         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
245                 return -EFAULT;
246
247         if (cfg.flags) /* reserved for future extensions */
248                 return -EINVAL;
249
250         /* Get ieee1588's dev information */
251         pdev = adapter->ptp_pdev;
252
253         switch (cfg.tx_type) {
254         case HWTSTAMP_TX_OFF:
255                 adapter->hwts_tx_en = 0;
256                 break;
257         case HWTSTAMP_TX_ON:
258                 adapter->hwts_tx_en = 1;
259                 break;
260         default:
261                 return -ERANGE;
262         }
263
264         switch (cfg.rx_filter) {
265         case HWTSTAMP_FILTER_NONE:
266                 adapter->hwts_rx_en = 0;
267                 break;
268         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
269                 adapter->hwts_rx_en = 0;
270                 pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
271                 break;
272         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
273                 adapter->hwts_rx_en = 1;
274                 pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
275                 break;
276         case HWTSTAMP_FILTER_PTP_V2_EVENT:
277                 adapter->hwts_rx_en = 1;
278                 pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
279                 break;
280         default:
281                 return -ERANGE;
282         }
283
284         /* Clear out any old time stamps. */
285         pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
286
287         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
288 }
289 #endif
290
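/**
 * pch_gbe_mac_load_mac_addr - Kick off the hardware MAC address load (MAC_ADDR_LOAD)
 * @hw: Pointer to the HW structure
 */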
291 inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
292 {
293         iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
294 }
295
296 /**
297  * pch_gbe_mac_read_mac_addr - Read MAC address
298  * @hw:             Pointer to the HW structure
299  * Returns
300  *      0:                      Successful.
301  */
302 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
303 {
304         u32  adr1a, adr1b;
305
306         adr1a = ioread32(&hw->reg->mac_adr[0].high);
307         adr1b = ioread32(&hw->reg->mac_adr[0].low);
308
309         hw->mac.addr[0] = (u8)(adr1a & 0xFF);
310         hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
311         hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
312         hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
313         hw->mac.addr[4] = (u8)(adr1b & 0xFF);
314         hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
315
316         pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
317         return 0;
318 }
319
320 /**
321  * pch_gbe_wait_clr_bit - Wait to clear a bit
322  * @reg:        Pointer to the register
323  * @bit:        Busy bit
324  */
325 static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
326 {
327         u32 tmp;
328         /* wait busy */
329         tmp = 1000;
330         while ((ioread32(reg) & bit) && --tmp)
331                 cpu_relax();
332         if (!tmp)
333                 pr_err("Error: busy bit is not cleared\n");
334 }
335
336 /**
337  * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
338  * @reg:        Pointer to the register
339  * @bit:        Busy bit
340  */
341 static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
342 {
343         u32 tmp;
344         int ret = -1;
345         /* wait busy */
346         tmp = 20;
347         while ((ioread32(reg) & bit) && --tmp)
348                 udelay(5);
349         if (!tmp)
350                 pr_err("Error: busy bit is not cleared\n");
351         else
352                 ret = 0;
353         return ret;
354 }
355
356 /**
357  * pch_gbe_mac_mar_set - Set MAC address register
358  * @hw:     Pointer to the HW structure
359  * @addr:   Pointer to the MAC address
360  * @index:  MAC address array register
361  */
362 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
363 {
364         u32 mar_low, mar_high, adrmask;
365
366         pr_debug("index : 0x%x\n", index);
367
368         /*
369          * HW expects these in little endian so we reverse the byte order
370          * from network order (big endian) to little endian
371          */
372         mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
373                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
374         mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
375         /* Stop the MAC Address of index. */
376         adrmask = ioread32(&hw->reg->ADDR_MASK);
377         iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
378         /* wait busy */
379         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
380         /* Set the MAC address to the MAC address 1A/1B register */
381         iowrite32(mar_high, &hw->reg->mac_adr[index].high);
382         iowrite32(mar_low, &hw->reg->mac_adr[index].low);
383         /* Start the MAC address of index */
384         iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
385         /* wait busy */
386         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
387 }
388
389 /**
390  * pch_gbe_mac_reset_hw - Reset hardware
391  * @hw: Pointer to the HW structure
392  */
393 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
394 {
395         /* Read the MAC address and store it in the private data */
396         pch_gbe_mac_read_mac_addr(hw);
397         iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
398 #ifdef PCH_GBE_MAC_IFOP_RGMII
399         iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
400 #endif
401         pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
402         /* Setup the receive address */
403         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
404         return;
405 }
406
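/**
 * pch_gbe_mac_reset_rx - Reset the receiver and restore the MAC address
 * @hw: Pointer to the HW structure
 *
 * Uses the interrupt-safe busy-wait so it can be called from the ISR path.
 */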
407 static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
408 {
409         /* Read the MAC address and store it in the private data */
410         pch_gbe_mac_read_mac_addr(hw);
411         iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
412         pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
413         /* Setup the MAC address */
414         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
415         return;
416 }
417
418 /**
419  * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
420  * @hw: Pointer to the HW structure
421  * @mar_count: Number of receive address registers
422  */
423 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
424 {
425         u32 i;
426
427         /* Setup the receive address */
428         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
429
430         /* Zero out the other receive addresses */
431         for (i = 1; i < mar_count; i++) {
432                 iowrite32(0, &hw->reg->mac_adr[i].high);
433                 iowrite32(0, &hw->reg->mac_adr[i].low);
434         }
435         iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
436         /* wait busy */
437         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
438 }
439
440
441 /**
442  * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
443  * @hw:             Pointer to the HW structure
444  * @mc_addr_list:   Array of multicast addresses to program
445  * @mc_addr_count:  Number of multicast addresses to program
446  * @mar_used_count: The first MAC Address register free to program
447  * @mar_total_num:  Total number of supported MAC Address Registers
448  */
449 static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
450                                             u8 *mc_addr_list, u32 mc_addr_count,
451                                             u32 mar_used_count, u32 mar_total_num)
452 {
453         u32 i, adrmask;
454
455         /* Load the first set of multicast addresses into the exact
456          * filters (RAR).  If there are not enough to fill the RAR
457          * array, clear the filters.
458          */
459         for (i = mar_used_count; i < mar_total_num; i++) {
460                 if (mc_addr_count) {
461                         pch_gbe_mac_mar_set(hw, mc_addr_list, i);
462                         mc_addr_count--;
463                         mc_addr_list += PCH_GBE_ETH_ALEN;
464                 } else {
465                         /* Clear MAC address mask */
466                         adrmask = ioread32(&hw->reg->ADDR_MASK);
467                         iowrite32((adrmask | (0x0001 << i)),
468                                         &hw->reg->ADDR_MASK);
469                         /* wait busy */
470                         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
471                         /* Clear MAC address */
472                         iowrite32(0, &hw->reg->mac_adr[i].high);
473                         iowrite32(0, &hw->reg->mac_adr[i].low);
474                 }
475         }
476 }
477
478 /**
479  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
480  * @hw:             Pointer to the HW structure
481  * Returns
482  *      0:                      Successful.
483  *      Negative value:         Failed.
484  */
485 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
486 {
487         struct pch_gbe_mac_info *mac = &hw->mac;
488         u32 rx_fctrl;
489
490         pr_debug("mac->fc = %u\n", mac->fc);
491
492         rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
493
494         switch (mac->fc) {
495         case PCH_GBE_FC_NONE:
496                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
497                 mac->tx_fc_enable = false;
498                 break;
499         case PCH_GBE_FC_RX_PAUSE:
500                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
501                 mac->tx_fc_enable = false;
502                 break;
503         case PCH_GBE_FC_TX_PAUSE:
504                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
505                 mac->tx_fc_enable = true;
506                 break;
507         case PCH_GBE_FC_FULL:
508                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
509                 mac->tx_fc_enable = true;
510                 break;
511         default:
512                 pr_err("Flow control param set incorrectly\n");
513                 return -EINVAL;
514         }
515         if (mac->link_duplex == DUPLEX_HALF)
516                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
517         iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
518         pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
519                  ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
520         return 0;
521 }
522
523 /**
524  * pch_gbe_mac_set_wol_event - Set wake-on-LAN event
525  * @hw:     Pointer to the HW structure
526  * @wu_evt: Wake up event
527  */
528 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
529 {
530         u32 addr_mask;
531
532         pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
533                  wu_evt, ioread32(&hw->reg->ADDR_MASK));
534
535         if (wu_evt) {
536                 /* Set Wake-On-Lan address mask */
537                 addr_mask = ioread32(&hw->reg->ADDR_MASK);
538                 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
539                 /* wait busy */
540                 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
541                 iowrite32(0, &hw->reg->WOL_ST);
542                 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
543                 iowrite32(0x02, &hw->reg->TCPIP_ACC);
544                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
545         } else {
546                 iowrite32(0, &hw->reg->WOL_CTRL);
547                 iowrite32(0, &hw->reg->WOL_ST);
548         }
549         return;
550 }
551
552 /**
553  * pch_gbe_mac_ctrl_miim - Control MIIM interface
554  * @hw:   Pointer to the HW structure
555  * @addr: Address of PHY
556  * @dir:  Operation (Write or Read)
557  * @reg:  Access register of PHY
558  * @data: Write data.
559  *
560  * Returns: Read data.
561  */
562 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
563                         u16 data)
564 {
565         u32 data_out = 0;
566         unsigned int i;
567         unsigned long flags;
568
569         spin_lock_irqsave(&hw->miim_lock, flags);
570
571         for (i = 100; i; --i) {
572                 if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
573                         break;
574                 udelay(20);
575         }
576         if (i == 0) {
577                 pr_err("pch-gbe.miim won't go Ready\n");
578                 spin_unlock_irqrestore(&hw->miim_lock, flags);
579                 return 0;       /* No way to indicate timeout error */
580         }
581         iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
582                   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
583                   dir | data), &hw->reg->MIIM);
584         for (i = 0; i < 100; i++) {
585                 udelay(20);
586                 data_out = ioread32(&hw->reg->MIIM);
587                 if ((data_out & PCH_GBE_MIIM_OPER_READY))
588                         break;
589         }
590         spin_unlock_irqrestore(&hw->miim_lock, flags);
591
592         pr_debug("PHY %s: reg=%d, data=0x%04X\n",
593                  dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
594                  dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
595         return (u16) data_out;
596 }
597
598 /**
599  * pch_gbe_mac_set_pause_packet - Set pause packet
600  * @hw:   Pointer to the HW structure
601  */
602 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
603 {
604         unsigned long tmp2, tmp3;
605
606         /* Set Pause packet */
607         tmp2 = hw->mac.addr[1];
608         tmp2 = (tmp2 << 8) | hw->mac.addr[0];
609         tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
610
611         tmp3 = hw->mac.addr[5];
612         tmp3 = (tmp3 << 8) | hw->mac.addr[4];
613         tmp3 = (tmp3 << 8) | hw->mac.addr[3];
614         tmp3 = (tmp3 << 8) | hw->mac.addr[2];
615
616         iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
617         iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
618         iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
619         iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
620         iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
621
622         /* Transmit Pause Packet */
623         iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
624
625         pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
626                  ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
627                  ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
628                  ioread32(&hw->reg->PAUSE_PKT5));
629
630         return;
631 }
632
633
634 /**
635  * pch_gbe_alloc_queues - Allocate memory for all rings
636  * @adapter:  Board private structure to initialize
637  * Returns
638  *      0:      Successful
639  *      Negative value: Failed
640  */
641 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
642 {
643         adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
644         if (!adapter->tx_ring)
645                 return -ENOMEM;
646
647         adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
648         if (!adapter->rx_ring) {
649                 kfree(adapter->tx_ring);
650                 return -ENOMEM;
651         }
652         return 0;
653 }
654
655 /**
656  * pch_gbe_init_stats - Initialize statistics
657  * @adapter:  Board private structure to initialize
658  */
659 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
660 {
661         memset(&adapter->stats, 0, sizeof(adapter->stats));
662         return;
663 }
664
665 /**
666  * pch_gbe_init_phy - Initialize PHY
667  * @adapter:  Board private structure to initialize
668  * Returns
669  *      0:      Successful
670  *      Negative value: Failed
671  */
672 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
673 {
674         struct net_device *netdev = adapter->netdev;
675         u32 addr;
676         u16 bmcr, stat;
677
678         /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
679         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
680                 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
681                 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
682                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
683                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
684                 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
685                         break;
686         }
687         adapter->hw.phy.addr = adapter->mii.phy_id;
688         pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
689         if (addr == 32)
690                 return -EAGAIN;
691         /* Select the phy and isolate the rest */
692         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
693                 if (addr != adapter->mii.phy_id) {
694                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
695                                            BMCR_ISOLATE);
696                 } else {
697                         bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
698                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
699                                            bmcr & ~BMCR_ISOLATE);
700                 }
701         }
702
703         /* MII setup */
704         adapter->mii.phy_id_mask = 0x1F;
705         adapter->mii.reg_num_mask = 0x1F;
706         adapter->mii.dev = adapter->netdev;
707         adapter->mii.mdio_read = pch_gbe_mdio_read;
708         adapter->mii.mdio_write = pch_gbe_mdio_write;
709         adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
710         return 0;
711 }
712
713 /**
714  * pch_gbe_mdio_read - The read function for mii
715  * @netdev: Network interface device structure
716  * @addr:   Phy ID
717  * @reg:    Access location
718  * Returns
719  *      Data read from the PHY register
720  *      (0 if the MIIM interface timed out)
721  */
722 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
723 {
724         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
725         struct pch_gbe_hw *hw = &adapter->hw;
726
727         return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
728                                      (u16) 0);
729 }
730
731 /**
732  * pch_gbe_mdio_write - The write function for mii
733  * @netdev: Network interface device structure
734  * @addr:   Phy ID
735  * @reg:    Access location
736  * @data:   Write data
737  */
738 static void pch_gbe_mdio_write(struct net_device *netdev,
739                                int addr, int reg, int data)
740 {
741         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
742         struct pch_gbe_hw *hw = &adapter->hw;
743
744         pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
745 }
746
747 /**
748  * pch_gbe_reset_task - Reset processing on transmit timeout
749  * @work:  Work struct embedded in the board private structure
750  */
751 static void pch_gbe_reset_task(struct work_struct *work)
752 {
753         struct pch_gbe_adapter *adapter;
754         adapter = container_of(work, struct pch_gbe_adapter, reset_task);
755
756         rtnl_lock();
757         pch_gbe_reinit_locked(adapter);
758         rtnl_unlock();
759 }
760
761 /**
762  * pch_gbe_reinit_locked - Re-initialization
763  * @adapter:  Board private structure
764  */
765 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
766 {
767         pch_gbe_down(adapter);
768         pch_gbe_up(adapter);
769 }
770
771 /**
772  * pch_gbe_reset - Reset GbE
773  * @adapter:  Board private structure
774  */
775 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
776 {
777         pch_gbe_mac_reset_hw(&adapter->hw);
778         /* Setup the receive address. */
779         pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
780         if (pch_gbe_hal_init_hw(&adapter->hw))
781                 pr_err("Hardware Error\n");
782 }
783
784 /**
785  * pch_gbe_free_irq - Free an interrupt
786  * @adapter:  Board private structure
787  */
788 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
789 {
790         struct net_device *netdev = adapter->netdev;
791
792         free_irq(adapter->pdev->irq, netdev);
793         if (adapter->have_msi) {
794                 pci_disable_msi(adapter->pdev);
795                 pr_debug("call pci_disable_msi\n");
796         }
797 }
798
799 /**
800  * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
801  * @adapter:  Board private structure
802  */
803 static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
804 {
805         struct pch_gbe_hw *hw = &adapter->hw;
806
807         atomic_inc(&adapter->irq_sem);
808         iowrite32(0, &hw->reg->INT_EN);
809         ioread32(&hw->reg->INT_ST);
810         synchronize_irq(adapter->pdev->irq);
811
812         pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
813 }
814
815 /**
816  * pch_gbe_irq_enable - Enable default interrupt generation settings
817  * @adapter:  Board private structure
818  */
819 static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
820 {
821         struct pch_gbe_hw *hw = &adapter->hw;
822
823         if (likely(atomic_dec_and_test(&adapter->irq_sem)))
824                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
825         ioread32(&hw->reg->INT_ST);
826         pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
827 }
828
829
830
831 /**
832  * pch_gbe_setup_tctl - configure the Transmit control registers
833  * @adapter:  Board private structure
834  */
835 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
836 {
837         struct pch_gbe_hw *hw = &adapter->hw;
838         u32 tx_mode, tcpip;
839
840         tx_mode = PCH_GBE_TM_LONG_PKT |
841                 PCH_GBE_TM_ST_AND_FD |
842                 PCH_GBE_TM_SHORT_PKT |
843                 PCH_GBE_TM_TH_TX_STRT_8 |
844                 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
845
846         iowrite32(tx_mode, &hw->reg->TX_MODE);
847
848         tcpip = ioread32(&hw->reg->TCPIP_ACC);
849         tcpip |= PCH_GBE_TX_TCPIPACC_EN;
850         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
851         return;
852 }
853
854 /**
855  * pch_gbe_configure_tx - Configure Transmit Unit after Reset
856  * @adapter:  Board private structure
857  */
858 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
859 {
860         struct pch_gbe_hw *hw = &adapter->hw;
861         u32 tdba, tdlen, dctrl;
862
863         pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
864                  (unsigned long long)adapter->tx_ring->dma,
865                  adapter->tx_ring->size);
866
867         /* Setup the HW Tx Head and Tail descriptor pointers */
868         tdba = adapter->tx_ring->dma;
869         tdlen = adapter->tx_ring->size - 0x10;
870         iowrite32(tdba, &hw->reg->TX_DSC_BASE);
871         iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
872         iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
873
874         /* Enables Transmission DMA */
875         dctrl = ioread32(&hw->reg->DMA_CTRL);
876         dctrl |= PCH_GBE_TX_DMA_EN;
877         iowrite32(dctrl, &hw->reg->DMA_CTRL);
878 }
879
880 /**
881  * pch_gbe_setup_rctl - Configure the receive control registers
882  * @adapter:  Board private structure
883  */
884 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
885 {
886         struct pch_gbe_hw *hw = &adapter->hw;
887         u32 rx_mode, tcpip;
888
889         rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
890         PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
891
892         iowrite32(rx_mode, &hw->reg->RX_MODE);
893
894         tcpip = ioread32(&hw->reg->TCPIP_ACC);
895
896         tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
897         tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
898         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
899         return;
900 }
901
902 /**
903  * pch_gbe_configure_rx - Configure Receive Unit after Reset
904  * @adapter:  Board private structure
905  */
906 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
907 {
908         struct pch_gbe_hw *hw = &adapter->hw;
909         u32 rdba, rdlen, rctl, rxdma;
910
911         pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
912                  (unsigned long long)adapter->rx_ring->dma,
913                  adapter->rx_ring->size);
914
915         pch_gbe_mac_force_mac_fc(hw);
916
917         /* Disables Receive MAC */
918         rctl = ioread32(&hw->reg->MAC_RX_EN);
919         iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
920
921         /* Disables Receive DMA */
922         rxdma = ioread32(&hw->reg->DMA_CTRL);
923         rxdma &= ~PCH_GBE_RX_DMA_EN;
924         iowrite32(rxdma, &hw->reg->DMA_CTRL);
925
926         pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
927                  ioread32(&hw->reg->MAC_RX_EN),
928                  ioread32(&hw->reg->DMA_CTRL));
929
930         /* Setup the HW Rx Head and Tail Descriptor Pointers and
931          * the Base and Length of the Rx Descriptor Ring */
932         rdba = adapter->rx_ring->dma;
933         rdlen = adapter->rx_ring->size - 0x10;
934         iowrite32(rdba, &hw->reg->RX_DSC_BASE);
935         iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
936         iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
937 }
938
939 /**
940  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
941  * @adapter:     Board private structure
942  * @buffer_info: Buffer information structure
943  */
944 static void pch_gbe_unmap_and_free_tx_resource(
945         struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
946 {
947         if (buffer_info->mapped) {
948                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
949                                  buffer_info->length, DMA_TO_DEVICE);
950                 buffer_info->mapped = false;
951         }
952         if (buffer_info->skb) {
953                 dev_kfree_skb_any(buffer_info->skb);
954                 buffer_info->skb = NULL;
955         }
956 }
957
958 /**
959  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
960  * @adapter:      Board private structure
961  * @buffer_info:  Buffer information structure
962  */
963 static void pch_gbe_unmap_and_free_rx_resource(
964                                         struct pch_gbe_adapter *adapter,
965                                         struct pch_gbe_buffer *buffer_info)
966 {
967         if (buffer_info->mapped) {
968                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
969                                  buffer_info->length, DMA_FROM_DEVICE);
970                 buffer_info->mapped = false;
971         }
972         if (buffer_info->skb) {
973                 dev_kfree_skb_any(buffer_info->skb);
974                 buffer_info->skb = NULL;
975         }
976 }
977
978 /**
979  * pch_gbe_clean_tx_ring - Free Tx Buffers
980  * @adapter:  Board private structure
981  * @tx_ring:  Ring to be cleaned
982  */
983 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
984                                    struct pch_gbe_tx_ring *tx_ring)
985 {
986         struct pch_gbe_hw *hw = &adapter->hw;
987         struct pch_gbe_buffer *buffer_info;
988         unsigned long size;
989         unsigned int i;
990
991         /* Free all the Tx ring sk_buffs */
992         for (i = 0; i < tx_ring->count; i++) {
993                 buffer_info = &tx_ring->buffer_info[i];
994                 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
995         }
996         pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
997
998         size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
999         memset(tx_ring->buffer_info, 0, size);
1000
1001         /* Zero out the descriptor ring */
1002         memset(tx_ring->desc, 0, tx_ring->size);
1003         tx_ring->next_to_use = 0;
1004         tx_ring->next_to_clean = 0;
1005         iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
1006         iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
1007 }
1008
1009 /**
1010  * pch_gbe_clean_rx_ring - Free Rx Buffers
1011  * @adapter:  Board private structure
1012  * @rx_ring:  Ring to free buffers from
1013  */
1014 static void
1015 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
1016                       struct pch_gbe_rx_ring *rx_ring)
1017 {
1018         struct pch_gbe_hw *hw = &adapter->hw;
1019         struct pch_gbe_buffer *buffer_info;
1020         unsigned long size;
1021         unsigned int i;
1022
1023         /* Free all the Rx ring sk_buffs */
1024         for (i = 0; i < rx_ring->count; i++) {
1025                 buffer_info = &rx_ring->buffer_info[i];
1026                 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
1027         }
1028         pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
1029         size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1030         memset(rx_ring->buffer_info, 0, size);
1031
1032         /* Zero out the descriptor ring */
1033         memset(rx_ring->desc, 0, rx_ring->size);
1034         rx_ring->next_to_clean = 0;
1035         rx_ring->next_to_use = 0;
1036         iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
1037         iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
1038 }
1039
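/**
 * pch_gbe_set_rgmii_ctrl - Program the RGMII clock rate for the link speed
 * @adapter: Board private structure
 * @speed:   Link speed (SPEED_10/100/1000)
 * @duplex:  Link duplex
 */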
1040 static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
1041                                     u16 duplex)
1042 {
1043         struct pch_gbe_hw *hw = &adapter->hw;
1044         unsigned long rgmii = 0;
1045
1046         /* Set the RGMII control. */
1047 #ifdef PCH_GBE_MAC_IFOP_RGMII
1048         switch (speed) {
1049         case SPEED_10:
1050                 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
1051                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1052                 break;
1053         case SPEED_100:
1054                 rgmii = (PCH_GBE_RGMII_RATE_25M |
1055                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1056                 break;
1057         case SPEED_1000:
1058                 rgmii = (PCH_GBE_RGMII_RATE_125M |
1059                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1060                 break;
1061         }
1062         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1063 #else   /* GMII */
1064         rgmii = 0;
1065         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1066 #endif
1067 }
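
/**
 * pch_gbe_set_mode - Program the MAC MODE register (MII/GMII, duplex) for the link
 * @adapter: Board private structure
 * @speed:   Link speed
 * @duplex:  Link duplex
 */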
1068 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1069                               u16 duplex)
1070 {
1071         struct net_device *netdev = adapter->netdev;
1072         struct pch_gbe_hw *hw = &adapter->hw;
1073         unsigned long mode = 0;
1074
1075         /* Set the communication mode */
1076         switch (speed) {
1077         case SPEED_10:
1078                 mode = PCH_GBE_MODE_MII_ETHER;
1079                 netdev->tx_queue_len = 10;
1080                 break;
1081         case SPEED_100:
1082                 mode = PCH_GBE_MODE_MII_ETHER;
1083                 netdev->tx_queue_len = 100;
1084                 break;
1085         case SPEED_1000:
1086                 mode = PCH_GBE_MODE_GMII_ETHER;
1087                 break;
1088         }
1089         if (duplex == DUPLEX_FULL)
1090                 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1091         else
1092                 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1093         iowrite32(mode, &hw->reg->MODE);
1094 }
1095
1096 /**
1097  * pch_gbe_watchdog - Watchdog process
1098  * @data:  Board private structure
1099  */
1100 static void pch_gbe_watchdog(unsigned long data)
1101 {
1102         struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
1103         struct net_device *netdev = adapter->netdev;
1104         struct pch_gbe_hw *hw = &adapter->hw;
1105
1106         pr_debug("right now = %ld\n", jiffies);
1107
1108         pch_gbe_update_stats(adapter);
1109         if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1110                 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1111                 netdev->tx_queue_len = adapter->tx_queue_len;
1112                 /* mii library handles link maintenance tasks */
1113                 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1114                         pr_err("ethtool get setting Error\n");
1115                         mod_timer(&adapter->watchdog_timer,
1116                                   round_jiffies(jiffies +
1117                                                 PCH_GBE_WATCHDOG_PERIOD));
1118                         return;
1119                 }
1120                 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1121                 hw->mac.link_duplex = cmd.duplex;
1122                 /* Set the RGMII control. */
1123                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1124                                                 hw->mac.link_duplex);
1125                 /* Set the communication mode */
1126                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
1127                                  hw->mac.link_duplex);
1128                 netdev_dbg(netdev,
1129                            "Link is Up %d Mbps %s-Duplex\n",
1130                            hw->mac.link_speed,
1131                            cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1132                 netif_carrier_on(netdev);
1133                 netif_wake_queue(netdev);
1134         } else if ((!mii_link_ok(&adapter->mii)) &&
1135                    (netif_carrier_ok(netdev))) {
1136                 netdev_dbg(netdev, "NIC Link is Down\n");
1137                 hw->mac.link_speed = SPEED_10;
1138                 hw->mac.link_duplex = DUPLEX_HALF;
1139                 netif_carrier_off(netdev);
1140                 netif_stop_queue(netdev);
1141         }
1142         mod_timer(&adapter->watchdog_timer,
1143                   round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1144 }
1145
1146 /**
1147  * pch_gbe_tx_queue - Queue a packet for transmission
1148  * @adapter:  Board private structure
1149  * @tx_ring:  Tx descriptor ring structure
1150  * @skb:      Socket buffer structure
1151  */
1152 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1153                               struct pch_gbe_tx_ring *tx_ring,
1154                               struct sk_buff *skb)
1155 {
1156         struct pch_gbe_hw *hw = &adapter->hw;
1157         struct pch_gbe_tx_desc *tx_desc;
1158         struct pch_gbe_buffer *buffer_info;
1159         struct sk_buff *tmp_skb;
1160         unsigned int frame_ctrl;
1161         unsigned int ring_num;
1162
1163         /*-- Set frame control --*/
1164         frame_ctrl = 0;
1165         if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1166                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1167         if (skb->ip_summed == CHECKSUM_NONE)
1168                 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1169
1170         /* Perform checksum processing in software */
1171         /*
1172          * The hardware accelerator cannot compute the checksum when the
1173          * frame is shorter than 64 bytes.
1174          */
1175         if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1176                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1177                               PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1178                 if (skb->protocol == htons(ETH_P_IP)) {
1179                         struct iphdr *iph = ip_hdr(skb);
1180                         unsigned int offset;
1181                         iph->check = 0;
1182                         iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
1183                         offset = skb_transport_offset(skb);
1184                         if (iph->protocol == IPPROTO_TCP) {
1185                                 skb->csum = 0;
1186                                 tcp_hdr(skb)->check = 0;
1187                                 skb->csum = skb_checksum(skb, offset,
1188                                                          skb->len - offset, 0);
1189                                 tcp_hdr(skb)->check =
1190                                         csum_tcpudp_magic(iph->saddr,
1191                                                           iph->daddr,
1192                                                           skb->len - offset,
1193                                                           IPPROTO_TCP,
1194                                                           skb->csum);
1195                         } else if (iph->protocol == IPPROTO_UDP) {
1196                                 skb->csum = 0;
1197                                 udp_hdr(skb)->check = 0;
1198                                 skb->csum =
1199                                         skb_checksum(skb, offset,
1200                                                      skb->len - offset, 0);
1201                                 udp_hdr(skb)->check =
1202                                         csum_tcpudp_magic(iph->saddr,
1203                                                           iph->daddr,
1204                                                           skb->len - offset,
1205                                                           IPPROTO_UDP,
1206                                                           skb->csum);
1207                         }
1208                 }
1209         }
1210
1211         ring_num = tx_ring->next_to_use;
1212         if (unlikely((ring_num + 1) == tx_ring->count))
1213                 tx_ring->next_to_use = 0;
1214         else
1215                 tx_ring->next_to_use = ring_num + 1;
1216
1217
1218         buffer_info = &tx_ring->buffer_info[ring_num];
1219         tmp_skb = buffer_info->skb;
1220
1221         /* [Header:14][payload] ---> [Header:14][padding:2][payload]    */
1222         memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1223         tmp_skb->data[ETH_HLEN] = 0x00;
1224         tmp_skb->data[ETH_HLEN + 1] = 0x00;
1225         tmp_skb->len = skb->len;
1226         memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1227                (skb->len - ETH_HLEN));
1228         /*-- Set Buffer information --*/
1229         buffer_info->length = tmp_skb->len;
1230         buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1231                                           buffer_info->length,
1232                                           DMA_TO_DEVICE);
1233         if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1234                 pr_err("TX DMA map failed\n");
1235                 buffer_info->dma = 0;
1236                 buffer_info->time_stamp = 0;
1237                 tx_ring->next_to_use = ring_num;
1238                 return;
1239         }
1240         buffer_info->mapped = true;
1241         buffer_info->time_stamp = jiffies;
1242
1243         /*-- Set Tx descriptor --*/
1244         tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1245         tx_desc->buffer_addr = (buffer_info->dma);
1246         tx_desc->length = (tmp_skb->len);
1247         tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1248         tx_desc->tx_frame_ctrl = (frame_ctrl);
1249         tx_desc->gbec_status = (DSC_INIT16);
1250
1251         if (unlikely(++ring_num == tx_ring->count))
1252                 ring_num = 0;
1253
1254         /* Update software pointer of TX descriptor */
1255         iowrite32(tx_ring->dma +
1256                   (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1257                   &hw->reg->TX_DSC_SW_P);
1258
1259 #ifdef CONFIG_PCH_PTP
1260         pch_tx_timestamp(adapter, skb);
1261 #endif
1262
1263         dev_kfree_skb_any(skb);
1264 }
1265
1266 /**
1267  * pch_gbe_update_stats - Update the board statistics counters
1268  * @adapter:  Board private structure
1269  */
1270 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1271 {
1272         struct net_device *netdev = adapter->netdev;
1273         struct pci_dev *pdev = adapter->pdev;
1274         struct pch_gbe_hw_stats *stats = &adapter->stats;
1275         unsigned long flags;
1276
1277         /*
1278          * Prevent stats update while adapter is being reset, or if the pci
1279          * connection is down.
1280          */
1281         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1282                 return;
1283
1284         spin_lock_irqsave(&adapter->stats_lock, flags);
1285
1286         /* Update device status "adapter->stats" */
1287         stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1288         stats->tx_errors = stats->tx_length_errors +
1289             stats->tx_aborted_errors +
1290             stats->tx_carrier_errors + stats->tx_timeout_count;
1291
1292         /* Update network device status "adapter->net_stats" */
1293         netdev->stats.rx_packets = stats->rx_packets;
1294         netdev->stats.rx_bytes = stats->rx_bytes;
1295         netdev->stats.rx_dropped = stats->rx_dropped;
1296         netdev->stats.tx_packets = stats->tx_packets;
1297         netdev->stats.tx_bytes = stats->tx_bytes;
1298         netdev->stats.tx_dropped = stats->tx_dropped;
1299         /* Fill out the OS statistics structure */
1300         netdev->stats.multicast = stats->multicast;
1301         netdev->stats.collisions = stats->collisions;
1302         /* Rx Errors */
1303         netdev->stats.rx_errors = stats->rx_errors;
1304         netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1305         netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1306         /* Tx Errors */
1307         netdev->stats.tx_errors = stats->tx_errors;
1308         netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1309         netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1310
1311         spin_unlock_irqrestore(&adapter->stats_lock, flags);
1312 }
1313
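/**
 * pch_gbe_stop_receive - Stop Receive DMA and reset the receiver
 * @adapter: Board private structure
 *
 * If the Rx DMA engine fails to go idle, bus mastering is temporarily
 * disabled around the receiver reset to avoid stray DMA.
 */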
1314 static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1315 {
1316         struct pch_gbe_hw *hw = &adapter->hw;
1317         u32 rxdma;
1318         u16 value;
1319         int ret;
1320
1321         /* Disable Receive DMA */
1322         rxdma = ioread32(&hw->reg->DMA_CTRL);
1323         rxdma &= ~PCH_GBE_RX_DMA_EN;
1324         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1325         /* Wait Rx DMA BUS is IDLE */
1326         ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
1327         if (ret) {
1328                 /* Disable Bus master */
1329                 pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
1330                 value &= ~PCI_COMMAND_MASTER;
1331                 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1332                 /* Stop Receive */
1333                 pch_gbe_mac_reset_rx(hw);
1334                 /* Enable Bus master */
1335                 value |= PCI_COMMAND_MASTER;
1336                 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1337         } else {
1338                 /* Stop Receive */
1339                 pch_gbe_mac_reset_rx(hw);
1340         }
1341 }
1342
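/**
 * pch_gbe_start_receive - Re-enable Receive DMA and the Receive MAC
 * @hw: Pointer to the HW structure
 */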
1343 static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1344 {
1345         u32 rxdma;
1346
1347         /* Enables Receive DMA */
1348         rxdma = ioread32(&hw->reg->DMA_CTRL);
1349         rxdma |= PCH_GBE_RX_DMA_EN;
1350         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1351         /* Enables Receive */
1352         iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1353         return;
1354 }
1355
1356 /**
1357  * pch_gbe_intr - Interrupt Handler
1358  * @irq:   Interrupt number
1359  * @data:  Pointer to a network interface device structure
1360  * Returns
1361  *      - IRQ_HANDLED:  Our interrupt
1362  *      - IRQ_NONE:     Not our interrupt
1363  */
1364 static irqreturn_t pch_gbe_intr(int irq, void *data)
1365 {
1366         struct net_device *netdev = data;
1367         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1368         struct pch_gbe_hw *hw = &adapter->hw;
1369         u32 int_st;
1370         u32 int_en;
1371
1372         /* Check request status */
1373         int_st = ioread32(&hw->reg->INT_ST);
1374         int_st = int_st & ioread32(&hw->reg->INT_EN);
1375         /* No interrupt source is pending: not our interrupt */
1376         if (unlikely(!int_st))
1377                 return IRQ_NONE;        /* Not our interrupt. End processing. */
1378         pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1379         if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1380                 adapter->stats.intr_rx_frame_err_count++;
1381         if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1382                 if (!adapter->rx_stop_flag) {
1383                         adapter->stats.intr_rx_fifo_err_count++;
1384                         pr_debug("Rx fifo over run\n");
1385                         adapter->rx_stop_flag = true;
1386                         int_en = ioread32(&hw->reg->INT_EN);
1387                         iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1388                                   &hw->reg->INT_EN);
1389                         pch_gbe_stop_receive(adapter);
1390                         int_st |= ioread32(&hw->reg->INT_ST);
1391                         int_st = int_st & ioread32(&hw->reg->INT_EN);
1392                 }
1393         if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1394                 adapter->stats.intr_rx_dma_err_count++;
1395         if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1396                 adapter->stats.intr_tx_fifo_err_count++;
1397         if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1398                 adapter->stats.intr_tx_dma_err_count++;
1399         if (int_st & PCH_GBE_INT_TCPIP_ERR)
1400                 adapter->stats.intr_tcpip_err_count++;
1401         /* When Rx descriptor is empty  */
1402         if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1403                 adapter->stats.intr_rx_dsc_empty_count++;
1404                 pr_debug("Rx descriptor is empty\n");
1405                 int_en = ioread32(&hw->reg->INT_EN);
1406                 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1407                 if (hw->mac.tx_fc_enable) {
1408                         /* Set Pause packet */
1409                         pch_gbe_mac_set_pause_packet(hw);
1410                 }
1411         }
1412
1413         /* Rx DMA complete or Tx complete: hand the work off to NAPI */
1414         if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1415             (adapter->rx_stop_flag)) {
1416                 if (likely(napi_schedule_prep(&adapter->napi))) {
1417                         /* Enable only Rx Descriptor empty */
1418                         atomic_inc(&adapter->irq_sem);
1419                         int_en = ioread32(&hw->reg->INT_EN);
1420                         int_en &=
1421                             ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1422                         iowrite32(int_en, &hw->reg->INT_EN);
1423                         /* Start polling for NAPI */
1424                         __napi_schedule(&adapter->napi);
1425                 }
1426         }
1427         pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
1428                  IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1429         return IRQ_HANDLED;
1430 }
1431
1432 /**
1433  * pch_gbe_alloc_rx_buffers - Replace used receive buffers
1434  * @adapter:       Board private structure
1435  * @rx_ring:       Rx descriptor ring
1436  * @cleaned_count: Cleaned count
1437  */
1438 static void
1439 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1440                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1441 {
1442         struct net_device *netdev = adapter->netdev;
1443         struct pci_dev *pdev = adapter->pdev;
1444         struct pch_gbe_hw *hw = &adapter->hw;
1445         struct pch_gbe_rx_desc *rx_desc;
1446         struct pch_gbe_buffer *buffer_info;
1447         struct sk_buff *skb;
1448         unsigned int i;
1449         unsigned int bufsz;
1450
1451         bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1452         i = rx_ring->next_to_use;
1453
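        /*
         * Each descriptor is given a freshly allocated skb plus a fixed slot
         * in rx_buff_pool: the hardware DMAs into the pool slot and
         * pch_gbe_clean_rx() later copies the payload into the skb.
         */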
1454         while ((cleaned_count--)) {
1455                 buffer_info = &rx_ring->buffer_info[i];
1456                 skb = netdev_alloc_skb(netdev, bufsz);
1457                 if (unlikely(!skb)) {
1458                         /* Better luck next round */
1459                         adapter->stats.rx_alloc_buff_failed++;
1460                         break;
1461                 }
1462                 /* align */
1463                 skb_reserve(skb, NET_IP_ALIGN);
1464                 buffer_info->skb = skb;
1465
1466                 buffer_info->dma = dma_map_single(&pdev->dev,
1467                                                   buffer_info->rx_buffer,
1468                                                   buffer_info->length,
1469                                                   DMA_FROM_DEVICE);
1470                 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1471                         dev_kfree_skb(skb);
1472                         buffer_info->skb = NULL;
1473                         buffer_info->dma = 0;
1474                         adapter->stats.rx_alloc_buff_failed++;
1475                         break; /* while !buffer_info->skb */
1476                 }
1477                 buffer_info->mapped = true;
1478                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1479                 rx_desc->buffer_addr = (buffer_info->dma);
1480                 rx_desc->gbec_status = DSC_INIT16;
1481
1482                 pr_debug("i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
1483                          i, (unsigned long long)buffer_info->dma,
1484                          buffer_info->length);
1485
1486                 if (unlikely(++i == rx_ring->count))
1487                         i = 0;
1488         }
1489         if (likely(rx_ring->next_to_use != i)) {
1490                 rx_ring->next_to_use = i;
1491                 if (unlikely(i-- == 0))
1492                         i = (rx_ring->count - 1);
1493                 iowrite32(rx_ring->dma +
1494                           (int)sizeof(struct pch_gbe_rx_desc) * i,
1495                           &hw->reg->RX_DSC_SW_P);
1496         }
1497         return;
1498 }
1499
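/**
 * pch_gbe_alloc_rx_buffers_pool - Allocate the coherent receive buffer pool
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count (currently unused; one slot is carved out
 *                 per descriptor in the ring)
 * Returns
 *      0:              Successfully
 *      Negative value: Failed
 */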
1500 static int
1501 pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1502                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1503 {
1504         struct pci_dev *pdev = adapter->pdev;
1505         struct pch_gbe_buffer *buffer_info;
1506         unsigned int i;
1507         unsigned int bufsz;
1508         unsigned int size;
1509
1510         bufsz = adapter->rx_buffer_len;
1511
1512         size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1513         rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1514                                                 &rx_ring->rx_buff_pool_logic,
1515                                                 GFP_KERNEL);
1516         if (!rx_ring->rx_buff_pool) {
1517                 pr_err("Unable to allocate memory for the receive pool buffer\n");
1518                 return -ENOMEM;
1519         }
1520         memset(rx_ring->rx_buff_pool, 0, size);
1521         rx_ring->rx_buff_pool_size = size;
1522         for (i = 0; i < rx_ring->count; i++) {
1523                 buffer_info = &rx_ring->buffer_info[i];
1524                 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1525                 buffer_info->length = bufsz;
1526         }
1527         return 0;
1528 }
1529
1530 /**
1531  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1532  * @adapter:   Board private structure
1533  * @tx_ring:   Tx descriptor ring
1534  */
1535 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1536                                         struct pch_gbe_tx_ring *tx_ring)
1537 {
1538         struct pch_gbe_buffer *buffer_info;
1539         struct sk_buff *skb;
1540         unsigned int i;
1541         unsigned int bufsz;
1542         struct pch_gbe_tx_desc *tx_desc;
1543
1544         bufsz =
1545             adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1546
1547         for (i = 0; i < tx_ring->count; i++) {
1548                 buffer_info = &tx_ring->buffer_info[i];
1549                 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1550                 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1551                 buffer_info->skb = skb;
1552                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1553                 tx_desc->gbec_status = (DSC_INIT16);
1554         }
1555         return;
1556 }
1557
1558 /**
1559  * pch_gbe_clean_tx - Reclaim resources after transmit completes
1560  * @adapter:   Board private structure
1561  * @tx_ring:   Tx descriptor ring
1562  * Returns
1563  *      true:  All completed descriptors were cleaned
1564  *      false: Cleaning stopped early (PCH_GBE_TX_WEIGHT limit reached)
1565  */
1566 static bool
1567 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1568                  struct pch_gbe_tx_ring *tx_ring)
1569 {
1570         struct pch_gbe_tx_desc *tx_desc;
1571         struct pch_gbe_buffer *buffer_info;
1572         struct sk_buff *skb;
1573         unsigned int i;
1574         unsigned int cleaned_count = 0;
1575         bool cleaned = true;
1576
1577         pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1578
1579         i = tx_ring->next_to_clean;
1580         tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1581         pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
1582                  tx_desc->gbec_status, tx_desc->dma_status);
1583
1584         while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1585                 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1586                 buffer_info = &tx_ring->buffer_info[i];
1587                 skb = buffer_info->skb;
1588
1589                 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1590                         adapter->stats.tx_aborted_errors++;
1591                         pr_err("Transfer Abort Error\n");
1592                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1593                           ) {
1594                         adapter->stats.tx_carrier_errors++;
1595                         pr_err("Transfer Carrier Sense Error\n");
1596                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1597                           ) {
1598                         adapter->stats.tx_aborted_errors++;
1599                         pr_err("Transfer Collision Abort Error\n");
1600                 } else if ((tx_desc->gbec_status &
1601                             (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1602                              PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1603                         adapter->stats.collisions++;
1604                         adapter->stats.tx_packets++;
1605                         adapter->stats.tx_bytes += skb->len;
1606                         pr_debug("Transfer Collision\n");
1607                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1608                           ) {
1609                         adapter->stats.tx_packets++;
1610                         adapter->stats.tx_bytes += skb->len;
1611                 }
1612                 if (buffer_info->mapped) {
1613                         pr_debug("unmap buffer_info->dma : %d\n", i);
1614                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1615                                          buffer_info->length, DMA_TO_DEVICE);
1616                         buffer_info->mapped = false;
1617                 }
1618                 if (buffer_info->skb) {
1619                         pr_debug("trim buffer_info->skb : %d\n", i);
1620                         skb_trim(buffer_info->skb, 0);
1621                 }
1622                 tx_desc->gbec_status = DSC_INIT16;
1623                 if (unlikely(++i == tx_ring->count))
1624                         i = 0;
1625                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1626
1627                 /* limit the work done per call so transmit cleanup cannot run endlessly */
1628                 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1629                         cleaned = false;
1630                         break;
1631                 }
1632         }
1633         pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1634                  cleaned_count);
1635         /* Recover from running out of Tx resources in xmit_frame */
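        /*
         * tx_lock serialises this wake-up and the next_to_clean update with
         * pch_gbe_xmit_frame(), which checks for free descriptors and stops
         * the queue under the same lock; without it the queue could be
         * stopped just after being woken and never restarted.
         */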
1636         spin_lock(&tx_ring->tx_lock);
1637         if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
1638                 netif_wake_queue(adapter->netdev);
1639                 adapter->stats.tx_restart_count++;
1640                 pr_debug("Tx wake queue\n");
1641         }
1642
1643         tx_ring->next_to_clean = i;
1644
1645         pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1646         spin_unlock(&tx_ring->tx_lock);
1647         return cleaned;
1648 }
1649
1650 /**
1651  * pch_gbe_clean_rx - Send received data up the network stack; legacy
1652  * @adapter:     Board private structure
1653  * @rx_ring:     Rx descriptor ring
1654  * @work_done:   Completed count
1655  * @work_to_do:  Request count
1656  * Returns
1657  *      true:  At least one descriptor was cleaned
1658  *      false: No descriptors were cleaned
1659  */
1660 static bool
1661 pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1662                  struct pch_gbe_rx_ring *rx_ring,
1663                  int *work_done, int work_to_do)
1664 {
1665         struct net_device *netdev = adapter->netdev;
1666         struct pci_dev *pdev = adapter->pdev;
1667         struct pch_gbe_buffer *buffer_info;
1668         struct pch_gbe_rx_desc *rx_desc;
1669         u32 length;
1670         unsigned int i;
1671         unsigned int cleaned_count = 0;
1672         bool cleaned = false;
1673         struct sk_buff *skb;
1674         u8 dma_status;
1675         u16 gbec_status;
1676         u32 tcp_ip_status;
1677
1678         i = rx_ring->next_to_clean;
1679
1680         while (*work_done < work_to_do) {
1681                 /* Check Rx descriptor status */
1682                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1683                 if (rx_desc->gbec_status == DSC_INIT16)
1684                         break;
1685                 cleaned = true;
1686                 cleaned_count++;
1687
1688                 dma_status = rx_desc->dma_status;
1689                 gbec_status = rx_desc->gbec_status;
1690                 tcp_ip_status = rx_desc->tcp_ip_status;
1691                 rx_desc->gbec_status = DSC_INIT16;
1692                 buffer_info = &rx_ring->buffer_info[i];
1693                 skb = buffer_info->skb;
1694                 buffer_info->skb = NULL;
1695
1696                 /* unmap dma */
1697                 dma_unmap_single(&pdev->dev, buffer_info->dma,
1698                                    buffer_info->length, DMA_FROM_DEVICE);
1699                 buffer_info->mapped = false;
1700
1701                 pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
1702                          "TCP:0x%08x]  BufInf = 0x%p\n",
1703                          i, dma_status, gbec_status, tcp_ip_status,
1704                          buffer_info);
1705                 /* Error check */
1706                 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1707                         adapter->stats.rx_frame_errors++;
1708                         pr_err("Receive Not Octal Error\n");
1709                 } else if (unlikely(gbec_status &
1710                                 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1711                         adapter->stats.rx_frame_errors++;
1712                         pr_err("Receive Nibble Error\n");
1713                 } else if (unlikely(gbec_status &
1714                                 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1715                         adapter->stats.rx_crc_errors++;
1716                         pr_err("Receive CRC Error\n");
1717                 } else {
1718                         /* get receive length */
1719                         /* length conversion [-3]; the reported length includes the FCS */
1720                         length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1721                         if (rx_desc->rx_words_eob & 0x02)
1722                                 length = length - 4;
1723                         /*
1724                          * buffer_info->rx_buffer: [Header:14][payload]
1725                          * skb->data: [Reserve:2][Header:14][payload]
1726                          */
1727                         memcpy(skb->data, buffer_info->rx_buffer, length);
1728
1729                         /* update status of driver */
1730                         adapter->stats.rx_bytes += length;
1731                         adapter->stats.rx_packets++;
1732                         if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1733                                 adapter->stats.multicast++;
1734                         /* Write metadata of skb */
1735                         skb_put(skb, length);
1736
1737 #ifdef CONFIG_PCH_PTP
1738                         pch_rx_timestamp(adapter, skb);
1739 #endif
1740
1741                         skb->protocol = eth_type_trans(skb, netdev);
1742                         if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1743                                 skb->ip_summed = CHECKSUM_NONE;
1744                         else
1745                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1746
1747                         napi_gro_receive(&adapter->napi, skb);
1748                         (*work_done)++;
1749                         pr_debug("Receive skb->ip_summed: %d length: %d\n",
1750                                  skb->ip_summed, length);
1751                 }
1752                 /* return some buffers to hardware, one at a time is too slow */
1753                 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1754                         pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1755                                                  cleaned_count);
1756                         cleaned_count = 0;
1757                 }
1758                 if (++i == rx_ring->count)
1759                         i = 0;
1760         }
1761         rx_ring->next_to_clean = i;
1762         if (cleaned_count)
1763                 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1764         return cleaned;
1765 }
1766
1767 /**
1768  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1769  * @adapter:  Board private structure
1770  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1771  * Returns
1772  *      0:              Successfully
1773  *      Negative value: Failed
1774  */
1775 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1776                                 struct pch_gbe_tx_ring *tx_ring)
1777 {
1778         struct pci_dev *pdev = adapter->pdev;
1779         struct pch_gbe_tx_desc *tx_desc;
1780         int size;
1781         int desNo;
1782
1783         size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1784         tx_ring->buffer_info = vzalloc(size);
1785         if (!tx_ring->buffer_info)
1786                 return -ENOMEM;
1787
1788         tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1789
1790         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1791                                            &tx_ring->dma, GFP_KERNEL);
1792         if (!tx_ring->desc) {
1793                 vfree(tx_ring->buffer_info);
1794                 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1795                 return -ENOMEM;
1796         }
1797         memset(tx_ring->desc, 0, tx_ring->size);
1798
1799         tx_ring->next_to_use = 0;
1800         tx_ring->next_to_clean = 0;
1801         spin_lock_init(&tx_ring->tx_lock);
1802
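        /*
         * DSC_INIT16 in gbec_status marks a descriptor the hardware has not
         * completed; pch_gbe_clean_tx() only reclaims an entry once the
         * hardware has cleared it.
         */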
1803         for (desNo = 0; desNo < tx_ring->count; desNo++) {
1804                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1805                 tx_desc->gbec_status = DSC_INIT16;
1806         }
1807         pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
1808                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1809                  tx_ring->desc, (unsigned long long)tx_ring->dma,
1810                  tx_ring->next_to_clean, tx_ring->next_to_use);
1811         return 0;
1812 }
1813
1814 /**
1815  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1816  * @adapter:  Board private structure
1817  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1818  * Returns
1819  *      0:              Successfully
1820  *      Negative value: Failed
1821  */
1822 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1823                                 struct pch_gbe_rx_ring *rx_ring)
1824 {
1825         struct pci_dev *pdev = adapter->pdev;
1826         struct pch_gbe_rx_desc *rx_desc;
1827         int size;
1828         int desNo;
1829
1830         size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1831         rx_ring->buffer_info = vzalloc(size);
1832         if (!rx_ring->buffer_info)
1833                 return -ENOMEM;
1834
1835         rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1836         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1837                                            &rx_ring->dma, GFP_KERNEL);
1838
1839         if (!rx_ring->desc) {
1840                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1841                 vfree(rx_ring->buffer_info);
1842                 return -ENOMEM;
1843         }
1844         memset(rx_ring->desc, 0, rx_ring->size);
1845         rx_ring->next_to_clean = 0;
1846         rx_ring->next_to_use = 0;
1847         for (desNo = 0; desNo < rx_ring->count; desNo++) {
1848                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1849                 rx_desc->gbec_status = DSC_INIT16;
1850         }
1851         pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
1852                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1853                  rx_ring->desc, (unsigned long long)rx_ring->dma,
1854                  rx_ring->next_to_clean, rx_ring->next_to_use);
1855         return 0;
1856 }
1857
1858 /**
1859  * pch_gbe_free_tx_resources - Free Tx Resources
1860  * @adapter:  Board private structure
1861  * @tx_ring:  Tx descriptor ring for a specific queue
1862  */
1863 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1864                                 struct pch_gbe_tx_ring *tx_ring)
1865 {
1866         struct pci_dev *pdev = adapter->pdev;
1867
1868         pch_gbe_clean_tx_ring(adapter, tx_ring);
1869         vfree(tx_ring->buffer_info);
1870         tx_ring->buffer_info = NULL;
1871         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1872         tx_ring->desc = NULL;
1873 }
1874
1875 /**
1876  * pch_gbe_free_rx_resources - Free Rx Resources
1877  * @adapter:  Board private structure
1878  * @rx_ring:  Ring to clean the resources from
1879  */
1880 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1881                                 struct pch_gbe_rx_ring *rx_ring)
1882 {
1883         struct pci_dev *pdev = adapter->pdev;
1884
1885         pch_gbe_clean_rx_ring(adapter, rx_ring);
1886         vfree(rx_ring->buffer_info);
1887         rx_ring->buffer_info = NULL;
1888         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1889         rx_ring->desc = NULL;
1890 }
1891
1892 /**
1893  * pch_gbe_request_irq - Allocate an interrupt line
1894  * @adapter:  Board private structure
1895  * Returns
1896  *      0:              Successfully
1897  *      Negative value: Failed
1898  */
1899 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1900 {
1901         struct net_device *netdev = adapter->netdev;
1902         int err;
1903         int flags;
1904
1905         flags = IRQF_SHARED;
1906         adapter->have_msi = false;
1907         err = pci_enable_msi(adapter->pdev);
1908         pr_debug("call pci_enable_msi\n");
1909         if (err) {
1910                 pr_debug("call pci_enable_msi - Error: %d\n", err);
1911         } else {
1912                 flags = 0;
1913                 adapter->have_msi = true;
1914         }
1915         err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1916                           flags, netdev->name, netdev);
1917         if (err)
1918                 pr_err("Unable to allocate an interrupt, Error: %d\n", err);
1919         pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
1920                  adapter->have_msi, flags, err);
1921         return err;
1922 }
1923
1924
1925 static void pch_gbe_set_multi(struct net_device *netdev);
1926 /**
1927  * pch_gbe_up - Up GbE network device
1928  * @adapter:  Board private structure
1929  * Returns
1930  *      0:              Successfully
1931  *      Negative value: Failed
1932  */
1933 int pch_gbe_up(struct pch_gbe_adapter *adapter)
1934 {
1935         struct net_device *netdev = adapter->netdev;
1936         struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1937         struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1938         int err;
1939
1940         /* Ensure we have a valid MAC */
1941         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1942                 pr_err("Error: Invalid MAC address\n");
1943                 return -EINVAL;
1944         }
1945
1946         /* hardware has been reset, we need to reload some things */
1947         pch_gbe_set_multi(netdev);
1948
1949         pch_gbe_setup_tctl(adapter);
1950         pch_gbe_configure_tx(adapter);
1951         pch_gbe_setup_rctl(adapter);
1952         pch_gbe_configure_rx(adapter);
1953
1954         err = pch_gbe_request_irq(adapter);
1955         if (err) {
1956                 pr_err("Error: can't bring device up\n");
1957                 return err;
1958         }
1959         err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1960         if (err) {
1961                 pr_err("Error: can't bring device up\n");
1962                 return err;
1963         }
1964         pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1965         pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1966         adapter->tx_queue_len = netdev->tx_queue_len;
1967         pch_gbe_start_receive(&adapter->hw);
1968
1969         mod_timer(&adapter->watchdog_timer, jiffies);
1970
1971         napi_enable(&adapter->napi);
1972         pch_gbe_irq_enable(adapter);
1973         netif_start_queue(adapter->netdev);
1974
1975         return 0;
1976 }
1977
1978 /**
1979  * pch_gbe_down - Down GbE network device
1980  * @adapter:  Board private structure
1981  */
1982 void pch_gbe_down(struct pch_gbe_adapter *adapter)
1983 {
1984         struct net_device *netdev = adapter->netdev;
1985         struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1986
1987         /* signal that we're down so the interrupt handler does not
1988          * reschedule our watchdog timer */
1989         napi_disable(&adapter->napi);
1990         atomic_set(&adapter->irq_sem, 0);
1991
1992         pch_gbe_irq_disable(adapter);
1993         pch_gbe_free_irq(adapter);
1994
1995         del_timer_sync(&adapter->watchdog_timer);
1996
1997         netdev->tx_queue_len = adapter->tx_queue_len;
1998         netif_carrier_off(netdev);
1999         netif_stop_queue(netdev);
2000
2001         pch_gbe_reset(adapter);
2002         pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
2003         pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
2004
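        /*
         * The receive buffer pool is allocated in pch_gbe_up(); free it here
         * so that a subsequent up (e.g. after an MTU change) can size the
         * pool for the new rx_buffer_len.
         */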
2005         pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
2006                             rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
2007         rx_ring->rx_buff_pool_logic = 0;
2008         rx_ring->rx_buff_pool_size = 0;
2009         rx_ring->rx_buff_pool = NULL;
2010 }
2011
2012 /**
2013  * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2014  * @adapter:  Board private structure to initialize
2015  * Returns
2016  *      0:              Successfully
2017  *      Negative value: Failed
2018  */
2019 static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2020 {
2021         struct pch_gbe_hw *hw = &adapter->hw;
2022         struct net_device *netdev = adapter->netdev;
2023
2024         adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2025         hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2026         hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2027
2028         /* Initialize the hardware-specific values */
2029         if (pch_gbe_hal_setup_init_funcs(hw)) {
2030                 pr_err("Hardware Initialization Failure\n");
2031                 return -EIO;
2032         }
2033         if (pch_gbe_alloc_queues(adapter)) {
2034                 pr_err("Unable to allocate memory for queues\n");
2035                 return -ENOMEM;
2036         }
2037         spin_lock_init(&adapter->hw.miim_lock);
2038         spin_lock_init(&adapter->stats_lock);
2039         spin_lock_init(&adapter->ethtool_lock);
2040         atomic_set(&adapter->irq_sem, 0);
2041         pch_gbe_irq_disable(adapter);
2042
2043         pch_gbe_init_stats(adapter);
2044
2045         pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
2046                  (u32) adapter->rx_buffer_len,
2047                  hw->mac.min_frame_size, hw->mac.max_frame_size);
2048         return 0;
2049 }
2050
2051 /**
2052  * pch_gbe_open - Called when a network interface is made active
2053  * @netdev:     Network interface device structure
2054  * Returns
2055  *      0:              Successfully
2056  *      Negative value: Failed
2057  */
2058 static int pch_gbe_open(struct net_device *netdev)
2059 {
2060         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2061         struct pch_gbe_hw *hw = &adapter->hw;
2062         int err;
2063
2064         /* allocate transmit descriptors */
2065         err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
2066         if (err)
2067                 goto err_setup_tx;
2068         /* allocate receive descriptors */
2069         err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
2070         if (err)
2071                 goto err_setup_rx;
2072         pch_gbe_hal_power_up_phy(hw);
2073         err = pch_gbe_up(adapter);
2074         if (err)
2075                 goto err_up;
2076         pr_debug("Success End\n");
2077         return 0;
2078
2079 err_up:
2080         if (!adapter->wake_up_evt)
2081                 pch_gbe_hal_power_down_phy(hw);
2082         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2083 err_setup_rx:
2084         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2085 err_setup_tx:
2086         pch_gbe_reset(adapter);
2087         pr_err("Error End\n");
2088         return err;
2089 }
2090
2091 /**
2092  * pch_gbe_stop - Disables a network interface
2093  * @netdev:  Network interface device structure
2094  * Returns
2095  *      0: Successfully
2096  */
2097 static int pch_gbe_stop(struct net_device *netdev)
2098 {
2099         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2100         struct pch_gbe_hw *hw = &adapter->hw;
2101
2102         pch_gbe_down(adapter);
2103         if (!adapter->wake_up_evt)
2104                 pch_gbe_hal_power_down_phy(hw);
2105         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2106         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2107         return 0;
2108 }
2109
2110 /**
2111  * pch_gbe_xmit_frame - Packet transmitting start
2112  * @skb:     Socket buffer structure
2113  * @netdev:  Network interface device structure
2114  * Returns
2115  *      - NETDEV_TX_OK:     The frame was queued (or dropped on a length error)
2116  *      - NETDEV_TX_BUSY:   No free descriptors; the queue has been stopped
 *      - NETDEV_TX_LOCKED: tx_lock is held by the cleanup path; the stack should requeue
2117  */
2118 static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2119 {
2120         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2121         struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2122         unsigned long flags;
2123
2124         if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
2125                 pr_err("Transfer length Error: skb len: %d > max: %d\n",
2126                        skb->len, adapter->hw.mac.max_frame_size);
2127                 dev_kfree_skb_any(skb);
2128                 adapter->stats.tx_length_errors++;
2129                 return NETDEV_TX_OK;
2130         }
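        /*
         * tx_lock is shared with pch_gbe_clean_tx(); use a trylock so the
         * transmit path never spins here, and report NETDEV_TX_LOCKED so the
         * stack requeues the frame if the cleanup path holds the lock.
         */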
2131         if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
2132                 /* Collision - tell upper layer to requeue */
2133                 return NETDEV_TX_LOCKED;
2134         }
2135         if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2136                 netif_stop_queue(netdev);
2137                 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2138                 pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
2139                          tx_ring->next_to_use, tx_ring->next_to_clean);
2140                 return NETDEV_TX_BUSY;
2141         }
2142
2143         /* CRC and ITAG insertion are not supported */
2144         pch_gbe_tx_queue(adapter, tx_ring, skb);
2145         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2146         return NETDEV_TX_OK;
2147 }
2148
2149 /**
2150  * pch_gbe_get_stats - Get System Network Statistics
2151  * @netdev:  Network interface device structure
2152  * Returns:  The current stats
2153  */
2154 static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
2155 {
2156         /* only return the current stats */
2157         return &netdev->stats;
2158 }
2159
2160 /**
2161  * pch_gbe_set_multi - Multicast and Promiscuous mode set
2162  * @netdev:   Network interface device structure
2163  */
2164 static void pch_gbe_set_multi(struct net_device *netdev)
2165 {
2166         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2167         struct pch_gbe_hw *hw = &adapter->hw;
2168         struct netdev_hw_addr *ha;
2169         u8 *mta_list;
2170         u32 rctl;
2171         int i;
2172         int mc_count;
2173
2174         pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
2175
2176         /* Check for Promiscuous and All Multicast modes */
2177         rctl = ioread32(&hw->reg->RX_MODE);
2178         mc_count = netdev_mc_count(netdev);
2179         if ((netdev->flags & IFF_PROMISC)) {
2180                 rctl &= ~PCH_GBE_ADD_FIL_EN;
2181                 rctl &= ~PCH_GBE_MLT_FIL_EN;
2182         } else if ((netdev->flags & IFF_ALLMULTI)) {
2183                 /* accept all multicast frames */
2184                 rctl |= PCH_GBE_ADD_FIL_EN;
2185                 rctl &= ~PCH_GBE_MLT_FIL_EN;
2186         } else {
2187                 if (mc_count >= PCH_GBE_MAR_ENTRIES) {
2188                         /* accept all multicast frames */
2189                         rctl |= PCH_GBE_ADD_FIL_EN;
2190                         rctl &= ~PCH_GBE_MLT_FIL_EN;
2191                 } else {
2192                         rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2193                 }
2194         }
2195         iowrite32(rctl, &hw->reg->RX_MODE);
2196
2197         if (mc_count >= PCH_GBE_MAR_ENTRIES)
2198                 return;
2199         mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
2200         if (!mta_list)
2201                 return;
2202
2203         /* The shared function expects a packed array of only addresses. */
2204         i = 0;
2205         netdev_for_each_mc_addr(ha, netdev) {
2206                 if (i == mc_count)
2207                         break;
2208                 memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
2209         }
2210         pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
2211                                         PCH_GBE_MAR_ENTRIES);
2212         kfree(mta_list);
2213
2214         pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
2215                  ioread32(&hw->reg->RX_MODE), mc_count);
2216 }
2217
2218 /**
2219  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2220  * @netdev: Network interface device structure
2221  * @addr:   Pointer to an address structure
2222  * Returns
2223  *      0:              Successfully
2224  *      -EADDRNOTAVAIL: Failed
2225  */
2226 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2227 {
2228         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2229         struct sockaddr *skaddr = addr;
2230         int ret_val;
2231
2232         if (!is_valid_ether_addr(skaddr->sa_data)) {
2233                 ret_val = -EADDRNOTAVAIL;
2234         } else {
2235                 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2236                 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2237                 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2238                 ret_val = 0;
2239         }
2240         pr_debug("ret_val : 0x%08x\n", ret_val);
2241         pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2242         pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2243         pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2244                  ioread32(&adapter->hw.reg->mac_adr[0].high),
2245                  ioread32(&adapter->hw.reg->mac_adr[0].low));
2246         return ret_val;
2247 }
2248
2249 /**
2250  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2251  * @netdev:   Network interface device structure
2252  * @new_mtu:  New value for maximum frame size
2253  * Returns
2254  *      0:              Successfully
2255  *      -EINVAL:        Failed
2256  */
2257 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2258 {
2259         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2260         int max_frame;
2261         unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2262         int err;
2263
2264         max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2265         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2266                 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2267                 pr_err("Invalid MTU setting\n");
2268                 return -EINVAL;
2269         }
2270         if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2271                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2272         else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2273                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2274         else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2275                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2276         else
2277                 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2278
2279         if (netif_running(netdev)) {
2280                 pch_gbe_down(adapter);
2281                 err = pch_gbe_up(adapter);
2282                 if (err) {
2283                         adapter->rx_buffer_len = old_rx_buffer_len;
2284                         pch_gbe_up(adapter);
2285                         return -ENOMEM;
2286                 } else {
2287                         netdev->mtu = new_mtu;
2288                         adapter->hw.mac.max_frame_size = max_frame;
2289                 }
2290         } else {
2291                 pch_gbe_reset(adapter);
2292                 netdev->mtu = new_mtu;
2293                 adapter->hw.mac.max_frame_size = max_frame;
2294         }
2295
2296         pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2297                  max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2298                  adapter->hw.mac.max_frame_size);
2299         return 0;
2300 }
2301
2302 /**
2303  * pch_gbe_set_features - Reset device after features changed
2304  * @netdev:   Network interface device structure
2305  * @features:  New features
2306  * Returns
2307  *      0:              HW state updated successfully
2308  */
2309 static int pch_gbe_set_features(struct net_device *netdev,
2310         netdev_features_t features)
2311 {
2312         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2313         netdev_features_t changed = features ^ netdev->features;
2314
2315         if (!(changed & NETIF_F_RXCSUM))
2316                 return 0;
2317
2318         if (netif_running(netdev))
2319                 pch_gbe_reinit_locked(adapter);
2320         else
2321                 pch_gbe_reset(adapter);
2322
2323         return 0;
2324 }
2325
2326 /**
2327  * pch_gbe_ioctl - Control device registers through the MII interface
2328  * @netdev:   Network interface device structure
2329  * @ifr:      Pointer to ifr structure
2330  * @cmd:      Control command
2331  * Returns
2332  *      0:      Successfully
2333  *      Negative value: Failed
2334  */
2335 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2336 {
2337         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2338
2339         pr_debug("cmd : 0x%04x\n", cmd);
2340
2341 #ifdef CONFIG_PCH_PTP
2342         if (cmd == SIOCSHWTSTAMP)
2343                 return hwtstamp_ioctl(netdev, ifr, cmd);
2344 #endif
2345
2346         return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2347 }
2348
2349 /**
2350  * pch_gbe_tx_timeout - Respond to a Tx Hang
2351  * @netdev:   Network interface device structure
2352  */
2353 static void pch_gbe_tx_timeout(struct net_device *netdev)
2354 {
2355         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2356
2357         /* Do the reset outside of interrupt context */
2358         adapter->stats.tx_timeout_count++;
2359         schedule_work(&adapter->reset_task);
2360 }
2361
2362 /**
2363  * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2364  * @napi:    Pointer of polling device struct
2365  * @budget:  The maximum number of packets to process
2366  * Returns
2367  *      The number of packets processed; a value below @budget means
2368  *      polling has completed and interrupts were re-enabled
2369  */
2370 static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2371 {
2372         struct pch_gbe_adapter *adapter =
2373             container_of(napi, struct pch_gbe_adapter, napi);
2374         int work_done = 0;
2375         bool poll_end_flag = false;
2376         bool cleaned = false;
2377         u32 int_en;
2378
2379         pr_debug("budget : %d\n", budget);
2380
2381         pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2382         cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2383
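        /*
         * If Tx cleanup hit its weight limit, claim the whole budget so NAPI
         * polls again instead of completing with work still pending.
         */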
2384         if (!cleaned)
2385                 work_done = budget;
2386         /* If no Tx and not enough Rx work done,
2387          * exit the polling mode
2388          */
2389         if (work_done < budget)
2390                 poll_end_flag = true;
2391
2392         if (poll_end_flag) {
2393                 napi_complete(napi);
2394                 if (adapter->rx_stop_flag) {
2395                         adapter->rx_stop_flag = false;
2396                         pch_gbe_start_receive(&adapter->hw);
2397                 }
2398                 pch_gbe_irq_enable(adapter);
2399         } else
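                /*
                 * The receiver was stopped after a FIFO overrun; restart it
                 * and re-enable the overrun interrupt that the ISR masked.
                 */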
2400                 if (adapter->rx_stop_flag) {
2401                         adapter->rx_stop_flag = false;
2402                         pch_gbe_start_receive(&adapter->hw);
2403                         int_en = ioread32(&adapter->hw.reg->INT_EN);
2404                         iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
2405                                 &adapter->hw.reg->INT_EN);
2406                 }
2407
2408         pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
2409                  poll_end_flag, work_done, budget);
2410
2411         return work_done;
2412 }
2413
2414 #ifdef CONFIG_NET_POLL_CONTROLLER
2415 /**
2416  * pch_gbe_netpoll - Used by things like netconsole to send skbs
2417  * @netdev:  Network interface device structure
2418  */
2419 static void pch_gbe_netpoll(struct net_device *netdev)
2420 {
2421         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2422
2423         disable_irq(adapter->pdev->irq);
2424         pch_gbe_intr(adapter->pdev->irq, netdev);
2425         enable_irq(adapter->pdev->irq);
2426 }
2427 #endif
2428
2429 static const struct net_device_ops pch_gbe_netdev_ops = {
2430         .ndo_open = pch_gbe_open,
2431         .ndo_stop = pch_gbe_stop,
2432         .ndo_start_xmit = pch_gbe_xmit_frame,
2433         .ndo_get_stats = pch_gbe_get_stats,
2434         .ndo_set_mac_address = pch_gbe_set_mac,
2435         .ndo_tx_timeout = pch_gbe_tx_timeout,
2436         .ndo_change_mtu = pch_gbe_change_mtu,
2437         .ndo_set_features = pch_gbe_set_features,
2438         .ndo_do_ioctl = pch_gbe_ioctl,
2439         .ndo_set_rx_mode = pch_gbe_set_multi,
2440 #ifdef CONFIG_NET_POLL_CONTROLLER
2441         .ndo_poll_controller = pch_gbe_netpoll,
2442 #endif
2443 };
2444
2445 static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2446                                                 pci_channel_state_t state)
2447 {
2448         struct net_device *netdev = pci_get_drvdata(pdev);
2449         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2450
2451         netif_device_detach(netdev);
2452         if (netif_running(netdev))
2453                 pch_gbe_down(adapter);
2454         pci_disable_device(pdev);
2455         /* Request a slot reset. */
2456         return PCI_ERS_RESULT_NEED_RESET;
2457 }
2458
2459 static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2460 {
2461         struct net_device *netdev = pci_get_drvdata(pdev);
2462         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2463         struct pch_gbe_hw *hw = &adapter->hw;
2464
2465         if (pci_enable_device(pdev)) {
2466                 pr_err("Cannot re-enable PCI device after reset\n");
2467                 return PCI_ERS_RESULT_DISCONNECT;
2468         }
2469         pci_set_master(pdev);
2470         pci_enable_wake(pdev, PCI_D0, 0);
2471         pch_gbe_hal_power_up_phy(hw);
2472         pch_gbe_reset(adapter);
2473         /* Clear wake up status */
2474         pch_gbe_mac_set_wol_event(hw, 0);
2475
2476         return PCI_ERS_RESULT_RECOVERED;
2477 }
2478
2479 static void pch_gbe_io_resume(struct pci_dev *pdev)
2480 {
2481         struct net_device *netdev = pci_get_drvdata(pdev);
2482         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2483
2484         if (netif_running(netdev)) {
2485                 if (pch_gbe_up(adapter)) {
2486                         pr_debug("can't bring device back up after reset\n");
2487                         return;
2488                 }
2489         }
2490         netif_device_attach(netdev);
2491 }
2492
2493 static int __pch_gbe_suspend(struct pci_dev *pdev)
2494 {
2495         struct net_device *netdev = pci_get_drvdata(pdev);
2496         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2497         struct pch_gbe_hw *hw = &adapter->hw;
2498         u32 wufc = adapter->wake_up_evt;
2499         int retval = 0;
2500
2501         netif_device_detach(netdev);
2502         if (netif_running(netdev))
2503                 pch_gbe_down(adapter);
2504         if (wufc) {
2505                 pch_gbe_set_multi(netdev);
2506                 pch_gbe_setup_rctl(adapter);
2507                 pch_gbe_configure_rx(adapter);
2508                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2509                                         hw->mac.link_duplex);
2510                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2511                                         hw->mac.link_duplex);
2512                 pch_gbe_mac_set_wol_event(hw, wufc);
2513                 pci_disable_device(pdev);
2514         } else {
2515                 pch_gbe_hal_power_down_phy(hw);
2516                 pch_gbe_mac_set_wol_event(hw, wufc);
2517                 pci_disable_device(pdev);
2518         }
2519         return retval;
2520 }
2521
2522 #ifdef CONFIG_PM
2523 static int pch_gbe_suspend(struct device *device)
2524 {
2525         struct pci_dev *pdev = to_pci_dev(device);
2526
2527         return __pch_gbe_suspend(pdev);
2528 }
2529
2530 static int pch_gbe_resume(struct device *device)
2531 {
2532         struct pci_dev *pdev = to_pci_dev(device);
2533         struct net_device *netdev = pci_get_drvdata(pdev);
2534         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2535         struct pch_gbe_hw *hw = &adapter->hw;
2536         u32 err;
2537
2538         err = pci_enable_device(pdev);
2539         if (err) {
2540                 pr_err("Cannot enable PCI device from suspend\n");
2541                 return err;
2542         }
2543         pci_set_master(pdev);
2544         pch_gbe_hal_power_up_phy(hw);
2545         pch_gbe_reset(adapter);
2546         /* Clear wake on lan control and status */
2547         pch_gbe_mac_set_wol_event(hw, 0);
2548
2549         if (netif_running(netdev))
2550                 pch_gbe_up(adapter);
2551         netif_device_attach(netdev);
2552
2553         return 0;
2554 }
2555 #endif /* CONFIG_PM */
2556
2557 static void pch_gbe_shutdown(struct pci_dev *pdev)
2558 {
2559         __pch_gbe_suspend(pdev);
2560         if (system_state == SYSTEM_POWER_OFF) {
2561                 pci_wake_from_d3(pdev, true);
2562                 pci_set_power_state(pdev, PCI_D3hot);
2563         }
2564 }
2565
2566 static void pch_gbe_remove(struct pci_dev *pdev)
2567 {
2568         struct net_device *netdev = pci_get_drvdata(pdev);
2569         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2570
2571         cancel_work_sync(&adapter->reset_task);
2572         unregister_netdev(netdev);
2573
2574         pch_gbe_hal_phy_hw_reset(&adapter->hw);
2575
2576         kfree(adapter->tx_ring);
2577         kfree(adapter->rx_ring);
2578
2579         iounmap(adapter->hw.reg);
2580         pci_release_regions(pdev);
2581         free_netdev(netdev);
2582         pci_disable_device(pdev);
2583 }
2584
2585 static int pch_gbe_probe(struct pci_dev *pdev,
2586                           const struct pci_device_id *pci_id)
2587 {
2588         struct net_device *netdev;
2589         struct pch_gbe_adapter *adapter;
2590         int ret;
2591
2592         ret = pci_enable_device(pdev);
2593         if (ret)
2594                 return ret;
2595
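        /* Prefer 64-bit DMA masks; fall back to 32-bit if the platform rejects them. */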
2596         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2597                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2598                 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2599                 if (ret) {
2600                         ret = pci_set_consistent_dma_mask(pdev,
2601                                                           DMA_BIT_MASK(32));
2602                         if (ret) {
2603                                 dev_err(&pdev->dev, "ERR: No usable DMA "
2604                                         "configuration, aborting\n");
2605                                 goto err_disable_device;
2606                         }
2607                 }
2608         }
2609
2610         ret = pci_request_regions(pdev, KBUILD_MODNAME);
2611         if (ret) {
2612                 dev_err(&pdev->dev,
2613                         "ERR: Can't reserve PCI I/O and memory resources\n");
2614                 goto err_disable_device;
2615         }
2616         pci_set_master(pdev);
2617
2618         netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2619         if (!netdev) {
2620                 ret = -ENOMEM;
2621                 goto err_release_pci;
2622         }
2623         SET_NETDEV_DEV(netdev, &pdev->dev);
2624
2625         pci_set_drvdata(pdev, netdev);
2626         adapter = netdev_priv(netdev);
2627         adapter->netdev = netdev;
2628         adapter->pdev = pdev;
2629         adapter->hw.back = adapter;
2630         adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2631         if (!adapter->hw.reg) {
2632                 ret = -EIO;
2633                 dev_err(&pdev->dev, "Can't ioremap\n");
2634                 goto err_free_netdev;
2635         }
2636
2637 #ifdef CONFIG_PCH_PTP
2638         adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2639                                                PCI_DEVFN(12, 4));
2640         if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2641                 pr_err("Bad ptp filter\n");
2642                 ret = -EINVAL;
                 goto err_iounmap;
2643         }
2644 #endif
2645
2646         netdev->netdev_ops = &pch_gbe_netdev_ops;
2647         netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2648         netif_napi_add(netdev, &adapter->napi,
2649                        pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2650         netdev->hw_features = NETIF_F_RXCSUM |
2651                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2652         netdev->features = netdev->hw_features;
2653         pch_gbe_set_ethtool_ops(netdev);
2654
2655         pch_gbe_mac_load_mac_addr(&adapter->hw);
2656         pch_gbe_mac_reset_hw(&adapter->hw);
2657
2658         /* setup the private structure */
2659         ret = pch_gbe_sw_init(adapter);
2660         if (ret)
2661                 goto err_iounmap;
2662
2663         /* Initialize PHY */
2664         ret = pch_gbe_init_phy(adapter);
2665         if (ret) {
2666                 dev_err(&pdev->dev, "PHY initialize error\n");
2667                 goto err_free_adapter;
2668         }
2669         pch_gbe_hal_get_bus_info(&adapter->hw);
2670
2671         /* Read the MAC address and store it in the private data */
2672         ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2673         if (ret) {
2674                 dev_err(&pdev->dev, "MAC address Read Error\n");
2675                 goto err_free_adapter;
2676         }
2677
2678         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2679         if (!is_valid_ether_addr(netdev->dev_addr)) {
2680                 /*
2681                  * If the MAC is invalid (or just missing), display a warning
2682                  * but do not abort setting up the device. pch_gbe_up will
2683                  * prevent the interface from being brought up until a valid MAC
2684                  * is set.
2685                  */
2686                 dev_err(&pdev->dev, "Invalid MAC address, "
2687                                     "interface disabled.\n");
2688         }
2689         setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2690                     (unsigned long)adapter);
2691
2692         INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2693
2694         pch_gbe_check_options(adapter);
2695
2696         /* initialize the wol settings based on the eeprom settings */
2697         adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2698         dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2699
2700         /* reset the hardware with the new settings */
2701         pch_gbe_reset(adapter);
2702
2703         ret = register_netdev(netdev);
2704         if (ret)
2705                 goto err_free_adapter;
2706         /* tell the stack to leave us alone until pch_gbe_open() is called */
2707         netif_carrier_off(netdev);
2708         netif_stop_queue(netdev);
2709
2710         dev_dbg(&pdev->dev, "PCH Network Connection\n");
2711
2712         device_set_wakeup_enable(&pdev->dev, 1);
2713         return 0;
2714
2715 err_free_adapter:
2716         pch_gbe_hal_phy_hw_reset(&adapter->hw);
2717         kfree(adapter->tx_ring);
2718         kfree(adapter->rx_ring);
2719 err_iounmap:
2720         iounmap(adapter->hw.reg);
2721 err_free_netdev:
2722         free_netdev(netdev);
2723 err_release_pci:
2724         pci_release_regions(pdev);
2725 err_disable_device:
2726         pci_disable_device(pdev);
2727         return ret;
2728 }
2729
2730 static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2731         {.vendor = PCI_VENDOR_ID_INTEL,
2732          .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2733          .subvendor = PCI_ANY_ID,
2734          .subdevice = PCI_ANY_ID,
2735          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2736          .class_mask = (0xFFFF00)
2737          },
2738         {.vendor = PCI_VENDOR_ID_ROHM,
2739          .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2740          .subvendor = PCI_ANY_ID,
2741          .subdevice = PCI_ANY_ID,
2742          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2743          .class_mask = (0xFFFF00)
2744          },
2745         {.vendor = PCI_VENDOR_ID_ROHM,
2746          .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2747          .subvendor = PCI_ANY_ID,
2748          .subdevice = PCI_ANY_ID,
2749          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2750          .class_mask = (0xFFFF00)
2751          },
2752         /* required last entry */
2753         {0}
2754 };
2755
2756 #ifdef CONFIG_PM
2757 static const struct dev_pm_ops pch_gbe_pm_ops = {
2758         .suspend = pch_gbe_suspend,
2759         .resume = pch_gbe_resume,
2760         .freeze = pch_gbe_suspend,
2761         .thaw = pch_gbe_resume,
2762         .poweroff = pch_gbe_suspend,
2763         .restore = pch_gbe_resume,
2764 };
2765 #endif
2766
2767 static struct pci_error_handlers pch_gbe_err_handler = {
2768         .error_detected = pch_gbe_io_error_detected,
2769         .slot_reset = pch_gbe_io_slot_reset,
2770         .resume = pch_gbe_io_resume
2771 };
2772
2773 static struct pci_driver pch_gbe_driver = {
2774         .name = KBUILD_MODNAME,
2775         .id_table = pch_gbe_pcidev_id,
2776         .probe = pch_gbe_probe,
2777         .remove = pch_gbe_remove,
2778 #ifdef CONFIG_PM
2779         .driver.pm = &pch_gbe_pm_ops,
2780 #endif
2781         .shutdown = pch_gbe_shutdown,
2782         .err_handler = &pch_gbe_err_handler
2783 };
2784
2785
2786 static int __init pch_gbe_init_module(void)
2787 {
2788         int ret;
2789
2790         ret = pci_register_driver(&pch_gbe_driver);
2791         if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2792                 if (copybreak == 0) {
2793                         pr_info("copybreak disabled\n");
2794                 } else {
2795                         pr_info("copybreak enabled for packets <= %u bytes\n",
2796                                 copybreak);
2797                 }
2798         }
2799         return ret;
2800 }
2801
2802 static void __exit pch_gbe_exit_module(void)
2803 {
2804         pci_unregister_driver(&pch_gbe_driver);
2805 }
2806
2807 module_init(pch_gbe_init_module);
2808 module_exit(pch_gbe_exit_module);
2809
2810 MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
2811 MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
2812 MODULE_LICENSE("GPL");
2813 MODULE_VERSION(DRV_VERSION);
2814 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2815
2816 module_param(copybreak, uint, 0644);
2817 MODULE_PARM_DESC(copybreak,
2818         "Maximum size of packet that is copied to a new buffer on receive");
2819
2820 /* pch_gbe_main.c */