8035e5ff6e060d4c208208857159c7e32c0484f0
[linux-flexiantxendom0-3.2.10.git] / drivers / net / ethernet / oki-semi / pch_gbe / pch_gbe_main.c
1 /*
2  * Copyright (C) 1999 - 2010 Intel Corporation.
3  * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
4  *
5  * This code was derived from the Intel e1000e Linux driver.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; version 2 of the License.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
19  */
20
21 #include "pch_gbe.h"
22 #include "pch_gbe_api.h"
23 #include <linux/module.h>
24 #ifdef CONFIG_PCH_PTP
25 #include <linux/net_tstamp.h>
26 #include <linux/ptp_classify.h>
27 #endif
28
29 #define DRV_VERSION     "1.00"
30 const char pch_driver_version[] = DRV_VERSION;
31
32 #define PCI_DEVICE_ID_INTEL_IOH1_GBE    0x8802          /* Pci device ID */
33 #define PCH_GBE_MAR_ENTRIES             16
34 #define PCH_GBE_SHORT_PKT               64
35 #define DSC_INIT16                      0xC000
36 #define PCH_GBE_DMA_ALIGN               0
37 #define PCH_GBE_DMA_PADDING             2
38 #define PCH_GBE_WATCHDOG_PERIOD         (1 * HZ)        /* watchdog time */
39 #define PCH_GBE_COPYBREAK_DEFAULT       256
40 #define PCH_GBE_PCI_BAR                 1
41 #define PCH_GBE_RESERVE_MEMORY          0x200000        /* 2MB */
42
43 /* Macros for ML7223 */
44 #define PCI_VENDOR_ID_ROHM                      0x10db
45 #define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013
46
47 /* Macros for ML7831 */
48 #define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802
49
50 #define PCH_GBE_TX_WEIGHT         64
51 #define PCH_GBE_RX_WEIGHT         64
52 #define PCH_GBE_RX_BUFFER_WRITE   16
53
54 /* Initialize the wake-on-LAN settings */
55 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
56
57 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
58         PCH_GBE_CHIP_TYPE_INTERNAL | \
59         PCH_GBE_RGMII_MODE_RGMII     \
60         )
61
62 /* Ethertype field values */
63 #define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
64 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
65 #define PCH_GBE_FRAME_SIZE_2048         2048
66 #define PCH_GBE_FRAME_SIZE_4096         4096
67 #define PCH_GBE_FRAME_SIZE_8192         8192
68
69 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
70 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
71 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
72 #define PCH_GBE_DESC_UNUSED(R) \
73         ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
74         (R)->next_to_clean - (R)->next_to_use - 1)
75
76 /* Pause packet value */
77 #define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
78 #define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
79 #define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
80 #define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
81
82 #define PCH_GBE_ETH_ALEN            6
83
84 /* This defines the bits that are set in the Interrupt Mask
85  * Set/Read Register.  Each bit is documented below:
86  *   o RXT0   = Receiver Timer Interrupt (ring 0)
87  *   o TXDW   = Transmit Descriptor Written Back
88  *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
89  *   o RXSEQ  = Receive Sequence Error
90  *   o LSC    = Link Status Change
91  */
92 #define PCH_GBE_INT_ENABLE_MASK ( \
93         PCH_GBE_INT_RX_DMA_CMPLT |    \
94         PCH_GBE_INT_RX_DSC_EMP   |    \
95         PCH_GBE_INT_RX_FIFO_ERR  |    \
96         PCH_GBE_INT_WOL_DET      |    \
97         PCH_GBE_INT_TX_CMPLT          \
98         )
99
100 #define PCH_GBE_INT_DISABLE_ALL         0
101
102 #ifdef CONFIG_PCH_PTP
103 /* Macros for ieee1588 */
104 #define TICKS_NS_SHIFT  5
105
106 /* 0x40 Time Synchronization Channel Control Register Bits */
107 #define MASTER_MODE   (1<<0)
108 #define SLAVE_MODE    (0<<0)
109 #define V2_MODE       (1<<31)
110 #define CAP_MODE0     (0<<16)
111 #define CAP_MODE2     (1<<17)
112
113 /* 0x44 Time Synchronization Channel Event Register Bits */
114 #define TX_SNAPSHOT_LOCKED (1<<0)
115 #define RX_SNAPSHOT_LOCKED (1<<1)
116 #endif
117
118 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
119
120 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
121 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122                                int data);
123
124 #ifdef CONFIG_PCH_PTP
/* BPF classifier program (PTP_FILTER from <linux/ptp_classify.h>) run
 * over received frames to identify PTP event packets. */
static struct sock_filter ptp_filter[] = {
	PTP_FILTER
};
128
129 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
130 {
131         u8 *data = skb->data;
132         unsigned int offset;
133         u16 *hi, *id;
134         u32 lo;
135
136         if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
137                 (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
138                 return 0;
139         }
140
141         offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
142
143         if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
144                 return 0;
145
146         hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
147         id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
148
149         memcpy(&lo, &hi[1], sizeof(lo));
150
151         return (uid_hi == *hi &&
152                 uid_lo == lo &&
153                 seqid  == *id);
154 }
155
/**
 * pch_rx_timestamp - Attach a hardware Rx timestamp to a received skb
 * @adapter: Board private structure
 * @skb:     Received frame to be stamped
 *
 * Reads the IEEE 1588 Rx snapshot from the companion PTP device and,
 * if the captured source UUID / sequence id matches this skb, stores
 * the timestamp in the skb's shared hwtstamps.  The snapshot-locked
 * event is cleared in either case so the hardware can capture the
 * next frame.
 */
static void pch_rx_timestamp(
			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	/* Rx timestamping disabled via SIOCSHWTSTAMP - nothing to do */
	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	/* No Rx snapshot has been captured yet */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	/* The high word packs the top UUID bits and the sequence id */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	/* Snapshot belongs to a different packet - just release it */
	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	/* Convert hardware ticks to nanoseconds (left shift by 5) */
	ns = pch_rx_snap_read(pdev);
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* Re-arm the snapshot logic for the next frame */
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}
194
/**
 * pch_tx_timestamp - Report the hardware Tx timestamp for an skb
 * @adapter: Board private structure
 * @skb:     Transmitted frame that may have requested a timestamp
 *
 * If the skb requested a hardware timestamp and Tx stamping is
 * enabled, polls the companion IEEE 1588 device (up to ~100 us) for
 * the Tx snapshot, converts it to nanoseconds and delivers it to the
 * stack via skb_tstamp_tx().  The snapshot-locked event is cleared
 * afterwards.
 */
static void pch_tx_timestamp(
			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	/* Only stamp frames that asked for it while Tx stamping is on */
	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* Timed out - tell the stack no timestamp is coming */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	/* Convert hardware ticks to nanoseconds (left shift by 5) */
	ns = pch_tx_snap_read(pdev);
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	/* Re-arm the Tx snapshot logic */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}
237
/**
 * hwtstamp_ioctl - Handle the SIOCSHWTSTAMP time-stamping ioctl
 * @netdev: Network interface device structure
 * @ifr:    ifreq carrying a user-space struct hwtstamp_config
 * @cmd:    ioctl command (unused)
 *
 * Validates the requested tx/rx time-stamp configuration, programs the
 * companion IEEE 1588 device accordingly and echoes the accepted
 * configuration back to user space.
 *
 * Returns: 0 on success, -EFAULT on a bad user pointer, -EINVAL for
 * reserved flags, -ERANGE for an unsupported tx type or rx filter.
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		adapter->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		adapter->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* NOTE(review): hwts_rx_en stays 0 here although a slave
		 * capture mode is programmed - confirm this is intended. */
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
289 #endif
290
/**
 * pch_gbe_mac_load_mac_addr - Kick the hardware MAC address load
 * @hw: Pointer to the HW structure
 *
 * Writes 1 to MAC_ADDR_LOAD - presumably triggering the MAC to reload
 * its station address registers (confirm against the HW spec).
 */
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}
295
296 /**
297  * pch_gbe_mac_read_mac_addr - Read MAC address
298  * @hw:             Pointer to the HW structure
299  * Returns
300  *      0:                      Successful.
301  */
302 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
303 {
304         u32  adr1a, adr1b;
305
306         adr1a = ioread32(&hw->reg->mac_adr[0].high);
307         adr1b = ioread32(&hw->reg->mac_adr[0].low);
308
309         hw->mac.addr[0] = (u8)(adr1a & 0xFF);
310         hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
311         hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
312         hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
313         hw->mac.addr[4] = (u8)(adr1b & 0xFF);
314         hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
315
316         pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
317         return 0;
318 }
319
320 /**
321  * pch_gbe_wait_clr_bit - Wait to clear a bit
322  * @reg:        Pointer of register
323  * @busy:       Busy bit
324  */
325 static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
326 {
327         u32 tmp;
328         /* wait busy */
329         tmp = 1000;
330         while ((ioread32(reg) & bit) && --tmp)
331                 cpu_relax();
332         if (!tmp)
333                 pr_err("Error: busy bit is not cleared\n");
334 }
335
336 /**
337  * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
338  * @reg:        Pointer of register
339  * @busy:       Busy bit
340  */
341 static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
342 {
343         u32 tmp;
344         int ret = -1;
345         /* wait busy */
346         tmp = 20;
347         while ((ioread32(reg) & bit) && --tmp)
348                 udelay(5);
349         if (!tmp)
350                 pr_err("Error: busy bit is not cleared\n");
351         else
352                 ret = 0;
353         return ret;
354 }
355
356 /**
357  * pch_gbe_mac_mar_set - Set MAC address register
358  * @hw:     Pointer to the HW structure
359  * @addr:   Pointer to the MAC address
360  * @index:  MAC address array register
361  */
362 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
363 {
364         u32 mar_low, mar_high, adrmask;
365
366         pr_debug("index : 0x%x\n", index);
367
368         /*
369          * HW expects these in little endian so we reverse the byte order
370          * from network order (big endian) to little endian
371          */
372         mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
373                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
374         mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
375         /* Stop the MAC Address of index. */
376         adrmask = ioread32(&hw->reg->ADDR_MASK);
377         iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
378         /* wait busy */
379         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
380         /* Set the MAC address to the MAC address 1A/1B register */
381         iowrite32(mar_high, &hw->reg->mac_adr[index].high);
382         iowrite32(mar_low, &hw->reg->mac_adr[index].low);
383         /* Start the MAC address of index */
384         iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
385         /* wait busy */
386         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
387 }
388
389 /**
390  * pch_gbe_mac_reset_hw - Reset hardware
391  * @hw: Pointer to the HW structure
392  */
393 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
394 {
395         /* Read the MAC address. and store to the private data */
396         pch_gbe_mac_read_mac_addr(hw);
397         iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
398 #ifdef PCH_GBE_MAC_IFOP_RGMII
399         iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
400 #endif
401         pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
402         /* Setup the receive address */
403         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
404         return;
405 }
406
/* Reset only the Receive unit, using the IRQ-safe busy-wait helper so
 * this can run from atomic context.  NOTE(review): the helper's
 * timeout return value is ignored here. */
static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
{
	/* Read the MAC address. and store to the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
	/* Setup the MAC address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}
417
418 /**
419  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
420  * @hw: Pointer to the HW structure
421  * @mar_count: Receive address registers
422  */
423 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
424 {
425         u32 i;
426
427         /* Setup the receive address */
428         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
429
430         /* Zero out the other receive addresses */
431         for (i = 1; i < mar_count; i++) {
432                 iowrite32(0, &hw->reg->mac_adr[i].high);
433                 iowrite32(0, &hw->reg->mac_adr[i].low);
434         }
435         iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
436         /* wait busy */
437         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
438 }
439
440
441 /**
442  * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
443  * @hw:             Pointer to the HW structure
444  * @mc_addr_list:   Array of multicast addresses to program
445  * @mc_addr_count:  Number of multicast addresses to program
446  * @mar_used_count: The first MAC Address register free to program
447  * @mar_total_num:  Total number of supported MAC Address Registers
448  */
449 static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
450                                             u8 *mc_addr_list, u32 mc_addr_count,
451                                             u32 mar_used_count, u32 mar_total_num)
452 {
453         u32 i, adrmask;
454
455         /* Load the first set of multicast addresses into the exact
456          * filters (RAR).  If there are not enough to fill the RAR
457          * array, clear the filters.
458          */
459         for (i = mar_used_count; i < mar_total_num; i++) {
460                 if (mc_addr_count) {
461                         pch_gbe_mac_mar_set(hw, mc_addr_list, i);
462                         mc_addr_count--;
463                         mc_addr_list += PCH_GBE_ETH_ALEN;
464                 } else {
465                         /* Clear MAC address mask */
466                         adrmask = ioread32(&hw->reg->ADDR_MASK);
467                         iowrite32((adrmask | (0x0001 << i)),
468                                         &hw->reg->ADDR_MASK);
469                         /* wait busy */
470                         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
471                         /* Clear MAC address */
472                         iowrite32(0, &hw->reg->mac_adr[i].high);
473                         iowrite32(0, &hw->reg->mac_adr[i].low);
474                 }
475         }
476 }
477
478 /**
479  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
480  * @hw:             Pointer to the HW structure
481  * Returns
482  *      0:                      Successful.
483  *      Negative value:         Failed.
484  */
485 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
486 {
487         struct pch_gbe_mac_info *mac = &hw->mac;
488         u32 rx_fctrl;
489
490         pr_debug("mac->fc = %u\n", mac->fc);
491
492         rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
493
494         switch (mac->fc) {
495         case PCH_GBE_FC_NONE:
496                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
497                 mac->tx_fc_enable = false;
498                 break;
499         case PCH_GBE_FC_RX_PAUSE:
500                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
501                 mac->tx_fc_enable = false;
502                 break;
503         case PCH_GBE_FC_TX_PAUSE:
504                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
505                 mac->tx_fc_enable = true;
506                 break;
507         case PCH_GBE_FC_FULL:
508                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
509                 mac->tx_fc_enable = true;
510                 break;
511         default:
512                 pr_err("Flow control param set incorrectly\n");
513                 return -EINVAL;
514         }
515         if (mac->link_duplex == DUPLEX_HALF)
516                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
517         iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
518         pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
519                  ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
520         return 0;
521 }
522
523 /**
524  * pch_gbe_mac_set_wol_event - Set wake-on-lan event
525  * @hw:     Pointer to the HW structure
526  * @wu_evt: Wake up event
527  */
528 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
529 {
530         u32 addr_mask;
531
532         pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
533                  wu_evt, ioread32(&hw->reg->ADDR_MASK));
534
535         if (wu_evt) {
536                 /* Set Wake-On-Lan address mask */
537                 addr_mask = ioread32(&hw->reg->ADDR_MASK);
538                 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
539                 /* wait busy */
540                 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
541                 iowrite32(0, &hw->reg->WOL_ST);
542                 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
543                 iowrite32(0x02, &hw->reg->TCPIP_ACC);
544                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
545         } else {
546                 iowrite32(0, &hw->reg->WOL_CTRL);
547                 iowrite32(0, &hw->reg->WOL_ST);
548         }
549         return;
550 }
551
552 /**
553  * pch_gbe_mac_ctrl_miim - Control MIIM interface
554  * @hw:   Pointer to the HW structure
555  * @addr: Address of PHY
556  * @dir:  Operetion. (Write or Read)
557  * @reg:  Access register of PHY
558  * @data: Write data.
559  *
560  * Returns: Read date.
561  */
562 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
563                         u16 data)
564 {
565         u32 data_out = 0;
566         unsigned int i;
567         unsigned long flags;
568
569         spin_lock_irqsave(&hw->miim_lock, flags);
570
571         for (i = 100; i; --i) {
572                 if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
573                         break;
574                 udelay(20);
575         }
576         if (i == 0) {
577                 pr_err("pch-gbe.miim won't go Ready\n");
578                 spin_unlock_irqrestore(&hw->miim_lock, flags);
579                 return 0;       /* No way to indicate timeout error */
580         }
581         iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
582                   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
583                   dir | data), &hw->reg->MIIM);
584         for (i = 0; i < 100; i++) {
585                 udelay(20);
586                 data_out = ioread32(&hw->reg->MIIM);
587                 if ((data_out & PCH_GBE_MIIM_OPER_READY))
588                         break;
589         }
590         spin_unlock_irqrestore(&hw->miim_lock, flags);
591
592         pr_debug("PHY %s: reg=%d, data=0x%04X\n",
593                  dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
594                  dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
595         return (u16) data_out;
596 }
597
598 /**
599  * pch_gbe_mac_set_pause_packet - Set pause packet
600  * @hw:   Pointer to the HW structure
601  */
602 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
603 {
604         unsigned long tmp2, tmp3;
605
606         /* Set Pause packet */
607         tmp2 = hw->mac.addr[1];
608         tmp2 = (tmp2 << 8) | hw->mac.addr[0];
609         tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
610
611         tmp3 = hw->mac.addr[5];
612         tmp3 = (tmp3 << 8) | hw->mac.addr[4];
613         tmp3 = (tmp3 << 8) | hw->mac.addr[3];
614         tmp3 = (tmp3 << 8) | hw->mac.addr[2];
615
616         iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
617         iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
618         iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
619         iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
620         iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
621
622         /* Transmit Pause Packet */
623         iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
624
625         pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
626                  ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
627                  ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
628                  ioread32(&hw->reg->PAUSE_PKT5));
629
630         return;
631 }
632
633
634 /**
635  * pch_gbe_alloc_queues - Allocate memory for all rings
636  * @adapter:  Board private structure to initialize
637  * Returns
638  *      0:      Successfully
639  *      Negative value: Failed
640  */
641 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
642 {
643         int size;
644
645         size = (int)sizeof(struct pch_gbe_tx_ring);
646         adapter->tx_ring = kzalloc(size, GFP_KERNEL);
647         if (!adapter->tx_ring)
648                 return -ENOMEM;
649         size = (int)sizeof(struct pch_gbe_rx_ring);
650         adapter->rx_ring = kzalloc(size, GFP_KERNEL);
651         if (!adapter->rx_ring) {
652                 kfree(adapter->tx_ring);
653                 return -ENOMEM;
654         }
655         return 0;
656 }
657
658 /**
659  * pch_gbe_init_stats - Initialize status
660  * @adapter:  Board private structure to initialize
661  */
662 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
663 {
664         memset(&adapter->stats, 0, sizeof(adapter->stats));
665         return;
666 }
667
668 /**
669  * pch_gbe_init_phy - Initialize PHY
670  * @adapter:  Board private structure to initialize
671  * Returns
672  *      0:      Successfully
673  *      Negative value: Failed
674  */
675 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
676 {
677         struct net_device *netdev = adapter->netdev;
678         u32 addr;
679         u16 bmcr, stat;
680
681         /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
682         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
683                 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
684                 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
685                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
686                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
687                 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
688                         break;
689         }
690         adapter->hw.phy.addr = adapter->mii.phy_id;
691         pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
692         if (addr == 32)
693                 return -EAGAIN;
694         /* Selected the phy and isolate the rest */
695         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
696                 if (addr != adapter->mii.phy_id) {
697                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
698                                            BMCR_ISOLATE);
699                 } else {
700                         bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
701                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
702                                            bmcr & ~BMCR_ISOLATE);
703                 }
704         }
705
706         /* MII setup */
707         adapter->mii.phy_id_mask = 0x1F;
708         adapter->mii.reg_num_mask = 0x1F;
709         adapter->mii.dev = adapter->netdev;
710         adapter->mii.mdio_read = pch_gbe_mdio_read;
711         adapter->mii.mdio_write = pch_gbe_mdio_write;
712         adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
713         return 0;
714 }
715
716 /**
717  * pch_gbe_mdio_read - The read function for mii
718  * @netdev: Network interface device structure
719  * @addr:   Phy ID
720  * @reg:    Access location
721  * Returns
722  *      0:      Successfully
723  *      Negative value: Failed
724  */
725 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
726 {
727         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
728         struct pch_gbe_hw *hw = &adapter->hw;
729
730         return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
731                                      (u16) 0);
732 }
733
734 /**
735  * pch_gbe_mdio_write - The write function for mii
736  * @netdev: Network interface device structure
737  * @addr:   Phy ID (not used)
738  * @reg:    Access location
739  * @data:   Write data
740  */
741 static void pch_gbe_mdio_write(struct net_device *netdev,
742                                int addr, int reg, int data)
743 {
744         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
745         struct pch_gbe_hw *hw = &adapter->hw;
746
747         pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
748 }
749
750 /**
751  * pch_gbe_reset_task - Reset processing at the time of transmission timeout
752  * @work:  Pointer of board private structure
753  */
754 static void pch_gbe_reset_task(struct work_struct *work)
755 {
756         struct pch_gbe_adapter *adapter;
757         adapter = container_of(work, struct pch_gbe_adapter, reset_task);
758
759         rtnl_lock();
760         pch_gbe_reinit_locked(adapter);
761         rtnl_unlock();
762 }
763
764 /**
765  * pch_gbe_reinit_locked- Re-initialization
766  * @adapter:  Board private structure
767  */
768 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
769 {
770         pch_gbe_down(adapter);
771         pch_gbe_up(adapter);
772 }
773
774 /**
775  * pch_gbe_reset - Reset GbE
776  * @adapter:  Board private structure
777  */
778 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
779 {
780         pch_gbe_mac_reset_hw(&adapter->hw);
781         /* Setup the receive address. */
782         pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
783         if (pch_gbe_hal_init_hw(&adapter->hw))
784                 pr_err("Hardware Error\n");
785 }
786
787 /**
788  * pch_gbe_free_irq - Free an interrupt
789  * @adapter:  Board private structure
790  */
791 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
792 {
793         struct net_device *netdev = adapter->netdev;
794
795         free_irq(adapter->pdev->irq, netdev);
796         if (adapter->have_msi) {
797                 pci_disable_msi(adapter->pdev);
798                 pr_debug("call pci_disable_msi\n");
799         }
800 }
801
802 /**
803  * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
804  * @adapter:  Board private structure
805  */
806 static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
807 {
808         struct pch_gbe_hw *hw = &adapter->hw;
809
810         atomic_inc(&adapter->irq_sem);
811         iowrite32(0, &hw->reg->INT_EN);
812         ioread32(&hw->reg->INT_ST);
813         synchronize_irq(adapter->pdev->irq);
814
815         pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
816 }
817
818 /**
819  * pch_gbe_irq_enable - Enable default interrupt generation settings
820  * @adapter:  Board private structure
821  */
822 static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
823 {
824         struct pch_gbe_hw *hw = &adapter->hw;
825
826         if (likely(atomic_dec_and_test(&adapter->irq_sem)))
827                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
828         ioread32(&hw->reg->INT_ST);
829         pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
830 }
831
832
833
834 /**
835  * pch_gbe_setup_tctl - configure the Transmit control registers
836  * @adapter:  Board private structure
837  */
838 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
839 {
840         struct pch_gbe_hw *hw = &adapter->hw;
841         u32 tx_mode, tcpip;
842
843         tx_mode = PCH_GBE_TM_LONG_PKT |
844                 PCH_GBE_TM_ST_AND_FD |
845                 PCH_GBE_TM_SHORT_PKT |
846                 PCH_GBE_TM_TH_TX_STRT_8 |
847                 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
848
849         iowrite32(tx_mode, &hw->reg->TX_MODE);
850
851         tcpip = ioread32(&hw->reg->TCPIP_ACC);
852         tcpip |= PCH_GBE_TX_TCPIPACC_EN;
853         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
854         return;
855 }
856
857 /**
858  * pch_gbe_configure_tx - Configure Transmit Unit after Reset
859  * @adapter:  Board private structure
860  */
861 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
862 {
863         struct pch_gbe_hw *hw = &adapter->hw;
864         u32 tdba, tdlen, dctrl;
865
866         pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
867                  (unsigned long long)adapter->tx_ring->dma,
868                  adapter->tx_ring->size);
869
870         /* Setup the HW Tx Head and Tail descriptor pointers */
871         tdba = adapter->tx_ring->dma;
872         tdlen = adapter->tx_ring->size - 0x10;
873         iowrite32(tdba, &hw->reg->TX_DSC_BASE);
874         iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
875         iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
876
877         /* Enables Transmission DMA */
878         dctrl = ioread32(&hw->reg->DMA_CTRL);
879         dctrl |= PCH_GBE_TX_DMA_EN;
880         iowrite32(dctrl, &hw->reg->DMA_CTRL);
881 }
882
883 /**
884  * pch_gbe_setup_rctl - Configure the receive control registers
885  * @adapter:  Board private structure
886  */
887 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
888 {
889         struct pch_gbe_hw *hw = &adapter->hw;
890         u32 rx_mode, tcpip;
891
892         rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
893         PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
894
895         iowrite32(rx_mode, &hw->reg->RX_MODE);
896
897         tcpip = ioread32(&hw->reg->TCPIP_ACC);
898
899         tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
900         tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
901         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
902         return;
903 }
904
905 /**
906  * pch_gbe_configure_rx - Configure Receive Unit after Reset
907  * @adapter:  Board private structure
908  */
909 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
910 {
911         struct pch_gbe_hw *hw = &adapter->hw;
912         u32 rdba, rdlen, rctl, rxdma;
913
914         pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
915                  (unsigned long long)adapter->rx_ring->dma,
916                  adapter->rx_ring->size);
917
918         pch_gbe_mac_force_mac_fc(hw);
919
920         /* Disables Receive MAC */
921         rctl = ioread32(&hw->reg->MAC_RX_EN);
922         iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
923
924         /* Disables Receive DMA */
925         rxdma = ioread32(&hw->reg->DMA_CTRL);
926         rxdma &= ~PCH_GBE_RX_DMA_EN;
927         iowrite32(rxdma, &hw->reg->DMA_CTRL);
928
929         pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
930                  ioread32(&hw->reg->MAC_RX_EN),
931                  ioread32(&hw->reg->DMA_CTRL));
932
933         /* Setup the HW Rx Head and Tail Descriptor Pointers and
934          * the Base and Length of the Rx Descriptor Ring */
935         rdba = adapter->rx_ring->dma;
936         rdlen = adapter->rx_ring->size - 0x10;
937         iowrite32(rdba, &hw->reg->RX_DSC_BASE);
938         iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
939         iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
940 }
941
942 /**
943  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
944  * @adapter:     Board private structure
945  * @buffer_info: Buffer information structure
946  */
947 static void pch_gbe_unmap_and_free_tx_resource(
948         struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
949 {
950         if (buffer_info->mapped) {
951                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
952                                  buffer_info->length, DMA_TO_DEVICE);
953                 buffer_info->mapped = false;
954         }
955         if (buffer_info->skb) {
956                 dev_kfree_skb_any(buffer_info->skb);
957                 buffer_info->skb = NULL;
958         }
959 }
960
961 /**
962  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
963  * @adapter:      Board private structure
964  * @buffer_info:  Buffer information structure
965  */
966 static void pch_gbe_unmap_and_free_rx_resource(
967                                         struct pch_gbe_adapter *adapter,
968                                         struct pch_gbe_buffer *buffer_info)
969 {
970         if (buffer_info->mapped) {
971                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
972                                  buffer_info->length, DMA_FROM_DEVICE);
973                 buffer_info->mapped = false;
974         }
975         if (buffer_info->skb) {
976                 dev_kfree_skb_any(buffer_info->skb);
977                 buffer_info->skb = NULL;
978         }
979 }
980
981 /**
982  * pch_gbe_clean_tx_ring - Free Tx Buffers
983  * @adapter:  Board private structure
984  * @tx_ring:  Ring to be cleaned
985  */
986 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
987                                    struct pch_gbe_tx_ring *tx_ring)
988 {
989         struct pch_gbe_hw *hw = &adapter->hw;
990         struct pch_gbe_buffer *buffer_info;
991         unsigned long size;
992         unsigned int i;
993
994         /* Free all the Tx ring sk_buffs */
995         for (i = 0; i < tx_ring->count; i++) {
996                 buffer_info = &tx_ring->buffer_info[i];
997                 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
998         }
999         pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
1000
1001         size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1002         memset(tx_ring->buffer_info, 0, size);
1003
1004         /* Zero out the descriptor ring */
1005         memset(tx_ring->desc, 0, tx_ring->size);
1006         tx_ring->next_to_use = 0;
1007         tx_ring->next_to_clean = 0;
1008         iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
1009         iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
1010 }
1011
1012 /**
1013  * pch_gbe_clean_rx_ring - Free Rx Buffers
1014  * @adapter:  Board private structure
1015  * @rx_ring:  Ring to free buffers from
1016  */
1017 static void
1018 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
1019                       struct pch_gbe_rx_ring *rx_ring)
1020 {
1021         struct pch_gbe_hw *hw = &adapter->hw;
1022         struct pch_gbe_buffer *buffer_info;
1023         unsigned long size;
1024         unsigned int i;
1025
1026         /* Free all the Rx ring sk_buffs */
1027         for (i = 0; i < rx_ring->count; i++) {
1028                 buffer_info = &rx_ring->buffer_info[i];
1029                 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
1030         }
1031         pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
1032         size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1033         memset(rx_ring->buffer_info, 0, size);
1034
1035         /* Zero out the descriptor ring */
1036         memset(rx_ring->desc, 0, rx_ring->size);
1037         rx_ring->next_to_clean = 0;
1038         rx_ring->next_to_use = 0;
1039         iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
1040         iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
1041 }
1042
1043 static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
1044                                     u16 duplex)
1045 {
1046         struct pch_gbe_hw *hw = &adapter->hw;
1047         unsigned long rgmii = 0;
1048
1049         /* Set the RGMII control. */
1050 #ifdef PCH_GBE_MAC_IFOP_RGMII
1051         switch (speed) {
1052         case SPEED_10:
1053                 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
1054                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1055                 break;
1056         case SPEED_100:
1057                 rgmii = (PCH_GBE_RGMII_RATE_25M |
1058                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1059                 break;
1060         case SPEED_1000:
1061                 rgmii = (PCH_GBE_RGMII_RATE_125M |
1062                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1063                 break;
1064         }
1065         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1066 #else   /* GMII */
1067         rgmii = 0;
1068         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1069 #endif
1070 }
1071 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1072                               u16 duplex)
1073 {
1074         struct net_device *netdev = adapter->netdev;
1075         struct pch_gbe_hw *hw = &adapter->hw;
1076         unsigned long mode = 0;
1077
1078         /* Set the communication mode */
1079         switch (speed) {
1080         case SPEED_10:
1081                 mode = PCH_GBE_MODE_MII_ETHER;
1082                 netdev->tx_queue_len = 10;
1083                 break;
1084         case SPEED_100:
1085                 mode = PCH_GBE_MODE_MII_ETHER;
1086                 netdev->tx_queue_len = 100;
1087                 break;
1088         case SPEED_1000:
1089                 mode = PCH_GBE_MODE_GMII_ETHER;
1090                 break;
1091         }
1092         if (duplex == DUPLEX_FULL)
1093                 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1094         else
1095                 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1096         iowrite32(mode, &hw->reg->MODE);
1097 }
1098
1099 /**
1100  * pch_gbe_watchdog - Watchdog process
1101  * @data:  Board private structure
1102  */
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	pr_debug("right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	/* Link came up: read the negotiated speed/duplex from the PHY,
	 * reprogram the MAC to match, then start the TX queue. */
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			/* Re-arm the timer even on failure so we retry
			 * on the next watchdog period. */
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
						hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		/* Link went down: record a conservative 10/half state and
		 * stop the TX queue. */
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	/* Periodic re-arm: the watchdog runs every PCH_GBE_WATCHDOG_PERIOD. */
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}
1148
1149 /**
1150  * pch_gbe_tx_queue - Carry out queuing of the transmission data
1151  * @adapter:  Board private structure
1152  * @tx_ring:  Tx descriptor ring structure
1153  * @skb:      Sockt buffer structure
1154  */
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_tx_ring *tx_ring,
			      struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;
	unsigned long flags;

	/*-- Set frame control: auto-pad short frames, and bypass the
	 * checksum accelerator when the stack did not ask for offload --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing in software.
	 *
	 * The hardware accelerator does not compute a checksum when the
	 * frame is shorter than 64 bytes, so for short frames that still
	 * need offload we fill in the IP and TCP/UDP checksums here and
	 * turn the accelerator off for this descriptor.
	 */
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			/* Recompute the IP header checksum over ihl words. */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				/* Checksum the TCP segment with a zeroed
				 * check field, then add the pseudo-header. */
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				/* Same procedure for UDP datagrams. */
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}
	/* Claim the next descriptor slot under the ring lock. */
	spin_lock_irqsave(&tx_ring->tx_lock, flags);
	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* Copy the frame into the preallocated slot buffer, inserting two
	 * zero bytes after the Ethernet header:
	 * [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information and map it for device access --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		pr_err("TX DMA map failed\n");
		/* Roll back the slot claimed above; the skb is NOT freed
		 * here (caller-visible behavior unchanged). */
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor; gbec_status = DSC_INIT16 marks it as
	 * handed to the hardware --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor so the hardware
	 * starts fetching this descriptor. */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

#ifdef CONFIG_PCH_PTP
	pch_tx_timestamp(adapter, skb);
#endif

	/* The original skb was copied into tmp_skb above; drop it. */
	dev_kfree_skb_any(skb);
}
1269
1270 /**
1271  * pch_gbe_update_stats - Update the board statistics counters
1272  * @adapter:  Board private structure
1273  */
1274 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1275 {
1276         struct net_device *netdev = adapter->netdev;
1277         struct pci_dev *pdev = adapter->pdev;
1278         struct pch_gbe_hw_stats *stats = &adapter->stats;
1279         unsigned long flags;
1280
1281         /*
1282          * Prevent stats update while adapter is being reset, or if the pci
1283          * connection is down.
1284          */
1285         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1286                 return;
1287
1288         spin_lock_irqsave(&adapter->stats_lock, flags);
1289
1290         /* Update device status "adapter->stats" */
1291         stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1292         stats->tx_errors = stats->tx_length_errors +
1293             stats->tx_aborted_errors +
1294             stats->tx_carrier_errors + stats->tx_timeout_count;
1295
1296         /* Update network device status "adapter->net_stats" */
1297         netdev->stats.rx_packets = stats->rx_packets;
1298         netdev->stats.rx_bytes = stats->rx_bytes;
1299         netdev->stats.rx_dropped = stats->rx_dropped;
1300         netdev->stats.tx_packets = stats->tx_packets;
1301         netdev->stats.tx_bytes = stats->tx_bytes;
1302         netdev->stats.tx_dropped = stats->tx_dropped;
1303         /* Fill out the OS statistics structure */
1304         netdev->stats.multicast = stats->multicast;
1305         netdev->stats.collisions = stats->collisions;
1306         /* Rx Errors */
1307         netdev->stats.rx_errors = stats->rx_errors;
1308         netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1309         netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1310         /* Tx Errors */
1311         netdev->stats.tx_errors = stats->tx_errors;
1312         netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1313         netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1314
1315         spin_unlock_irqrestore(&adapter->stats_lock, flags);
1316 }
1317
1318 static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1319 {
1320         struct pch_gbe_hw *hw = &adapter->hw;
1321         u32 rxdma;
1322         u16 value;
1323         int ret;
1324
1325         /* Disable Receive DMA */
1326         rxdma = ioread32(&hw->reg->DMA_CTRL);
1327         rxdma &= ~PCH_GBE_RX_DMA_EN;
1328         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1329         /* Wait Rx DMA BUS is IDLE */
1330         ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
1331         if (ret) {
1332                 /* Disable Bus master */
1333                 pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
1334                 value &= ~PCI_COMMAND_MASTER;
1335                 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1336                 /* Stop Receive */
1337                 pch_gbe_mac_reset_rx(hw);
1338                 /* Enable Bus master */
1339                 value |= PCI_COMMAND_MASTER;
1340                 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1341         } else {
1342                 /* Stop Receive */
1343                 pch_gbe_mac_reset_rx(hw);
1344         }
1345 }
1346
1347 static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1348 {
1349         u32 rxdma;
1350
1351         /* Enables Receive DMA */
1352         rxdma = ioread32(&hw->reg->DMA_CTRL);
1353         rxdma |= PCH_GBE_RX_DMA_EN;
1354         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1355         /* Enables Receive */
1356         iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1357         return;
1358 }
1359
1360 /**
1361  * pch_gbe_intr - Interrupt Handler
1362  * @irq:   Interrupt number
1363  * @data:  Pointer to a network interface device structure
1364  * Returns
1365  *      - IRQ_HANDLED:  Our interrupt
1366  *      - IRQ_NONE:     Not our interrupt
1367  */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status: only bits that are both pending and
	 * currently enabled belong to us. */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			pr_debug("Rx fifo over run\n");
			adapter->rx_stop_flag = true;
			/* Mask further FIFO-error interrupts and stop the
			 * receiver; rx_stop_flag also forces a NAPI
			 * schedule below (recovery presumably happens in
			 * the poll path — confirm against caller). */
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_stop_receive(adapter);
			/* Pick up any status bits raised while stopping. */
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty: mask the empty interrupt (it would
	 * otherwise fire continuously) and, with TX flow control on,
	 * ask the link partner to pause. */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_debug("Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
	}

	/* When request status is Receive interruption (or an RX FIFO
	 * overrun was just handled), hand completion work to NAPI. */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty: mask completion
			 * interrupts while the poll loop runs. */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}
1435
1436 /**
1437  * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1438  * @adapter:       Board private structure
1439  * @rx_ring:       Rx descriptor ring
1440  * @cleaned_count: Cleaned count
1441  */
1442 static void
1443 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1444                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1445 {
1446         struct net_device *netdev = adapter->netdev;
1447         struct pci_dev *pdev = adapter->pdev;
1448         struct pch_gbe_hw *hw = &adapter->hw;
1449         struct pch_gbe_rx_desc *rx_desc;
1450         struct pch_gbe_buffer *buffer_info;
1451         struct sk_buff *skb;
1452         unsigned int i;
1453         unsigned int bufsz;
1454
1455         bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1456         i = rx_ring->next_to_use;
1457
1458         while ((cleaned_count--)) {
1459                 buffer_info = &rx_ring->buffer_info[i];
1460                 skb = netdev_alloc_skb(netdev, bufsz);
1461                 if (unlikely(!skb)) {
1462                         /* Better luck next round */
1463                         adapter->stats.rx_alloc_buff_failed++;
1464                         break;
1465                 }
1466                 /* align */
1467                 skb_reserve(skb, NET_IP_ALIGN);
1468                 buffer_info->skb = skb;
1469
1470                 buffer_info->dma = dma_map_single(&pdev->dev,
1471                                                   buffer_info->rx_buffer,
1472                                                   buffer_info->length,
1473                                                   DMA_FROM_DEVICE);
1474                 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1475                         dev_kfree_skb(skb);
1476                         buffer_info->skb = NULL;
1477                         buffer_info->dma = 0;
1478                         adapter->stats.rx_alloc_buff_failed++;
1479                         break; /* while !buffer_info->skb */
1480                 }
1481                 buffer_info->mapped = true;
1482                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1483                 rx_desc->buffer_addr = (buffer_info->dma);
1484                 rx_desc->gbec_status = DSC_INIT16;
1485
1486                 pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
1487                          i, (unsigned long long)buffer_info->dma,
1488                          buffer_info->length);
1489
1490                 if (unlikely(++i == rx_ring->count))
1491                         i = 0;
1492         }
1493         if (likely(rx_ring->next_to_use != i)) {
1494                 rx_ring->next_to_use = i;
1495                 if (unlikely(i-- == 0))
1496                         i = (rx_ring->count - 1);
1497                 iowrite32(rx_ring->dma +
1498                           (int)sizeof(struct pch_gbe_rx_desc) * i,
1499                           &hw->reg->RX_DSC_SW_P);
1500         }
1501         return;
1502 }
1503
1504 static int
1505 pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1506                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1507 {
1508         struct pci_dev *pdev = adapter->pdev;
1509         struct pch_gbe_buffer *buffer_info;
1510         unsigned int i;
1511         unsigned int bufsz;
1512         unsigned int size;
1513
1514         bufsz = adapter->rx_buffer_len;
1515
1516         size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1517         rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1518                                                 &rx_ring->rx_buff_pool_logic,
1519                                                 GFP_KERNEL);
1520         if (!rx_ring->rx_buff_pool) {
1521                 pr_err("Unable to allocate memory for the receive poll buffer\n");
1522                 return -ENOMEM;
1523         }
1524         memset(rx_ring->rx_buff_pool, 0, size);
1525         rx_ring->rx_buff_pool_size = size;
1526         for (i = 0; i < rx_ring->count; i++) {
1527                 buffer_info = &rx_ring->buffer_info[i];
1528                 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1529                 buffer_info->length = bufsz;
1530         }
1531         return 0;
1532 }
1533
1534 /**
1535  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1536  * @adapter:   Board private structure
1537  * @tx_ring:   Tx descriptor ring
1538  */
1539 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1540                                         struct pch_gbe_tx_ring *tx_ring)
1541 {
1542         struct pch_gbe_buffer *buffer_info;
1543         struct sk_buff *skb;
1544         unsigned int i;
1545         unsigned int bufsz;
1546         struct pch_gbe_tx_desc *tx_desc;
1547
1548         bufsz =
1549             adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1550
1551         for (i = 0; i < tx_ring->count; i++) {
1552                 buffer_info = &tx_ring->buffer_info[i];
1553                 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1554                 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1555                 buffer_info->skb = skb;
1556                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1557                 tx_desc->gbec_status = (DSC_INIT16);
1558         }
1559         return;
1560 }
1561
1562 /**
1563  * pch_gbe_clean_tx - Reclaim resources after transmit completes
1564  * @adapter:   Board private structure
1565  * @tx_ring:   Tx descriptor ring
1566  * Returns
1567  *      true:  Cleaned the descriptor
1568  *      false: Not cleaned the descriptor
1569  */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = true;

	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
		 tx_desc->gbec_status, tx_desc->dma_status);

	/* A descriptor still carrying the DSC_INIT16 marker has not been
	 * completed by the hardware yet; stop at the first such descriptor.
	 */
	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* Account the completion status reported by the MAC. */
		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
			  ) {
			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
			  ) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			/* Single/multiple collision: the frame still went
			 * out, so it also counts as transmitted.
			 */
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			pr_debug("Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
			  ) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			/* The skb is kept for reuse: trim it to zero length
			 * rather than freeing it.
			 */
			pr_debug("trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		/* Re-arm the descriptor for reuse by the transmit path. */
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
			cleaned = false;
			break;
		}
	}
	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		 cleaned_count);
	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;
		pr_debug("Tx wake queue\n");
	}
	/* next_to_clean is shared with the transmit path; publish it under
	 * the queue lock.
	 */
	spin_lock(&adapter->tx_queue_lock);
	tx_ring->next_to_clean = i;
	spin_unlock(&adapter->tx_queue_lock);
	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
	return cleaned;
}
1651
1652 /**
1653  * pch_gbe_clean_rx - Send received data up the network stack; legacy
1654  * @adapter:     Board private structure
1655  * @rx_ring:     Rx descriptor ring
1656  * @work_done:   Completed count
1657  * @work_to_do:  Request count
1658  * Returns
1659  *      true:  Cleaned the descriptor
1660  *      false: Not cleaned the descriptor
1661  */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		/* DSC_INIT16 means the descriptor has not been filled by
		 * the hardware yet - nothing more to reap.
		 */
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		/* Snapshot the status words, then immediately re-arm the
		 * descriptor for the hardware.
		 */
		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				   buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x]  BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* get receive length */
			/* length convert[-3], length includes FCS length */
			/* NOTE(review): the extra -4 for odd (bit1-set)
			 * rx_words_eob values is undocumented here -
			 * presumably a hardware padding quirk; confirm
			 * against the MAC datasheet.
			 */
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;
			/*
			 * buffer_info->rx_buffer: [Header:14][payload]
			 * skb->data: [Reserve:2][Header:14][payload]
			 */
			memcpy(skb->data, buffer_info->rx_buffer, length);

			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write meta date of skb */
			skb_put(skb, length);

#ifdef CONFIG_PCH_PTP
			pch_rx_timestamp(adapter, skb);
#endif

			skb->protocol = eth_type_trans(skb, netdev);
			/* NOTE(review): TCPIPOK mapping to CHECKSUM_NONE
			 * looks inverted relative to the bit name - verify
			 * the accelerator status semantics before changing.
			 */
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_NONE;
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
		}
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	/* Replenish whatever is still outstanding before returning. */
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}
1768
1769 /**
1770  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1771  * @adapter:  Board private structure
1772  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1773  * Returns
1774  *      0:              Successfully
1775  *      Negative value: Failed
1776  */
1777 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1778                                 struct pch_gbe_tx_ring *tx_ring)
1779 {
1780         struct pci_dev *pdev = adapter->pdev;
1781         struct pch_gbe_tx_desc *tx_desc;
1782         int size;
1783         int desNo;
1784
1785         size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1786         tx_ring->buffer_info = vzalloc(size);
1787         if (!tx_ring->buffer_info)
1788                 return -ENOMEM;
1789
1790         tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1791
1792         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1793                                            &tx_ring->dma, GFP_KERNEL);
1794         if (!tx_ring->desc) {
1795                 vfree(tx_ring->buffer_info);
1796                 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1797                 return -ENOMEM;
1798         }
1799         memset(tx_ring->desc, 0, tx_ring->size);
1800
1801         tx_ring->next_to_use = 0;
1802         tx_ring->next_to_clean = 0;
1803         spin_lock_init(&tx_ring->tx_lock);
1804
1805         for (desNo = 0; desNo < tx_ring->count; desNo++) {
1806                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1807                 tx_desc->gbec_status = DSC_INIT16;
1808         }
1809         pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
1810                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1811                  tx_ring->desc, (unsigned long long)tx_ring->dma,
1812                  tx_ring->next_to_clean, tx_ring->next_to_use);
1813         return 0;
1814 }
1815
1816 /**
1817  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1818  * @adapter:  Board private structure
1819  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1820  * Returns
1821  *      0:              Successfully
1822  *      Negative value: Failed
1823  */
1824 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1825                                 struct pch_gbe_rx_ring *rx_ring)
1826 {
1827         struct pci_dev *pdev = adapter->pdev;
1828         struct pch_gbe_rx_desc *rx_desc;
1829         int size;
1830         int desNo;
1831
1832         size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1833         rx_ring->buffer_info = vzalloc(size);
1834         if (!rx_ring->buffer_info)
1835                 return -ENOMEM;
1836
1837         rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1838         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1839                                            &rx_ring->dma, GFP_KERNEL);
1840
1841         if (!rx_ring->desc) {
1842                 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1843                 vfree(rx_ring->buffer_info);
1844                 return -ENOMEM;
1845         }
1846         memset(rx_ring->desc, 0, rx_ring->size);
1847         rx_ring->next_to_clean = 0;
1848         rx_ring->next_to_use = 0;
1849         for (desNo = 0; desNo < rx_ring->count; desNo++) {
1850                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1851                 rx_desc->gbec_status = DSC_INIT16;
1852         }
1853         pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
1854                  "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1855                  rx_ring->desc, (unsigned long long)rx_ring->dma,
1856                  rx_ring->next_to_clean, rx_ring->next_to_use);
1857         return 0;
1858 }
1859
1860 /**
1861  * pch_gbe_free_tx_resources - Free Tx Resources
1862  * @adapter:  Board private structure
1863  * @tx_ring:  Tx descriptor ring for a specific queue
1864  */
1865 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1866                                 struct pch_gbe_tx_ring *tx_ring)
1867 {
1868         struct pci_dev *pdev = adapter->pdev;
1869
1870         pch_gbe_clean_tx_ring(adapter, tx_ring);
1871         vfree(tx_ring->buffer_info);
1872         tx_ring->buffer_info = NULL;
1873         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1874         tx_ring->desc = NULL;
1875 }
1876
1877 /**
1878  * pch_gbe_free_rx_resources - Free Rx Resources
1879  * @adapter:  Board private structure
1880  * @rx_ring:  Ring to clean the resources from
1881  */
1882 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1883                                 struct pch_gbe_rx_ring *rx_ring)
1884 {
1885         struct pci_dev *pdev = adapter->pdev;
1886
1887         pch_gbe_clean_rx_ring(adapter, rx_ring);
1888         vfree(rx_ring->buffer_info);
1889         rx_ring->buffer_info = NULL;
1890         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1891         rx_ring->desc = NULL;
1892 }
1893
1894 /**
1895  * pch_gbe_request_irq - Allocate an interrupt line
1896  * @adapter:  Board private structure
1897  * Returns
1898  *      0:              Successfully
1899  *      Negative value: Failed
1900  */
1901 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1902 {
1903         struct net_device *netdev = adapter->netdev;
1904         int err;
1905         int flags;
1906
1907         flags = IRQF_SHARED;
1908         adapter->have_msi = false;
1909         err = pci_enable_msi(adapter->pdev);
1910         pr_debug("call pci_enable_msi\n");
1911         if (err) {
1912                 pr_debug("call pci_enable_msi - Error: %d\n", err);
1913         } else {
1914                 flags = 0;
1915                 adapter->have_msi = true;
1916         }
1917         err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1918                           flags, netdev->name, netdev);
1919         if (err)
1920                 pr_err("Unable to allocate interrupt Error: %d\n", err);
1921         pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
1922                  adapter->have_msi, flags, err);
1923         return err;
1924 }
1925
1926
1927 static void pch_gbe_set_multi(struct net_device *netdev);
1928 /**
1929  * pch_gbe_up - Up GbE network device
1930  * @adapter:  Board private structure
1931  * Returns
1932  *      0:              Successfully
1933  *      Negative value: Failed
1934  */
1935 int pch_gbe_up(struct pch_gbe_adapter *adapter)
1936 {
1937         struct net_device *netdev = adapter->netdev;
1938         struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1939         struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1940         int err;
1941
1942         /* Ensure we have a valid MAC */
1943         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1944                 pr_err("Error: Invalid MAC address\n");
1945                 return -EINVAL;
1946         }
1947
1948         /* hardware has been reset, we need to reload some things */
1949         pch_gbe_set_multi(netdev);
1950
1951         pch_gbe_setup_tctl(adapter);
1952         pch_gbe_configure_tx(adapter);
1953         pch_gbe_setup_rctl(adapter);
1954         pch_gbe_configure_rx(adapter);
1955
1956         err = pch_gbe_request_irq(adapter);
1957         if (err) {
1958                 pr_err("Error: can't bring device up\n");
1959                 return err;
1960         }
1961         err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1962         if (err) {
1963                 pr_err("Error: can't bring device up\n");
1964                 return err;
1965         }
1966         pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1967         pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1968         adapter->tx_queue_len = netdev->tx_queue_len;
1969         pch_gbe_start_receive(&adapter->hw);
1970
1971         mod_timer(&adapter->watchdog_timer, jiffies);
1972
1973         napi_enable(&adapter->napi);
1974         pch_gbe_irq_enable(adapter);
1975         netif_start_queue(adapter->netdev);
1976
1977         return 0;
1978 }
1979
1980 /**
1981  * pch_gbe_down - Down GbE network device
1982  * @adapter:  Board private structure
1983  */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* Restore the queue length that pch_gbe_up() saved. */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* Quiesce the hardware before tearing down the rings. */
	pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	/* Release the Rx buffer pool allocated in pch_gbe_up(). */
	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}
2013
2014 /**
2015  * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2016  * @adapter:  Board private structure to initialize
2017  * Returns
2018  *      0:              Successfully
2019  *      Negative value: Failed
2020  */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Default Rx buffer class; frame limits derive from the MTU. */
	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->tx_queue_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	/* Start with interrupts masked; irq_sem must be reset first so
	 * pch_gbe_irq_disable() sees a consistent nesting count.
	 */
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}
2053
2054 /**
2055  * pch_gbe_open - Called when a network interface is made active
2056  * @netdev:     Network interface device structure
2057  * Returns
2058  *      0:              Successfully
2059  *      Negative value: Failed
2060  */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

	/* Unwind in reverse order of acquisition. */
err_up:
	/* Keep the PHY powered when a wake event (WoL) is armed. */
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}
2093
2094 /**
2095  * pch_gbe_stop - Disables a network interface
2096  * @netdev:  Network interface device structure
2097  * Returns
2098  *      0: Successfully
2099  */
2100 static int pch_gbe_stop(struct net_device *netdev)
2101 {
2102         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2103         struct pch_gbe_hw *hw = &adapter->hw;
2104
2105         pch_gbe_down(adapter);
2106         if (!adapter->wake_up_evt)
2107                 pch_gbe_hal_power_down_phy(hw);
2108         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2109         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2110         return 0;
2111 }
2112
2113 /**
2114  * pch_gbe_xmit_frame - Packet transmitting start
2115  * @skb:     Socket buffer structure
2116  * @netdev:  Network interface device structure
2117  * Returns
2118  *      - NETDEV_TX_OK:   Normal end
2119  *      - NETDEV_TX_BUSY: Error end
2120  */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	/* Drop frames that cannot fit.  NOTE(review): the -4 presumably
	 * reserves room for the hardware-appended CRC -- confirm against
	 * the MAC datasheet; the log message prints the unreduced limit.
	 */
	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	/* No free descriptors: stop the queue; pch_gbe_clean_tx() wakes it
	 * again once descriptors have been reclaimed.
	 */
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}
2151
2152 /**
2153  * pch_gbe_get_stats - Get System Network Statistics
2154  * @netdev:  Network interface device structure
2155  * Returns:  The current stats
2156  */
2157 static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
2158 {
2159         /* only return the current stats */
2160         return &netdev->stats;
2161 }
2162
2163 /**
2164  * pch_gbe_set_multi - Multicast and Promiscuous mode set
2165  * @netdev:   Network interface device structure
2166  */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		/* Promiscuous: disable both address and multicast filters. */
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* More groups than MAR slots: multicast filtering was disabled
	 * above, so there is nothing to program.
	 */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	/* GFP_ATOMIC: presumably called from atomic context
	 * (ndo_set_rx_mode holds the address list lock) -- do not sleep.
	 */
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
2220
2221 /**
2222  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2223  * @netdev: Network interface device structure
2224  * @addr:   Pointer to an address structure
2225  * Returns
2226  *      0:              Successfully
2227  *      -EADDRNOTAVAIL: Failed
2228  */
2229 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2230 {
2231         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2232         struct sockaddr *skaddr = addr;
2233         int ret_val;
2234
2235         if (!is_valid_ether_addr(skaddr->sa_data)) {
2236                 ret_val = -EADDRNOTAVAIL;
2237         } else {
2238                 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2239                 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2240                 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2241                 ret_val = 0;
2242         }
2243         pr_debug("ret_val : 0x%08x\n", ret_val);
2244         pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2245         pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2246         pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2247                  ioread32(&adapter->hw.reg->mac_adr[0].high),
2248                  ioread32(&adapter->hw.reg->mac_adr[0].low));
2249         return ret_val;
2250 }
2251
2252 /**
2253  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2254  * @netdev:   Network interface device structure
2255  * @new_mtu:  New value for maximum frame size
2256  * Returns
2257  *      0:              Successfully
2258  *      -EINVAL:        Failed
2259  */
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame;
	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
	int err;

	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
		(max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
		pr_err("Invalid MTU setting\n");
		return -EINVAL;
	}
	/* Pick the smallest receive buffer class that fits the frame. */
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

	if (netif_running(netdev)) {
		/* Restart the interface so rings are rebuilt with the new
		 * buffer length.
		 */
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
			/* Fall back to the old buffer length and try to
			 * recover.  NOTE(review): the result of the recovery
			 * pch_gbe_up() is ignored -- if it also fails the
			 * device is left down.
			 */
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return -ENOMEM;
		} else {
			netdev->mtu = new_mtu;
			adapter->hw.mac.max_frame_size = max_frame;
		}
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		 adapter->hw.mac.max_frame_size);
	return 0;
}
2304
2305 /**
2306  * pch_gbe_set_features - Reset device after features changed
2307  * @netdev:   Network interface device structure
2308  * @features:  New features
2309  * Returns
2310  *      0:              HW state updated successfully
2311  */
2312 static int pch_gbe_set_features(struct net_device *netdev,
2313         netdev_features_t features)
2314 {
2315         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2316         netdev_features_t changed = features ^ netdev->features;
2317
2318         if (!(changed & NETIF_F_RXCSUM))
2319                 return 0;
2320
2321         if (netif_running(netdev))
2322                 pch_gbe_reinit_locked(adapter);
2323         else
2324                 pch_gbe_reset(adapter);
2325
2326         return 0;
2327 }
2328
2329 /**
2330  * pch_gbe_ioctl - Controls register through a MII interface
2331  * @netdev:   Network interface device structure
2332  * @ifr:      Pointer to ifr structure
2333  * @cmd:      Control command
2334  * Returns
2335  *      0:      Successfully
2336  *      Negative value: Failed
2337  */
2338 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2339 {
2340         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2341
2342         pr_debug("cmd : 0x%04x\n", cmd);
2343
2344 #ifdef CONFIG_PCH_PTP
2345         if (cmd == SIOCSHWTSTAMP)
2346                 return hwtstamp_ioctl(netdev, ifr, cmd);
2347 #endif
2348
2349         return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2350 }
2351
2352 /**
2353  * pch_gbe_tx_timeout - Respond to a Tx Hang
2354  * @netdev:   Network interface device structure
2355  */
2356 static void pch_gbe_tx_timeout(struct net_device *netdev)
2357 {
2358         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2359
2360         /* Do the reset outside of interrupt context */
2361         adapter->stats.tx_timeout_count++;
2362         schedule_work(&adapter->reset_task);
2363 }
2364
2365 /**
2366  * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2367  * @napi:    Pointer of polling device struct
2368  * @budget:  The maximum number of a packet
2369  * Returns
2370  *      false:  Exit the polling mode
2371  *      true:   Continue the polling mode
2372  */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;
	u32 int_en;

	pr_debug("budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	/* If Tx cleanup hit its weight limit, claim the full budget so
	 * NAPI keeps polling and the remaining Tx work gets finished.
	 */
	if (!cleaned)
		work_done = budget;
	/* If no Tx and not enough Rx work done,
	 * exit the polling mode
	 */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete(napi);
		/* rx_stop_flag is presumably set by the IRQ handler when
		 * receive had to be halted (Rx FIFO error) -- restart now
		 * that buffers have been reclaimed.  TODO confirm setter.
		 */
		if (adapter->rx_stop_flag) {
			adapter->rx_stop_flag = false;
			pch_gbe_start_receive(&adapter->hw);
		}
		pch_gbe_irq_enable(adapter);
	} else
		if (adapter->rx_stop_flag) {
			adapter->rx_stop_flag = false;
			pch_gbe_start_receive(&adapter->hw);
			/* Still polling: unmask only the Rx FIFO error
			 * interrupt source, leaving the rest for
			 * pch_gbe_irq_enable() at poll completion.
			 */
			int_en = ioread32(&adapter->hw.reg->INT_EN);
			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
				&adapter->hw.reg->INT_EN);
		}

	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2416
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev:  Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
        struct pch_gbe_adapter *priv = netdev_priv(netdev);
        unsigned int irq = priv->pdev->irq;

        /* Invoke the interrupt handler by hand with the line masked. */
        disable_irq(irq);
        pch_gbe_intr(irq, netdev);
        enable_irq(irq);
}
#endif
2431
/* Entry points through which the network core drives this device. */
static const struct net_device_ops pch_gbe_netdev_ops = {
        .ndo_open = pch_gbe_open,
        .ndo_stop = pch_gbe_stop,
        .ndo_start_xmit = pch_gbe_xmit_frame,
        .ndo_get_stats = pch_gbe_get_stats,
        .ndo_set_mac_address = pch_gbe_set_mac,
        .ndo_tx_timeout = pch_gbe_tx_timeout,
        .ndo_change_mtu = pch_gbe_change_mtu,
        .ndo_set_features = pch_gbe_set_features,
        .ndo_do_ioctl = pch_gbe_ioctl,
        .ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = pch_gbe_netpoll,
#endif
};
2447
2448 static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2449                                                 pci_channel_state_t state)
2450 {
2451         struct net_device *netdev = pci_get_drvdata(pdev);
2452         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2453
2454         netif_device_detach(netdev);
2455         if (netif_running(netdev))
2456                 pch_gbe_down(adapter);
2457         pci_disable_device(pdev);
2458         /* Request a slot slot reset. */
2459         return PCI_ERS_RESULT_NEED_RESET;
2460 }
2461
2462 static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2463 {
2464         struct net_device *netdev = pci_get_drvdata(pdev);
2465         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2466         struct pch_gbe_hw *hw = &adapter->hw;
2467
2468         if (pci_enable_device(pdev)) {
2469                 pr_err("Cannot re-enable PCI device after reset\n");
2470                 return PCI_ERS_RESULT_DISCONNECT;
2471         }
2472         pci_set_master(pdev);
2473         pci_enable_wake(pdev, PCI_D0, 0);
2474         pch_gbe_hal_power_up_phy(hw);
2475         pch_gbe_reset(adapter);
2476         /* Clear wake up status */
2477         pch_gbe_mac_set_wol_event(hw, 0);
2478
2479         return PCI_ERS_RESULT_RECOVERED;
2480 }
2481
/* PCI error recovery: reset complete; bring the interface back up and
 * re-attach it to the stack.
 */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct pch_gbe_adapter *priv = netdev_priv(dev);

        /* If the interface was running, it must come up cleanly before
         * we re-attach; otherwise leave it detached.
         */
        if (netif_running(dev) && pch_gbe_up(priv)) {
                pr_debug("can't bring device back up after reset\n");
                return;
        }
        netif_device_attach(dev);
}
2495
2496 static int __pch_gbe_suspend(struct pci_dev *pdev)
2497 {
2498         struct net_device *netdev = pci_get_drvdata(pdev);
2499         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2500         struct pch_gbe_hw *hw = &adapter->hw;
2501         u32 wufc = adapter->wake_up_evt;
2502         int retval = 0;
2503
2504         netif_device_detach(netdev);
2505         if (netif_running(netdev))
2506                 pch_gbe_down(adapter);
2507         if (wufc) {
2508                 pch_gbe_set_multi(netdev);
2509                 pch_gbe_setup_rctl(adapter);
2510                 pch_gbe_configure_rx(adapter);
2511                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2512                                         hw->mac.link_duplex);
2513                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2514                                         hw->mac.link_duplex);
2515                 pch_gbe_mac_set_wol_event(hw, wufc);
2516                 pci_disable_device(pdev);
2517         } else {
2518                 pch_gbe_hal_power_down_phy(hw);
2519                 pch_gbe_mac_set_wol_event(hw, wufc);
2520                 pci_disable_device(pdev);
2521         }
2522         return retval;
2523 }
2524
2525 #ifdef CONFIG_PM
/* dev_pm_ops .suspend callback: delegate to the common suspend path. */
static int pch_gbe_suspend(struct device *device)
{
        return __pch_gbe_suspend(to_pci_dev(device));
}
2532
2533 static int pch_gbe_resume(struct device *device)
2534 {
2535         struct pci_dev *pdev = to_pci_dev(device);
2536         struct net_device *netdev = pci_get_drvdata(pdev);
2537         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2538         struct pch_gbe_hw *hw = &adapter->hw;
2539         u32 err;
2540
2541         err = pci_enable_device(pdev);
2542         if (err) {
2543                 pr_err("Cannot enable PCI device from suspend\n");
2544                 return err;
2545         }
2546         pci_set_master(pdev);
2547         pch_gbe_hal_power_up_phy(hw);
2548         pch_gbe_reset(adapter);
2549         /* Clear wake on lan control and status */
2550         pch_gbe_mac_set_wol_event(hw, 0);
2551
2552         if (netif_running(netdev))
2553                 pch_gbe_up(adapter);
2554         netif_device_attach(netdev);
2555
2556         return 0;
2557 }
2558 #endif /* CONFIG_PM */
2559
2560 static void pch_gbe_shutdown(struct pci_dev *pdev)
2561 {
2562         __pch_gbe_suspend(pdev);
2563         if (system_state == SYSTEM_POWER_OFF) {
2564                 pci_wake_from_d3(pdev, true);
2565                 pci_set_power_state(pdev, PCI_D3hot);
2566         }
2567 }
2568
/* PCI .remove callback: tear down everything pch_gbe_probe() set up,
 * in reverse order of acquisition.
 */
static void pch_gbe_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);

        /* Make sure no reset work runs after the netdev is gone. */
        cancel_work_sync(&adapter->reset_task);
        unregister_netdev(netdev);

        pch_gbe_hal_phy_hw_reset(&adapter->hw);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        iounmap(adapter->hw.reg);
        pci_release_regions(pdev);
        free_netdev(netdev);
        pci_disable_device(pdev);
}
2587
2588 static int pch_gbe_probe(struct pci_dev *pdev,
2589                           const struct pci_device_id *pci_id)
2590 {
2591         struct net_device *netdev;
2592         struct pch_gbe_adapter *adapter;
2593         int ret;
2594
2595         ret = pci_enable_device(pdev);
2596         if (ret)
2597                 return ret;
2598
2599         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2600                 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2601                 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2602                 if (ret) {
2603                         ret = pci_set_consistent_dma_mask(pdev,
2604                                                           DMA_BIT_MASK(32));
2605                         if (ret) {
2606                                 dev_err(&pdev->dev, "ERR: No usable DMA "
2607                                         "configuration, aborting\n");
2608                                 goto err_disable_device;
2609                         }
2610                 }
2611         }
2612
2613         ret = pci_request_regions(pdev, KBUILD_MODNAME);
2614         if (ret) {
2615                 dev_err(&pdev->dev,
2616                         "ERR: Can't reserve PCI I/O and memory resources\n");
2617                 goto err_disable_device;
2618         }
2619         pci_set_master(pdev);
2620
2621         netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2622         if (!netdev) {
2623                 ret = -ENOMEM;
2624                 goto err_release_pci;
2625         }
2626         SET_NETDEV_DEV(netdev, &pdev->dev);
2627
2628         pci_set_drvdata(pdev, netdev);
2629         adapter = netdev_priv(netdev);
2630         adapter->netdev = netdev;
2631         adapter->pdev = pdev;
2632         adapter->hw.back = adapter;
2633         adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2634         if (!adapter->hw.reg) {
2635                 ret = -EIO;
2636                 dev_err(&pdev->dev, "Can't ioremap\n");
2637                 goto err_free_netdev;
2638         }
2639
2640 #ifdef CONFIG_PCH_PTP
2641         adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2642                                                PCI_DEVFN(12, 4));
2643         if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2644                 pr_err("Bad ptp filter\n");
2645                 return -EINVAL;
2646         }
2647 #endif
2648
2649         netdev->netdev_ops = &pch_gbe_netdev_ops;
2650         netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2651         netif_napi_add(netdev, &adapter->napi,
2652                        pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2653         netdev->hw_features = NETIF_F_RXCSUM |
2654                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2655         netdev->features = netdev->hw_features;
2656         pch_gbe_set_ethtool_ops(netdev);
2657
2658         pch_gbe_mac_load_mac_addr(&adapter->hw);
2659         pch_gbe_mac_reset_hw(&adapter->hw);
2660
2661         /* setup the private structure */
2662         ret = pch_gbe_sw_init(adapter);
2663         if (ret)
2664                 goto err_iounmap;
2665
2666         /* Initialize PHY */
2667         ret = pch_gbe_init_phy(adapter);
2668         if (ret) {
2669                 dev_err(&pdev->dev, "PHY initialize error\n");
2670                 goto err_free_adapter;
2671         }
2672         pch_gbe_hal_get_bus_info(&adapter->hw);
2673
2674         /* Read the MAC address. and store to the private data */
2675         ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2676         if (ret) {
2677                 dev_err(&pdev->dev, "MAC address Read Error\n");
2678                 goto err_free_adapter;
2679         }
2680
2681         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2682         if (!is_valid_ether_addr(netdev->dev_addr)) {
2683                 /*
2684                  * If the MAC is invalid (or just missing), display a warning
2685                  * but do not abort setting up the device. pch_gbe_up will
2686                  * prevent the interface from being brought up until a valid MAC
2687                  * is set.
2688                  */
2689                 dev_err(&pdev->dev, "Invalid MAC address, "
2690                                     "interface disabled.\n");
2691         }
2692         setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2693                     (unsigned long)adapter);
2694
2695         INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2696
2697         pch_gbe_check_options(adapter);
2698
2699         /* initialize the wol settings based on the eeprom settings */
2700         adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2701         dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2702
2703         /* reset the hardware with the new settings */
2704         pch_gbe_reset(adapter);
2705
2706         ret = register_netdev(netdev);
2707         if (ret)
2708                 goto err_free_adapter;
2709         /* tell the stack to leave us alone until pch_gbe_open() is called */
2710         netif_carrier_off(netdev);
2711         netif_stop_queue(netdev);
2712
2713         dev_dbg(&pdev->dev, "PCH Network Connection\n");
2714
2715         device_set_wakeup_enable(&pdev->dev, 1);
2716         return 0;
2717
2718 err_free_adapter:
2719         pch_gbe_hal_phy_hw_reset(&adapter->hw);
2720         kfree(adapter->tx_ring);
2721         kfree(adapter->rx_ring);
2722 err_iounmap:
2723         iounmap(adapter->hw.reg);
2724 err_free_netdev:
2725         free_netdev(netdev);
2726 err_release_pci:
2727         pci_release_regions(pdev);
2728 err_disable_device:
2729         pci_disable_device(pdev);
2730         return ret;
2731 }
2732
/* PCI IDs this driver binds to: the Intel EG20T (IOH1) GbE and the
 * ROHM/LAPIS ML7223 and ML7831 variants.  Matching also requires the
 * Ethernet class code (class_mask ignores the programming-interface byte).
 */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
        {.vendor = PCI_VENDOR_ID_INTEL,
         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
         .subvendor = PCI_ANY_ID,
         .subdevice = PCI_ANY_ID,
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
        {.vendor = PCI_VENDOR_ID_ROHM,
         .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
         .subvendor = PCI_ANY_ID,
         .subdevice = PCI_ANY_ID,
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
        {.vendor = PCI_VENDOR_ID_ROHM,
         .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
         .subvendor = PCI_ANY_ID,
         .subdevice = PCI_ANY_ID,
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
        /* required last entry */
        {0}
};
2758
2759 #ifdef CONFIG_PM
/* All system sleep transitions (suspend/hibernate) map onto the same
 * suspend/resume pair.
 */
static const struct dev_pm_ops pch_gbe_pm_ops = {
        .suspend = pch_gbe_suspend,
        .resume = pch_gbe_resume,
        .freeze = pch_gbe_suspend,
        .thaw = pch_gbe_resume,
        .poweroff = pch_gbe_suspend,
        .restore = pch_gbe_resume,
};
2768 #endif
2769
/* PCI AER error-recovery callbacks (detect -> slot reset -> resume). */
static struct pci_error_handlers pch_gbe_err_handler = {
        .error_detected = pch_gbe_io_error_detected,
        .slot_reset = pch_gbe_io_slot_reset,
        .resume = pch_gbe_io_resume
};
2775
/* The PCI driver object tying the ID table to the lifecycle callbacks. */
static struct pci_driver pch_gbe_driver = {
        .name = KBUILD_MODNAME,
        .id_table = pch_gbe_pcidev_id,
        .probe = pch_gbe_probe,
        .remove = pch_gbe_remove,
#ifdef CONFIG_PM
        .driver.pm = &pch_gbe_pm_ops,
#endif
        .shutdown = pch_gbe_shutdown,
        .err_handler = &pch_gbe_err_handler
};
2787
2788
2789 static int __init pch_gbe_init_module(void)
2790 {
2791         int ret;
2792
2793         ret = pci_register_driver(&pch_gbe_driver);
2794         if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2795                 if (copybreak == 0) {
2796                         pr_info("copybreak disabled\n");
2797                 } else {
2798                         pr_info("copybreak enabled for packets <= %u bytes\n",
2799                                 copybreak);
2800                 }
2801         }
2802         return ret;
2803 }
2804
/* Module exit point: unregister the PCI driver. */
static void __exit pch_gbe_exit_module(void)
{
        pci_unregister_driver(&pch_gbe_driver);
}
2809
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

/* Module metadata and hotplug device table. */
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

/* copybreak: packets this size or smaller are copied into a fresh skb
 * on receive instead of handing up the original DMA buffer.
 */
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
2822
2823 /* pch_gbe_main.c */