1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void __devexit e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139                                struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                struct e1000_rx_ring *rx_ring,
143                                int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145                                      struct e1000_rx_ring *rx_ring,
146                                      int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148                                    struct e1000_rx_ring *rx_ring,
149                                    int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151                                          struct e1000_rx_ring *rx_ring,
152                                          int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155                            int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162                                        struct sk_buff *skb);
163
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166                             netdev_features_t features);
167 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168                                      bool filter_on);
169 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
171 static void e1000_restore_vlan(struct e1000_adapter *adapter);
172
173 #ifdef CONFIG_PM
174 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
175 static int e1000_resume(struct pci_dev *pdev);
176 #endif
177 static void e1000_shutdown(struct pci_dev *pdev);
178
179 #ifdef CONFIG_NET_POLL_CONTROLLER
180 /* for netdump / net console */
181 static void e1000_netpoll (struct net_device *netdev);
182 #endif
183
184 #define COPYBREAK_DEFAULT 256
185 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
186 module_param(copybreak, uint, 0644);
187 MODULE_PARM_DESC(copybreak,
188         "Maximum size of packet that is copied to a new buffer on receive");
189
190 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191                      pci_channel_state_t state);
192 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193 static void e1000_io_resume(struct pci_dev *pdev);
194
195 static struct pci_error_handlers e1000_err_handler = {
196         .error_detected = e1000_io_error_detected,
197         .slot_reset = e1000_io_slot_reset,
198         .resume = e1000_io_resume,
199 };
200
201 static struct pci_driver e1000_driver = {
202         .name     = e1000_driver_name,
203         .id_table = e1000_pci_tbl,
204         .probe    = e1000_probe,
205         .remove   = __devexit_p(e1000_remove),
206 #ifdef CONFIG_PM
207         /* Power Management Hooks */
208         .suspend  = e1000_suspend,
209         .resume   = e1000_resume,
210 #endif
211         .shutdown = e1000_shutdown,
212         .err_handler = &e1000_err_handler
213 };
214
215 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
216 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_VERSION);
219
220 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221 static int debug = -1;
222 module_param(debug, int, 0);
223 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
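/* A negative level (the default) makes netif_msg_init() in e1000_probe() fall
 * back to DEFAULT_MSG_ENABLE instead of deriving the mask from the level.
 */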
224
225 /**
226  * e1000_get_hw_dev - return the net_device used by the hardware layer
227  * to print debugging information
228  *
229  **/
230 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231 {
232         struct e1000_adapter *adapter = hw->back;
233         return adapter->netdev;
234 }
235
236 /**
237  * e1000_init_module - Driver Registration Routine
238  *
239  * e1000_init_module is the first routine called when the driver is
240  * loaded. All it does is register with the PCI subsystem.
241  **/
242
243 static int __init e1000_init_module(void)
244 {
245         int ret;
246         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
247
248         pr_info("%s\n", e1000_copyright);
249
250         ret = pci_register_driver(&e1000_driver);
251         if (copybreak != COPYBREAK_DEFAULT) {
252                 if (copybreak == 0)
253                         pr_info("copybreak disabled\n");
254                 else
255                         pr_info("copybreak enabled for "
256                                    "packets <= %u bytes\n", copybreak);
257         }
258         return ret;
259 }
260
261 module_init(e1000_init_module);
262
263 /**
264  * e1000_exit_module - Driver Exit Cleanup Routine
265  *
266  * e1000_exit_module is called just before the driver is removed
267  * from memory.
268  **/
269
270 static void __exit e1000_exit_module(void)
271 {
272         pci_unregister_driver(&e1000_driver);
273 }
274
275 module_exit(e1000_exit_module);
276
277 static int e1000_request_irq(struct e1000_adapter *adapter)
278 {
279         struct net_device *netdev = adapter->netdev;
280         irq_handler_t handler = e1000_intr;
281         int irq_flags = IRQF_SHARED;
282         int err;
283
284         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285                           netdev);
286         if (err) {
287                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
288         }
289
290         return err;
291 }
292
293 static void e1000_free_irq(struct e1000_adapter *adapter)
294 {
295         struct net_device *netdev = adapter->netdev;
296
297         free_irq(adapter->pdev->irq, netdev);
298 }
299
300 /**
301  * e1000_irq_disable - Mask off interrupt generation on the NIC
302  * @adapter: board private structure
303  **/
304
305 static void e1000_irq_disable(struct e1000_adapter *adapter)
306 {
307         struct e1000_hw *hw = &adapter->hw;
308
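        /* Writing all 1s to IMC masks every interrupt cause; the register
         * flush plus synchronize_irq() below guarantee no handler is still
         * running when this returns.
         */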
309         ew32(IMC, ~0);
310         E1000_WRITE_FLUSH();
311         synchronize_irq(adapter->pdev->irq);
312 }
313
314 /**
315  * e1000_irq_enable - Enable default interrupt generation settings
316  * @adapter: board private structure
317  **/
318
319 static void e1000_irq_enable(struct e1000_adapter *adapter)
320 {
321         struct e1000_hw *hw = &adapter->hw;
322
323         ew32(IMS, IMS_ENABLE_MASK);
324         E1000_WRITE_FLUSH();
325 }
326
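/* Keep the VLAN carried in the manageability (DHCP) cookie registered in the
 * VLAN filter: add it if the stack has not, and remove a stale previous
 * management ID from the filter once it is no longer otherwise in use.
 */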
327 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
328 {
329         struct e1000_hw *hw = &adapter->hw;
330         struct net_device *netdev = adapter->netdev;
331         u16 vid = hw->mng_cookie.vlan_id;
332         u16 old_vid = adapter->mng_vlan_id;
333
334         if (!e1000_vlan_used(adapter))
335                 return;
336
337         if (!test_bit(vid, adapter->active_vlans)) {
338                 if (hw->mng_cookie.status &
339                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
340                         e1000_vlan_rx_add_vid(netdev, vid);
341                         adapter->mng_vlan_id = vid;
342                 } else {
343                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
344                 }
345                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
346                     (vid != old_vid) &&
347                     !test_bit(old_vid, adapter->active_vlans))
348                         e1000_vlan_rx_kill_vid(netdev, old_vid);
349         } else {
350                 adapter->mng_vlan_id = vid;
351         }
352 }
353
354 static void e1000_init_manageability(struct e1000_adapter *adapter)
355 {
356         struct e1000_hw *hw = &adapter->hw;
357
358         if (adapter->en_mng_pt) {
359                 u32 manc = er32(MANC);
360
361                 /* disable hardware interception of ARP */
362                 manc &= ~(E1000_MANC_ARP_EN);
363
364                 ew32(MANC, manc);
365         }
366 }
367
368 static void e1000_release_manageability(struct e1000_adapter *adapter)
369 {
370         struct e1000_hw *hw = &adapter->hw;
371
372         if (adapter->en_mng_pt) {
373                 u32 manc = er32(MANC);
374
375                 /* re-enable hardware interception of ARP */
376                 manc |= E1000_MANC_ARP_EN;
377
378                 ew32(MANC, manc);
379         }
380 }
381
382 /**
383  * e1000_configure - configure the hardware for RX and TX
384  * @adapter: board private structure
385  **/
386 static void e1000_configure(struct e1000_adapter *adapter)
387 {
388         struct net_device *netdev = adapter->netdev;
389         int i;
390
391         e1000_set_rx_mode(netdev);
392
393         e1000_restore_vlan(adapter);
394         e1000_init_manageability(adapter);
395
396         e1000_configure_tx(adapter);
397         e1000_setup_rctl(adapter);
398         e1000_configure_rx(adapter);
399         /* call E1000_DESC_UNUSED which always leaves
400          * at least 1 descriptor unused to make sure
401          * next_to_use != next_to_clean */
402         for (i = 0; i < adapter->num_rx_queues; i++) {
403                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404                 adapter->alloc_rx_buf(adapter, ring,
405                                       E1000_DESC_UNUSED(ring));
406         }
407 }
408
409 int e1000_up(struct e1000_adapter *adapter)
410 {
411         struct e1000_hw *hw = &adapter->hw;
412
413         /* hardware has been reset, we need to reload some things */
414         e1000_configure(adapter);
415
416         clear_bit(__E1000_DOWN, &adapter->flags);
417
418         napi_enable(&adapter->napi);
419
420         e1000_irq_enable(adapter);
421
422         netif_wake_queue(adapter->netdev);
423
424         /* fire a link change interrupt to start the watchdog */
425         ew32(ICS, E1000_ICS_LSC);
426         return 0;
427 }
428
429 /**
430  * e1000_power_up_phy - restore link in case the phy was powered down
431  * @adapter: address of board private structure
432  *
433  * The phy may be powered down to save power and turn off link when the
434  * driver is unloaded and wake on lan is not enabled (among others)
435  * *** this routine MUST be followed by a call to e1000_reset ***
436  *
437  **/
438
439 void e1000_power_up_phy(struct e1000_adapter *adapter)
440 {
441         struct e1000_hw *hw = &adapter->hw;
442         u16 mii_reg = 0;
443
444         /* Just clear the power down bit to wake the phy back up */
445         if (hw->media_type == e1000_media_type_copper) {
446                 /* according to the manual, the phy will retain its
447                  * settings across a power-down/up cycle */
448                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
449                 mii_reg &= ~MII_CR_POWER_DOWN;
450                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
451         }
452 }
453
454 static void e1000_power_down_phy(struct e1000_adapter *adapter)
455 {
456         struct e1000_hw *hw = &adapter->hw;
457
458         /* Power down the PHY so no link is implied when interface is down.
459          * The PHY cannot be powered down if any of the following is true:
460          * (a) WoL is enabled
461          * (b) AMT is active
462          * (c) SoL/IDER session is active */
463         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464            hw->media_type == e1000_media_type_copper) {
465                 u16 mii_reg = 0;
466
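                /* For the MAC types below, skip the power-down when
                 * MANC.SMBUS_EN is set, since manageability firmware may
                 * still be using the PHY over SMBus.
                 */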
467                 switch (hw->mac_type) {
468                 case e1000_82540:
469                 case e1000_82545:
470                 case e1000_82545_rev_3:
471                 case e1000_82546:
472                 case e1000_ce4100:
473                 case e1000_82546_rev_3:
474                 case e1000_82541:
475                 case e1000_82541_rev_2:
476                 case e1000_82547:
477                 case e1000_82547_rev_2:
478                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
479                                 goto out;
480                         break;
481                 default:
482                         goto out;
483                 }
484                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
485                 mii_reg |= MII_CR_POWER_DOWN;
486                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
487                 msleep(1);
488         }
489 out:
490         return;
491 }
492
493 static void e1000_down_and_stop(struct e1000_adapter *adapter)
494 {
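        /* Mark the adapter DOWN first so a work item that is already running
         * does not simply reschedule itself after being cancelled.
         */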
495         set_bit(__E1000_DOWN, &adapter->flags);
496         cancel_work_sync(&adapter->reset_task);
497         cancel_delayed_work_sync(&adapter->watchdog_task);
498         cancel_delayed_work_sync(&adapter->phy_info_task);
499         cancel_delayed_work_sync(&adapter->fifo_stall_task);
500 }
501
502 void e1000_down(struct e1000_adapter *adapter)
503 {
504         struct e1000_hw *hw = &adapter->hw;
505         struct net_device *netdev = adapter->netdev;
506         u32 rctl, tctl;
507
508
509         /* disable receives in the hardware */
510         rctl = er32(RCTL);
511         ew32(RCTL, rctl & ~E1000_RCTL_EN);
512         /* flush and sleep below */
513
514         netif_tx_disable(netdev);
515
516         /* disable transmits in the hardware */
517         tctl = er32(TCTL);
518         tctl &= ~E1000_TCTL_EN;
519         ew32(TCTL, tctl);
520         /* flush both disables and wait for them to finish */
521         E1000_WRITE_FLUSH();
522         msleep(10);
523
524         napi_disable(&adapter->napi);
525
526         e1000_irq_disable(adapter);
527
528         /*
529          * Setting DOWN must be after irq_disable to prevent
530          * a screaming interrupt.  Setting DOWN also prevents
531          * tasks from rescheduling.
532          */
533         e1000_down_and_stop(adapter);
534
535         adapter->link_speed = 0;
536         adapter->link_duplex = 0;
537         netif_carrier_off(netdev);
538
539         e1000_reset(adapter);
540         e1000_clean_all_tx_rings(adapter);
541         e1000_clean_all_rx_rings(adapter);
542 }
543
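/* Variant of e1000_reinit_locked() below that additionally takes the adapter
 * mutex, for reset paths not covered by the RTNL assertion made there.
 */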
544 static void e1000_reinit_safe(struct e1000_adapter *adapter)
545 {
546         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
547                 msleep(1);
548         mutex_lock(&adapter->mutex);
549         e1000_down(adapter);
550         e1000_up(adapter);
551         mutex_unlock(&adapter->mutex);
552         clear_bit(__E1000_RESETTING, &adapter->flags);
553 }
554
555 void e1000_reinit_locked(struct e1000_adapter *adapter)
556 {
557         /* if rtnl_lock is not held the call path is bogus */
558         ASSERT_RTNL();
559         WARN_ON(in_interrupt());
560         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
561                 msleep(1);
562         e1000_down(adapter);
563         e1000_up(adapter);
564         clear_bit(__E1000_RESETTING, &adapter->flags);
565 }
566
567 void e1000_reset(struct e1000_adapter *adapter)
568 {
569         struct e1000_hw *hw = &adapter->hw;
570         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
571         bool legacy_pba_adjust = false;
572         u16 hwm;
573
574         /* Repartition Pba for greater than 9k mtu
575          * To take effect CTRL.RST is required.
576          */
577
578         switch (hw->mac_type) {
579         case e1000_82542_rev2_0:
580         case e1000_82542_rev2_1:
581         case e1000_82543:
582         case e1000_82544:
583         case e1000_82540:
584         case e1000_82541:
585         case e1000_82541_rev_2:
586                 legacy_pba_adjust = true;
587                 pba = E1000_PBA_48K;
588                 break;
589         case e1000_82545:
590         case e1000_82545_rev_3:
591         case e1000_82546:
592         case e1000_ce4100:
593         case e1000_82546_rev_3:
594                 pba = E1000_PBA_48K;
595                 break;
596         case e1000_82547:
597         case e1000_82547_rev_2:
598                 legacy_pba_adjust = true;
599                 pba = E1000_PBA_30K;
600                 break;
601         case e1000_undefined:
602         case e1000_num_macs:
603                 break;
604         }
605
606         if (legacy_pba_adjust) {
607                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
608                         pba -= 8; /* allocate more FIFO for Tx */
609
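                /* For the 82547 FIFO-stall workaround, software tracks the
                 * region between the Rx allocation (pba) and the top of the
                 * 40 KB packet buffer as its Tx FIFO, per the sizes computed
                 * below.
                 */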
610                 if (hw->mac_type == e1000_82547) {
611                         adapter->tx_fifo_head = 0;
612                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
613                         adapter->tx_fifo_size =
614                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
615                         atomic_set(&adapter->tx_fifo_stall, 0);
616                 }
617         } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
618                 /* adjust PBA for jumbo frames */
619                 ew32(PBA, pba);
620
621                 /* To maintain wire speed transmits, the Tx FIFO should be
622                  * large enough to accommodate two full transmit packets,
623                  * rounded up to the next 1KB and expressed in KB.  Likewise,
624                  * the Rx FIFO should be large enough to accommodate at least
625                  * one full receive packet and is similarly rounded up and
626                  * expressed in KB. */
627                 pba = er32(PBA);
628                 /* upper 16 bits has Tx packet buffer allocation size in KB */
629                 tx_space = pba >> 16;
630                 /* lower 16 bits has Rx packet buffer allocation size in KB */
631                 pba &= 0xffff;
632                 /*
633                  * the tx fifo also stores 16 bytes of information about the tx
634                  * but don't include ethernet FCS because hardware appends it
635                  */
636                 min_tx_space = (hw->max_frame_size +
637                                 sizeof(struct e1000_tx_desc) -
638                                 ETH_FCS_LEN) * 2;
639                 min_tx_space = ALIGN(min_tx_space, 1024);
640                 min_tx_space >>= 10;
641                 /* software strips receive CRC, so leave room for it */
642                 min_rx_space = hw->max_frame_size;
643                 min_rx_space = ALIGN(min_rx_space, 1024);
644                 min_rx_space >>= 10;
645
646                 /* If current Tx allocation is less than the min Tx FIFO size,
647                  * and the min Tx FIFO size is less than the current Rx FIFO
648                  * allocation, take space away from current Rx allocation */
649                 if (tx_space < min_tx_space &&
650                     ((min_tx_space - tx_space) < pba)) {
651                         pba = pba - (min_tx_space - tx_space);
652
653                         /* PCI/PCIx hardware has PBA alignment constraints */
654                         switch (hw->mac_type) {
655                         case e1000_82545 ... e1000_82546_rev_3:
656                                 pba &= ~(E1000_PBA_8K - 1);
657                                 break;
658                         default:
659                                 break;
660                         }
661
662                         /* if short on rx space, rx wins and must trump tx
663                          * adjustment or use Early Receive if available */
664                         if (pba < min_rx_space)
665                                 pba = min_rx_space;
666                 }
667         }
668
669         ew32(PBA, pba);
670
671         /*
672          * flow control settings:
673          * The high water mark must be low enough to fit one full frame
674          * (or the size used for early receive) above it in the Rx FIFO.
675          * Set it to the lower of:
676          * - 90% of the Rx FIFO size, and
677          * - the full Rx FIFO size minus the early receive size (for parts
678          *   with ERT support assuming ERT set to E1000_ERT_2048), or
679          * - the full Rx FIFO size minus one full frame
680          */
681         hwm = min(((pba << 10) * 9 / 10),
682                   ((pba << 10) - hw->max_frame_size));
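        /* Worked example, assuming a 48 KB Rx allocation (pba == 48) and a
         * 1518-byte max frame: min(49152 * 9 / 10, 49152 - 1518) =
         * min(44236, 47634) = 44236, which the 8-byte mask below rounds
         * down to 44232.
         */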
683
684         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
685         hw->fc_low_water = hw->fc_high_water - 8;
686         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
687         hw->fc_send_xon = 1;
688         hw->fc = hw->original_fc;
689
690         /* Allow time for pending master requests to run */
691         e1000_reset_hw(hw);
692         if (hw->mac_type >= e1000_82544)
693                 ew32(WUC, 0);
694
695         if (e1000_init_hw(hw))
696                 e_dev_err("Hardware Error\n");
697         e1000_update_mng_vlan(adapter);
698
699         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
700         if (hw->mac_type >= e1000_82544 &&
701             hw->autoneg == 1 &&
702             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
703                 u32 ctrl = er32(CTRL);
704                 /* clear phy power management bit if we are in gig only mode,
705                  * which if enabled will attempt negotiation to 100Mb, which
706                  * can cause a loss of link at power off or driver unload */
707                 ctrl &= ~E1000_CTRL_SWDPIN3;
708                 ew32(CTRL, ctrl);
709         }
710
711         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
712         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
713
714         e1000_reset_adaptive(hw);
715         e1000_phy_get_info(hw, &adapter->phy_info);
716
717         e1000_release_manageability(adapter);
718 }
719
720 /**
721  * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
722  **/
723 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
724 {
725         struct net_device *netdev = adapter->netdev;
726         struct ethtool_eeprom eeprom;
727         const struct ethtool_ops *ops = netdev->ethtool_ops;
728         u8 *data;
729         int i;
730         u16 csum_old, csum_new = 0;
731
732         eeprom.len = ops->get_eeprom_len(netdev);
733         eeprom.offset = 0;
734
735         data = kmalloc(eeprom.len, GFP_KERNEL);
736         if (!data)
737                 return;
738
739         ops->get_eeprom(netdev, &eeprom, data);
740
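        /* EEPROM words are little-endian byte pairs.  The image is valid when
         * the 16-bit words from offset 0 through EEPROM_CHECKSUM_REG sum to
         * EEPROM_SUM, so the expected checksum is EEPROM_SUM minus the sum of
         * the preceding words.
         */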
741         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
742                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
743         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
744                 csum_new += data[i] + (data[i + 1] << 8);
745         csum_new = EEPROM_SUM - csum_new;
746
747         pr_err("/*********************/\n");
748         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
749         pr_err("Calculated              : 0x%04x\n", csum_new);
750
751         pr_err("Offset    Values\n");
752         pr_err("========  ======\n");
753         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
754
755         pr_err("Include this output when contacting your support provider.\n");
756         pr_err("This is not a software error! Something bad happened to\n");
757         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
758         pr_err("result in further problems, possibly loss of data,\n");
759         pr_err("corruption or system hangs!\n");
760         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
761         pr_err("which is invalid and requires you to set the proper MAC\n");
762         pr_err("address manually before continuing to enable this network\n");
763         pr_err("device. Please inspect the EEPROM dump and report the\n");
764         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
765         pr_err("/*********************/\n");
766
767         kfree(data);
768 }
769
770 /**
771  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
772  * @pdev: PCI device information struct
773  *
774  * Return true if an adapter needs ioport resources
775  **/
776 static int e1000_is_need_ioport(struct pci_dev *pdev)
777 {
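        /* Devices in this list get both memory and I/O BARs reserved in
         * e1000_probe(); all other parts are driven memory-mapped only.
         */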
778         switch (pdev->device) {
779         case E1000_DEV_ID_82540EM:
780         case E1000_DEV_ID_82540EM_LOM:
781         case E1000_DEV_ID_82540EP:
782         case E1000_DEV_ID_82540EP_LOM:
783         case E1000_DEV_ID_82540EP_LP:
784         case E1000_DEV_ID_82541EI:
785         case E1000_DEV_ID_82541EI_MOBILE:
786         case E1000_DEV_ID_82541ER:
787         case E1000_DEV_ID_82541ER_LOM:
788         case E1000_DEV_ID_82541GI:
789         case E1000_DEV_ID_82541GI_LF:
790         case E1000_DEV_ID_82541GI_MOBILE:
791         case E1000_DEV_ID_82544EI_COPPER:
792         case E1000_DEV_ID_82544EI_FIBER:
793         case E1000_DEV_ID_82544GC_COPPER:
794         case E1000_DEV_ID_82544GC_LOM:
795         case E1000_DEV_ID_82545EM_COPPER:
796         case E1000_DEV_ID_82545EM_FIBER:
797         case E1000_DEV_ID_82546EB_COPPER:
798         case E1000_DEV_ID_82546EB_FIBER:
799         case E1000_DEV_ID_82546EB_QUAD_COPPER:
800                 return true;
801         default:
802                 return false;
803         }
804 }
805
806 static netdev_features_t e1000_fix_features(struct net_device *netdev,
807         netdev_features_t features)
808 {
809         /*
810          * Since there is no support for separate rx/tx vlan accel
811          * enable/disable make sure tx flag is always in same state as rx.
812          */
813         if (features & NETIF_F_HW_VLAN_RX)
814                 features |= NETIF_F_HW_VLAN_TX;
815         else
816                 features &= ~NETIF_F_HW_VLAN_TX;
817
818         return features;
819 }
820
821 static int e1000_set_features(struct net_device *netdev,
822         netdev_features_t features)
823 {
824         struct e1000_adapter *adapter = netdev_priv(netdev);
825         netdev_features_t changed = features ^ netdev->features;
826
827         if (changed & NETIF_F_HW_VLAN_RX)
828                 e1000_vlan_mode(netdev, features);
829
830         if (!(changed & NETIF_F_RXCSUM))
831                 return 0;
832
833         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
834
835         if (netif_running(netdev))
836                 e1000_reinit_locked(adapter);
837         else
838                 e1000_reset(adapter);
839
840         return 0;
841 }
842
843 static const struct net_device_ops e1000_netdev_ops = {
844         .ndo_open               = e1000_open,
845         .ndo_stop               = e1000_close,
846         .ndo_start_xmit         = e1000_xmit_frame,
847         .ndo_get_stats          = e1000_get_stats,
848         .ndo_set_rx_mode        = e1000_set_rx_mode,
849         .ndo_set_mac_address    = e1000_set_mac,
850         .ndo_tx_timeout         = e1000_tx_timeout,
851         .ndo_change_mtu         = e1000_change_mtu,
852         .ndo_do_ioctl           = e1000_ioctl,
853         .ndo_validate_addr      = eth_validate_addr,
854         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
855         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
856 #ifdef CONFIG_NET_POLL_CONTROLLER
857         .ndo_poll_controller    = e1000_netpoll,
858 #endif
859         .ndo_fix_features       = e1000_fix_features,
860         .ndo_set_features       = e1000_set_features,
861 };
862
863 /**
864  * e1000_init_hw_struct - initialize members of hw struct
865  * @adapter: board private struct
866  * @hw: structure used by e1000_hw.c
867  *
868  * Factors out initialization of the e1000_hw struct to its own function
869  * that can be called very early at init (just after struct allocation).
870  * Fields are initialized based on PCI device information and
871  * OS network device settings (MTU size).
872  * Returns negative error codes if MAC type setup fails.
873  */
874 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
875                                 struct e1000_hw *hw)
876 {
877         struct pci_dev *pdev = adapter->pdev;
878
879         /* PCI config space info */
880         hw->vendor_id = pdev->vendor;
881         hw->device_id = pdev->device;
882         hw->subsystem_vendor_id = pdev->subsystem_vendor;
883         hw->subsystem_id = pdev->subsystem_device;
884         hw->revision_id = pdev->revision;
885
886         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
887
888         hw->max_frame_size = adapter->netdev->mtu +
889                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
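        /* e.g. the standard 1500-byte MTU gives 1500 + 14 + 4 = 1518 bytes */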
890         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
891
892         /* identify the MAC */
893         if (e1000_set_mac_type(hw)) {
894                 e_err(probe, "Unknown MAC Type\n");
895                 return -EIO;
896         }
897
898         switch (hw->mac_type) {
899         default:
900                 break;
901         case e1000_82541:
902         case e1000_82547:
903         case e1000_82541_rev_2:
904         case e1000_82547_rev_2:
905                 hw->phy_init_script = 1;
906                 break;
907         }
908
909         e1000_set_media_type(hw);
910         e1000_get_bus_info(hw);
911
912         hw->wait_autoneg_complete = false;
913         hw->tbi_compatibility_en = true;
914         hw->adaptive_ifs = true;
915
916         /* Copper options */
917
918         if (hw->media_type == e1000_media_type_copper) {
919                 hw->mdix = AUTO_ALL_MODES;
920                 hw->disable_polarity_correction = false;
921                 hw->master_slave = E1000_MASTER_SLAVE;
922         }
923
924         return 0;
925 }
926
927 /**
928  * e1000_probe - Device Initialization Routine
929  * @pdev: PCI device information struct
930  * @ent: entry in e1000_pci_tbl
931  *
932  * Returns 0 on success, negative on failure
933  *
934  * e1000_probe initializes an adapter identified by a pci_dev structure.
935  * The OS initialization, configuring of the adapter private structure,
936  * and a hardware reset occur.
937  **/
938 static int __devinit e1000_probe(struct pci_dev *pdev,
939                                  const struct pci_device_id *ent)
940 {
941         struct net_device *netdev;
942         struct e1000_adapter *adapter;
943         struct e1000_hw *hw;
944
945         static int cards_found = 0;
946         static int global_quad_port_a = 0; /* global ksp3 port a indication */
947         int i, err, pci_using_dac;
948         u16 eeprom_data = 0;
949         u16 tmp = 0;
950         u16 eeprom_apme_mask = E1000_EEPROM_APME;
951         int bars, need_ioport;
952
953         /* do not allocate ioport bars when not needed */
954         need_ioport = e1000_is_need_ioport(pdev);
955         if (need_ioport) {
956                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
957                 err = pci_enable_device(pdev);
958         } else {
959                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
960                 err = pci_enable_device_mem(pdev);
961         }
962         if (err)
963                 return err;
964
965         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
966         if (err)
967                 goto err_pci_reg;
968
969         pci_set_master(pdev);
970         err = pci_save_state(pdev);
971         if (err)
972                 goto err_alloc_etherdev;
973
974         err = -ENOMEM;
975         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
976         if (!netdev)
977                 goto err_alloc_etherdev;
978
979         SET_NETDEV_DEV(netdev, &pdev->dev);
980
981         pci_set_drvdata(pdev, netdev);
982         adapter = netdev_priv(netdev);
983         adapter->netdev = netdev;
984         adapter->pdev = pdev;
985         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
986         adapter->bars = bars;
987         adapter->need_ioport = need_ioport;
988
989         hw = &adapter->hw;
990         hw->back = adapter;
991
992         err = -EIO;
993         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
994         if (!hw->hw_addr)
995                 goto err_ioremap;
996
997         if (adapter->need_ioport) {
998                 for (i = BAR_1; i <= BAR_5; i++) {
999                         if (pci_resource_len(pdev, i) == 0)
1000                                 continue;
1001                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1002                                 hw->io_base = pci_resource_start(pdev, i);
1003                                 break;
1004                         }
1005                 }
1006         }
1007
1008         /* set up the hw struct before any of the if (hw->...) checks below */
1009         err = e1000_init_hw_struct(adapter, hw);
1010         if (err)
1011                 goto err_sw_init;
1012
1013         /*
1014          * there is a workaround being applied below that limits
1015          * 64-bit DMA addresses to 64-bit hardware.  There are some
1016          * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1017          */
1018         pci_using_dac = 0;
1019         if ((hw->bus_type == e1000_bus_type_pcix) &&
1020             !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1021                 /*
1022                  * according to DMA-API-HOWTO, coherent calls will always
1023                  * succeed if the set call did
1024                  */
1025                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1026                 pci_using_dac = 1;
1027         } else {
1028                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1029                 if (err) {
1030                         pr_err("No usable DMA config, aborting\n");
1031                         goto err_dma;
1032                 }
1033                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1034         }
1035
1036         netdev->netdev_ops = &e1000_netdev_ops;
1037         e1000_set_ethtool_ops(netdev);
1038         netdev->watchdog_timeo = 5 * HZ;
1039         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1040
1041         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1042
1043         adapter->bd_number = cards_found;
1044
1045         /* setup the private structure */
1046
1047         err = e1000_sw_init(adapter);
1048         if (err)
1049                 goto err_sw_init;
1050
1051         err = -EIO;
1052         if (hw->mac_type == e1000_ce4100) {
1053                 hw->ce4100_gbe_mdio_base_virt =
1054                                         ioremap(pci_resource_start(pdev, BAR_1),
1055                                                 pci_resource_len(pdev, BAR_1));
1056
1057                 if (!hw->ce4100_gbe_mdio_base_virt)
1058                         goto err_mdio_ioremap;
1059         }
1060
1061         if (hw->mac_type >= e1000_82543) {
1062                 netdev->hw_features = NETIF_F_SG |
1063                                    NETIF_F_HW_CSUM |
1064                                    NETIF_F_HW_VLAN_RX;
1065                 netdev->features = NETIF_F_HW_VLAN_TX |
1066                                    NETIF_F_HW_VLAN_FILTER;
1067         }
1068
1069         if ((hw->mac_type >= e1000_82544) &&
1070            (hw->mac_type != e1000_82547))
1071                 netdev->hw_features |= NETIF_F_TSO;
1072
1073         netdev->priv_flags |= IFF_SUPP_NOFCS;
1074
1075         netdev->features |= netdev->hw_features;
1076         netdev->hw_features |= NETIF_F_RXCSUM;
1077         netdev->hw_features |= NETIF_F_RXFCS;
1078
1079         if (pci_using_dac) {
1080                 netdev->features |= NETIF_F_HIGHDMA;
1081                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1082         }
1083
1084         netdev->vlan_features |= NETIF_F_TSO;
1085         netdev->vlan_features |= NETIF_F_HW_CSUM;
1086         netdev->vlan_features |= NETIF_F_SG;
1087
1088         netdev->priv_flags |= IFF_UNICAST_FLT;
1089
1090         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1091
1092         /* initialize eeprom parameters */
1093         if (e1000_init_eeprom_params(hw)) {
1094                 e_err(probe, "EEPROM initialization failed\n");
1095                 goto err_eeprom;
1096         }
1097
1098         /* before reading the EEPROM, reset the controller to
1099          * put the device in a known good starting state */
1100
1101         e1000_reset_hw(hw);
1102
1103         /* make sure the EEPROM is good */
1104         if (e1000_validate_eeprom_checksum(hw) < 0) {
1105                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1106                 e1000_dump_eeprom(adapter);
1107                 /*
1108          * set MAC address to all zeroes to invalidate it and temporarily
1109          * disable this device for the user. This blocks regular
1110                  * traffic while still permitting ethtool ioctls from reaching
1111                  * the hardware as well as allowing the user to run the
1112                  * interface after manually setting a hw addr using
1113                  * `ip set address`
1114                  */
1115                 memset(hw->mac_addr, 0, netdev->addr_len);
1116         } else {
1117                 /* copy the MAC address out of the EEPROM */
1118                 if (e1000_read_mac_addr(hw))
1119                         e_err(probe, "EEPROM Read Error\n");
1120         }
1121         /* don't block initialization here due to a bad MAC address */
1122         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1123         memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1124
1125         if (!is_valid_ether_addr(netdev->perm_addr))
1126                 e_err(probe, "Invalid MAC Address\n");
1127
1128
1129         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1130         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1131                           e1000_82547_tx_fifo_stall_task);
1132         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1133         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1134
1135         e1000_check_options(adapter);
1136
1137         /* Initial Wake on LAN setting
1138          * If APM wake is enabled in the EEPROM,
1139          * enable the ACPI Magic Packet filter
1140          */
1141
1142         switch (hw->mac_type) {
1143         case e1000_82542_rev2_0:
1144         case e1000_82542_rev2_1:
1145         case e1000_82543:
1146                 break;
1147         case e1000_82544:
1148                 e1000_read_eeprom(hw,
1149                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1150                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1151                 break;
1152         case e1000_82546:
1153         case e1000_82546_rev_3:
1154                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1155                         e1000_read_eeprom(hw,
1156                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1157                         break;
1158                 }
1159                 /* Fall Through */
1160         default:
1161                 e1000_read_eeprom(hw,
1162                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1163                 break;
1164         }
1165         if (eeprom_data & eeprom_apme_mask)
1166                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1167
1168         /* now that we have the eeprom settings, apply the special cases
1169          * where the eeprom may be wrong or the board simply won't support
1170          * wake on lan on a particular port */
1171         switch (pdev->device) {
1172         case E1000_DEV_ID_82546GB_PCIE:
1173                 adapter->eeprom_wol = 0;
1174                 break;
1175         case E1000_DEV_ID_82546EB_FIBER:
1176         case E1000_DEV_ID_82546GB_FIBER:
1177                 /* Wake events only supported on port A for dual fiber
1178                  * regardless of eeprom setting */
1179                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1180                         adapter->eeprom_wol = 0;
1181                 break;
1182         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1183                 /* if quad port adapter, disable WoL on all but port A */
1184                 if (global_quad_port_a != 0)
1185                         adapter->eeprom_wol = 0;
1186                 else
1187                         adapter->quad_port_a = true;
1188                 /* Reset for multiple quad port adapters */
1189                 if (++global_quad_port_a == 4)
1190                         global_quad_port_a = 0;
1191                 break;
1192         }
1193
1194         /* initialize the wol settings based on the eeprom settings */
1195         adapter->wol = adapter->eeprom_wol;
1196         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1197
1198         /* Auto detect PHY address */
1199         if (hw->mac_type == e1000_ce4100) {
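                /* Probe MDIO addresses 0-31 and keep the first whose PHY_ID2
                 * register reads back as neither 0 nor 0xFF (treated as no
                 * PHY present); bail out to the error path if none respond.
                 */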
1200                 for (i = 0; i < 32; i++) {
1201                         hw->phy_addr = i;
1202                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1203                         if (tmp == 0 || tmp == 0xFF) {
1204                                 if (i == 31)
1205                                         goto err_eeprom;
1206                                 continue;
1207                         } else
1208                                 break;
1209                 }
1210         }
1211
1212         /* reset the hardware with the new settings */
1213         e1000_reset(adapter);
1214
1215         strcpy(netdev->name, "eth%d");
1216         err = register_netdev(netdev);
1217         if (err)
1218                 goto err_register;
1219
1220         e1000_vlan_filter_on_off(adapter, false);
1221
1222         /* print bus type/speed/width info */
1223         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1224                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1225                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1226                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1227                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1228                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1229                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1230                netdev->dev_addr);
1231
1232         /* carrier off reporting is important to ethtool even BEFORE open */
1233         netif_carrier_off(netdev);
1234
1235         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1236
1237         cards_found++;
1238         return 0;
1239
1240 err_register:
1241 err_eeprom:
1242         e1000_phy_hw_reset(hw);
1243
1244         if (hw->flash_address)
1245                 iounmap(hw->flash_address);
1246         kfree(adapter->tx_ring);
1247         kfree(adapter->rx_ring);
1248 err_dma:
1249 err_sw_init:
1250 err_mdio_ioremap:
1251         iounmap(hw->ce4100_gbe_mdio_base_virt);
1252         iounmap(hw->hw_addr);
1253 err_ioremap:
1254         free_netdev(netdev);
1255 err_alloc_etherdev:
1256         pci_release_selected_regions(pdev, bars);
1257 err_pci_reg:
1258         pci_disable_device(pdev);
1259         return err;
1260 }
1261
1262 /**
1263  * e1000_remove - Device Removal Routine
1264  * @pdev: PCI device information struct
1265  *
1266  * e1000_remove is called by the PCI subsystem to alert the driver
1267  * that it should release a PCI device.  This could be caused by a
1268  * Hot-Plug event, or because the driver is going to be removed from
1269  * memory.
1270  **/
1271
1272 static void __devexit e1000_remove(struct pci_dev *pdev)
1273 {
1274         struct net_device *netdev = pci_get_drvdata(pdev);
1275         struct e1000_adapter *adapter = netdev_priv(netdev);
1276         struct e1000_hw *hw = &adapter->hw;
1277
1278         e1000_down_and_stop(adapter);
1279         e1000_release_manageability(adapter);
1280
1281         unregister_netdev(netdev);
1282
1283         e1000_phy_hw_reset(hw);
1284
1285         kfree(adapter->tx_ring);
1286         kfree(adapter->rx_ring);
1287
1288         if (hw->mac_type == e1000_ce4100)
1289                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1290         iounmap(hw->hw_addr);
1291         if (hw->flash_address)
1292                 iounmap(hw->flash_address);
1293         pci_release_selected_regions(pdev, adapter->bars);
1294
1295         free_netdev(netdev);
1296
1297         pci_disable_device(pdev);
1298 }
1299
1300 /**
1301  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1302  * @adapter: board private structure to initialize
1303  *
1304  * e1000_sw_init initializes the Adapter private data structure.
1305  * e1000_init_hw_struct MUST be called before this function
1306  **/
1307
1308 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1309 {
1310         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1311
1312         adapter->num_tx_queues = 1;
1313         adapter->num_rx_queues = 1;
1314
1315         if (e1000_alloc_queues(adapter)) {
1316                 e_err(probe, "Unable to allocate memory for queues\n");
1317                 return -ENOMEM;
1318         }
1319
1320         /* Explicitly disable IRQ since the NIC can be in any state. */
1321         e1000_irq_disable(adapter);
1322
1323         spin_lock_init(&adapter->stats_lock);
1324         mutex_init(&adapter->mutex);
1325
1326         set_bit(__E1000_DOWN, &adapter->flags);
1327
1328         return 0;
1329 }
1330
1331 /**
1332  * e1000_alloc_queues - Allocate memory for all rings
1333  * @adapter: board private structure to initialize
1334  *
1335  * We allocate one ring per queue at run-time since we don't know the
1336  * number of queues at compile-time.
1337  **/
1338
1339 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1340 {
1341         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1342                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1343         if (!adapter->tx_ring)
1344                 return -ENOMEM;
1345
1346         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1347                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1348         if (!adapter->rx_ring) {
1349                 kfree(adapter->tx_ring);
1350                 return -ENOMEM;
1351         }
1352
1353         return E1000_SUCCESS;
1354 }
1355
1356 /**
1357  * e1000_open - Called when a network interface is made active
1358  * @netdev: network interface device structure
1359  *
1360  * Returns 0 on success, negative value on failure
1361  *
1362  * The open entry point is called when a network interface is made
1363  * active by the system (IFF_UP).  At this point all resources needed
1364  * for transmit and receive operations are allocated, the interrupt
1365  * handler is registered with the OS, the watchdog task is started,
1366  * and the stack is notified that the interface is ready.
1367  **/
1368
1369 static int e1000_open(struct net_device *netdev)
1370 {
1371         struct e1000_adapter *adapter = netdev_priv(netdev);
1372         struct e1000_hw *hw = &adapter->hw;
1373         int err;
1374
1375         /* disallow open during test */
1376         if (test_bit(__E1000_TESTING, &adapter->flags))
1377                 return -EBUSY;
1378
1379         netif_carrier_off(netdev);
1380
1381         /* allocate transmit descriptors */
1382         err = e1000_setup_all_tx_resources(adapter);
1383         if (err)
1384                 goto err_setup_tx;
1385
1386         /* allocate receive descriptors */
1387         err = e1000_setup_all_rx_resources(adapter);
1388         if (err)
1389                 goto err_setup_rx;
1390
1391         e1000_power_up_phy(adapter);
1392
1393         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1394         if ((hw->mng_cookie.status &
1395                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1396                 e1000_update_mng_vlan(adapter);
1397         }
1398
1399         /* before we allocate an interrupt, we must be ready to handle it.
1400          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1401          * as soon as we call pci_request_irq, so we have to setup our
1402          * clean_rx handler before we do so.  */
1403         e1000_configure(adapter);
1404
1405         err = e1000_request_irq(adapter);
1406         if (err)
1407                 goto err_req_irq;
1408
1409         /* From here on the code is the same as e1000_up() */
1410         clear_bit(__E1000_DOWN, &adapter->flags);
1411
1412         napi_enable(&adapter->napi);
1413
1414         e1000_irq_enable(adapter);
1415
1416         netif_start_queue(netdev);
1417
1418         /* fire a link status change interrupt to start the watchdog */
1419         ew32(ICS, E1000_ICS_LSC);
1420
1421         return E1000_SUCCESS;
1422
1423 err_req_irq:
1424         e1000_power_down_phy(adapter);
1425         e1000_free_all_rx_resources(adapter);
1426 err_setup_rx:
1427         e1000_free_all_tx_resources(adapter);
1428 err_setup_tx:
1429         e1000_reset(adapter);
1430
1431         return err;
1432 }
1433
1434 /**
1435  * e1000_close - Disables a network interface
1436  * @netdev: network interface device structure
1437  *
1438  * Returns 0, this is not allowed to fail
1439  *
1440  * The close entry point is called when an interface is de-activated
1441  * by the OS.  The hardware is still under the drivers control, but
1442  * needs to be disabled.  A global MAC reset is issued to stop the
1443  * hardware, and all transmit and receive resources are freed.
1444  **/
1445
1446 static int e1000_close(struct net_device *netdev)
1447 {
1448         struct e1000_adapter *adapter = netdev_priv(netdev);
1449         struct e1000_hw *hw = &adapter->hw;
1450
1451         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1452         e1000_down(adapter);
1453         e1000_power_down_phy(adapter);
1454         e1000_free_irq(adapter);
1455
1456         e1000_free_all_tx_resources(adapter);
1457         e1000_free_all_rx_resources(adapter);
1458
1459         /* kill manageability vlan ID if supported, but not if a vlan with
1460          * the same ID is registered on the host OS (let 8021q kill it) */
1461         if ((hw->mng_cookie.status &
1462                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1463              !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1464                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1465         }
1466
1467         return 0;
1468 }
1469
1470 /**
1471  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1472  * @adapter: address of board private structure
1473  * @start: address of beginning of memory
1474  * @len: length of memory
1475  **/
1476 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1477                                   unsigned long len)
1478 {
1479         struct e1000_hw *hw = &adapter->hw;
1480         unsigned long begin = (unsigned long)start;
1481         unsigned long end = begin + len;
1482
1483         /* First-revision 82545 and 82546 parts must not allow any memory
1484          * write location to cross a 64 kB boundary due to errata 23 */
1485         if (hw->mac_type == e1000_82545 ||
1486             hw->mac_type == e1000_ce4100 ||
1487             hw->mac_type == e1000_82546) {
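                     /* begin and end - 1 must share the same upper address
                      * bits (bit 16 and above); if they differ, the buffer
                      * crosses a 64 kB boundary and the check fails.
                      */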
1488                 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1489         }
1490
1491         return true;
1492 }
1493
1494 /**
1495  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1496  * @adapter: board private structure
1497  * @txdr:    tx descriptor ring (for a specific queue) to setup
1498  *
1499  * Return 0 on success, negative on failure
1500  **/
1501
1502 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1503                                     struct e1000_tx_ring *txdr)
1504 {
1505         struct pci_dev *pdev = adapter->pdev;
1506         int size;
1507
1508         size = sizeof(struct e1000_buffer) * txdr->count;
1509         txdr->buffer_info = vzalloc(size);
1510         if (!txdr->buffer_info) {
1511                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1512                       "ring\n");
1513                 return -ENOMEM;
1514         }
1515
1516         /* round up to nearest 4K */
1517
1518         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1519         txdr->size = ALIGN(txdr->size, 4096);
1520
1521         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1522                                         GFP_KERNEL);
1523         if (!txdr->desc) {
1524 setup_tx_desc_die:
1525                 vfree(txdr->buffer_info);
1526                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1527                       "ring\n");
1528                 return -ENOMEM;
1529         }
1530
1531         /* Fix for errata 23, can't cross 64kB boundary */
1532         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1533                 void *olddesc = txdr->desc;
1534                 dma_addr_t olddma = txdr->dma;
1535                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1536                       txdr->size, txdr->desc);
1537                 /* Try again, without freeing the previous */
1538                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1539                                                 &txdr->dma, GFP_KERNEL);
1540                 /* Failed allocation, critical failure */
1541                 if (!txdr->desc) {
1542                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1543                                           olddma);
1544                         goto setup_tx_desc_die;
1545                 }
1546
1547                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1548                         /* give up */
1549                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1550                                           txdr->dma);
1551                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1552                                           olddma);
1553                         e_err(probe, "Unable to allocate aligned memory "
1554                               "for the transmit descriptor ring\n");
1555                         vfree(txdr->buffer_info);
1556                         return -ENOMEM;
1557                 } else {
1558                         /* Free old allocation, new allocation was successful */
1559                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1560                                           olddma);
1561                 }
1562         }
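             /* Clear the ring explicitly; the coherent allocation is not
              * assumed to be zero-filled.
              */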
1563         memset(txdr->desc, 0, txdr->size);
1564
1565         txdr->next_to_use = 0;
1566         txdr->next_to_clean = 0;
1567
1568         return 0;
1569 }
1570
1571 /**
1572  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1573  *                                (Descriptors) for all queues
1574  * @adapter: board private structure
1575  *
1576  * Return 0 on success, negative on failure
1577  **/
1578
1579 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1580 {
1581         int i, err = 0;
1582
1583         for (i = 0; i < adapter->num_tx_queues; i++) {
1584                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1585                 if (err) {
1586                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
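                             /* Unwind: free the rings that were already set up */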
1587                         for (i--; i >= 0; i--)
1588                                 e1000_free_tx_resources(adapter,
1589                                                         &adapter->tx_ring[i]);
1590                         break;
1591                 }
1592         }
1593
1594         return err;
1595 }
1596
1597 /**
1598  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1599  * @adapter: board private structure
1600  *
1601  * Configure the Tx unit of the MAC after a reset.
1602  **/
1603
1604 static void e1000_configure_tx(struct e1000_adapter *adapter)
1605 {
1606         u64 tdba;
1607         struct e1000_hw *hw = &adapter->hw;
1608         u32 tdlen, tctl, tipg;
1609         u32 ipgr1, ipgr2;
1610
1611         /* Setup the HW Tx Head and Tail descriptor pointers */
1612
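             /* Only the single Tx queue case is handled below; the switch
              * has no other arms.
              */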
1613         switch (adapter->num_tx_queues) {
1614         case 1:
1615         default:
1616                 tdba = adapter->tx_ring[0].dma;
1617                 tdlen = adapter->tx_ring[0].count *
1618                         sizeof(struct e1000_tx_desc);
1619                 ew32(TDLEN, tdlen);
1620                 ew32(TDBAH, (tdba >> 32));
1621                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1622                 ew32(TDT, 0);
1623                 ew32(TDH, 0);
1624                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1625                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1626                 break;
1627         }
1628
1629         /* Set the default values for the Tx Inter Packet Gap timer */
1630         if ((hw->media_type == e1000_media_type_fiber ||
1631              hw->media_type == e1000_media_type_internal_serdes))
1632                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1633         else
1634                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1635
1636         switch (hw->mac_type) {
1637         case e1000_82542_rev2_0:
1638         case e1000_82542_rev2_1:
1639                 tipg = DEFAULT_82542_TIPG_IPGT;
1640                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1641                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1642                 break;
1643         default:
1644                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1645                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1646                 break;
1647         }
1648         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1649         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1650         ew32(TIPG, tipg);
1651
1652         /* Set the Tx Interrupt Delay register */
1653
1654         ew32(TIDV, adapter->tx_int_delay);
1655         if (hw->mac_type >= e1000_82540)
1656                 ew32(TADV, adapter->tx_abs_int_delay);
1657
1658         /* Program the Transmit Control Register */
1659
1660         tctl = er32(TCTL);
1661         tctl &= ~E1000_TCTL_CT;
1662         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1663                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1664
1665         e1000_config_collision_dist(hw);
1666
1667         /* Setup Transmit Descriptor Settings for eop descriptor */
1668         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1669
1670         /* only set IDE if we are delaying interrupts using the timers */
1671         if (adapter->tx_int_delay)
1672                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1673
1674         if (hw->mac_type < e1000_82543)
1675                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1676         else
1677                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1678
1679         /* Cache if we're 82544 running in PCI-X because we'll
1680          * need this to apply a workaround later in the send path. */
1681         if (hw->mac_type == e1000_82544 &&
1682             hw->bus_type == e1000_bus_type_pcix)
1683                 adapter->pcix_82544 = true;
1684
1685         ew32(TCTL, tctl);
1686
1687 }
1688
1689 /**
1690  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1691  * @adapter: board private structure
1692  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1693  *
1694  * Returns 0 on success, negative on failure
1695  **/
1696
1697 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1698                                     struct e1000_rx_ring *rxdr)
1699 {
1700         struct pci_dev *pdev = adapter->pdev;
1701         int size, desc_len;
1702
1703         size = sizeof(struct e1000_buffer) * rxdr->count;
1704         rxdr->buffer_info = vzalloc(size);
1705         if (!rxdr->buffer_info) {
1706                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1707                       "ring\n");
1708                 return -ENOMEM;
1709         }
1710
1711         desc_len = sizeof(struct e1000_rx_desc);
1712
1713         /* Round up to nearest 4K */
1714
1715         rxdr->size = rxdr->count * desc_len;
1716         rxdr->size = ALIGN(rxdr->size, 4096);
1717
1718         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1719                                         GFP_KERNEL);
1720
1721         if (!rxdr->desc) {
1722                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1723                       "ring\n");
1724 setup_rx_desc_die:
1725                 vfree(rxdr->buffer_info);
1726                 return -ENOMEM;
1727         }
1728
1729         /* Fix for errata 23, can't cross 64kB boundary */
1730         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1731                 void *olddesc = rxdr->desc;
1732                 dma_addr_t olddma = rxdr->dma;
1733                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1734                       rxdr->size, rxdr->desc);
1735                 /* Try again, without freeing the previous */
1736                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1737                                                 &rxdr->dma, GFP_KERNEL);
1738                 /* Failed allocation, critical failure */
1739                 if (!rxdr->desc) {
1740                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1741                                           olddma);
1742                         e_err(probe, "Unable to allocate memory for the Rx "
1743                               "descriptor ring\n");
1744                         goto setup_rx_desc_die;
1745                 }
1746
1747                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1748                         /* give up */
1749                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1750                                           rxdr->dma);
1751                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1752                                           olddma);
1753                         e_err(probe, "Unable to allocate aligned memory for "
1754                               "the Rx descriptor ring\n");
1755                         goto setup_rx_desc_die;
1756                 } else {
1757                         /* Free old allocation, new allocation was successful */
1758                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1759                                           olddma);
1760                 }
1761         }
1762         memset(rxdr->desc, 0, rxdr->size);
1763
1764         rxdr->next_to_clean = 0;
1765         rxdr->next_to_use = 0;
1766         rxdr->rx_skb_top = NULL;
1767
1768         return 0;
1769 }
1770
1771 /**
1772  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1773  *                                (Descriptors) for all queues
1774  * @adapter: board private structure
1775  *
1776  * Return 0 on success, negative on failure
1777  **/
1778
1779 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1780 {
1781         int i, err = 0;
1782
1783         for (i = 0; i < adapter->num_rx_queues; i++) {
1784                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1785                 if (err) {
1786                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1787                         for (i--; i >= 0; i--)
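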
1788                                 e1000_free_rx_resources(adapter,
1789                                                         &adapter->rx_ring[i]);
1790                         break;
1791                 }
1792         }
1793
1794         return err;
1795 }
1796
1797 /**
1798  * e1000_setup_rctl - configure the receive control registers
1799  * @adapter: Board private structure
1800  **/
1801 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1802 {
1803         struct e1000_hw *hw = &adapter->hw;
1804         u32 rctl;
1805
1806         rctl = er32(RCTL);
1807
1808         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1809
1810         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1811                 E1000_RCTL_RDMTS_HALF |
1812                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1813
1814         if (hw->tbi_compatibility_on == 1)
1815                 rctl |= E1000_RCTL_SBP;
1816         else
1817                 rctl &= ~E1000_RCTL_SBP;
1818
1819         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1820                 rctl &= ~E1000_RCTL_LPE;
1821         else
1822                 rctl |= E1000_RCTL_LPE;
1823
1824         /* Setup buffer sizes */
1825         rctl &= ~E1000_RCTL_SZ_4096;
1826         rctl |= E1000_RCTL_BSEX;
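             /* Assume the extended buffer-size encoding (BSEX) is needed;
              * the default 2048-byte case below clears it again.
              */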
1827         switch (adapter->rx_buffer_len) {
1828         case E1000_RXBUFFER_2048:
1829         default:
1830                 rctl |= E1000_RCTL_SZ_2048;
1831                 rctl &= ~E1000_RCTL_BSEX;
1832                 break;
1833         case E1000_RXBUFFER_4096:
1834                 rctl |= E1000_RCTL_SZ_4096;
1835                 break;
1836         case E1000_RXBUFFER_8192:
1837                 rctl |= E1000_RCTL_SZ_8192;
1838                 break;
1839         case E1000_RXBUFFER_16384:
1840                 rctl |= E1000_RCTL_SZ_16384;
1841                 break;
1842         }
1843
1844         ew32(RCTL, rctl);
1845 }
1846
1847 /**
1848  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1849  * @adapter: board private structure
1850  *
1851  * Configure the Rx unit of the MAC after a reset.
1852  **/
1853
1854 static void e1000_configure_rx(struct e1000_adapter *adapter)
1855 {
1856         u64 rdba;
1857         struct e1000_hw *hw = &adapter->hw;
1858         u32 rdlen, rctl, rxcsum;
1859
1860         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1861                 rdlen = adapter->rx_ring[0].count *
1862                         sizeof(struct e1000_rx_desc);
1863                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1864                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1865         } else {
1866                 rdlen = adapter->rx_ring[0].count *
1867                         sizeof(struct e1000_rx_desc);
1868                 adapter->clean_rx = e1000_clean_rx_irq;
1869                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1870         }
1871
1872         /* disable receives while setting up the descriptors */
1873         rctl = er32(RCTL);
1874         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1875
1876         /* set the Receive Delay Timer Register */
1877         ew32(RDTR, adapter->rx_int_delay);
1878
1879         if (hw->mac_type >= e1000_82540) {
1880                 ew32(RADV, adapter->rx_abs_int_delay);
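                     /* ITR is programmed in units of 256 ns; convert the
                      * requested interrupts/sec rate into that interval.
                      */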
1881                 if (adapter->itr_setting != 0)
1882                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1883         }
1884
1885         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1886          * the Base and Length of the Rx Descriptor Ring */
1887         switch (adapter->num_rx_queues) {
1888         case 1:
1889         default:
1890                 rdba = adapter->rx_ring[0].dma;
1891                 ew32(RDLEN, rdlen);
1892                 ew32(RDBAH, (rdba >> 32));
1893                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1894                 ew32(RDT, 0);
1895                 ew32(RDH, 0);
1896                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1897                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
1898                 break;
1899         }
1900
1901         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1902         if (hw->mac_type >= e1000_82543) {
1903                 rxcsum = er32(RXCSUM);
1904                 if (adapter->rx_csum)
1905                         rxcsum |= E1000_RXCSUM_TUOFL;
1906                 else
1907                         /* don't need to clear IPPCSE as it defaults to 0 */
1908                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1909                 ew32(RXCSUM, rxcsum);
1910         }
1911
1912         /* Enable Receives */
1913         ew32(RCTL, rctl | E1000_RCTL_EN);
1914 }
1915
1916 /**
1917  * e1000_free_tx_resources - Free Tx Resources per Queue
1918  * @adapter: board private structure
1919  * @tx_ring: Tx descriptor ring for a specific queue
1920  *
1921  * Free all transmit software resources
1922  **/
1923
1924 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925                                     struct e1000_tx_ring *tx_ring)
1926 {
1927         struct pci_dev *pdev = adapter->pdev;
1928
1929         e1000_clean_tx_ring(adapter, tx_ring);
1930
1931         vfree(tx_ring->buffer_info);
1932         tx_ring->buffer_info = NULL;
1933
1934         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935                           tx_ring->dma);
1936
1937         tx_ring->desc = NULL;
1938 }
1939
1940 /**
1941  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942  * @adapter: board private structure
1943  *
1944  * Free all transmit software resources
1945  **/
1946
1947 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1948 {
1949         int i;
1950
1951         for (i = 0; i < adapter->num_tx_queues; i++)
1952                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1953 }
1954
1955 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956                                              struct e1000_buffer *buffer_info)
1957 {
1958         if (buffer_info->dma) {
1959                 if (buffer_info->mapped_as_page)
1960                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1961                                        buffer_info->length, DMA_TO_DEVICE);
1962                 else
1963                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1964                                          buffer_info->length,
1965                                          DMA_TO_DEVICE);
1966                 buffer_info->dma = 0;
1967         }
1968         if (buffer_info->skb) {
1969                 dev_kfree_skb_any(buffer_info->skb);
1970                 buffer_info->skb = NULL;
1971         }
1972         buffer_info->time_stamp = 0;
1973         /* buffer_info must be completely set up in the transmit path */
1974 }
1975
1976 /**
1977  * e1000_clean_tx_ring - Free Tx Buffers
1978  * @adapter: board private structure
1979  * @tx_ring: ring to be cleaned
1980  **/
1981
1982 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1983                                 struct e1000_tx_ring *tx_ring)
1984 {
1985         struct e1000_hw *hw = &adapter->hw;
1986         struct e1000_buffer *buffer_info;
1987         unsigned long size;
1988         unsigned int i;
1989
1990         /* Free all the Tx ring sk_buffs */
1991
1992         for (i = 0; i < tx_ring->count; i++) {
1993                 buffer_info = &tx_ring->buffer_info[i];
1994                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1995         }
1996
1997         size = sizeof(struct e1000_buffer) * tx_ring->count;
1998         memset(tx_ring->buffer_info, 0, size);
1999
2000         /* Zero out the descriptor ring */
2001
2002         memset(tx_ring->desc, 0, tx_ring->size);
2003
2004         tx_ring->next_to_use = 0;
2005         tx_ring->next_to_clean = 0;
2006         tx_ring->last_tx_tso = false;
2007
2008         writel(0, hw->hw_addr + tx_ring->tdh);
2009         writel(0, hw->hw_addr + tx_ring->tdt);
2010 }
2011
2012 /**
2013  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2014  * @adapter: board private structure
2015  **/
2016
2017 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2018 {
2019         int i;
2020
2021         for (i = 0; i < adapter->num_tx_queues; i++)
2022                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023 }
2024
2025 /**
2026  * e1000_free_rx_resources - Free Rx Resources
2027  * @adapter: board private structure
2028  * @rx_ring: ring to clean the resources from
2029  *
2030  * Free all receive software resources
2031  **/
2032
2033 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2034                                     struct e1000_rx_ring *rx_ring)
2035 {
2036         struct pci_dev *pdev = adapter->pdev;
2037
2038         e1000_clean_rx_ring(adapter, rx_ring);
2039
2040         vfree(rx_ring->buffer_info);
2041         rx_ring->buffer_info = NULL;
2042
2043         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2044                           rx_ring->dma);
2045
2046         rx_ring->desc = NULL;
2047 }
2048
2049 /**
2050  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2051  * @adapter: board private structure
2052  *
2053  * Free all receive software resources
2054  **/
2055
2056 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2057 {
2058         int i;
2059
2060         for (i = 0; i < adapter->num_rx_queues; i++)
2061                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2062 }
2063
2064 /**
2065  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2066  * @adapter: board private structure
2067  * @rx_ring: ring to free buffers from
2068  **/
2069
2070 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2071                                 struct e1000_rx_ring *rx_ring)
2072 {
2073         struct e1000_hw *hw = &adapter->hw;
2074         struct e1000_buffer *buffer_info;
2075         struct pci_dev *pdev = adapter->pdev;
2076         unsigned long size;
2077         unsigned int i;
2078
2079         /* Free all the Rx ring sk_buffs */
2080         for (i = 0; i < rx_ring->count; i++) {
2081                 buffer_info = &rx_ring->buffer_info[i];
2082                 if (buffer_info->dma &&
2083                     adapter->clean_rx == e1000_clean_rx_irq) {
2084                         dma_unmap_single(&pdev->dev, buffer_info->dma,
2085                                          buffer_info->length,
2086                                          DMA_FROM_DEVICE);
2087                 } else if (buffer_info->dma &&
2088                            adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2089                         dma_unmap_page(&pdev->dev, buffer_info->dma,
2090                                        buffer_info->length,
2091                                        DMA_FROM_DEVICE);
2092                 }
2093
2094                 buffer_info->dma = 0;
2095                 if (buffer_info->page) {
2096                         put_page(buffer_info->page);
2097                         buffer_info->page = NULL;
2098                 }
2099                 if (buffer_info->skb) {
2100                         dev_kfree_skb(buffer_info->skb);
2101                         buffer_info->skb = NULL;
2102                 }
2103         }
2104
2105         /* there also may be some cached data from a chained receive */
2106         if (rx_ring->rx_skb_top) {
2107                 dev_kfree_skb(rx_ring->rx_skb_top);
2108                 rx_ring->rx_skb_top = NULL;
2109         }
2110
2111         size = sizeof(struct e1000_buffer) * rx_ring->count;
2112         memset(rx_ring->buffer_info, 0, size);
2113
2114         /* Zero out the descriptor ring */
2115         memset(rx_ring->desc, 0, rx_ring->size);
2116
2117         rx_ring->next_to_clean = 0;
2118         rx_ring->next_to_use = 0;
2119
2120         writel(0, hw->hw_addr + rx_ring->rdh);
2121         writel(0, hw->hw_addr + rx_ring->rdt);
2122 }
2123
2124 /**
2125  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2126  * @adapter: board private structure
2127  **/
2128
2129 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2130 {
2131         int i;
2132
2133         for (i = 0; i < adapter->num_rx_queues; i++)
2134                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2135 }
2136
2137 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2138  * and memory write and invalidate disabled for certain operations
2139  */
2140 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2141 {
2142         struct e1000_hw *hw = &adapter->hw;
2143         struct net_device *netdev = adapter->netdev;
2144         u32 rctl;
2145
2146         e1000_pci_clear_mwi(hw);
2147
2148         rctl = er32(RCTL);
2149         rctl |= E1000_RCTL_RST;
2150         ew32(RCTL, rctl);
2151         E1000_WRITE_FLUSH();
2152         mdelay(5);
2153
2154         if (netif_running(netdev))
2155                 e1000_clean_all_rx_rings(adapter);
2156 }
2157
2158 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2159 {
2160         struct e1000_hw *hw = &adapter->hw;
2161         struct net_device *netdev = adapter->netdev;
2162         u32 rctl;
2163
2164         rctl = er32(RCTL);
2165         rctl &= ~E1000_RCTL_RST;
2166         ew32(RCTL, rctl);
2167         E1000_WRITE_FLUSH();
2168         mdelay(5);
2169
2170         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2171                 e1000_pci_set_mwi(hw);
2172
2173         if (netif_running(netdev)) {
2174                 /* No need to loop, because 82542 supports only 1 queue */
2175                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2176                 e1000_configure_rx(adapter);
2177                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2178         }
2179 }
2180
2181 /**
2182  * e1000_set_mac - Change the Ethernet Address of the NIC
2183  * @netdev: network interface device structure
2184  * @p: pointer to an address structure
2185  *
2186  * Returns 0 on success, negative on failure
2187  **/
2188
2189 static int e1000_set_mac(struct net_device *netdev, void *p)
2190 {
2191         struct e1000_adapter *adapter = netdev_priv(netdev);
2192         struct e1000_hw *hw = &adapter->hw;
2193         struct sockaddr *addr = p;
2194
2195         if (!is_valid_ether_addr(addr->sa_data))
2196                 return -EADDRNOTAVAIL;
2197
2198         /* 82542 2.0 needs to be in reset to write receive address registers */
2199
2200         if (hw->mac_type == e1000_82542_rev2_0)
2201                 e1000_enter_82542_rst(adapter);
2202
2203         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2204         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2205
2206         e1000_rar_set(hw, hw->mac_addr, 0);
2207
2208         if (hw->mac_type == e1000_82542_rev2_0)
2209                 e1000_leave_82542_rst(adapter);
2210
2211         return 0;
2212 }
2213
2214 /**
2215  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2216  * @netdev: network interface device structure
2217  *
2218  * The set_rx_mode entry point is called whenever the unicast or multicast
2219  * address lists or the network interface flags are updated. This routine is
2220  * responsible for configuring the hardware for proper unicast, multicast,
2221  * promiscuous mode, and all-multi behavior.
2222  **/
2223
2224 static void e1000_set_rx_mode(struct net_device *netdev)
2225 {
2226         struct e1000_adapter *adapter = netdev_priv(netdev);
2227         struct e1000_hw *hw = &adapter->hw;
2228         struct netdev_hw_addr *ha;
2229         bool use_uc = false;
2230         u32 rctl;
2231         u32 hash_value;
2232         int i, rar_entries = E1000_RAR_ENTRIES;
2233         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2234         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2235
2236         if (!mcarray) {
2237                 e_err(probe, "memory allocation failed\n");
2238                 return;
2239         }
2240
2241         /* Check for Promiscuous and All Multicast modes */
2242
2243         rctl = er32(RCTL);
2244
2245         if (netdev->flags & IFF_PROMISC) {
2246                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2247                 rctl &= ~E1000_RCTL_VFE;
2248         } else {
2249                 if (netdev->flags & IFF_ALLMULTI)
2250                         rctl |= E1000_RCTL_MPE;
2251                 else
2252                         rctl &= ~E1000_RCTL_MPE;
2253                 /* Enable VLAN filter if there is a VLAN */
2254                 if (e1000_vlan_used(adapter))
2255                         rctl |= E1000_RCTL_VFE;
2256         }
2257
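             /* With more unicast addresses than spare receive-address
              * registers (RAR 0 holds the station address), fall back to
              * unicast promiscuous mode.
              */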
2258         if (netdev_uc_count(netdev) > rar_entries - 1) {
2259                 rctl |= E1000_RCTL_UPE;
2260         } else if (!(netdev->flags & IFF_PROMISC)) {
2261                 rctl &= ~E1000_RCTL_UPE;
2262                 use_uc = true;
2263         }
2264
2265         ew32(RCTL, rctl);
2266
2267         /* 82542 2.0 needs to be in reset to write receive address registers */
2268
2269         if (hw->mac_type == e1000_82542_rev2_0)
2270                 e1000_enter_82542_rst(adapter);
2271
2272         /* load the first 14 addresses into the exact filters 1-14. Unicast
2273          * addresses take precedence to avoid disabling unicast filtering
2274          * when possible.
2275          *
2276          * RAR 0 is used for the station MAC address.
2277          * If there are fewer than 14 addresses, go ahead and clear the filters.
2278          */
2279         i = 1;
2280         if (use_uc)
2281                 netdev_for_each_uc_addr(ha, netdev) {
2282                         if (i == rar_entries)
2283                                 break;
2284                         e1000_rar_set(hw, ha->addr, i++);
2285                 }
2286
2287         netdev_for_each_mc_addr(ha, netdev) {
2288                 if (i == rar_entries) {
2289                         /* load any remaining addresses into the hash table */
2290                         u32 hash_reg, hash_bit, mta;
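                             /* Bits 11:5 of the hash select one of the 128
                              * 32-bit MTA registers; bits 4:0 select the bit
                              * within that register.
                              */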
2291                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2292                         hash_reg = (hash_value >> 5) & 0x7F;
2293                         hash_bit = hash_value & 0x1F;
2294                         mta = (1 << hash_bit);
2295                         mcarray[hash_reg] |= mta;
2296                 } else {
2297                         e1000_rar_set(hw, ha->addr, i++);
2298                 }
2299         }
2300
2301         for (; i < rar_entries; i++) {
2302                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2303                 E1000_WRITE_FLUSH();
2304                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2305                 E1000_WRITE_FLUSH();
2306         }
2307
2308         /* Write the hash table completely; writing it backwards avoids
2309          * problems with write-combining chipsets and avoids flushing each write */
2310         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2311                 /*
2312                  * The 82544 has an erratum where writing odd offsets
2313                  * overwrites the previous even offset; writing backwards
2314                  * over the range works around the issue by always
2315                  * writing the odd offset first.
2316                  */
2317                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2318         }
2319         E1000_WRITE_FLUSH();
2320
2321         if (hw->mac_type == e1000_82542_rev2_0)
2322                 e1000_leave_82542_rst(adapter);
2323
2324         kfree(mcarray);
2325 }
2326
2327 /**
2328  * e1000_update_phy_info_task - get phy info
2329  * @work: work struct contained inside adapter struct
2330  *
2331  * Need to wait a few seconds after link up to get diagnostic information from
2332  * the phy
2333  */
2334 static void e1000_update_phy_info_task(struct work_struct *work)
2335 {
2336         struct e1000_adapter *adapter = container_of(work,
2337                                                      struct e1000_adapter,
2338                                                      phy_info_task.work);
2339         if (test_bit(__E1000_DOWN, &adapter->flags))
2340                 return;
2341         mutex_lock(&adapter->mutex);
2342         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2343         mutex_unlock(&adapter->mutex);
2344 }
2345
2346 /**
2347  * e1000_82547_tx_fifo_stall_task - task to complete work
2348  * e1000_82547_tx_fifo_stall_task - restart the Tx queue once the 82547 FIFO drains
2349  **/
2350 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2351 {
2352         struct e1000_adapter *adapter = container_of(work,
2353                                                      struct e1000_adapter,
2354                                                      fifo_stall_task.work);
2355         struct e1000_hw *hw = &adapter->hw;
2356         struct net_device *netdev = adapter->netdev;
2357         u32 tctl;
2358
2359         if (test_bit(__E1000_DOWN, &adapter->flags))
2360                 return;
2361         mutex_lock(&adapter->mutex);
2362         if (atomic_read(&adapter->tx_fifo_stall)) {
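                     /* Only reset the FIFO pointers once the hardware has
                      * gone idle: the descriptor head must equal the tail
                      * and the FIFO head/tail (and saved copies) must match.
                      */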
2363                 if ((er32(TDT) == er32(TDH)) &&
2364                    (er32(TDFT) == er32(TDFH)) &&
2365                    (er32(TDFTS) == er32(TDFHS))) {
2366                         tctl = er32(TCTL);
2367                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2368                         ew32(TDFT, adapter->tx_head_addr);
2369                         ew32(TDFH, adapter->tx_head_addr);
2370                         ew32(TDFTS, adapter->tx_head_addr);
2371                         ew32(TDFHS, adapter->tx_head_addr);
2372                         ew32(TCTL, tctl);
2373                         E1000_WRITE_FLUSH();
2374
2375                         adapter->tx_fifo_head = 0;
2376                         atomic_set(&adapter->tx_fifo_stall, 0);
2377                         netif_wake_queue(netdev);
2378                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2379                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2380                 }
2381         }
2382         mutex_unlock(&adapter->mutex);
2383 }
2384
2385 bool e1000_has_link(struct e1000_adapter *adapter)
2386 {
2387         struct e1000_hw *hw = &adapter->hw;
2388         bool link_active = false;
2389
2390         /* get_link_status is set on LSC (link status) interrupt or rx
2391          * sequence error interrupt (except on intel ce4100).
2392          * The link is reported as down until e1000_check_for_link
2393          * establishes link; this applies to copper adapters
2394          * ONLY
2395          */
2396         switch (hw->media_type) {
2397         case e1000_media_type_copper:
2398                 if (hw->mac_type == e1000_ce4100)
2399                         hw->get_link_status = 1;
2400                 if (hw->get_link_status) {
2401                         e1000_check_for_link(hw);
2402                         link_active = !hw->get_link_status;
2403                 } else {
2404                         link_active = true;
2405                 }
2406                 break;
2407         case e1000_media_type_fiber:
2408                 e1000_check_for_link(hw);
2409                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2410                 break;
2411         case e1000_media_type_internal_serdes:
2412                 e1000_check_for_link(hw);
2413                 link_active = hw->serdes_has_link;
2414                 break;
2415         default:
2416                 break;
2417         }
2418
2419         return link_active;
2420 }
2421
2422 /**
2423  * e1000_watchdog - work function
2424  * @work: work struct contained inside adapter struct
2425  **/
2426 static void e1000_watchdog(struct work_struct *work)
2427 {
2428         struct e1000_adapter *adapter = container_of(work,
2429                                                      struct e1000_adapter,
2430                                                      watchdog_task.work);
2431         struct e1000_hw *hw = &adapter->hw;
2432         struct net_device *netdev = adapter->netdev;
2433         struct e1000_tx_ring *txdr = adapter->tx_ring;
2434         u32 link, tctl;
2435
2436         if (test_bit(__E1000_DOWN, &adapter->flags))
2437                 return;
2438
2439         mutex_lock(&adapter->mutex);
2440         link = e1000_has_link(adapter);
2441         if ((netif_carrier_ok(netdev)) && link)
2442                 goto link_up;
2443
2444         if (link) {
2445                 if (!netif_carrier_ok(netdev)) {
2446                         u32 ctrl;
2447                         bool txb2b = true;
2448                         /* update snapshot of PHY registers on LSC */
2449                         e1000_get_speed_and_duplex(hw,
2450                                                    &adapter->link_speed,
2451                                                    &adapter->link_duplex);
2452
2453                         ctrl = er32(CTRL);
2454                         pr_info("%s NIC Link is Up %d Mbps %s, "
2455                                 "Flow Control: %s\n",
2456                                 netdev->name,
2457                                 adapter->link_speed,
2458                                 adapter->link_duplex == FULL_DUPLEX ?
2459                                 "Full Duplex" : "Half Duplex",
2460                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2461                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2462                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2463                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2464
2465                         /* adjust timeout factor according to speed/duplex */
2466                         adapter->tx_timeout_factor = 1;
2467                         switch (adapter->link_speed) {
2468                         case SPEED_10:
2469                                 txb2b = false;
2470                                 adapter->tx_timeout_factor = 16;
2471                                 break;
2472                         case SPEED_100:
2473                                 txb2b = false;
2474                                 /* maybe add some timeout factor ? */
2475                                 break;
2476                         }
2477
2478                         /* enable transmits in the hardware */
2479                         tctl = er32(TCTL);
2480                         tctl |= E1000_TCTL_EN;
2481                         ew32(TCTL, tctl);
2482
2483                         netif_carrier_on(netdev);
2484                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2485                                 schedule_delayed_work(&adapter->phy_info_task,
2486                                                       2 * HZ);
2487                         adapter->smartspeed = 0;
2488                 }
2489         } else {
2490                 if (netif_carrier_ok(netdev)) {
2491                         adapter->link_speed = 0;
2492                         adapter->link_duplex = 0;
2493                         pr_info("%s NIC Link is Down\n",
2494                                 netdev->name);
2495                         netif_carrier_off(netdev);
2496
2497                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2498                                 schedule_delayed_work(&adapter->phy_info_task,
2499                                                       2 * HZ);
2500                 }
2501
2502                 e1000_smartspeed(adapter);
2503         }
2504
2505 link_up:
2506         e1000_update_stats(adapter);
2507
2508         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2509         adapter->tpt_old = adapter->stats.tpt;
2510         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2511         adapter->colc_old = adapter->stats.colc;
2512
2513         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2514         adapter->gorcl_old = adapter->stats.gorcl;
2515         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2516         adapter->gotcl_old = adapter->stats.gotcl;
2517
2518         e1000_update_adaptive(hw);
2519
2520         if (!netif_carrier_ok(netdev)) {
2521                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2522                         /* We've lost link, so the controller stops DMA,
2523                          * but we've got queued Tx work that's never going
2524                          * to get done, so reset controller to flush Tx.
2525                          * (Do the reset outside of interrupt context). */
2526                         adapter->tx_timeout_count++;
2527                         schedule_work(&adapter->reset_task);
2528                         /* exit immediately since reset is imminent */
2529                         goto unlock;
2530                 }
2531         }
2532
2533         /* Simple mode for Interrupt Throttle Rate (ITR) */
2534         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2535                 /*
2536                  * Symmetric Tx/Rx gets a reduced ITR=2000;
2537                  * Total asymmetrical Tx or Rx gets ITR=8000;
2538                  * everyone else is between 2000-8000.
2539                  */
2540                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2541                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2542                             adapter->gotcl - adapter->gorcl :
2543                             adapter->gorcl - adapter->gotcl) / 10000;
2544                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2545
2546                 ew32(ITR, 1000000000 / (itr * 256));
2547         }
2548
2549         /* Cause software interrupt to ensure rx ring is cleaned */
2550         ew32(ICS, E1000_ICS_RXDMT0);
2551
2552         /* Force detection of hung controller every watchdog period */
2553         adapter->detect_tx_hung = true;
2554
2555         /* Reschedule the task */
2556         if (!test_bit(__E1000_DOWN, &adapter->flags))
2557                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2558
2559 unlock:
2560         mutex_unlock(&adapter->mutex);
2561 }
2562
2563 enum latency_range {
2564         lowest_latency = 0,
2565         low_latency = 1,
2566         bulk_latency = 2,
2567         latency_invalid = 255
2568 };
2569
2570 /**
2571  * e1000_update_itr - update the dynamic ITR value based on statistics
2572  * @adapter: pointer to adapter
2573  * @itr_setting: current adapter->itr
2574  * @packets: the number of packets during this measurement interval
2575  * @bytes: the number of bytes during this measurement interval
2576  *
2577  *      Returns a new ITR value based on packets and byte
2578  *      counts during the last interrupt.  The advantage of per interrupt
2579  *      computation is faster updates and more accurate ITR for the current
2580  *      traffic pattern.  Constants in this function were computed
2581  *      based on theoretical maximum wire speed and thresholds were set based
2582  *      on testing data as well as attempting to minimize response time
2583  *      while increasing bulk throughput.
2584  *      This functionality is controlled by the InterruptThrottleRate module
2585  *      parameter (see e1000_param.c).
2586  **/
2587 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2588                                      u16 itr_setting, int packets, int bytes)
2589 {
2590         unsigned int retval = itr_setting;
2591         struct e1000_hw *hw = &adapter->hw;
2592
2593         if (unlikely(hw->mac_type < e1000_82540))
2594                 goto update_itr_done;
2595
2596         if (packets == 0)
2597                 goto update_itr_done;
2598
2599         switch (itr_setting) {
2600         case lowest_latency:
2601                 /* jumbo frames get bulk treatment */
2602                 if (bytes/packets > 8000)
2603                         retval = bulk_latency;
2604                 else if ((packets < 5) && (bytes > 512))
2605                         retval = low_latency;
2606                 break;
2607         case low_latency:  /* 50 usec aka 20000 ints/s */
2608                 if (bytes > 10000) {
2609                         /* jumbo frames need bulk latency setting */
2610                         if (bytes/packets > 8000)
2611                                 retval = bulk_latency;
2612                         else if ((packets < 10) || ((bytes/packets) > 1200))
2613                                 retval = bulk_latency;
2614                         else if (packets > 35)
2615                                 retval = lowest_latency;
2616                 } else if (bytes/packets > 2000)
2617                         retval = bulk_latency;
2618                 else if (packets <= 2 && bytes < 512)
2619                         retval = lowest_latency;
2620                 break;
2621         case bulk_latency: /* 250 usec aka 4000 ints/s */
2622                 if (bytes > 25000) {
2623                         if (packets > 35)
2624                                 retval = low_latency;
2625                 } else if (bytes < 6000) {
2626                         retval = low_latency;
2627                 }
2628                 break;
2629         }
2630
2631 update_itr_done:
2632         return retval;
2633 }
2634
2635 static void e1000_set_itr(struct e1000_adapter *adapter)
2636 {
2637         struct e1000_hw *hw = &adapter->hw;
2638         u16 current_itr;
2639         u32 new_itr = adapter->itr;
2640
2641         if (unlikely(hw->mac_type < e1000_82540))
2642                 return;
2643
2644         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2645         if (unlikely(adapter->link_speed != SPEED_1000)) {
2646                 current_itr = 0;
2647                 new_itr = 4000;
2648                 goto set_itr_now;
2649         }
2650
2651         adapter->tx_itr = e1000_update_itr(adapter,
2652                                     adapter->tx_itr,
2653                                     adapter->total_tx_packets,
2654                                     adapter->total_tx_bytes);
2655         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2656         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2657                 adapter->tx_itr = low_latency;
2658
2659         adapter->rx_itr = e1000_update_itr(adapter,
2660                                     adapter->rx_itr,
2661                                     adapter->total_rx_packets,
2662                                     adapter->total_rx_bytes);
2663         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2664         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2665                 adapter->rx_itr = low_latency;
2666
2667         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2668
2669         switch (current_itr) {
2670         /* counts and packets in update_itr are dependent on these numbers */
2671         case lowest_latency:
2672                 new_itr = 70000;
2673                 break;
2674         case low_latency:
2675                 new_itr = 20000; /* aka hwitr = ~200 */
2676                 break;
2677         case bulk_latency:
2678                 new_itr = 4000;
2679                 break;
2680         default:
2681                 break;
2682         }
2683
2684 set_itr_now:
2685         if (new_itr != adapter->itr) {
2686                 /* this attempts to bias the interrupt rate towards Bulk
2687                  * by adding intermediate steps when interrupt rate is
2688                  * increasing */
2689                 new_itr = new_itr > adapter->itr ?
2690                              min(adapter->itr + (new_itr >> 2), new_itr) :
2691                              new_itr;
2692                 adapter->itr = new_itr;
2693                 ew32(ITR, 1000000000 / (new_itr * 256));
2694         }
2695 }
2696
2697 #define E1000_TX_FLAGS_CSUM             0x00000001
2698 #define E1000_TX_FLAGS_VLAN             0x00000002
2699 #define E1000_TX_FLAGS_TSO              0x00000004
2700 #define E1000_TX_FLAGS_IPV4             0x00000008
2701 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2702 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2703 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2704
2705 static int e1000_tso(struct e1000_adapter *adapter,
2706                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2707 {
2708         struct e1000_context_desc *context_desc;
2709         struct e1000_buffer *buffer_info;
2710         unsigned int i;
2711         u32 cmd_length = 0;
2712         u16 ipcse = 0, tucse, mss;
2713         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2714         int err;
2715
2716         if (skb_is_gso(skb)) {
2717                 if (skb_header_cloned(skb)) {
2718                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2719                         if (err)
2720                                 return err;
2721                 }
2722
2723                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
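                     /* hdr_len is the full MAC + IP + TCP header length that
                      * the hardware replicates in front of each TSO segment.
                      */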
2724                 mss = skb_shinfo(skb)->gso_size;
2725                 if (skb->protocol == htons(ETH_P_IP)) {
2726                         struct iphdr *iph = ip_hdr(skb);
2727                         iph->tot_len = 0;
2728                         iph->check = 0;
2729                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2730                                                                  iph->daddr, 0,
2731                                                                  IPPROTO_TCP,
2732                                                                  0);
2733                         cmd_length = E1000_TXD_CMD_IP;
2734                         ipcse = skb_transport_offset(skb) - 1;
2735                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2736                         ipv6_hdr(skb)->payload_len = 0;
2737                         tcp_hdr(skb)->check =
2738                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2739                                                  &ipv6_hdr(skb)->daddr,
2740                                                  0, IPPROTO_TCP, 0);
2741                         ipcse = 0;
2742                 }
2743                 ipcss = skb_network_offset(skb);
2744                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2745                 tucss = skb_transport_offset(skb);
2746                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2747                 tucse = 0;
2748
2749                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2750                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2751
2752                 i = tx_ring->next_to_use;
2753                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2754                 buffer_info = &tx_ring->buffer_info[i];
2755
2756                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2757                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2758                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2759                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2760                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2761                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2762                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2763                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2764                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2765
2766                 buffer_info->time_stamp = jiffies;
2767                 buffer_info->next_to_watch = i;
2768
2769                 if (++i == tx_ring->count) i = 0;
2770                 tx_ring->next_to_use = i;
2771
2772                 return true;
2773         }
2774         return false;
2775 }
2776
2777 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2778                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2779 {
2780         struct e1000_context_desc *context_desc;
2781         struct e1000_buffer *buffer_info;
2782         unsigned int i;
2783         u8 css;
2784         u32 cmd_len = E1000_TXD_CMD_DEXT;
2785
2786         if (skb->ip_summed != CHECKSUM_PARTIAL)
2787                 return false;
2788
2789         switch (skb->protocol) {
2790         case cpu_to_be16(ETH_P_IP):
2791                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2792                         cmd_len |= E1000_TXD_CMD_TCP;
2793                 break;
2794         case cpu_to_be16(ETH_P_IPV6):
2795                 /* XXX not handling all IPV6 headers */
2796                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2797                         cmd_len |= E1000_TXD_CMD_TCP;
2798                 break;
2799         default:
2800                 if (unlikely(net_ratelimit()))
2801                         e_warn(drv, "checksum_partial proto=%x!\n",
2802                                skb->protocol);
2803                 break;
2804         }
2805
2806         css = skb_checksum_start_offset(skb);
2807
2808         i = tx_ring->next_to_use;
2809         buffer_info = &tx_ring->buffer_info[i];
2810         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2811
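             /* tucss is the offset at which checksumming starts; tucso is
              * where the hardware inserts the computed checksum.
              */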
2812         context_desc->lower_setup.ip_config = 0;
2813         context_desc->upper_setup.tcp_fields.tucss = css;
2814         context_desc->upper_setup.tcp_fields.tucso =
2815                 css + skb->csum_offset;
2816         context_desc->upper_setup.tcp_fields.tucse = 0;
2817         context_desc->tcp_seg_setup.data = 0;
2818         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2819
2820         buffer_info->time_stamp = jiffies;
2821         buffer_info->next_to_watch = i;
2822
2823         if (unlikely(++i == tx_ring->count)) i = 0;
2824         tx_ring->next_to_use = i;
2825
2826         return true;
2827 }
2828
2829 #define E1000_MAX_TXD_PWR       12
2830 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2831
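/*
 * e1000_tx_map - DMA-map the skb and fill in the ring's buffer_info array.
 *
 * The linear head and each page fragment are split into chunks of at most
 * max_per_txd bytes (4096 with E1000_MAX_TXD_PWR = 12), with the erratum
 * workarounds below trimming individual chunks.  Returns the number of
 * descriptors used, or 0 if a DMA mapping failed and the work was unwound.
 */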
2832 static int e1000_tx_map(struct e1000_adapter *adapter,
2833                         struct e1000_tx_ring *tx_ring,
2834                         struct sk_buff *skb, unsigned int first,
2835                         unsigned int max_per_txd, unsigned int nr_frags,
2836                         unsigned int mss)
2837 {
2838         struct e1000_hw *hw = &adapter->hw;
2839         struct pci_dev *pdev = adapter->pdev;
2840         struct e1000_buffer *buffer_info;
2841         unsigned int len = skb_headlen(skb);
2842         unsigned int offset = 0, size, count = 0, i;
2843         unsigned int f, bytecount, segs;
2844
2845         i = tx_ring->next_to_use;
2846
2847         while (len) {
2848                 buffer_info = &tx_ring->buffer_info[i];
2849                 size = min(len, max_per_txd);
2850                 /* Workaround for controller erratum --
2851                  * a descriptor for a non-TSO packet in a linear skb that
2852                  * follows a TSO packet gets written back prematurely,
2853                  * before the data is fully DMA'd to the controller */
2854                 if (!skb->data_len && tx_ring->last_tx_tso &&
2855                     !skb_is_gso(skb)) {
2856                         tx_ring->last_tx_tso = false;
2857                         size -= 4;
2858                 }
2859
2860                 /* Workaround for premature desc write-backs
2861                  * in TSO mode.  Append 4-byte sentinel desc */
2862                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2863                         size -= 4;
2864                 /* Workaround for erratum 10; it applies to all
2865                  * controllers in PCI-X mode.  The fix is to make sure
2866                  * that the first descriptor of a packet is smaller
2867                  * than 2048 - 16 - 16 (or 2016) bytes.
2868                  */
2869                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2870                                 (size > 2015) && count == 0))
2871                         size = 2015;
2872
2873                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2874                  * terminating buffers within evenly-aligned dwords. */
2875                 if (unlikely(adapter->pcix_82544 &&
2876                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2877                    size > 4))
2878                         size -= 4;
2879
2880                 buffer_info->length = size;
2881                 /* set time_stamp *before* dma to help avoid a possible race */
2882                 buffer_info->time_stamp = jiffies;
2883                 buffer_info->mapped_as_page = false;
2884                 buffer_info->dma = dma_map_single(&pdev->dev,
2885                                                   skb->data + offset,
2886                                                   size, DMA_TO_DEVICE);
2887                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2888                         goto dma_error;
2889                 buffer_info->next_to_watch = i;
2890
2891                 len -= size;
2892                 offset += size;
2893                 count++;
2894                 if (len) {
2895                         i++;
2896                         if (unlikely(i == tx_ring->count))
2897                                 i = 0;
2898                 }
2899         }
2900
2901         for (f = 0; f < nr_frags; f++) {
2902                 const struct skb_frag_struct *frag;
2903
2904                 frag = &skb_shinfo(skb)->frags[f];
2905                 len = skb_frag_size(frag);
2906                 offset = 0;
2907
2908                 while (len) {
2909                         unsigned long bufend;
2910                         i++;
2911                         if (unlikely(i == tx_ring->count))
2912                                 i = 0;
2913
2914                         buffer_info = &tx_ring->buffer_info[i];
2915                         size = min(len, max_per_txd);
2916                         /* Workaround for premature desc write-backs
2917                          * in TSO mode.  Append 4-byte sentinel desc */
2918                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2919                                 size -= 4;
2920                         /* Workaround for potential 82544 hang in PCI-X.
2921                          * Avoid terminating buffers within evenly-aligned
2922                          * dwords. */
2923                         bufend = (unsigned long)
2924                                 page_to_phys(skb_frag_page(frag));
2925                         bufend += offset + size - 1;
2926                         if (unlikely(adapter->pcix_82544 &&
2927                                      !(bufend & 4) &&
2928                                      size > 4))
2929                                 size -= 4;
2930
2931                         buffer_info->length = size;
2932                         buffer_info->time_stamp = jiffies;
2933                         buffer_info->mapped_as_page = true;
2934                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2935                                                 offset, size, DMA_TO_DEVICE);
2936                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2937                                 goto dma_error;
2938                         buffer_info->next_to_watch = i;
2939
2940                         len -= size;
2941                         offset += size;
2942                         count++;
2943                 }
2944         }
2945
2946         segs = skb_shinfo(skb)->gso_segs ?: 1;
2947         /* each extra TSO segment carries its own copy of the headers */
2948         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2949
2950         tx_ring->buffer_info[i].skb = skb;
2951         tx_ring->buffer_info[i].segs = segs;
2952         tx_ring->buffer_info[i].bytecount = bytecount;
2953         tx_ring->buffer_info[first].next_to_watch = i;
2954
2955         return count;
2956
2957 dma_error:
2958         dev_err(&pdev->dev, "TX DMA map failed\n");
2959         buffer_info->dma = 0;
2960         if (count)
2961                 count--;
2962
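        /* unwind: walk backwards through the ring, unmapping and freeing
         * every buffer that had been set up for this skb */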
2963         while (count--) {
2964                 if (i == 0)
2965                         i += tx_ring->count;
2966                 i--;
2967                 buffer_info = &tx_ring->buffer_info[i];
2968                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2969         }
2970
2971         return 0;
2972 }
2973
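/*
 * e1000_tx_queue - write the transmit descriptors and kick the hardware.
 *
 * Translates tx_flags into descriptor command/option bits, writes one data
 * descriptor per mapped buffer, then advances the tail register (TDT) so
 * the hardware starts fetching the new descriptors.
 */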
2974 static void e1000_tx_queue(struct e1000_adapter *adapter,
2975                            struct e1000_tx_ring *tx_ring, int tx_flags,
2976                            int count)
2977 {
2978         struct e1000_hw *hw = &adapter->hw;
2979         struct e1000_tx_desc *tx_desc = NULL;
2980         struct e1000_buffer *buffer_info;
2981         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2982         unsigned int i;
2983
2984         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2985                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2986                              E1000_TXD_CMD_TSE;
2987                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2988
2989                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2990                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2991         }
2992
2993         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2994                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2995                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2996         }
2997
2998         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2999                 txd_lower |= E1000_TXD_CMD_VLE;
3000                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3001         }
3002
3003         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3004                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3005
3006         i = tx_ring->next_to_use;
3007
3008         while (count--) {
3009                 buffer_info = &tx_ring->buffer_info[i];
3010                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3011                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3012                 tx_desc->lower.data =
3013                         cpu_to_le32(txd_lower | buffer_info->length);
3014                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3015                 if (unlikely(++i == tx_ring->count)) i = 0;
3016         }
3017
3018         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3019
3020         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3021         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3022                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3023
3024         /* Force memory writes to complete before letting h/w
3025          * know there are new descriptors to fetch.  (Only
3026          * applicable for weak-ordered memory model archs,
3027          * such as IA-64). */
3028         wmb();
3029
3030         tx_ring->next_to_use = i;
3031         writel(i, hw->hw_addr + tx_ring->tdt);
3032         /* we need this if more than one processor can write to our tail
3033          * at a time; it synchronizes IO on IA64/Altix systems */
3034         mmiowb();
3035 }
3036
3037 /**
3038  * e1000_82547_fifo_workaround - avoid controller hang in half-duplex.
3039  * The workaround is to avoid queuing a large packet that would span
3040  * the internal Tx FIFO ring boundary by notifying the stack to resend
3041  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3042  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3043  * to the beginning of the Tx FIFO.
3044  **/
3045
3046 #define E1000_FIFO_HDR                  0x10
3047 #define E1000_82547_PAD_LEN             0x3E0
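/* A queued frame consumes its length plus a 16-byte FIFO header, rounded
 * up to a 16-byte boundary.  In half-duplex, a stall is flagged once that
 * length exceeds the space left before the FIFO wraps by at least
 * E1000_82547_PAD_LEN (0x3E0 = 992) bytes. */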
3048
3049 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3050                                        struct sk_buff *skb)
3051 {
3052         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3053         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3054
3055         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3056
3057         if (adapter->link_duplex != HALF_DUPLEX)
3058                 goto no_fifo_stall_required;
3059
3060         if (atomic_read(&adapter->tx_fifo_stall))
3061                 return 1;
3062
3063         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3064                 atomic_set(&adapter->tx_fifo_stall, 1);
3065                 return 1;
3066         }
3067
3068 no_fifo_stall_required:
3069         adapter->tx_fifo_head += skb_fifo_len;
3070         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3071                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3072         return 0;
3073 }
3074
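/*
 * __e1000_maybe_stop_tx - slow path of the ring-full check: stop the queue,
 * then re-check the free-descriptor count under the memory barrier in case
 * the clean-up path freed descriptors in the meantime, and restart at once
 * if it did.
 */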
3075 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3076 {
3077         struct e1000_adapter *adapter = netdev_priv(netdev);
3078         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3079
3080         netif_stop_queue(netdev);
3081         /* Herbert's original patch had:
3082          *  smp_mb__after_netif_stop_queue();
3083          * but since that doesn't exist yet, just open code it. */
3084         smp_mb();
3085
3086         /* We need to check again in case another CPU has just
3087          * made room available. */
3088         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3089                 return -EBUSY;
3090
3091         /* A reprieve! */
3092         netif_start_queue(netdev);
3093         ++adapter->restart_queue;
3094         return 0;
3095 }
3096
3097 static int e1000_maybe_stop_tx(struct net_device *netdev,
3098                                struct e1000_tx_ring *tx_ring, int size)
3099 {
3100         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3101                 return 0;
3102         return __e1000_maybe_stop_tx(netdev, size);
3103 }
3104
3105 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
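/* worst-case descriptor count for S bytes when one descriptor carries at
 * most 2^X bytes, e.g. S = 6000, X = 12 gives (6000 >> 12) + 1 = 2 */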
3106 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3107                                     struct net_device *netdev)
3108 {
3109         struct e1000_adapter *adapter = netdev_priv(netdev);
3110         struct e1000_hw *hw = &adapter->hw;
3111         struct e1000_tx_ring *tx_ring;
3112         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3113         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3114         unsigned int tx_flags = 0;
3115         unsigned int len = skb_headlen(skb);
3116         unsigned int nr_frags;
3117         unsigned int mss;
3118         int count = 0;
3119         int tso;
3120         unsigned int f;
3121
3122         /* This goes back to the question of how to logically map a tx queue
3123          * to a flow.  Right now, performance is impacted slightly negatively
3124          * if using multiple tx queues.  If the stack breaks away from a
3125          * single qdisc implementation, we can look at this again. */
3126         tx_ring = adapter->tx_ring;
3127
3128         if (unlikely(skb->len <= 0)) {
3129                 dev_kfree_skb_any(skb);
3130                 return NETDEV_TX_OK;
3131         }
3132
3133         mss = skb_shinfo(skb)->gso_size;
3134         /* The controller does a simple calculation to
3135          * make sure there is enough room in the FIFO before
3136          * initiating the DMA for each buffer.  The calc is:
3137          * ceil(buffer len / mss) must not exceed 4.  To make
3138          * sure we don't overrun the FIFO, cap the max buffer
3139          * len at 4 * mss if mss drops. */
3140         if (mss) {
3141                 u8 hdr_len;
3142                 max_per_txd = min(mss << 2, max_per_txd);
3143                 max_txd_pwr = fls(max_per_txd) - 1;
3144
3145                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3146                 if (skb->data_len && hdr_len == len) {
3147                         switch (hw->mac_type) {
3148                                 unsigned int pull_size;
3149                         case e1000_82544:
3150                                 /* Make sure we have room to chop off 4 bytes,
3151                                  * and that the end alignment will work out to
3152                                  * this hardware's requirements.
3153                                  * NOTE: this is a TSO-only workaround;
3154                                  * if end byte alignment is not correct,
3155                                  * move us into the next dword */
3156                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3157                                         break;
3158                                 /* otherwise pull 4 bytes from the paged data */
3159                                 pull_size = min((unsigned int)4, skb->data_len);
3160                                 if (!__pskb_pull_tail(skb, pull_size)) {
3161                                         e_err(drv, "__pskb_pull_tail "
3162                                               "failed.\n");
3163                                         dev_kfree_skb_any(skb);
3164                                         return NETDEV_TX_OK;
3165                                 }
3166                                 len = skb_headlen(skb);
3167                                 break;
3168                         default:
3169                                 /* do nothing */
3170                                 break;
3171                         }
3172                 }
3173         }
3174
3175         /* reserve a descriptor for the offload context */
3176         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3177                 count++;
3178         count++;
3179
3180         /* Controller Erratum workaround */
3181         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3182                 count++;
3183
3184         count += TXD_USE_COUNT(len, max_txd_pwr);
3185
3186         if (adapter->pcix_82544)
3187                 count++;
3188
3189         /* Workaround for erratum 10; it applies to all controllers
3190          * in PCI-X mode, so add one more descriptor to the count
3191          */
3192         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3193                         (len > 2015)))
3194                 count++;
3195
3196         nr_frags = skb_shinfo(skb)->nr_frags;
3197         for (f = 0; f < nr_frags; f++)
3198                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3199                                        max_txd_pwr);
3200         if (adapter->pcix_82544)
3201                 count += nr_frags;
3202
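        /* "count" is now a worst-case estimate of the descriptors this skb
         * can consume: one per max_per_txd-sized chunk of the head and of
         * each fragment, plus the context descriptor and the extra
         * descriptors reserved by the erratum workarounds above */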
3203         /* need: count + 2 desc gap to keep tail from touching
3204          * head, otherwise try next time */
3205         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3206                 return NETDEV_TX_BUSY;
3207
3208         if (unlikely((hw->mac_type == e1000_82547) &&
3209                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3210                 netif_stop_queue(netdev);
3211                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3212                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3213                 return NETDEV_TX_BUSY;
3214         }
3215
3216         if (vlan_tx_tag_present(skb)) {
3217                 tx_flags |= E1000_TX_FLAGS_VLAN;
3218                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3219         }
3220
3221         first = tx_ring->next_to_use;
3222
3223         tso = e1000_tso(adapter, tx_ring, skb);
3224         if (tso < 0) {
3225                 dev_kfree_skb_any(skb);
3226                 return NETDEV_TX_OK;
3227         }
3228
3229         if (likely(tso)) {
3230                 if (likely(hw->mac_type != e1000_82544))
3231                         tx_ring->last_tx_tso = true;
3232                 tx_flags |= E1000_TX_FLAGS_TSO;
3233         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3234                 tx_flags |= E1000_TX_FLAGS_CSUM;
3235
3236         if (likely(skb->protocol == htons(ETH_P_IP)))
3237                 tx_flags |= E1000_TX_FLAGS_IPV4;
3238
3239         if (unlikely(skb->no_fcs))
3240                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3241
3242         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3243                              nr_frags, mss);
3244
3245         if (count) {
3246                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3247                 /* Make sure there is space in the ring for the next send. */
3248                 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3249
3250         } else {
3251                 dev_kfree_skb_any(skb);
3252                 tx_ring->buffer_info[first].time_stamp = 0;
3253                 tx_ring->next_to_use = first;
3254         }
3255
3256         return NETDEV_TX_OK;
3257 }
3258
3259 #define NUM_REGS 38 /* 1 based count */
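/* e1000_regdump - print the registers named in reg_name[] to the log */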
3260 static void e1000_regdump(struct e1000_adapter *adapter)
3261 {
3262         struct e1000_hw *hw = &adapter->hw;
3263         u32 regs[NUM_REGS];
3264         u32 *regs_buff = regs;
3265         int i = 0;
3266
3267         static const char * const reg_name[] = {
3268                 "CTRL",  "STATUS",
3269                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3270                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3271                 "TIDV", "TXDCTL", "TADV", "TARC0",
3272                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3273                 "TXDCTL1", "TARC1",
3274                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3275                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3276                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3277         };
3278
3279         regs_buff[0]  = er32(CTRL);
3280         regs_buff[1]  = er32(STATUS);
3281
3282         regs_buff[2]  = er32(RCTL);
3283         regs_buff[3]  = er32(RDLEN);
3284         regs_buff[4]  = er32(RDH);
3285         regs_buff[5]  = er32(RDT);
3286         regs_buff[6]  = er32(RDTR);
3287
3288         regs_buff[7]  = er32(TCTL);
3289         regs_buff[8]  = er32(TDBAL);
3290         regs_buff[9]  = er32(TDBAH);
3291         regs_buff[10] = er32(TDLEN);
3292         regs_buff[11] = er32(TDH);
3293         regs_buff[12] = er32(TDT);
3294         regs_buff[13] = er32(TIDV);
3295         regs_buff[14] = er32(TXDCTL);
3296         regs_buff[15] = er32(TADV);
3297         regs_buff[16] = er32(TARC0);
3298
3299         regs_buff[17] = er32(TDBAL1);
3300         regs_buff[18] = er32(TDBAH1);
3301         regs_buff[19] = er32(TDLEN1);
3302         regs_buff[20] = er32(TDH1);
3303         regs_buff[21] = er32(TDT1);
3304         regs_buff[22] = er32(TXDCTL1);
3305         regs_buff[23] = er32(TARC1);
3306         regs_buff[24] = er32(CTRL_EXT);
3307         regs_buff[25] = er32(ERT);
3308         regs_buff[26] = er32(RDBAL0);
3309         regs_buff[27] = er32(RDBAH0);
3310         regs_buff[28] = er32(TDFH);
3311         regs_buff[29] = er32(TDFT);
3312         regs_buff[30] = er32(TDFHS);
3313         regs_buff[31] = er32(TDFTS);
3314         regs_buff[32] = er32(TDFPC);
3315         regs_buff[33] = er32(RDFH);
3316         regs_buff[34] = er32(RDFT);
3317         regs_buff[35] = er32(RDFHS);
3318         regs_buff[36] = er32(RDFTS);
3319         regs_buff[37] = er32(RDFPC);
3320
3321         pr_info("Register dump\n");
3322         for (i = 0; i < NUM_REGS; i++)
3323                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3324 }
3325
3326 /*
3327  * e1000_dump: Print registers, tx ring and rx ring
3328  */
3329 static void e1000_dump(struct e1000_adapter *adapter)
3330 {
3331         /* this code doesn't handle multiple rings */
3332         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3333         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3334         int i;
3335
3336         if (!netif_msg_hw(adapter))
3337                 return;
3338
3339         /* Print Registers */
3340         e1000_regdump(adapter);
3341
3342         /*
3343          * transmit dump
3344          */
3345         pr_info("TX Desc ring0 dump\n");
3346
3347         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3348          *
3349          * Legacy Transmit Descriptor
3350          *   +--------------------------------------------------------------+
3351          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3352          *   +--------------------------------------------------------------+
3353          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3354          *   +--------------------------------------------------------------+
3355          *   63       48 47        36 35    32 31     24 23    16 15        0
3356          *
3357          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3358          *   63      48 47    40 39       32 31             16 15    8 7      0
3359          *   +----------------------------------------------------------------+
3360          * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3361          *   +----------------------------------------------------------------+
3362          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3363          *   +----------------------------------------------------------------+
3364          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3365          *
3366          * Extended Data Descriptor (DTYP=0x1)
3367          *   +----------------------------------------------------------------+
3368          * 0 |                     Buffer Address [63:0]                      |
3369          *   +----------------------------------------------------------------+
3370          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3371          *   +----------------------------------------------------------------+
3372          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3373          */
3374         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3375         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3376
3377         if (!netif_msg_tx_done(adapter))
3378                 goto rx_ring_summary;
3379
3380         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3381                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3382                 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3383                 struct my_u { __le64 a; __le64 b; };
3384                 struct my_u *u = (struct my_u *)tx_desc;
3385                 const char *type;
3386
3387                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3388                         type = "NTC/U";
3389                 else if (i == tx_ring->next_to_use)
3390                         type = "NTU";
3391                 else if (i == tx_ring->next_to_clean)
3392                         type = "NTC";
3393                 else
3394                         type = "";
3395
3396                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3397                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3398                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3399                         (u64)buffer_info->dma, buffer_info->length,
3400                         buffer_info->next_to_watch,
3401                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3402         }
3403
3404 rx_ring_summary:
3405         /*
3406          * receive dump
3407          */
3408         pr_info("\nRX Desc ring dump\n");
3409
3410         /* Legacy Receive Descriptor Format
3411          *
3412          * +-----------------------------------------------------+
3413          * |                Buffer Address [63:0]                |
3414          * +-----------------------------------------------------+
3415          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3416          * +-----------------------------------------------------+
3417          * 63       48 47    40 39      32 31         16 15      0
3418          */
3419         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3420
3421         if (!netif_msg_rx_status(adapter))
3422                 goto exit;
3423
3424         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3425                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3426                 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3427                 struct my_u { __le64 a; __le64 b; };
3428                 struct my_u *u = (struct my_u *)rx_desc;
3429                 const char *type;
3430
3431                 if (i == rx_ring->next_to_use)
3432                         type = "NTU";
3433                 else if (i == rx_ring->next_to_clean)
3434                         type = "NTC";
3435                 else
3436                         type = "";
3437
3438                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3439                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3440                         (u64)buffer_info->dma, buffer_info->skb, type);
3441         } /* for */
3442
3443         /* dump the descriptor caches */
3444         /* rx */
3445         pr_info("Rx descriptor cache in 64bit format\n");
3446         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3447                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3448                         i,
3449                         readl(adapter->hw.hw_addr + i+4),
3450                         readl(adapter->hw.hw_addr + i),
3451                         readl(adapter->hw.hw_addr + i+12),
3452                         readl(adapter->hw.hw_addr + i+8));
3453         }
3454         /* tx */
3455         pr_info("Tx descriptor cache in 64bit format\n");
3456         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3457                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3458                         i,
3459                         readl(adapter->hw.hw_addr + i+4),
3460                         readl(adapter->hw.hw_addr + i),
3461                         readl(adapter->hw.hw_addr + i+12),
3462                         readl(adapter->hw.hw_addr + i+8));
3463         }
3464 exit:
3465         return;
3466 }
3467
3468 /**
3469  * e1000_tx_timeout - Respond to a Tx Hang
3470  * @netdev: network interface device structure
3471  **/
3472
3473 static void e1000_tx_timeout(struct net_device *netdev)
3474 {
3475         struct e1000_adapter *adapter = netdev_priv(netdev);
3476
3477         /* Do the reset outside of interrupt context */
3478         adapter->tx_timeout_count++;
3479         schedule_work(&adapter->reset_task);
3480 }
3481
3482 static void e1000_reset_task(struct work_struct *work)
3483 {
3484         struct e1000_adapter *adapter =
3485                 container_of(work, struct e1000_adapter, reset_task);
3486
3487         if (test_bit(__E1000_DOWN, &adapter->flags))
3488                 return;
3489         e_err(drv, "Reset adapter\n");
3490         e1000_reinit_safe(adapter);
3491 }
3492
3493 /**
3494  * e1000_get_stats - Get System Network Statistics
3495  * @netdev: network interface device structure
3496  *
3497  * Returns the address of the device statistics structure.
3498  * The statistics are actually updated from the watchdog.
3499  **/
3500
3501 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3502 {
3503         /* only return the current stats */
3504         return &netdev->stats;
3505 }
3506
3507 /**
3508  * e1000_change_mtu - Change the Maximum Transfer Unit
3509  * @netdev: network interface device structure
3510  * @new_mtu: new value for maximum frame size
3511  *
3512  * Returns 0 on success, negative on failure
3513  **/
3514
3515 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3516 {
3517         struct e1000_adapter *adapter = netdev_priv(netdev);
3518         struct e1000_hw *hw = &adapter->hw;
3519         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3520
3521         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3522             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3523                 e_err(probe, "Invalid MTU setting\n");
3524                 return -EINVAL;
3525         }
3526
3527         /* Adapter-specific max frame size limits. */
3528         switch (hw->mac_type) {
3529         case e1000_undefined ... e1000_82542_rev2_1:
3530                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3531                         e_err(probe, "Jumbo Frames not supported.\n");
3532                         return -EINVAL;
3533                 }
3534                 break;
3535         default:
3536                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3537                 break;
3538         }
3539
3540         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3541                 msleep(1);
3542         /* e1000_down has a dependency on max_frame_size */
3543         hw->max_frame_size = max_frame;
3544         if (netif_running(netdev))
3545                 e1000_down(adapter);
3546
3547         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3548          * means we reserve 2 more; this pushes us to allocate from the next
3549          * larger slab size.
3550          * i.e. RXBUFFER_2048 --> size-4096 slab
3551          * However, with the new *_jumbo_rx* routines, jumbo receives will
3552          * use fragmented skbs */
3553
3554         if (max_frame <= E1000_RXBUFFER_2048)
3555                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3556         else
3557 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3558                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3559 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3560                 adapter->rx_buffer_len = PAGE_SIZE;
3561 #endif
3562
3563         /* adjust allocation if LPE protects us, and we aren't using SBP */
3564         if (!hw->tbi_compatibility_on &&
3565             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3566              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3567                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3568
3569         pr_info("%s changing MTU from %d to %d\n",
3570                 netdev->name, netdev->mtu, new_mtu);
3571         netdev->mtu = new_mtu;
3572
3573         if (netif_running(netdev))
3574                 e1000_up(adapter);
3575         else
3576                 e1000_reset(adapter);
3577
3578         clear_bit(__E1000_RESETTING, &adapter->flags);
3579
3580         return 0;
3581 }
3582
3583 /**
3584  * e1000_update_stats - Update the board statistics counters
3585  * @adapter: board private structure
3586  **/
3587
3588 void e1000_update_stats(struct e1000_adapter *adapter)
3589 {
3590         struct net_device *netdev = adapter->netdev;
3591         struct e1000_hw *hw = &adapter->hw;
3592         struct pci_dev *pdev = adapter->pdev;
3593         unsigned long flags;
3594         u16 phy_tmp;
3595
3596 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
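/* low byte of PHY_1000T_STATUS is the idle-error counter accumulated into
 * phy_stats.idle_errors below */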
3597
3598         /*
3599          * Prevent stats update while adapter is being reset, or if the pci
3600          * connection is down.
3601          */
3602         if (adapter->link_speed == 0)
3603                 return;
3604         if (pci_channel_offline(pdev))
3605                 return;
3606
3607         spin_lock_irqsave(&adapter->stats_lock, flags);
3608
3609         /* these counters are modified from e1000_tbi_adjust_stats,
3610          * called from the interrupt context, so they must only
3611          * be written while holding adapter->stats_lock
3612          */
3613
3614         adapter->stats.crcerrs += er32(CRCERRS);
3615         adapter->stats.gprc += er32(GPRC);
3616         adapter->stats.gorcl += er32(GORCL);
3617         adapter->stats.gorch += er32(GORCH);
3618         adapter->stats.bprc += er32(BPRC);
3619         adapter->stats.mprc += er32(MPRC);
3620         adapter->stats.roc += er32(ROC);
3621
3622         adapter->stats.prc64 += er32(PRC64);
3623         adapter->stats.prc127 += er32(PRC127);
3624         adapter->stats.prc255 += er32(PRC255);
3625         adapter->stats.prc511 += er32(PRC511);
3626         adapter->stats.prc1023 += er32(PRC1023);
3627         adapter->stats.prc1522 += er32(PRC1522);
3628
3629         adapter->stats.symerrs += er32(SYMERRS);
3630         adapter->stats.mpc += er32(MPC);
3631         adapter->stats.scc += er32(SCC);
3632         adapter->stats.ecol += er32(ECOL);
3633         adapter->stats.mcc += er32(MCC);
3634         adapter->stats.latecol += er32(LATECOL);
3635         adapter->stats.dc += er32(DC);
3636         adapter->stats.sec += er32(SEC);
3637         adapter->stats.rlec += er32(RLEC);
3638         adapter->stats.xonrxc += er32(XONRXC);
3639         adapter->stats.xontxc += er32(XONTXC);
3640         adapter->stats.xoffrxc += er32(XOFFRXC);
3641         adapter->stats.xofftxc += er32(XOFFTXC);
3642         adapter->stats.fcruc += er32(FCRUC);
3643         adapter->stats.gptc += er32(GPTC);
3644         adapter->stats.gotcl += er32(GOTCL);
3645         adapter->stats.gotch += er32(GOTCH);
3646         adapter->stats.rnbc += er32(RNBC);
3647         adapter->stats.ruc += er32(RUC);
3648         adapter->stats.rfc += er32(RFC);
3649         adapter->stats.rjc += er32(RJC);
3650         adapter->stats.torl += er32(TORL);
3651         adapter->stats.torh += er32(TORH);
3652         adapter->stats.totl += er32(TOTL);
3653         adapter->stats.toth += er32(TOTH);
3654         adapter->stats.tpr += er32(TPR);
3655
3656         adapter->stats.ptc64 += er32(PTC64);
3657         adapter->stats.ptc127 += er32(PTC127);
3658         adapter->stats.ptc255 += er32(PTC255);
3659         adapter->stats.ptc511 += er32(PTC511);
3660         adapter->stats.ptc1023 += er32(PTC1023);
3661         adapter->stats.ptc1522 += er32(PTC1522);
3662
3663         adapter->stats.mptc += er32(MPTC);
3664         adapter->stats.bptc += er32(BPTC);
3665
3666         /* used for adaptive IFS */
3667
3668         hw->tx_packet_delta = er32(TPT);
3669         adapter->stats.tpt += hw->tx_packet_delta;
3670         hw->collision_delta = er32(COLC);
3671         adapter->stats.colc += hw->collision_delta;
3672
3673         if (hw->mac_type >= e1000_82543) {
3674                 adapter->stats.algnerrc += er32(ALGNERRC);
3675                 adapter->stats.rxerrc += er32(RXERRC);
3676                 adapter->stats.tncrs += er32(TNCRS);
3677                 adapter->stats.cexterr += er32(CEXTERR);
3678                 adapter->stats.tsctc += er32(TSCTC);
3679                 adapter->stats.tsctfc += er32(TSCTFC);
3680         }
3681
3682         /* Fill out the OS statistics structure */
3683         netdev->stats.multicast = adapter->stats.mprc;
3684         netdev->stats.collisions = adapter->stats.colc;
3685
3686         /* Rx Errors */
3687
3688         /* RLEC on some newer hardware can be incorrect so build
3689          * our own version based on RUC and ROC */
3690         netdev->stats.rx_errors = adapter->stats.rxerrc +
3691                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3692                 adapter->stats.ruc + adapter->stats.roc +
3693                 adapter->stats.cexterr;
3694         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3695         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3696         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3697         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3698         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3699
3700         /* Tx Errors */
3701         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3702         netdev->stats.tx_errors = adapter->stats.txerrc;
3703         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3704         netdev->stats.tx_window_errors = adapter->stats.latecol;
3705         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3706         if (hw->bad_tx_carr_stats_fd &&
3707             adapter->link_duplex == FULL_DUPLEX) {
3708                 netdev->stats.tx_carrier_errors = 0;
3709                 adapter->stats.tncrs = 0;
3710         }
3711
3712         /* Tx Dropped needs to be maintained elsewhere */
3713
3714         /* Phy Stats */
3715         if (hw->media_type == e1000_media_type_copper) {
3716                 if ((adapter->link_speed == SPEED_1000) &&
3717                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3718                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3719                         adapter->phy_stats.idle_errors += phy_tmp;
3720                 }
3721
3722                 if ((hw->mac_type <= e1000_82546) &&
3723                    (hw->phy_type == e1000_phy_m88) &&
3724                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3725                         adapter->phy_stats.receive_errors += phy_tmp;
3726         }
3727
3728         /* Management Stats */
3729         if (hw->has_smbus) {
3730                 adapter->stats.mgptc += er32(MGTPTC);
3731                 adapter->stats.mgprc += er32(MGTPRC);
3732                 adapter->stats.mgpdc += er32(MGTPDC);
3733         }
3734
3735         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3736 }
3737
3738 /**
3739  * e1000_intr - Interrupt Handler
3740  * @irq: interrupt number
3741  * @data: pointer to a network interface device structure
3742  **/
3743
3744 static irqreturn_t e1000_intr(int irq, void *data)
3745 {
3746         struct net_device *netdev = data;
3747         struct e1000_adapter *adapter = netdev_priv(netdev);
3748         struct e1000_hw *hw = &adapter->hw;
3749         u32 icr = er32(ICR);
3750
3751         if (unlikely((!icr)))
3752                 return IRQ_NONE;  /* Not our interrupt */
3753
3754         /*
3755          * we might have caused the interrupt, but the above
3756          * read cleared it, and just in case the driver is
3757          * down there is nothing to do so return handled
3758          */
3759         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3760                 return IRQ_HANDLED;
3761
3762         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3763                 hw->get_link_status = 1;
3764                 /* guard against interrupt when we're going down */
3765                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3766                         schedule_delayed_work(&adapter->watchdog_task, 1);
3767         }
3768
3769         /* disable interrupts, without the synchronize_irq bit */
3770         ew32(IMC, ~0);
3771         E1000_WRITE_FLUSH();
3772
3773         if (likely(napi_schedule_prep(&adapter->napi))) {
3774                 adapter->total_tx_bytes = 0;
3775                 adapter->total_tx_packets = 0;
3776                 adapter->total_rx_bytes = 0;
3777                 adapter->total_rx_packets = 0;
3778                 __napi_schedule(&adapter->napi);
3779         } else {
3780                 /* this really should not happen! if it does it is basically a
3781                  * bug, but not a hard error, so enable ints and continue */
3782                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3783                         e1000_irq_enable(adapter);
3784         }
3785
3786         return IRQ_HANDLED;
3787 }
3788
3789 /**
3790  * e1000_clean - NAPI Rx polling callback
3791  * @adapter: board private structure
3792  **/
3793 static int e1000_clean(struct napi_struct *napi, int budget)
3794 {
3795         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3796         int tx_clean_complete = 0, work_done = 0;
3797
3798         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3799
3800         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3801
3802         if (!tx_clean_complete)
3803                 work_done = budget;
3804
3805         /* If budget not fully consumed, exit the polling mode */
3806         if (work_done < budget) {
3807                 if (likely(adapter->itr_setting & 3))
3808                         e1000_set_itr(adapter);
3809                 napi_complete(napi);
3810                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3811                         e1000_irq_enable(adapter);
3812         }
3813
3814         return work_done;
3815 }
3816
3817 /**
3818  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3819  * @adapter: board private structure
3820  **/
3821 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3822                                struct e1000_tx_ring *tx_ring)
3823 {
3824         struct e1000_hw *hw = &adapter->hw;
3825         struct net_device *netdev = adapter->netdev;
3826         struct e1000_tx_desc *tx_desc, *eop_desc;
3827         struct e1000_buffer *buffer_info;
3828         unsigned int i, eop;
3829         unsigned int count = 0;
3830         unsigned int total_tx_bytes=0, total_tx_packets=0;
3831
3832         i = tx_ring->next_to_clean;
3833         eop = tx_ring->buffer_info[i].next_to_watch;
3834         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3835
3836         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3837                (count < tx_ring->count)) {
3838                 bool cleaned = false;
3839                 rmb();  /* read buffer_info after eop_desc */
3840                 for ( ; !cleaned; count++) {
3841                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3842                         buffer_info = &tx_ring->buffer_info[i];
3843                         cleaned = (i == eop);
3844
3845                         if (cleaned) {
3846                                 total_tx_packets += buffer_info->segs;
3847                                 total_tx_bytes += buffer_info->bytecount;
3848                         }
3849                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3850                         tx_desc->upper.data = 0;
3851
3852                         if (unlikely(++i == tx_ring->count)) i = 0;
3853                 }
3854
3855                 eop = tx_ring->buffer_info[i].next_to_watch;
3856                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3857         }
3858
3859         tx_ring->next_to_clean = i;
3860
3861 #define TX_WAKE_THRESHOLD 32
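        /* only wake the queue once a comfortable number of descriptors is
         * free again, to keep it from bouncing between stopped and awake */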
3862         if (unlikely(count && netif_carrier_ok(netdev) &&
3863                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3864                 /* Make sure that anybody stopping the queue after this
3865                  * sees the new next_to_clean.
3866                  */
3867                 smp_mb();
3868
3869                 if (netif_queue_stopped(netdev) &&
3870                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3871                         netif_wake_queue(netdev);
3872                         ++adapter->restart_queue;
3873                 }
3874         }
3875
3876         if (adapter->detect_tx_hung) {
3877                 /* Detect a transmit hang in hardware; this serializes the
3878                  * check with the clearing of time_stamp and movement of i */
3879                 adapter->detect_tx_hung = false;
3880                 if (tx_ring->buffer_info[eop].time_stamp &&
3881                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3882                                (adapter->tx_timeout_factor * HZ)) &&
3883                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3884
3885                         /* detected Tx unit hang */
3886                         e_err(drv, "Detected Tx Unit Hang\n"
3887                               "  Tx Queue             <%lu>\n"
3888                               "  TDH                  <%x>\n"
3889                               "  TDT                  <%x>\n"
3890                               "  next_to_use          <%x>\n"
3891                               "  next_to_clean        <%x>\n"
3892                               "buffer_info[next_to_clean]\n"
3893                               "  time_stamp           <%lx>\n"
3894                               "  next_to_watch        <%x>\n"
3895                               "  jiffies              <%lx>\n"
3896                               "  next_to_watch.status <%x>\n",
3897                                 (unsigned long)(tx_ring - adapter->tx_ring),
3899                                 readl(hw->hw_addr + tx_ring->tdh),
3900                                 readl(hw->hw_addr + tx_ring->tdt),
3901                                 tx_ring->next_to_use,
3902                                 tx_ring->next_to_clean,
3903                                 tx_ring->buffer_info[eop].time_stamp,
3904                                 eop,
3905                                 jiffies,
3906                                 eop_desc->upper.fields.status);
3907                         e1000_dump(adapter);
3908                         netif_stop_queue(netdev);
3909                 }
3910         }
3911         adapter->total_tx_bytes += total_tx_bytes;
3912         adapter->total_tx_packets += total_tx_packets;
3913         netdev->stats.tx_bytes += total_tx_bytes;
3914         netdev->stats.tx_packets += total_tx_packets;
3915         return count < tx_ring->count;
3916 }
3917
3918 /**
3919  * e1000_rx_checksum - Receive Checksum Offload for 82543
3920  * @adapter:     board private structure
3921  * @status_err:  receive descriptor status and error fields
3922  * @csum:        receive descriptor csum field
3923  * @sk_buff:     socket buffer with received data
3924  **/
3925
3926 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3927                               u32 csum, struct sk_buff *skb)
3928 {
3929         struct e1000_hw *hw = &adapter->hw;
3930         u16 status = (u16)status_err;
3931         u8 errors = (u8)(status_err >> 24);
3932
3933         skb_checksum_none_assert(skb);
3934
3935         /* 82543 or newer only */
3936         if (unlikely(hw->mac_type < e1000_82543)) return;
3937         /* Ignore Checksum bit is set */
3938         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3939         /* TCP/UDP checksum error bit is set */
3940         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3941                 /* let the stack verify checksum errors */
3942                 adapter->hw_csum_err++;
3943                 return;
3944         }
3945         /* TCP/UDP Checksum has not been calculated */
3946         if (!(status & E1000_RXD_STAT_TCPCS))
3947                 return;
3948
3949         /* It must be a TCP or UDP packet with a valid checksum;
3950          * anything without the TCPCS status bit set already returned
3951          * above, so the hardware-verified checksum can be trusted.
3952          */
3953         skb->ip_summed = CHECKSUM_UNNECESSARY;
3954         adapter->hw_csum_good++;
3955 }
3956
3957 /**
3958  * e1000_consume_page - charge a page's length to the skb, drop page ref
3959  **/
3960 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3961                                u16 length)
3962 {
3963         bi->page = NULL;
3964         skb->len += length;
3965         skb->data_len += length;
3966         skb->truesize += PAGE_SIZE;
3967 }
3968
3969 /**
3970  * e1000_receive_skb - helper function to handle rx indications
3971  * @adapter: board private structure
3972  * @status: descriptor status field as written by hardware
3973  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3974  * @skb: pointer to sk_buff to be indicated to stack
3975  */
3976 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3977                               __le16 vlan, struct sk_buff *skb)
3978 {
3979         skb->protocol = eth_type_trans(skb, adapter->netdev);
3980
3981         if (status & E1000_RXD_STAT_VP) {
3982                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3983
3984                 __vlan_hwaccel_put_tag(skb, vid);
3985         }
3986         napi_gro_receive(&adapter->napi, skb);
3987 }
3988
3989 /**
3990  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3991  * @adapter: board private structure
3992  * @rx_ring: ring to clean
3993  * @work_done: amount of napi work completed this call
3994  * @work_to_do: max amount of work allowed for this call to do
3995  *
3996  * the return value indicates whether actual cleaning was done, there
3997  * is no guarantee that everything was cleaned
3998  */
3999 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4000                                      struct e1000_rx_ring *rx_ring,
4001                                      int *work_done, int work_to_do)
4002 {
4003         struct e1000_hw *hw = &adapter->hw;
4004         struct net_device *netdev = adapter->netdev;
4005         struct pci_dev *pdev = adapter->pdev;
4006         struct e1000_rx_desc *rx_desc, *next_rxd;
4007         struct e1000_buffer *buffer_info, *next_buffer;
4008         unsigned long irq_flags;
4009         u32 length;
4010         unsigned int i;
4011         int cleaned_count = 0;
4012         bool cleaned = false;
4013         unsigned int total_rx_bytes=0, total_rx_packets=0;
4014
4015         i = rx_ring->next_to_clean;
4016         rx_desc = E1000_RX_DESC(*rx_ring, i);
4017         buffer_info = &rx_ring->buffer_info[i];
4018
4019         while (rx_desc->status & E1000_RXD_STAT_DD) {
4020                 struct sk_buff *skb;
4021                 u8 status;
4022
4023                 if (*work_done >= work_to_do)
4024                         break;
4025                 (*work_done)++;
4026                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4027
4028                 status = rx_desc->status;
4029                 skb = buffer_info->skb;
4030                 buffer_info->skb = NULL;
4031
4032                 if (++i == rx_ring->count) i = 0;
4033                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4034                 prefetch(next_rxd);
4035
4036                 next_buffer = &rx_ring->buffer_info[i];
4037
4038                 cleaned = true;
4039                 cleaned_count++;
4040                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4041                                buffer_info->length, DMA_FROM_DEVICE);
4042                 buffer_info->dma = 0;
4043
4044                 length = le16_to_cpu(rx_desc->length);
4045
4046                 /* errors is only valid for DD + EOP descriptors */
4047                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4048                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4049                         u8 last_byte = *(skb->data + length - 1);
4050                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4051                                        last_byte)) {
4052                                 spin_lock_irqsave(&adapter->stats_lock,
4053                                                   irq_flags);
4054                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4055                                                        length, skb->data);
4056                                 spin_unlock_irqrestore(&adapter->stats_lock,
4057                                                        irq_flags);
4058                                 length--;
4059                         } else {
4060                                 /* recycle both page and skb */
4061                                 buffer_info->skb = skb;
4062                                 /* an error means any chain goes out the window
4063                                  * too */
4064                                 if (rx_ring->rx_skb_top)
4065                                         dev_kfree_skb(rx_ring->rx_skb_top);
4066                                 rx_ring->rx_skb_top = NULL;
4067                                 goto next_desc;
4068                         }
4069                 }
4070
4071 #define rxtop rx_ring->rx_skb_top
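                /* rxtop is the skb collecting page fragments for a jumbo
                 * frame spanning several descriptors; it lives in the ring
                 * so a partial chain survives across NAPI polls */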
4072                 if (!(status & E1000_RXD_STAT_EOP)) {
4073                         /* this descriptor is only the beginning (or middle) */
4074                         if (!rxtop) {
4075                                 /* this is the beginning of a chain */
4076                                 rxtop = skb;
4077                                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4078                                                    0, length);
4079                         } else {
4080                                 /* this is the middle of a chain */
4081                                 skb_fill_page_desc(rxtop,
4082                                     skb_shinfo(rxtop)->nr_frags,
4083                                     buffer_info->page, 0, length);
4084                                 /* re-use the skb, only consumed the page */
4085                                 buffer_info->skb = skb;
4086                         }
4087                         e1000_consume_page(buffer_info, rxtop, length);
4088                         goto next_desc;
4089                 } else {
4090                         if (rxtop) {
4091                                 /* end of the chain */
4092                                 skb_fill_page_desc(rxtop,
4093                                     skb_shinfo(rxtop)->nr_frags,
4094                                     buffer_info->page, 0, length);
4095                                 /* re-use the current skb, we only consumed the
4096                                  * page */
4097                                 buffer_info->skb = skb;
4098                                 skb = rxtop;
4099                                 rxtop = NULL;
4100                                 e1000_consume_page(buffer_info, skb, length);
4101                         } else {
4102                                 /* no chain, got EOP, this buf is the whole
4103                                  * packet; copybreak to save put_page/alloc_page */
4104                                 if (length <= copybreak &&
4105                                     skb_tailroom(skb) >= length) {
4106                                         u8 *vaddr;
4107                                         vaddr = kmap_atomic(buffer_info->page);
4108                                         memcpy(skb_tail_pointer(skb), vaddr, length);
4109                                         kunmap_atomic(vaddr);
4110                                         /* re-use the page, so don't erase
4111                                          * buffer_info->page */
4112                                         skb_put(skb, length);
4113                                 } else {
4114                                         skb_fill_page_desc(skb, 0,
4115                                                            buffer_info->page, 0,
4116                                                            length);
4117                                         e1000_consume_page(buffer_info, skb,
4118                                                            length);
4119                                 }
4120                         }
4121                 }
4122
4123                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4124                 e1000_rx_checksum(adapter,
4125                                   (u32)(status) |
4126                                   ((u32)(rx_desc->errors) << 24),
4127                                   le16_to_cpu(rx_desc->csum), skb);
4128
4129                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4130                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4131                         pskb_trim(skb, skb->len - 4);
4132                 total_rx_packets++;
4133
4134                 /* eth type trans needs skb->data to point to something */
4135                 if (!pskb_may_pull(skb, ETH_HLEN)) {
4136                         e_err(drv, "pskb_may_pull failed.\n");
4137                         dev_kfree_skb(skb);
4138                         goto next_desc;
4139                 }
4140
4141                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4142
4143 next_desc:
4144                 rx_desc->status = 0;
4145
4146                 /* return some buffers to hardware, one at a time is too slow */
4147                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4148                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4149                         cleaned_count = 0;
4150                 }
4151
4152                 /* use prefetched values */
4153                 rx_desc = next_rxd;
4154                 buffer_info = next_buffer;
4155         }
4156         rx_ring->next_to_clean = i;
4157
4158         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4159         if (cleaned_count)
4160                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4161
4162         adapter->total_rx_packets += total_rx_packets;
4163         adapter->total_rx_bytes += total_rx_bytes;
4164         netdev->stats.rx_bytes += total_rx_bytes;
4165         netdev->stats.rx_packets += total_rx_packets;
4166         return cleaned;
4167 }
4168
4169 /* Copy small packets into a fresh, right-sized skb and recycle the original
4170  * receive buffer; this should improve performance for small packets with
4171  * large amounts of reassembly being done in the stack
4172  */
4173 static void e1000_check_copybreak(struct net_device *netdev,
4174                                  struct e1000_buffer *buffer_info,
4175                                  u32 length, struct sk_buff **skb)
4176 {
4177         struct sk_buff *new_skb;
4178
4179         if (length > copybreak)
4180                 return;
4181
4182         new_skb = netdev_alloc_skb_ip_align(netdev, length);
4183         if (!new_skb)
4184                 return;
4185
4186         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4187                                        (*skb)->data - NET_IP_ALIGN,
4188                                        length + NET_IP_ALIGN);
4189         /* save the skb in buffer_info as good */
4190         buffer_info->skb = *skb;
4191         *skb = new_skb;
4192 }
4193
4194 /**
4195  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4196  * @adapter: board private structure
4197  * @rx_ring: ring to clean
4198  * @work_done: amount of napi work completed this call
4199  * @work_to_do: max amount of work allowed for this call to do
4200  */
4201 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4202                                struct e1000_rx_ring *rx_ring,
4203                                int *work_done, int work_to_do)
4204 {
4205         struct e1000_hw *hw = &adapter->hw;
4206         struct net_device *netdev = adapter->netdev;
4207         struct pci_dev *pdev = adapter->pdev;
4208         struct e1000_rx_desc *rx_desc, *next_rxd;
4209         struct e1000_buffer *buffer_info, *next_buffer;
4210         unsigned long flags;
4211         u32 length;
4212         unsigned int i;
4213         int cleaned_count = 0;
4214         bool cleaned = false;
4215         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4216
4217         i = rx_ring->next_to_clean;
4218         rx_desc = E1000_RX_DESC(*rx_ring, i);
4219         buffer_info = &rx_ring->buffer_info[i];
4220
4221         while (rx_desc->status & E1000_RXD_STAT_DD) {
4222                 struct sk_buff *skb;
4223                 u8 status;
4224
4225                 if (*work_done >= work_to_do)
4226                         break;
4227                 (*work_done)++;
4228                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4229
4230                 status = rx_desc->status;
4231                 skb = buffer_info->skb;
4232                 buffer_info->skb = NULL;
4233
4234                 prefetch(skb->data - NET_IP_ALIGN);
4235
4236                 if (++i == rx_ring->count) i = 0;
4237                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4238                 prefetch(next_rxd);
4239
4240                 next_buffer = &rx_ring->buffer_info[i];
4241
4242                 cleaned = true;
4243                 cleaned_count++;
4244                 dma_unmap_single(&pdev->dev, buffer_info->dma,
4245                                  buffer_info->length, DMA_FROM_DEVICE);
4246                 buffer_info->dma = 0;
4247
4248                 length = le16_to_cpu(rx_desc->length);
4249                 /* !EOP means multiple descriptors were used to store a single
4250                  * packet; if that's the case we need to toss it.  In fact, we
4251                  * need to toss every packet with the EOP bit clear and the next
4252                  * frame that _does_ have the EOP bit set, as it is by
4253                  * definition only a frame fragment
4254                  */
4255                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4256                         adapter->discarding = true;
4257
4258                 if (adapter->discarding) {
4259                         /* All receives must fit into a single buffer */
4260                         e_dbg("Receive packet consumed multiple buffers\n");
4261                         /* recycle */
4262                         buffer_info->skb = skb;
4263                         if (status & E1000_RXD_STAT_EOP)
4264                                 adapter->discarding = false;
4265                         goto next_desc;
4266                 }
4267
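                /* Frames with receive errors are normally dropped, but in TBI
                 * mode a frame whose only problem is a carrier-extension error
                 * in the final byte may still be usable: TBI_ACCEPT checks for
                 * that case, the stats are adjusted, and the stray last byte
                 * is trimmed off (length--).
                 */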
4268                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4269                         u8 last_byte = *(skb->data + length - 1);
4270                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4271                                        last_byte)) {
4272                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4273                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4274                                                        length, skb->data);
4275                                 spin_unlock_irqrestore(&adapter->stats_lock,
4276                                                        flags);
4277                                 length--;
4278                         } else {
4279                                 /* recycle */
4280                                 buffer_info->skb = skb;
4281                                 goto next_desc;
4282                         }
4283                 }
4284
4285                 total_rx_bytes += (length - 4); /* don't count FCS */
4286                 total_rx_packets++;
4287
4288                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4289                         /* adjust length to remove Ethernet CRC, this must be
4290                          * done after the TBI_ACCEPT workaround above
4291                          */
4292                         length -= 4;
4293
4294                 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4295
4296                 skb_put(skb, length);
4297
4298                 /* Receive Checksum Offload */
4299                 e1000_rx_checksum(adapter,
4300                                   (u32)(status) |
4301                                   ((u32)(rx_desc->errors) << 24),
4302                                   le16_to_cpu(rx_desc->csum), skb);
4303
4304                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4305
4306 next_desc:
4307                 rx_desc->status = 0;
4308
4309                 /* return some buffers to hardware, one at a time is too slow */
4310                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4311                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4312                         cleaned_count = 0;
4313                 }
4314
4315                 /* use prefetched values */
4316                 rx_desc = next_rxd;
4317                 buffer_info = next_buffer;
4318         }
4319         rx_ring->next_to_clean = i;
4320
4321         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4322         if (cleaned_count)
4323                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4324
4325         adapter->total_rx_packets += total_rx_packets;
4326         adapter->total_rx_bytes += total_rx_bytes;
4327         netdev->stats.rx_bytes += total_rx_bytes;
4328         netdev->stats.rx_packets += total_rx_packets;
4329         return cleaned;
4330 }
4331
4332 /**
4333  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4334  * @adapter: address of board private structure
4335  * @rx_ring: pointer to receive ring structure
4336  * @cleaned_count: number of buffers to allocate this pass
4337  **/
4338
4339 static void
4340 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4341                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4342 {
4343         struct net_device *netdev = adapter->netdev;
4344         struct pci_dev *pdev = adapter->pdev;
4345         struct e1000_rx_desc *rx_desc;
4346         struct e1000_buffer *buffer_info;
4347         struct sk_buff *skb;
4348         unsigned int i;
4349         unsigned int bufsz = 256 - 16; /* for skb_reserve */
4350
4351         i = rx_ring->next_to_use;
4352         buffer_info = &rx_ring->buffer_info[i];
4353
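        /* Each jumbo receive buffer is a full page mapped for DMA plus a small
         * skb (bufsz) that acts as the packet "shell"; both are reused across
         * receives whenever the cleanup path hands them back via buffer_info.
         */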
4354         while (cleaned_count--) {
4355                 skb = buffer_info->skb;
4356                 if (skb) {
4357                         skb_trim(skb, 0);
4358                         goto check_page;
4359                 }
4360
4361                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4362                 if (unlikely(!skb)) {
4363                         /* Better luck next round */
4364                         adapter->alloc_rx_buff_failed++;
4365                         break;
4366                 }
4367
4368                 /* Fix for errata 23, can't cross 64kB boundary */
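                /* If the new skb's data area straddles a 64 kB boundary,
                 * allocate a replacement before freeing the first one so the
                 * allocator returns a different region, then drop whichever
                 * skb(s) cannot be used.
                 */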
4369                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4370                         struct sk_buff *oldskb = skb;
4371                         e_err(rx_err, "skb align check failed: %u bytes at "
4372                               "%p\n", bufsz, skb->data);
4373                         /* Try again, without freeing the previous */
4374                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4375                         /* Failed allocation, critical failure */
4376                         if (!skb) {
4377                                 dev_kfree_skb(oldskb);
4378                                 adapter->alloc_rx_buff_failed++;
4379                                 break;
4380                         }
4381
4382                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4383                                 /* give up */
4384                                 dev_kfree_skb(skb);
4385                                 dev_kfree_skb(oldskb);
4386                                 break; /* while (cleaned_count--) */
4387                         }
4388
4389                         /* Use new allocation */
4390                         dev_kfree_skb(oldskb);
4391                 }
4392                 buffer_info->skb = skb;
4393                 buffer_info->length = adapter->rx_buffer_len;
4394 check_page:
4395                 /* allocate a new page if necessary */
4396                 if (!buffer_info->page) {
4397                         buffer_info->page = alloc_page(GFP_ATOMIC);
4398                         if (unlikely(!buffer_info->page)) {
4399                                 adapter->alloc_rx_buff_failed++;
4400                                 break;
4401                         }
4402                 }
4403
4404                 if (!buffer_info->dma) {
4405                         buffer_info->dma = dma_map_page(&pdev->dev,
4406                                                         buffer_info->page, 0,
4407                                                         buffer_info->length,
4408                                                         DMA_FROM_DEVICE);
4409                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4410                                 put_page(buffer_info->page);
4411                                 dev_kfree_skb(skb);
4412                                 buffer_info->page = NULL;
4413                                 buffer_info->skb = NULL;
4414                                 buffer_info->dma = 0;
4415                                 adapter->alloc_rx_buff_failed++;
4416                                 break; /* while !buffer_info->skb */
4417                         }
4418                 }
4419
4420                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4421                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4422
4423                 if (unlikely(++i == rx_ring->count))
4424                         i = 0;
4425                 buffer_info = &rx_ring->buffer_info[i];
4426         }
4427
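        /* Only touch hardware if descriptors were actually refilled: record
         * the next slot to use, then step i back to the last initialised
         * descriptor and publish that index to the receive descriptor tail
         * (RDT) register.
         */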
4428         if (likely(rx_ring->next_to_use != i)) {
4429                 rx_ring->next_to_use = i;
4430                 if (unlikely(i-- == 0))
4431                         i = (rx_ring->count - 1);
4432
4433                 /* Force memory writes to complete before letting h/w
4434                  * know there are new descriptors to fetch.  (Only
4435                  * applicable for weak-ordered memory model archs,
4436                  * such as IA-64). */
4437                 wmb();
4438                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4439         }
4440 }
4441
4442 /**
4443  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4444  * @adapter: address of board private structure
4445  **/
4446
4447 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4448                                    struct e1000_rx_ring *rx_ring,
4449                                    int cleaned_count)
4450 {
4451         struct e1000_hw *hw = &adapter->hw;
4452         struct net_device *netdev = adapter->netdev;
4453         struct pci_dev *pdev = adapter->pdev;
4454         struct e1000_rx_desc *rx_desc;
4455         struct e1000_buffer *buffer_info;
4456         struct sk_buff *skb;
4457         unsigned int i;
4458         unsigned int bufsz = adapter->rx_buffer_len;
4459
4460         i = rx_ring->next_to_use;
4461         buffer_info = &rx_ring->buffer_info[i];
4462
4463         while (cleaned_count--) {
4464                 skb = buffer_info->skb;
4465                 if (skb) {
4466                         skb_trim(skb, 0);
4467                         goto map_skb;
4468                 }
4469
4470                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4471                 if (unlikely(!skb)) {
4472                         /* Better luck next round */
4473                         adapter->alloc_rx_buff_failed++;
4474                         break;
4475                 }
4476
4477                 /* Fix for errata 23, can't cross 64kB boundary */
4478                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4479                         struct sk_buff *oldskb = skb;
4480                         e_err(rx_err, "skb align check failed: %u bytes at "
4481                               "%p\n", bufsz, skb->data);
4482                         /* Try again, without freeing the previous */
4483                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4484                         /* Failed allocation, critical failure */
4485                         if (!skb) {
4486                                 dev_kfree_skb(oldskb);
4487                                 adapter->alloc_rx_buff_failed++;
4488                                 break;
4489                         }
4490
4491                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4492                                 /* give up */
4493                                 dev_kfree_skb(skb);
4494                                 dev_kfree_skb(oldskb);
4495                                 adapter->alloc_rx_buff_failed++;
4496                                 break; /* while !buffer_info->skb */
4497                         }
4498
4499                         /* Use new allocation */
4500                         dev_kfree_skb(oldskb);
4501                 }
4502                 buffer_info->skb = skb;
4503                 buffer_info->length = adapter->rx_buffer_len;
4504 map_skb:
4505                 buffer_info->dma = dma_map_single(&pdev->dev,
4506                                                   skb->data,
4507                                                   buffer_info->length,
4508                                                   DMA_FROM_DEVICE);
4509                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4510                         dev_kfree_skb(skb);
4511                         buffer_info->skb = NULL;
4512                         buffer_info->dma = 0;
4513                         adapter->alloc_rx_buff_failed++;
4514                         break; /* while !buffer_info->skb */
4515                 }
4516
4517                 /*
4518                  * XXX if it was allocated cleanly it will never map to a
4519                  * boundary crossing
4520                  */
4521
4522                 /* Fix for errata 23, can't cross 64kB boundary */
4523                 if (!e1000_check_64k_bound(adapter,
4524                                         (void *)(unsigned long)buffer_info->dma,
4525                                         adapter->rx_buffer_len)) {
4526                         e_err(rx_err, "dma align check failed: %u bytes at "
4527                               "%p\n", adapter->rx_buffer_len,
4528                               (void *)(unsigned long)buffer_info->dma);
4529                         dev_kfree_skb(skb);
4530                         buffer_info->skb = NULL;
4531
4532                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4533                                          adapter->rx_buffer_len,
4534                                          DMA_FROM_DEVICE);
4535                         buffer_info->dma = 0;
4536
4537                         adapter->alloc_rx_buff_failed++;
4538                         break; /* while !buffer_info->skb */
4539                 }
4540                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4541                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4542
4543                 if (unlikely(++i == rx_ring->count))
4544                         i = 0;
4545                 buffer_info = &rx_ring->buffer_info[i];
4546         }
4547
4548         if (likely(rx_ring->next_to_use != i)) {
4549                 rx_ring->next_to_use = i;
4550                 if (unlikely(i-- == 0))
4551                         i = (rx_ring->count - 1);
4552
4553                 /* Force memory writes to complete before letting h/w
4554                  * know there are new descriptors to fetch.  (Only
4555                  * applicable for weak-ordered memory model archs,
4556                  * such as IA-64). */
4557                 wmb();
4558                 writel(i, hw->hw_addr + rx_ring->rdt);
4559         }
4560 }
4561
4562 /**
4563  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4564  * @adapter: board private structure
4565  **/
4566
4567 static void e1000_smartspeed(struct e1000_adapter *adapter)
4568 {
4569         struct e1000_hw *hw = &adapter->hw;
4570         u16 phy_status;
4571         u16 phy_ctrl;
4572
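        /* SmartSpeed downshift workaround: if 1000BASE-T master/slave
         * resolution keeps faulting, stop forcing a manual master/slave
         * configuration and restart autonegotiation; if there is still no
         * link after E1000_SMARTSPEED_DOWNSHIFT attempts (perhaps a 2- or
         * 3-pair cable), turn manual configuration back on.
         */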
4573         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4574            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4575                 return;
4576
4577         if (adapter->smartspeed == 0) {
4578                 /* If Master/Slave config fault is asserted twice,
4579                  * we assume back-to-back */
4580                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4581                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4582                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4583                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4584                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4585                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4586                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4587                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4588                                             phy_ctrl);
4589                         adapter->smartspeed++;
4590                         if (!e1000_phy_setup_autoneg(hw) &&
4591                            !e1000_read_phy_reg(hw, PHY_CTRL,
4592                                                &phy_ctrl)) {
4593                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4594                                              MII_CR_RESTART_AUTO_NEG);
4595                                 e1000_write_phy_reg(hw, PHY_CTRL,
4596                                                     phy_ctrl);
4597                         }
4598                 }
4599                 return;
4600         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4601                 /* If still no link, perhaps using 2/3 pair cable */
4602                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4603                 phy_ctrl |= CR_1000T_MS_ENABLE;
4604                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4605                 if (!e1000_phy_setup_autoneg(hw) &&
4606                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4607                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4608                                      MII_CR_RESTART_AUTO_NEG);
4609                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4610                 }
4611         }
4612         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4613         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4614                 adapter->smartspeed = 0;
4615 }
4616
4617 /**
4618  * e1000_ioctl - handle device-specific ioctl requests
4619  * @netdev: network interface device structure
4620  * @ifr: pointer to the user ioctl request
4621  * @cmd: ioctl command to execute
4622  **/
4623
4624 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4625 {
4626         switch (cmd) {
4627         case SIOCGMIIPHY:
4628         case SIOCGMIIREG:
4629         case SIOCSMIIREG:
4630                 return e1000_mii_ioctl(netdev, ifr, cmd);
4631         default:
4632                 return -EOPNOTSUPP;
4633         }
4634 }
4635
4636 /**
4637  * e1000_mii_ioctl - handle MII (PHY register) ioctl requests
4638  * @netdev: network interface device structure
4639  * @ifr: pointer to the user ioctl request
4640  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4641  **/
4642
4643 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4644                            int cmd)
4645 {
4646         struct e1000_adapter *adapter = netdev_priv(netdev);
4647         struct e1000_hw *hw = &adapter->hw;
4648         struct mii_ioctl_data *data = if_mii(ifr);
4649         int retval;
4650         u16 mii_reg;
4651         unsigned long flags;
4652
4653         if (hw->media_type != e1000_media_type_copper)
4654                 return -EOPNOTSUPP;
4655
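        /* SIOCGMIIPHY returns the PHY address; SIOCGMIIREG and SIOCSMIIREG
         * read or write a raw PHY register under stats_lock.  Writes to
         * PHY_CTRL are also mirrored into the driver's autoneg/speed/duplex
         * state and trigger a reinit or reset so software and hardware agree.
         */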
4656         switch (cmd) {
4657         case SIOCGMIIPHY:
4658                 data->phy_id = hw->phy_addr;
4659                 break;
4660         case SIOCGMIIREG:
4661                 spin_lock_irqsave(&adapter->stats_lock, flags);
4662                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4663                                    &data->val_out)) {
4664                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4665                         return -EIO;
4666                 }
4667                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4668                 break;
4669         case SIOCSMIIREG:
4670                 if (data->reg_num & ~(0x1F))
4671                         return -EFAULT;
4672                 mii_reg = data->val_in;
4673                 spin_lock_irqsave(&adapter->stats_lock, flags);
4674                 if (e1000_write_phy_reg(hw, data->reg_num,
4675                                         mii_reg)) {
4676                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4677                         return -EIO;
4678                 }
4679                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4680                 if (hw->media_type == e1000_media_type_copper) {
4681                         switch (data->reg_num) {
4682                         case PHY_CTRL:
4683                                 if (mii_reg & MII_CR_POWER_DOWN)
4684                                         break;
4685                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4686                                         hw->autoneg = 1;
4687                                         hw->autoneg_advertised = 0x2F;
4688                                 } else {
4689                                         u32 speed;
4690                                         if (mii_reg & 0x40)
4691                                                 speed = SPEED_1000;
4692                                         else if (mii_reg & 0x2000)
4693                                                 speed = SPEED_100;
4694                                         else
4695                                                 speed = SPEED_10;
4696                                         retval = e1000_set_spd_dplx(
4697                                                 adapter, speed,
4698                                                 ((mii_reg & 0x100)
4699                                                  ? DUPLEX_FULL :
4700                                                  DUPLEX_HALF));
4701                                         if (retval)
4702                                                 return retval;
4703                                 }
4704                                 if (netif_running(adapter->netdev))
4705                                         e1000_reinit_locked(adapter);
4706                                 else
4707                                         e1000_reset(adapter);
4708                                 break;
4709                         case M88E1000_PHY_SPEC_CTRL:
4710                         case M88E1000_EXT_PHY_SPEC_CTRL:
4711                                 if (e1000_phy_reset(hw))
4712                                         return -EIO;
4713                                 break;
4714                         }
4715                 } else {
4716                         switch (data->reg_num) {
4717                         case PHY_CTRL:
4718                                 if (mii_reg & MII_CR_POWER_DOWN)
4719                                         break;
4720                                 if (netif_running(adapter->netdev))
4721                                         e1000_reinit_locked(adapter);
4722                                 else
4723                                         e1000_reset(adapter);
4724                                 break;
4725                         }
4726                 }
4727                 break;
4728         default:
4729                 return -EOPNOTSUPP;
4730         }
4731         return E1000_SUCCESS;
4732 }
4733
4734 void e1000_pci_set_mwi(struct e1000_hw *hw)
4735 {
4736         struct e1000_adapter *adapter = hw->back;
4737         int ret_val = pci_set_mwi(adapter->pdev);
4738
4739         if (ret_val)
4740                 e_err(probe, "Error in setting MWI\n");
4741 }
4742
4743 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4744 {
4745         struct e1000_adapter *adapter = hw->back;
4746
4747         pci_clear_mwi(adapter->pdev);
4748 }
4749
4750 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4751 {
4752         struct e1000_adapter *adapter = hw->back;
4753         return pcix_get_mmrbc(adapter->pdev);
4754 }
4755
4756 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4757 {
4758         struct e1000_adapter *adapter = hw->back;
4759         pcix_set_mmrbc(adapter->pdev, mmrbc);
4760 }
4761
4762 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4763 {
4764         outl(value, port);
4765 }
4766
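/* Return true if at least one VLAN id is currently registered on the device */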
4767 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4768 {
4769         u16 vid;
4770
4771         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4772                 return true;
4773         return false;
4774 }
4775
4776 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4777                               netdev_features_t features)
4778 {
4779         struct e1000_hw *hw = &adapter->hw;
4780         u32 ctrl;
4781
4782         ctrl = er32(CTRL);
4783         if (features & NETIF_F_HW_VLAN_RX) {
4784                 /* enable VLAN tag insert/strip */
4785                 ctrl |= E1000_CTRL_VME;
4786         } else {
4787                 /* disable VLAN tag insert/strip */
4788                 ctrl &= ~E1000_CTRL_VME;
4789         }
4790         ew32(CTRL, ctrl);
4791 }
4792 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4793                                      bool filter_on)
4794 {
4795         struct e1000_hw *hw = &adapter->hw;
4796         u32 rctl;
4797
4798         if (!test_bit(__E1000_DOWN, &adapter->flags))
4799                 e1000_irq_disable(adapter);
4800
4801         __e1000_vlan_mode(adapter, adapter->netdev->features);
4802         if (filter_on) {
4803                 /* enable VLAN receive filtering */
4804                 rctl = er32(RCTL);
4805                 rctl &= ~E1000_RCTL_CFIEN;
4806                 if (!(adapter->netdev->flags & IFF_PROMISC))
4807                         rctl |= E1000_RCTL_VFE;
4808                 ew32(RCTL, rctl);
4809                 e1000_update_mng_vlan(adapter);
4810         } else {
4811                 /* disable VLAN receive filtering */
4812                 rctl = er32(RCTL);
4813                 rctl &= ~E1000_RCTL_VFE;
4814                 ew32(RCTL, rctl);
4815         }
4816
4817         if (!test_bit(__E1000_DOWN, &adapter->flags))
4818                 e1000_irq_enable(adapter);
4819 }
4820
4821 static void e1000_vlan_mode(struct net_device *netdev,
4822                             netdev_features_t features)
4823 {
4824         struct e1000_adapter *adapter = netdev_priv(netdev);
4825
4826         if (!test_bit(__E1000_DOWN, &adapter->flags))
4827                 e1000_irq_disable(adapter);
4828
4829         __e1000_vlan_mode(adapter, features);
4830
4831         if (!test_bit(__E1000_DOWN, &adapter->flags))
4832                 e1000_irq_enable(adapter);
4833 }
4834
4835 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4836 {
4837         struct e1000_adapter *adapter = netdev_priv(netdev);
4838         struct e1000_hw *hw = &adapter->hw;
4839         u32 vfta, index;
4840
4841         if ((hw->mng_cookie.status &
4842              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4843             (vid == adapter->mng_vlan_id))
4844                 return 0;
4845
4846         if (!e1000_vlan_used(adapter))
4847                 e1000_vlan_filter_on_off(adapter, true);
4848
4849         /* add VID to filter table */
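        /* e.g. vid 100: index = (100 >> 5) & 0x7F = 3, bit = 100 & 0x1F = 4 */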
4850         index = (vid >> 5) & 0x7F;
4851         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4852         vfta |= (1 << (vid & 0x1F));
4853         e1000_write_vfta(hw, index, vfta);
4854
4855         set_bit(vid, adapter->active_vlans);
4856
4857         return 0;
4858 }
4859
4860 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4861 {
4862         struct e1000_adapter *adapter = netdev_priv(netdev);
4863         struct e1000_hw *hw = &adapter->hw;
4864         u32 vfta, index;
4865
4866         if (!test_bit(__E1000_DOWN, &adapter->flags))
4867                 e1000_irq_disable(adapter);
4868         if (!test_bit(__E1000_DOWN, &adapter->flags))
4869                 e1000_irq_enable(adapter);
4870
4871         /* remove VID from filter table */
4872         index = (vid >> 5) & 0x7F;
4873         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4874         vfta &= ~(1 << (vid & 0x1F));
4875         e1000_write_vfta(hw, index, vfta);
4876
4877         clear_bit(vid, adapter->active_vlans);
4878
4879         if (!e1000_vlan_used(adapter))
4880                 e1000_vlan_filter_on_off(adapter, false);
4881
4882         return 0;
4883 }
4884
4885 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4886 {
4887         u16 vid;
4888
4889         if (!e1000_vlan_used(adapter))
4890                 return;
4891
4892         e1000_vlan_filter_on_off(adapter, true);
4893         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4894                 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4895 }
4896
4897 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4898 {
4899         struct e1000_hw *hw = &adapter->hw;
4900
4901         hw->autoneg = 0;
4902
4903         /* Make sure dplx is at most 1 bit and lsb of speed is not set
4904          * for the switch() below to work */
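        /* e.g. SPEED_100 (100) + DUPLEX_FULL (1) = 101; the sums stay unique
         * because every supported speed is even and duplex is either 0 or 1
         */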
4905         if ((spd & 1) || (dplx & ~1))
4906                 goto err_inval;
4907
4908         /* Fiber NICs only allow 1000 Mbps full duplex */
4909         if ((hw->media_type == e1000_media_type_fiber) &&
4910             spd != SPEED_1000 &&
4911             dplx != DUPLEX_FULL)
4912                 goto err_inval;
4913
4914         switch (spd + dplx) {
4915         case SPEED_10 + DUPLEX_HALF:
4916                 hw->forced_speed_duplex = e1000_10_half;
4917                 break;
4918         case SPEED_10 + DUPLEX_FULL:
4919                 hw->forced_speed_duplex = e1000_10_full;
4920                 break;
4921         case SPEED_100 + DUPLEX_HALF:
4922                 hw->forced_speed_duplex = e1000_100_half;
4923                 break;
4924         case SPEED_100 + DUPLEX_FULL:
4925                 hw->forced_speed_duplex = e1000_100_full;
4926                 break;
4927         case SPEED_1000 + DUPLEX_FULL:
4928                 hw->autoneg = 1;
4929                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4930                 break;
4931         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4932         default:
4933                 goto err_inval;
4934         }
4935         return 0;
4936
4937 err_inval:
4938         e_err(probe, "Unsupported Speed/Duplex configuration\n");
4939         return -EINVAL;
4940 }
4941
4942 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4943 {
4944         struct net_device *netdev = pci_get_drvdata(pdev);
4945         struct e1000_adapter *adapter = netdev_priv(netdev);
4946         struct e1000_hw *hw = &adapter->hw;
4947         u32 ctrl, ctrl_ext, rctl, status;
4948         u32 wufc = adapter->wol;
4949 #ifdef CONFIG_PM
4950         int retval = 0;
4951 #endif
4952
4953         netif_device_detach(netdev);
4954
4955         if (netif_running(netdev)) {
4956                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4957                 e1000_down(adapter);
4958         }
4959
4960 #ifdef CONFIG_PM
4961         retval = pci_save_state(pdev);
4962         if (retval)
4963                 return retval;
4964 #endif
4965
4966         status = er32(STATUS);
4967         if (status & E1000_STATUS_LU)
4968                 wufc &= ~E1000_WUFC_LNKC;
4969
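        /* If any wake-up filter bits remain set, leave receives enabled and
         * program the wake-up control/filter registers (WUC/WUFC) so wake
         * packets can be recognised while suspended; otherwise clear both so
         * the part does not wake the system.
         */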
4970         if (wufc) {
4971                 e1000_setup_rctl(adapter);
4972                 e1000_set_rx_mode(netdev);
4973
4974                 rctl = er32(RCTL);
4975
4976                 /* turn on all-multi mode if wake on multicast is enabled */
4977                 if (wufc & E1000_WUFC_MC)
4978                         rctl |= E1000_RCTL_MPE;
4979
4980                 /* enable receives in the hardware */
4981                 ew32(RCTL, rctl | E1000_RCTL_EN);
4982
4983                 if (hw->mac_type >= e1000_82540) {
4984                         ctrl = er32(CTRL);
4985                         /* advertise wake from D3Cold */
4986                         #define E1000_CTRL_ADVD3WUC 0x00100000
4987                         /* phy power management enable */
4988                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4989                         ctrl |= E1000_CTRL_ADVD3WUC |
4990                                 E1000_CTRL_EN_PHY_PWR_MGMT;
4991                         ew32(CTRL, ctrl);
4992                 }
4993
4994                 if (hw->media_type == e1000_media_type_fiber ||
4995                     hw->media_type == e1000_media_type_internal_serdes) {
4996                         /* keep the laser running in D3 */
4997                         ctrl_ext = er32(CTRL_EXT);
4998                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
4999                         ew32(CTRL_EXT, ctrl_ext);
5000                 }
5001
5002                 ew32(WUC, E1000_WUC_PME_EN);
5003                 ew32(WUFC, wufc);
5004         } else {
5005                 ew32(WUC, 0);
5006                 ew32(WUFC, 0);
5007         }
5008
5009         e1000_release_manageability(adapter);
5010
5011         *enable_wake = !!wufc;
5012
5013         /* make sure adapter isn't asleep if manageability is enabled */
5014         if (adapter->en_mng_pt)
5015                 *enable_wake = true;
5016
5017         if (netif_running(netdev))
5018                 e1000_free_irq(adapter);
5019
5020         pci_disable_device(pdev);
5021
5022         return 0;
5023 }
5024
5025 #ifdef CONFIG_PM
5026 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5027 {
5028         int retval;
5029         bool wake;
5030
5031         retval = __e1000_shutdown(pdev, &wake);
5032         if (retval)
5033                 return retval;
5034
5035         if (wake) {
5036                 pci_prepare_to_sleep(pdev);
5037         } else {
5038                 pci_wake_from_d3(pdev, false);
5039                 pci_set_power_state(pdev, PCI_D3hot);
5040         }
5041
5042         return 0;
5043 }
5044
5045 static int e1000_resume(struct pci_dev *pdev)
5046 {
5047         struct net_device *netdev = pci_get_drvdata(pdev);
5048         struct e1000_adapter *adapter = netdev_priv(netdev);
5049         struct e1000_hw *hw = &adapter->hw;
5050         int err;
5051
5052         pci_set_power_state(pdev, PCI_D0);
5053         pci_restore_state(pdev);
5054         pci_save_state(pdev);
5055
5056         if (adapter->need_ioport)
5057                 err = pci_enable_device(pdev);
5058         else
5059                 err = pci_enable_device_mem(pdev);
5060         if (err) {
5061                 pr_err("Cannot enable PCI device from suspend\n");
5062                 return err;
5063         }
5064         pci_set_master(pdev);
5065
5066         pci_enable_wake(pdev, PCI_D3hot, 0);
5067         pci_enable_wake(pdev, PCI_D3cold, 0);
5068
5069         if (netif_running(netdev)) {
5070                 err = e1000_request_irq(adapter);
5071                 if (err)
5072                         return err;
5073         }
5074
5075         e1000_power_up_phy(adapter);
5076         e1000_reset(adapter);
5077         ew32(WUS, ~0);
5078
5079         e1000_init_manageability(adapter);
5080
5081         if (netif_running(netdev))
5082                 e1000_up(adapter);
5083
5084         netif_device_attach(netdev);
5085
5086         return 0;
5087 }
5088 #endif
5089
5090 static void e1000_shutdown(struct pci_dev *pdev)
5091 {
5092         bool wake;
5093
5094         __e1000_shutdown(pdev, &wake);
5095
5096         if (system_state == SYSTEM_POWER_OFF) {
5097                 pci_wake_from_d3(pdev, wake);
5098                 pci_set_power_state(pdev, PCI_D3hot);
5099         }
5100 }
5101
5102 #ifdef CONFIG_NET_POLL_CONTROLLER
5103 /*
5104  * Polling 'interrupt' - used by things like netconsole to send skbs
5105  * without having to re-enable interrupts. It's not called while
5106  * the interrupt routine is executing.
5107  */
5108 static void e1000_netpoll(struct net_device *netdev)
5109 {
5110         struct e1000_adapter *adapter = netdev_priv(netdev);
5111
5112         disable_irq(adapter->pdev->irq);
5113         e1000_intr(adapter->pdev->irq, netdev);
5114         enable_irq(adapter->pdev->irq);
5115 }
5116 #endif
5117
5118 /**
5119  * e1000_io_error_detected - called when PCI error is detected
5120  * @pdev: Pointer to PCI device
5121  * @state: The current pci connection state
5122  *
5123  * This function is called after a PCI bus error affecting
5124  * this device has been detected.
5125  */
5126 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5127                                                 pci_channel_state_t state)
5128 {
5129         struct net_device *netdev = pci_get_drvdata(pdev);
5130         struct e1000_adapter *adapter = netdev_priv(netdev);
5131
5132         netif_device_detach(netdev);
5133
5134         if (state == pci_channel_io_perm_failure)
5135                 return PCI_ERS_RESULT_DISCONNECT;
5136
5137         if (netif_running(netdev))
5138                 e1000_down(adapter);
5139         pci_disable_device(pdev);
5140
5141         /* Request a slot reset. */
5142         return PCI_ERS_RESULT_NEED_RESET;
5143 }
5144
5145 /**
5146  * e1000_io_slot_reset - called after the pci bus has been reset.
5147  * @pdev: Pointer to PCI device
5148  *
5149  * Restart the card from scratch, as if from a cold-boot. Implementation
5150  * resembles the first-half of the e1000_resume routine.
5151  */
5152 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5153 {
5154         struct net_device *netdev = pci_get_drvdata(pdev);
5155         struct e1000_adapter *adapter = netdev_priv(netdev);
5156         struct e1000_hw *hw = &adapter->hw;
5157         int err;
5158
5159         if (adapter->need_ioport)
5160                 err = pci_enable_device(pdev);
5161         else
5162                 err = pci_enable_device_mem(pdev);
5163         if (err) {
5164                 pr_err("Cannot re-enable PCI device after reset.\n");
5165                 return PCI_ERS_RESULT_DISCONNECT;
5166         }
5167         pci_set_master(pdev);
5168
5169         pci_enable_wake(pdev, PCI_D3hot, 0);
5170         pci_enable_wake(pdev, PCI_D3cold, 0);
5171
5172         e1000_reset(adapter);
5173         ew32(WUS, ~0);
5174
5175         return PCI_ERS_RESULT_RECOVERED;
5176 }
5177
5178 /**
5179  * e1000_io_resume - called when traffic can start flowing again.
5180  * @pdev: Pointer to PCI device
5181  *
5182  * This callback is called when the error recovery driver tells us that
5183  * it's OK to resume normal operation. Implementation resembles the
5184  * second-half of the e1000_resume routine.
5185  */
5186 static void e1000_io_resume(struct pci_dev *pdev)
5187 {
5188         struct net_device *netdev = pci_get_drvdata(pdev);
5189         struct e1000_adapter *adapter = netdev_priv(netdev);
5190
5191         e1000_init_manageability(adapter);
5192
5193         if (netif_running(netdev)) {
5194                 if (e1000_up(adapter)) {
5195                         pr_info("can't bring device back up after reset\n");
5196                         return;
5197                 }
5198         }
5199
5200         netif_device_attach(netdev);
5201 }
5202
5203 /* e1000_main.c */