- Update to 3.4-rc7.
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index a6c4192..e44d164 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1,5 +1,5 @@
 /*
- *  linux/drivers/net/ehea/ehea_main.c
+ *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
  *
  *  eHEA ethernet device driver for IBM eServer System p
  *
@@ -62,9 +62,6 @@ static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
 static int use_mcs = 1;
-static int use_lro;
-static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
-static int num_tx_qps = EHEA_NUM_TX_QP;
 static int prop_carrier_state;
 
 module_param(msg_level, int, 0);
@@ -74,11 +71,7 @@ module_param(rq3_entries, int, 0);
 module_param(sq_entries, int, 0);
 module_param(prop_carrier_state, int, 0);
 module_param(use_mcs, int, 0);
-module_param(use_lro, int, 0);
-module_param(lro_max_aggr, int, 0);
-module_param(num_tx_qps, int, 0);
 
-MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
 MODULE_PARM_DESC(msg_level, "msg_level");
 MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no.  Default = 0 ");
@@ -97,17 +90,12 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
 MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");
 
-MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
-                __MODULE_STRING(EHEA_LRO_MAX_AGGR));
-MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
-                "Default = 0");
-
 static int port_name_cnt;
 static LIST_HEAD(adapter_list);
 static unsigned long ehea_driver_flags;
 static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
 
 
 static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -115,6 +103,19 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
 
 static int __devexit ehea_remove(struct platform_device *dev);
 
+static struct of_device_id ehea_module_device_table[] = {
+       {
+               .name = "lhea",
+               .compatible = "IBM,lhea",
+       },
+       {
+               .type = "network",
+               .compatible = "IBM,lhea-ethernet",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
+
 static struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
@@ -122,7 +123,6 @@ static struct of_device_id ehea_device_table[] = {
        },
        {},
 };
-MODULE_DEVICE_TABLE(of, ehea_device_table);
 
 static struct of_platform_driver ehea_driver = {
        .driver = {
@@ -145,7 +145,7 @@ void ehea_dump(void *adr, int len, char *msg)
        }
 }
 
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
 {
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
@@ -174,7 +174,7 @@ static void ehea_update_firmware_handles(void)
                                continue;
 
                        num_ports++;
-                       num_portres += port->num_def_qps + port->num_add_tx_qps;
+                       num_portres += port->num_def_qps;
                }
        }
 
@@ -200,9 +200,7 @@ static void ehea_update_firmware_handles(void)
                            (num_ports == 0))
                                continue;
 
-                       for (l = 0;
-                            l < port->num_def_qps + port->num_add_tx_qps;
-                            l++) {
+                       for (l = 0; l < port->num_def_qps; l++) {
                                struct ehea_port_res *pr = &port->port_res[l];
 
                                arr[i].adh = adapter->handle;
@@ -304,16 +302,18 @@ static void ehea_update_bcmc_registrations(void)
 
                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
-                               arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
-                                                 EHEA_BCMC_MULTICAST |
+                               arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
+                               if (mc_entry->macaddr == 0)
+                                       arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
 
                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
-                               arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
-                                                 EHEA_BCMC_MULTICAST |
+                               arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
+                               if (mc_entry->macaddr == 0)
+                                       arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
@@ -328,10 +328,10 @@ out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
 }
 
-static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+                                       struct rtnl_link_stats64 *stats)
 {
        struct ehea_port *port = netdev_priv(dev);
-       struct net_device_stats *stats = &port->stats;
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
        int i;
 
@@ -340,7 +340,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
                rx_bytes   += port->port_res[i].rx_bytes;
        }
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes   += port->port_res[i].tx_bytes;
        }
@@ -350,7 +350,9 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
        stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;
 
-       return &port->stats;
+       stats->multicast = port->stats.multicast;
+       stats->rx_errors = port->stats.rx_errors;
+       return stats;
 }
 
 static void ehea_update_stats(struct work_struct *work)
@@ -358,7 +360,7 @@ static void ehea_update_stats(struct work_struct *work)
        struct ehea_port *port =
                container_of(work, struct ehea_port, stats_work.work);
        struct net_device *dev = port->netdev;
-       struct net_device_stats *stats = &port->stats;
+       struct rtnl_link_stats64 *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret;
 
@@ -385,7 +387,8 @@ static void ehea_update_stats(struct work_struct *work)
 out_herr:
        free_page((unsigned long)cb2);
 resched:
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 }
 
 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -661,49 +664,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
        return 0;
 }
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-                      void **tcph, u64 *hdr_flags, void *priv)
-{
-       struct ehea_cqe *cqe = priv;
-       unsigned int ip_len;
-       struct iphdr *iph;
-
-       /* non tcp/udp packets */
-       if (!cqe->header_length)
-               return -1;
-
-       /* non tcp packet */
-       skb_reset_network_header(skb);
-       iph = ip_hdr(skb);
-       if (iph->protocol != IPPROTO_TCP)
-               return -1;
-
-       ip_len = ip_hdrlen(skb);
-       skb_set_transport_header(skb, ip_len);
-       *tcph = tcp_hdr(skb);
-
-       /* check if ip header and tcp header are complete */
-       if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-               return -1;
-
-       *hdr_flags = LRO_IPV4 | LRO_TCP;
-       *iphdr = iph;
-
-       return 0;
-}
-
-static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
-                         struct sk_buff *skb)
-{
-       if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
-               __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
-
-       if (skb->dev->features & NETIF_F_LRO)
-               lro_receive_skb(&pr->lro_mgr, skb, cqe);
-       else
-               netif_receive_skb(skb);
-}
-
 static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
@@ -780,7 +740,11 @@ static int ehea_proc_rwqes(struct net_device *dev,
                        }
 
                        processed_bytes += skb->len;
-                       ehea_proc_skb(pr, cqe, skb);
+
+                       if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+                               __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+
+                       napi_gro_receive(&pr->napi, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -791,8 +755,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }
-       if (dev->features & NETIF_F_LRO)
-               lro_flush_all(&pr->lro_mgr);
 
        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;
@@ -810,7 +772,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
 {
        int i;
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
@@ -823,7 +785,7 @@ static void check_sqs(struct ehea_port *port)
        int swqe_index;
        int i, k;
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;
                k = 0;
@@ -926,7 +888,6 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
        return cqe;
 }
 
-#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
 #define EHEA_POLL_MAX_CQES 65535
 
 static int ehea_poll(struct napi_struct *napi, int budget)
@@ -936,18 +897,13 @@ static int ehea_poll(struct napi_struct *napi, int budget)
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
-       int force_irq, wqe_index;
+       int wqe_index;
        int rx = 0;
 
-       force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+       rx += ehea_proc_rwqes(dev, pr, budget - rx);
 
-       if (!force_irq)
-               rx += ehea_proc_rwqes(dev, pr, budget - rx);
-
-       while ((rx != budget) || force_irq) {
-               pr->poll_counter = 0;
-               force_irq = 0;
+       while (rx != budget) {
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
@@ -967,7 +923,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }
 
-       pr->poll_counter++;
        return rx;
 }
 
@@ -1119,13 +1074,6 @@ int ehea_sense_port_attr(struct ehea_port *port)
                goto out_free;
        }
 
-       port->num_tx_qps = num_tx_qps;
-
-       if (port->num_def_qps >= port->num_tx_qps)
-               port->num_add_tx_qps = 0;
-       else
-               port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
-
        ret = 0;
 out_free:
        if (ret || netif_msg_probe(port))
@@ -1366,7 +1314,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
                   port->qp_eq->attr.ist1);
 
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
@@ -1409,7 +1357,7 @@ static void ehea_free_interrupts(struct net_device *dev)
 
        /* send */
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                netif_info(port, intr, dev,
@@ -1472,7 +1420,7 @@ out:
        return ret;
 }
 
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
 {
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;
@@ -1494,7 +1442,7 @@ out:
        return -EIO;
 }
 
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
 {
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
@@ -1630,15 +1578,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 
        netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
 
-       pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
-       pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
-       pr->lro_mgr.lro_arr = pr->lro_desc;
-       pr->lro_mgr.get_skb_header = get_skb_hdr;
-       pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
-       pr->lro_mgr.dev = port->netdev;
-       pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
-       pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
        ret = 0;
        goto out;
 
@@ -1695,96 +1634,35 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
        return ret;
 }
 
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
-                                     const struct sk_buff *skb)
-{
-       swqe->ip_start = skb_network_offset(skb);
-       swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
-                                       const struct sk_buff *skb)
-{
-       swqe->tcp_offset =
-               (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
-       swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
-                                       const struct sk_buff *skb)
-{
-       swqe->tcp_offset =
-               (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
-       swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
-static void write_swqe2_TSO(struct sk_buff *skb,
-                           struct ehea_swqe *swqe, u32 lkey)
-{
-       struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
-       u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-       int skb_data_size = skb_headlen(skb);
-       int headersize;
-
-       /* Packet is TCP with TSO enabled */
-       swqe->tx_control |= EHEA_SWQE_TSO;
-       swqe->mss = skb_shinfo(skb)->gso_size;
-       /* copy only eth/ip/tcp headers to immediate data and
-        * the rest of skb->data to sg1entry
-        */
-       headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-
-       skb_data_size = skb_headlen(skb);
-
-       if (skb_data_size >= headersize) {
-               /* copy immediate data */
-               skb_copy_from_linear_data(skb, imm_data, headersize);
-               swqe->immediate_data_length = headersize;
-
-               if (skb_data_size > headersize) {
-                       /* set sg1entry data */
-                       sg1entry->l_key = lkey;
-                       sg1entry->len = skb_data_size - headersize;
-                       sg1entry->vaddr =
-                               ehea_map_vaddr(skb->data + headersize);
-                       swqe->descriptors++;
-               }
-       } else
-               pr_err("cannot handle fragmented headers\n");
-}
-
-static void write_swqe2_nonTSO(struct sk_buff *skb,
-                              struct ehea_swqe *swqe, u32 lkey)
+static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
+                                 u32 lkey)
 {
        int skb_data_size = skb_headlen(skb);
        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+       unsigned int immediate_len = SWQE2_MAX_IMM;
 
-       /* Packet is any nonTSO type
-        *
-        * Copy as much as possible skb->data to immediate data and
-        * the rest to sg1entry
-        */
-       if (skb_data_size >= SWQE2_MAX_IMM) {
-               /* copy immediate data */
-               skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+       swqe->descriptors = 0;
 
-               swqe->immediate_data_length = SWQE2_MAX_IMM;
+       if (skb_is_gso(skb)) {
+               swqe->tx_control |= EHEA_SWQE_TSO;
+               swqe->mss = skb_shinfo(skb)->gso_size;
+               /*
+                * For TSO packets we only copy the headers into the
+                * immediate area.
+                */
+               immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+       }
 
-               if (skb_data_size > SWQE2_MAX_IMM) {
-                       /* copy sg1entry data */
+       if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
+               skb_copy_from_linear_data(skb, imm_data, immediate_len);
+               swqe->immediate_data_length = immediate_len;
+
+               if (skb_data_size > immediate_len) {
                        sg1entry->l_key = lkey;
-                       sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+                       sg1entry->len = skb_data_size - immediate_len;
                        sg1entry->vaddr =
-                               ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+                               ehea_map_vaddr(skb->data + immediate_len);
                        swqe->descriptors++;
                }
        } else {
@@ -1803,13 +1681,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
        nfrags = skb_shinfo(skb)->nr_frags;
        sg1entry = &swqe->u.immdata_desc.sg_entry;
        sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
-       swqe->descriptors = 0;
        sg1entry_contains_frag_data = 0;
 
-       if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
-               write_swqe2_TSO(skb, swqe, lkey);
-       else
-               write_swqe2_nonTSO(skb, swqe, lkey);
+       write_swqe2_immediate(skb, swqe, lkey);
 
        /* write descriptors */
        if (nfrags > 0) {
@@ -1819,7 +1693,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 
                        /* copy sg1entry data */
                        sg1entry->l_key = lkey;
-                       sg1entry->len = frag->size;
+                       sg1entry->len = skb_frag_size(frag);
                        sg1entry->vaddr =
                                ehea_map_vaddr(skb_frag_address(frag));
                        swqe->descriptors++;
@@ -1832,7 +1706,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
                        sgentry = &sg_list[i - sg1entry_contains_frag_data];
 
                        sgentry->l_key = lkey;
-                       sgentry->len = frag->size;
+                       sgentry->len = skb_frag_size(frag);
                        sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
                        swqe->descriptors++;
                }
@@ -1978,8 +1852,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
        u64 hret;
        u8 reg_type;
 
-       reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
-                | EHEA_BCMC_UNTAGGED;
+       reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
+       if (mc_mac_addr == 0)
+               reg_type |= EHEA_BCMC_SCOPE_ALL;
 
        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
                                     port->logical_port_id,
@@ -1987,8 +1862,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
        if (hret)
                goto out;
 
-       reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
-                | EHEA_BCMC_VLANID_ALL;
+       reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
+       if (mc_mac_addr == 0)
+               reg_type |= EHEA_BCMC_SCOPE_ALL;
 
        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
                                     port->logical_port_id,
@@ -2038,7 +1914,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
                                netdev_err(dev,
                                           "failed enabling IFF_ALLMULTI\n");
                }
-       } else
+       } else {
                if (!enable) {
                        /* Disable ALLMULTI */
                        hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
@@ -2048,6 +1924,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
                                netdev_err(dev,
                                           "failed disabling IFF_ALLMULTI\n");
                }
+       }
 }
 
 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
@@ -2081,11 +1958,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
        struct netdev_hw_addr *ha;
        int ret;
 
-       if (port->promisc) {
-               ehea_promiscuous(dev, 1);
-               return;
-       }
-       ehea_promiscuous(dev, 0);
+       ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
 
        if (dev->flags & IFF_ALLMULTI) {
                ehea_allmulti(dev, 1);
@@ -2124,41 +1997,44 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
-                      struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
 {
-       if (skb->protocol == htons(ETH_P_IP)) {
-               const struct iphdr *iph = ip_hdr(skb);
+       swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
 
-               /* IPv4 */
-               swqe->tx_control |= EHEA_SWQE_CRC
-                                | EHEA_SWQE_IP_CHECKSUM
-                                | EHEA_SWQE_TCP_CHECKSUM
-                                | EHEA_SWQE_IMM_DATA_PRESENT
-                                | EHEA_SWQE_DESCRIPTORS_PRESENT;
+       if (skb->protocol != htons(ETH_P_IP))
+               return;
 
-               write_ip_start_end(swqe, skb);
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
 
-               if (iph->protocol == IPPROTO_UDP) {
-                       if ((iph->frag_off & IP_MF) ||
-                           (iph->frag_off & IP_OFFSET))
-                               /* IP fragment, so don't change cs */
-                               swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
-                       else
-                               write_udp_offset_end(swqe, skb);
-               } else if (iph->protocol == IPPROTO_TCP) {
-                       write_tcp_offset_end(swqe, skb);
-               }
+       swqe->ip_start = skb_network_offset(skb);
+       swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
 
-               /* icmp (big data) and ip segmentation packets (all other ip
-                  packets) do not require any special handling */
+       switch (ip_hdr(skb)->protocol) {
+       case IPPROTO_UDP:
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
 
-       } else {
-               /* Other Ethernet Protocol */
-               swqe->tx_control |= EHEA_SWQE_CRC
-                                | EHEA_SWQE_IMM_DATA_PRESENT
-                                | EHEA_SWQE_DESCRIPTORS_PRESENT;
+               swqe->tcp_offset = swqe->ip_end + 1 +
+                                  offsetof(struct udphdr, check);
+               break;
+
+       case IPPROTO_TCP:
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+               swqe->tcp_offset = swqe->ip_end + 1 +
+                                  offsetof(struct tcphdr, check);
+               break;
        }
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+                      struct ehea_swqe *swqe, u32 lkey)
+{
+       swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+       xmit_common(skb, swqe);
 
        write_swqe2_data(skb, dev, swqe, lkey);
 }
@@ -2166,67 +2042,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
                       struct ehea_swqe *swqe)
 {
-       int nfrags = skb_shinfo(skb)->nr_frags;
        u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
-       skb_frag_t *frag;
-       int i;
-
-       if (skb->protocol == htons(ETH_P_IP)) {
-               const struct iphdr *iph = ip_hdr(skb);
-
-               /* IPv4 */
-               write_ip_start_end(swqe, skb);
-
-               if (iph->protocol == IPPROTO_TCP) {
-                       swqe->tx_control |= EHEA_SWQE_CRC
-                                        | EHEA_SWQE_IP_CHECKSUM
-                                        | EHEA_SWQE_TCP_CHECKSUM
-                                        | EHEA_SWQE_IMM_DATA_PRESENT;
-
-                       write_tcp_offset_end(swqe, skb);
 
-               } else if (iph->protocol == IPPROTO_UDP) {
-                       if ((iph->frag_off & IP_MF) ||
-                           (iph->frag_off & IP_OFFSET))
-                               /* IP fragment, so don't change cs */
-                               swqe->tx_control |= EHEA_SWQE_CRC
-                                                | EHEA_SWQE_IMM_DATA_PRESENT;
-                       else {
-                               swqe->tx_control |= EHEA_SWQE_CRC
-                                                | EHEA_SWQE_IP_CHECKSUM
-                                                | EHEA_SWQE_TCP_CHECKSUM
-                                                | EHEA_SWQE_IMM_DATA_PRESENT;
+       xmit_common(skb, swqe);
 
-                               write_udp_offset_end(swqe, skb);
-                       }
-               } else {
-                       /* icmp (big data) and
-                          ip segmentation packets (all other ip packets) */
-                       swqe->tx_control |= EHEA_SWQE_CRC
-                                        | EHEA_SWQE_IP_CHECKSUM
-                                        | EHEA_SWQE_IMM_DATA_PRESENT;
-               }
-       } else {
-               /* Other Ethernet Protocol */
-               swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
-       }
-       /* copy (immediate) data */
-       if (nfrags == 0) {
-               /* data is in a single piece */
+       if (!skb->data_len)
                skb_copy_from_linear_data(skb, imm_data, skb->len);
-       } else {
-               /* first copy data from the skb->data buffer ... */
-               skb_copy_from_linear_data(skb, imm_data,
-                                         skb_headlen(skb));
-               imm_data += skb_headlen(skb);
+       else
+               skb_copy_bits(skb, 0, imm_data, skb->len);
 
-               /* ... then copy data from the fragments */
-               for (i = 0; i < nfrags; i++) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       memcpy(imm_data, skb_frag_address(frag), frag->size);
-                       imm_data += frag->size;
-               }
-       }
        swqe->immediate_data_length = skb->len;
        dev_kfree_skb(skb);
 }
@@ -2305,17 +2129,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2323,6 +2149,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2331,24 +2158,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
-       return;
+       return err;
 }
 
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2356,6 +2187,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2364,13 +2196,16 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
+       return err;
 }
 
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 {
        int ret = -EIO;
        u64 hret;
@@ -2445,8 +2280,7 @@ out:
        return ret;
 }
 
-static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
-                              int add_tx_qps)
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
 {
        int ret, i;
        struct port_res_cfg pr_cfg, pr_cfg_small_rx;
@@ -2479,7 +2313,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
                if (ret)
                        goto out_clean_pr;
        }
-       for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+       for (i = def_qps; i < def_qps; i++) {
                ret = ehea_init_port_res(port, &port->port_res[i],
                                         &pr_cfg_small_rx, i);
                if (ret)
@@ -2502,7 +2336,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
        int ret = 0;
        int i;
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+       for (i = 0; i < port->num_def_qps; i++)
                ret |= ehea_clean_portres(port, &port->port_res[i]);
 
        ret |= ehea_destroy_eq(port->qp_eq);
@@ -2534,8 +2368,7 @@ static int ehea_up(struct net_device *dev)
        if (port->state == EHEA_PORT_UP)
                return 0;
 
-       ret = ehea_port_res_setup(port, port->num_def_qps,
-                                 port->num_add_tx_qps);
+       ret = ehea_port_res_setup(port, port->num_def_qps);
        if (ret) {
                netdev_err(dev, "port_res_failed\n");
                goto out;
@@ -2554,7 +2387,7 @@ static int ehea_up(struct net_device *dev)
                goto out_clean_pr;
        }
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
                if (ret) {
                        netdev_err(dev, "activate_qp failed\n");
@@ -2600,7 +2433,7 @@ static void port_napi_disable(struct ehea_port *port)
 {
        int i;
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+       for (i = 0; i < port->num_def_qps; i++)
                napi_disable(&port->port_res[i].napi);
 }
 
@@ -2608,7 +2441,7 @@ static void port_napi_enable(struct ehea_port *port)
 {
        int i;
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+       for (i = 0; i < port->num_def_qps; i++)
                napi_enable(&port->port_res[i].napi);
 }
 
@@ -2628,7 +2461,8 @@ static int ehea_open(struct net_device *dev)
        }
 
        mutex_unlock(&port->port_lock);
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 
        return ret;
 }
@@ -2642,6 +2476,7 @@ static int ehea_down(struct net_device *dev)
                return 0;
 
        ehea_drop_multicast_list(dev);
+       ehea_allmulti(dev, 0);
        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
        ehea_free_interrupts(dev);
@@ -2696,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port)
 {
        int i;
 
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+       for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
                int ret;
@@ -2712,7 +2547,7 @@ static void ehea_flush_sq(struct ehea_port *port)
        }
 }
 
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
@@ -2730,7 +2565,7 @@ int ehea_stop_qps(struct net_device *dev)
                goto out;
        }
 
-       for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+       for (i = 0; i < (port->num_def_qps); i++) {
                struct ehea_port_res *pr =  &port->port_res[i];
                struct ehea_qp *qp = pr->qp;
 
@@ -2781,7 +2616,7 @@ out:
        return ret;
 }
 
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
 {
        struct ehea_qp qp = *orig_qp;
        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2814,7 +2649,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
        }
 }
 
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
@@ -2832,7 +2667,7 @@ int ehea_restart_qps(struct net_device *dev)
                goto out;
        }
 
-       for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+       for (i = 0; i < (port->num_def_qps); i++) {
                struct ehea_port_res *pr =  &port->port_res[i];
                struct ehea_qp *qp = pr->qp;
 
@@ -3005,7 +2840,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
                ehea_schedule_port_reset(port);
 }
 
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
 {
        struct hcp_query_ehea *cb;
        u64 hret;
@@ -3033,7 +2868,7 @@ out:
        return ret;
 }
 
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
 {
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
@@ -3137,7 +2972,7 @@ static const struct net_device_ops ehea_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ehea_netpoll,
 #endif
-       .ndo_get_stats          = ehea_get_stats,
+       .ndo_get_stats64        = ehea_get_stats64,
        .ndo_set_mac_address    = ehea_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = ehea_set_multicast_list,
@@ -3147,7 +2982,7 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_tx_timeout         = ehea_tx_watchdog,
 };
 
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                                         u32 logical_port_id,
                                         struct device_node *dn)
 {
@@ -3161,7 +2996,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
        dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
 
        if (!dev) {
-               pr_err("no mem for net_device\n");
                ret = -ENOMEM;
                goto out_err;
        }
@@ -3191,8 +3025,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                goto out_free_mc_list;
 
        netif_set_real_num_rx_queues(dev, port->num_def_qps);
-       netif_set_real_num_tx_queues(dev, port->num_def_qps +
-                                    port->num_add_tx_qps);
+       netif_set_real_num_tx_queues(dev, port->num_def_qps);
 
        port_dev = ehea_register_port(port, dn);
        if (!port_dev)
@@ -3206,17 +3039,16 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
        dev->netdev_ops = &ehea_netdev_ops;
        ehea_set_ethtool_ops(dev);
 
-       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+       dev->hw_features = NETIF_F_SG | NETIF_F_TSO
                      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
                      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
                      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
                      | NETIF_F_RXCSUM;
+       dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
+                       NETIF_F_IP_CSUM;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
-       if (use_lro)
-               dev->features |= NETIF_F_LRO;
-
        INIT_WORK(&port->reset_task, ehea_reset_port);
        INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
 
@@ -3230,8 +3062,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                goto out_unreg_port;
        }
 
-       port->lro_max_aggr = lro_max_aggr;
-
        ret = ehea_get_jumboframe_status(port, &jumbo);
        if (ret)
                netdev_err(dev, "failed determining jumbo frame status\n");
@@ -3422,7 +3252,7 @@ static ssize_t ehea_remove_port(struct device *dev,
 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
 
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
 {
        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
        if (ret)
@@ -3433,7 +3263,7 @@ out:
        return ret;
 }
 
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
 {
        device_remove_file(&dev->dev, &dev_attr_probe_port);
        device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3445,6 +3275,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
        struct ehea_adapter *adapter;
        const u64 *adapter_handle;
        int ret;
+       int i;
 
        if (!dev || !dev->dev.of_node) {
                pr_err("Invalid ibmebus device probed\n");
@@ -3498,17 +3329,9 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
        tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
                     (unsigned long)adapter);
 
-       ret = ibmebus_request_irq(adapter->neq->attr.ist1,
-                                 ehea_interrupt_neq, IRQF_DISABLED,
-                                 "ehea_neq", adapter);
-       if (ret) {
-               dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
-               goto out_kill_eq;
-       }
-
        ret = ehea_create_device_sysfs(dev);
        if (ret)
-               goto out_free_irq;
+               goto out_kill_eq;
 
        ret = ehea_setup_ports(adapter);
        if (ret) {
@@ -3516,15 +3339,30 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
                goto out_rem_dev_sysfs;
        }
 
+       ret = ibmebus_request_irq(adapter->neq->attr.ist1,
+                                 ehea_interrupt_neq, IRQF_DISABLED,
+                                 "ehea_neq", adapter);
+       if (ret) {
+               dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
+               goto out_shutdown_ports;
+       }
+
+       /* Handle any events that might be pending. */
+       tasklet_hi_schedule(&adapter->neq_tasklet);
+
        ret = 0;
        goto out;
 
+out_shutdown_ports:
+       for (i = 0; i < EHEA_MAX_PORTS; i++)
+               if (adapter->port[i]) {
+                       ehea_shutdown_single_port(adapter->port[i]);
+                       adapter->port[i] = NULL;
+               }
+
 out_rem_dev_sysfs:
        ehea_remove_device_sysfs(dev);
 
-out_free_irq:
-       ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
-
 out_kill_eq:
        ehea_destroy_eq(adapter->neq);
 
@@ -3564,7 +3402,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
        return 0;
 }
 
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
 {
        int i;
 
@@ -3676,7 +3514,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
                   ehea_show_capabilities, NULL);
 
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
 {
        int ret;