/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

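/*
 * Rebuild the flat array of firmware handles (QPs, CQs, EQs, MRs) kept
 * for all adapters and all ports that are up. The function uses a
 * two-pass scheme: first count the handles under the lock, then allocate
 * and fill a fresh array, replacing the old one only on success. The
 * array is presumably consumed by the shutdown/kexec notifier paths
 * elsewhere in the driver (note the <asm/kexec.h> include above).
 */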
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

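/*
 * Same count-then-fill pattern for the broadcast/multicast (BCMC)
 * registration table, but under a spinlock, so the new array must be
 * allocated with GFP_ATOMIC. Each port that is up contributes two
 * broadcast entries (untagged and all-VLAN) plus two entries per
 * registered multicast address.
 */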
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes   += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes   += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	return &port->stats;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
}

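/*
 * Re-post up to nr_of_wqes receive buffers on RQ1, the low-latency
 * receive queue. Any deficit that cannot be filled (allocation failure,
 * or transfers suspended while __EHEA_STOP_XFER is set) is remembered in
 * os_skbs and retried on the next refill. Note that RQ1 walks backwards
 * through its skb array.
 */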
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

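/*
 * A CQE is accepted if no error bits are set, or if the only error bit
 * is a TCP checksum error on a frame without a TCP header (i.e. a
 * non-TCP packet the hardware checksummed anyway); everything else is
 * handled as a receive error by the caller.
 */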
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packets */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
		__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

	if (skb->dev->features & NETIF_F_LRO)
		lro_receive_skb(&pr->lro_mgr, skb, cqe);
	else
		netif_receive_skb(skb);
}

static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;
			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (dev->features & NETIF_F_LRO)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

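/*
 * SWQE_RESTART_CHECK is a magic wr_id used to probe whether a send queue
 * is still being processed by the hardware: check_sqs() below posts one
 * dummy, signalled SWQE per queue and waits for its completion;
 * ehea_proc_cqes() recognizes the wr_id and sets sq_restart_flag.
 */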
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;

		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}

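/*
 * Reap send completions from the send CQ, freeing the skbs attached to
 * SWQE2-type work requests and re-crediting swqe_avail. If the tx queue
 * was stopped on WQE exhaustion, it is woken once enough WQEs are free
 * again; the condition is re-checked under the tx lock to avoid racing
 * with the xmit path.
 */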
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

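/*
 * NAPI poll handler: drain send completions and receive WQEs, then try
 * to leave polled mode. Because new work can race with re-arming the CQ
 * event generation, both queues are re-checked after napi_complete();
 * if anything slipped in, napi_reschedule() pulls us back into polling.
 */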
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		pr_err("unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;

	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

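/*
 * TSO variant of the SWQE2 fill: only the eth/ip/tcp headers go into the
 * immediate-data area (the hardware needs them to replicate headers per
 * segment), and whatever remains of the linear skb data is referenced
 * through sg1entry.
 */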
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb_headlen(skb);
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb_headlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		pr_err("cannot handle fragmented headers\n");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

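/*
 * Broadcast addresses must be (de)registered with the hypervisor twice:
 * once for untagged traffic and once for all VLAN IDs. hcallid selects
 * between H_REG_BCMC and H_DEREG_BCMC.
 */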
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb_headlen(skb));
		imm_data += skb_headlen(skb);

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data, skb_frag_address(frag), frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

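/*
 * Transmit entry point. Small frames (up to SWQE3_MAX_IMM bytes) are
 * copied entirely into the WQE as immediate data (SWQE3) and freed right
 * away; a signalled completion is only requested for every sig_comp_iv-th
 * of them. Larger frames go out as SWQE2 with descriptors, are parked in
 * sq_skba until their (always signalled) completion, and are freed later
 * by ehea_proc_cqes().
 */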
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}

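/*
 * Walk the QP through its activation states with query/modify pairs:
 * INITIALIZED, then ENABLED | INITIALIZED, then ENABLED | RDY2SND. Each
 * transition is verified with a query before the next one is issued.
 */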
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
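
/*
 * Port bring-up order: create the port resources, set the default QP,
 * register interrupts, activate all QPs and pre-fill the receive
 * queues, then register the broadcast/multicast MAC with the firmware.
 * Tear-down on failure runs in reverse via the goto chain below.
 */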
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;
	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}
static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}
static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}
static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}
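
/*
 * Mark every send WQE with the PURGE flag so that completions for work
 * requests still in flight are discarded once the QP is disabled. This
 * walks a stack copy of the QP, so the original queue pointers are left
 * untouched.
 */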
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}
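
/*
 * Quiesce all QPs of a port for a memory (DLPAR) operation: purge the
 * send queue, clear the QP's ENABLED bit via modify_ehea_qp, and
 * deregister the shared memory regions that back the queues.
 */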
int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}
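
/*
 * After the kernel memory region has been re-registered, every posted
 * RWQE still refers to the old lkey and virtual mapping. Rewrite the
 * scatter-gather entries of RQ2 and RQ3 with the new lkey and remapped
 * skb data addresses, again operating on a stack copy of the QP.
 */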
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}
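
/*
 * Counterpart to ehea_stop_qps(): re-create the shared memory regions,
 * patch the receive WQEs, set the ENABLED bit again, and refill all
 * three receive queues.
 */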
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;
	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			ret = -EIO;
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			ret = -EIO;
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			ret = -EIO;
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}
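
/*
 * Worker for port->reset_task: take the port down and up again with TX
 * and NAPI disabled. dlpar_mem_lock serializes resets against
 * concurrent memory add/remove handling.
 */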
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);

	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
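
/*
 * LPAR memory layout changed: stop traffic on every active port, drop
 * the old kernel memory region, register a new one covering the updated
 * layout, and restart the ports. A failure here leaves the driver
 * inoperable, hence the loud error messages.
 */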
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}
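
/*
 * Query whether jumbo frames are enabled on the physical port and, if
 * not, try to switch them on via modify_ehea_port. *jumbo reports the
 * resulting state.
 */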
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame) {
			*jumbo = 1;
		} else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else {
		ret = -EINVAL;
	}

	free_page((unsigned long)cb4);
out:
	return ret;
}
static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats		= ehea_get_stats,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
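
/*
 * Allocate and register one logical port: net_device, multicast list,
 * sysfs port device, netdev ops/features, and the reset and stats
 * workers. On success the adapter's active port count is bumped so the
 * shared memory region stays registered.
 */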
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
	if (!dev) {
		pr_err("no mem for net_device\n");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps +
				     port->num_add_tx_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_RXCSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	if (use_lro)
		dev->features |= NETIF_F_LRO;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	memset(&port->stats, 0, sizeof(struct net_device_stats));
	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}
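
/*
 * sysfs hook for DLPAR port add. Assuming the adapter's ebus device
 * name, a port can be probed at runtime with something like
 *   echo <logical port id> > /sys/bus/ibmebus/devices/<adapter>/probe_port
 * (the exact path depends on the device tree node name).
 */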
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%u", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%u", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}
static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}
static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}
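
/*
 * Memory hotplug notifier: track which memory sections exist in the
 * driver's busmap and re-register the kernel MR when sections come or
 * go. Returning NOTIFY_BAD aborts the memory operation when the busmap
 * update fails.
 */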
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled\n");
		/* Readd canceled memory block - fall through */
	case MEM_ONLINE:
		pr_info("memory is going online\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
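
/*
 * Bounds-check the queue size module parameters; an out-of-range value
 * fails module load with -EINVAL.
 */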
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);
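
/*
 * Module entry point: after validating parameters and building the
 * memory busmap, register the reboot, memory and crash hooks before the
 * ebus driver itself, so that eHEA firmware resources can be released
 * even on abnormal shutdown paths.
 */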
int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		pr_info("failed registering reboot notifier\n");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		pr_info("failed registering memory remove notifier\n");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		pr_info("failed registering crash handler\n");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	int ret;

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);