ehea: Remove LRO support
/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no.  Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct platform_device *dev,
                                        const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
        .driver = {
                .name = "ehea",
                .owner = THIS_MODULE,
                .of_match_table = ehea_device_table,
        },
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
                        msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}

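/*
 * Rebuild the flat array of firmware handles (QPs, CQs, EQs and MRs) that
 * the crash/kexec path must be able to free without walking the driver's
 * data structures. The snapshot is swapped under ehea_fw_handles.lock; if
 * the new array cannot be allocated, the previous snapshot is kept.
 */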
static void ehea_update_firmware_handles(void)
{
        struct ehea_fw_handle_entry *arr = NULL;
        struct ehea_adapter *adapter;
        int num_adapters = 0;
        int num_ports = 0;
        int num_portres = 0;
        int i = 0;
        int num_fw_handles, k, l;

        /* Determine number of handles */
        mutex_lock(&ehea_fw_handles.lock);

        list_for_each_entry(adapter, &adapter_list, list) {
                num_adapters++;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_ports++;
                        num_portres += port->num_def_qps;
                }
        }

        num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

        if (num_fw_handles) {
                arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                if (num_adapters == 0)
                        break;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP) ||
                            (num_ports == 0))
                                continue;

                        for (l = 0; l < port->num_def_qps; l++) {
                                struct ehea_port_res *pr = &port->port_res[l];

                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->qp->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->eq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_mr.handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_mr.handle;
                        }
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = port->qp_eq->fw_handle;
                        num_ports--;
                }

                arr[i].adh = adapter->handle;
                arr[i++].fwh = adapter->neq->fw_handle;

                if (adapter->mr.handle) {
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = adapter->mr.handle;
                }
                num_adapters--;
        }

out_update:
        kfree(ehea_fw_handles.arr);
        ehea_fw_handles.arr = arr;
        ehea_fw_handles.num_entries = i;
out:
        mutex_unlock(&ehea_fw_handles.lock);
}

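/*
 * Mirror all active broadcast/multicast registrations into a flat array so
 * they can be replayed later, e.g. after a DLPAR memory operation. Runs
 * under ehea_bcmc_regs.lock with interrupts disabled, hence GFP_ATOMIC.
 */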
static void ehea_update_bcmc_registrations(void)
{
        unsigned long flags;
        struct ehea_bcmc_reg_entry *arr = NULL;
        struct ehea_adapter *adapter;
        struct ehea_mc_list *mc_entry;
        int num_registrations = 0;
        int i = 0;
        int k;

        spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

        /* Determine number of registrations */
        list_for_each_entry(adapter, &adapter_list, list)
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_registrations += 2; /* Broadcast registrations */

                        list_for_each_entry(mc_entry, &port->mc_list->list, list)
                                num_registrations += 2;
                }

        if (num_registrations) {
                arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        if (num_registrations == 0)
                                goto out_update;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_UNTAGGED;
                        arr[i++].macaddr = port->mac_addr;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_VLANID_ALL;
                        arr[i++].macaddr = port->mac_addr;
                        num_registrations -= 2;

                        list_for_each_entry(mc_entry,
                                            &port->mc_list->list, list) {
                                if (num_registrations == 0)
                                        goto out_update;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
                                arr[i++].macaddr = mc_entry->macaddr;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
                }
        }

out_update:
        kfree(ehea_bcmc_regs.arr);
        ehea_bcmc_regs.arr = arr;
        ehea_bcmc_regs.num_entries = i;
out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
                                        struct rtnl_link_stats64 *stats)
{
        struct ehea_port *port = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
                rx_bytes   += port->port_res[i].rx_bytes;
        }

        for (i = 0; i < port->num_def_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes   += port->port_res[i].tx_bytes;
        }

        stats->tx_packets = tx_packets;
        stats->rx_bytes = rx_bytes;
        stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;

        /* Merge the counters gathered asynchronously by ehea_update_stats()
         * and return the buffer we just filled, not the stale copy. */
        stats->multicast = port->stats.multicast;
        stats->rx_errors = port->stats.rx_errors;

        return stats;
}

static void ehea_update_stats(struct work_struct *work)
{
        struct ehea_port *port =
                container_of(work, struct ehea_port, stats_work.work);
        struct net_device *dev = port->netdev;
        struct rtnl_link_stats64 *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret;

        cb2 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb2) {
                netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
                goto resched;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                netdev_err(dev, "query_ehea_port failed\n");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;

out_herr:
        free_page((unsigned long)cb2);
resched:
        schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
}

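/*
 * Top up RQ1 with EHEA_L_PKT_SIZE skbs, walking backwards from 'index'
 * over the slots just consumed. While a transfer stop is in effect
 * (__EHEA_STOP_XFER) nothing is posted; the deficit is remembered in
 * os_skbs and made up on a later refill.
 */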
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                if (nr_of_wqes > 0)
                        pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                netdev_info(dev, "Unable to allocate enough skb in the array\n");
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        if (nr_rq1a > pr->rq1_skba.len) {
                netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
                return;
        }

        for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i]) {
                        netdev_info(dev, "Not enough memory to allocate skb array\n");
                        break;
                }
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, i - 1);
}

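/*
 * Common refill path for RQ2/RQ3: allocate one skb per missing WQE, map
 * it for the adapter, build the receive WQE, and ring the doorbell once
 * for the whole batch. Allocation shortfalls are recorded in os_skbs and
 * retried on the next refill.
 */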
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                                netdev_info(pr->port->netdev,
                                            "rq%i ran dry - no mem for skb\n",
                                            rq_nr);
                                ret = -ENOMEM;
                        }
                        break;
                }

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_kfree_skb(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE);
}

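/*
 * Extract the RQ number from the CQE and decide whether the completion is
 * usable. A TCP checksum error is tolerated when header_length is zero:
 * such frames are not TCP, and their checksum is recovered via the blind
 * checksum path in ehea_fill_skb().
 */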
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe,
                                 struct ehea_port_res *pr)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, dev);

        /* The packet was not an IPV4 packet so a complemented checksum was
           calculated. The value is found in the Internet Checksum field. */
        if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~cqe->inet_checksum_value);
        } else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

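/*
 * Hand out the skb posted for this completion while prefetching the next
 * array slot and the start of its packet data, which the next iteration
 * of the poll loop is likely to touch.
 */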
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetch(pref);
                prefetch(pref + EHEA_CACHE_LINE);
                prefetch(pref + EHEA_CACHE_LINE * 2);
                prefetch(pref + EHEA_CACHE_LINE * 3);
        }

        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);
        }

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        pr_err("Critical receive error for QP %d. Resetting port.\n",
                               pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                ehea_schedule_port_reset(pr->port);
                return 1;
        }

        return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
                          struct sk_buff *skb)
{
        if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

        netif_receive_skb(skb);
}

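/*
 * NAPI receive processing: drain up to 'budget' completions from the
 * receive queues. RQ1 completions carry the frame inline in the CQE
 * (low-latency path, copied out at offset 64); RQ2/RQ3 completions refer
 * to pre-posted skbs. All consumed WQEs are refilled before returning.
 */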
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {
                                /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        netif_info(port, rx_err, dev,
                                                  "LL rq1: skb=NULL\n");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb) {
                                                netdev_err(dev, "Not enough memory to allocate skb\n");
                                                break;
                                        }
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe, pr);
                        } else if (rq == 2) {
                                /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq2: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq2++;
                        } else {
                                /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq3: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq3++;
                        }

                        processed_bytes += skb->len;
                        ehea_proc_skb(pr, cqe, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }

        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}

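/*
 * Sanity check for the send path: a marker WQE (wr_id SWQE_RESTART_CHECK,
 * flagged for purge and signalled completion) is posted on every send
 * queue. ehea_proc_cqes() recognizes the marker and sets sq_restart_flag;
 * reset_sq_restart_flag() clears the flags and wakes the waiter. A timeout
 * is treated as HW/SW queues being out of sync and triggers a port reset.
 */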
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
        wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
        struct ehea_swqe *swqe;
        int swqe_index;
        int i, k;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
                atomic_dec(&pr->swqe_avail);

                swqe->tx_control |= EHEA_SWQE_PURGE;
                swqe->wr_id = SWQE_RESTART_CHECK;
                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
                swqe->immediate_data_length = 80;

                ehea_post_swqe(pr->qp, swqe);

                ret = wait_event_timeout(port->restart_wq,
                                         pr->sq_restart_flag == 0,
                                         msecs_to_jiffies(100));

                if (!ret) {
                        pr_err("HW/SW queues out of sync\n");
                        ehea_schedule_port_reset(pr->port);
                        return;
                }
        }
}


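/*
 * Drain the send CQ: free skbs of completed SWQE2 transmissions, account
 * the send WQEs that became available again, and wake the TX queue if it
 * was stopped and enough WQEs have been recycled.
 */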
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
                                                pr - &pr->port->port_res[0]);

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();

                if (cqe->wr_id == SWQE_RESTART_CHECK) {
                        pr->sq_restart_flag = 1;
                        swqe_av++;
                        break;
                }

                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        pr_err("Bad send completion status=0x%04X\n",
                               cqe->status);

                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

                        if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                                pr_err("Resetting port\n");
                                ehea_schedule_port_reset(pr->port);
                                break;
                        }
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {

                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_kfree_skb(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        wake_up(&pr->port->swqe_avail_wq);

        return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

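/*
 * NAPI poll handler. If the budget was not exhausted, napi_complete() is
 * called, CQ event generation is re-armed, and both CQs are polled once
 * more to close the race with completions arriving in between; if anything
 * is pending and the NAPI context can be rescheduled, processing continues.
 */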
static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                                napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int wqe_index;
        int rx = 0;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while (rx != budget) {
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                rmb();
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!napi_reschedule(napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        int i;

        for (i = 0; i < port->num_def_qps; i++)
                napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        napi_schedule(&pr->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;
        u64 resource_type, aer, aerr;
        int reset_port = 0;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
                       eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;

                resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                                &aer, &aerr);

                if (resource_type == EHEA_AER_RESTYPE_QP) {
                        if ((aer & EHEA_AER_RESET_MASK) ||
                            (aerr & EHEA_AERR_RESET_MASK))
                                reset_port = 1;
                } else
                        reset_port = 1;   /* Reset in case of CQ or EQ error */

                eqe = ehea_poll_eq(port->qp_eq);
        }

        if (reset_port) {
                pr_err("Resetting port\n");
                ehea_schedule_port_reset(port);
        }

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        /* may be called via ehea_neq_tasklet() */
        cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!cb0) {
                pr_err("no mem for cb0\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb4) {
                pr_err("no mem for cb4\n");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        pr_err("Failed sensing port speed\n");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        pr_info("Hypervisor denied setting port speed\n");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        pr_err("Failed setting port speed\n");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        free_page((unsigned long)cb4);
out:
        return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;
        struct net_device *dev;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);

        /* Check the port before touching port->netdev; an event for an
         * unknown port number must not dereference a NULL pointer. */
        if (!port) {
                pr_err("unknown portnum %x\n", portnum);
                return;
        }
        dev = port->netdev;

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */
                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(dev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        netdev_err(dev, "failed resensing port attributes\n");
                                        break;
                                }

                                netif_info(port, link, dev,
                                           "Logical port up: %dMbps %s Duplex\n",
                                           port->port_speed,
                                           port->full_duplex == 1 ?
                                           "Full" : "Half");

                                netif_carrier_on(dev);
                                netif_wake_queue(dev);
                        }
                } else
                        if (netif_carrier_ok(dev)) {
                                netif_info(port, link, dev,
                                           "Logical port down\n");
                                netif_carrier_off(dev);
                                netif_tx_disable(dev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        netif_info(port, link, dev,
                                   "Physical port up\n");
                        if (prop_carrier_state)
                                netif_carrier_on(dev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        netif_info(port, link, dev,
                                   "Physical port down\n");
                        if (prop_carrier_state)
                                netif_carrier_off(dev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        netdev_info(dev,
                                    "External switch port is primary port\n");
                else
                        netdev_info(dev,
                                    "External switch port is backup port\n");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                netdev_err(dev, "Adapter malfunction\n");
                break;
        case EHEA_EC_PORT_MALFUNC:
                netdev_info(dev, "Port malfunction\n");
                netif_carrier_off(dev);
                netif_tx_disable(dev);
                break;
        default:
                netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
                break;
        }
}

static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        pr_debug("eqe=%p\n", eqe);

        while (eqe) {
                pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                pr_debug("next eqe=%p\n", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;
        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ehea_init_fill_rq1(pr, pr->rq1_skba.len);

        ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;


        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  IRQF_DISABLED, port->int_aff_name, port);
        if (ret) {
                netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
                           port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        netif_info(port, ifup, dev,
                   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
                   port->qp_eq->attr.ist1);


        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          IRQF_DISABLED, pr->int_send_name,
                                          pr);
                if (ret) {
                        netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
                                   i, pr->eq->attr.ist1);
                        goto out_free_req;
                }
                netif_info(port, ifup, dev,
                           "irq_handle 0x%X for function ehea_queue_int %d registered\n",
                           pr->eq->attr.ist1, i);
        }
out:
        return ret;


out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;
                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */

        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                netif_info(port, intr, dev,
                           "free send irq for res %d with handle 0x%X\n",
                           i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        netif_info(port, intr, dev,
                   "associated event interrupt for handle 0x%X freed\n",
                   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        pr_err("Generating SMRS failed\n");
        return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

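/*
 * (Re)build one port resource set: EQ, receive/send CQs, QP and the skb
 * bookkeeping arrays. The traffic counters are carried across the memset
 * so that a port reset does not clear the interface statistics.
 */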
1457 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1458                               struct port_res_cfg *pr_cfg, int queue_token)
1459 {
1460         struct ehea_adapter *adapter = port->adapter;
1461         enum ehea_eq_type eq_type = EHEA_EQ;
1462         struct ehea_qp_init_attr *init_attr = NULL;
1463         int ret = -EIO;
1464         u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1465
1466         tx_bytes = pr->tx_bytes;
1467         tx_packets = pr->tx_packets;
1468         rx_bytes = pr->rx_bytes;
1469         rx_packets = pr->rx_packets;
1470
1471         memset(pr, 0, sizeof(struct ehea_port_res));
1472
1473         pr->tx_bytes = tx_bytes;
1474         pr->tx_packets = tx_packets;
1475         pr->rx_bytes = rx_bytes;
1476         pr->rx_packets = rx_packets;
1477
1478         pr->port = port;
1479
1480         pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1481         if (!pr->eq) {
1482                 pr_err("create_eq failed (eq)\n");
1483                 goto out_free;
1484         }
1485
1486         pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1487                                      pr->eq->fw_handle,
1488                                      port->logical_port_id);
1489         if (!pr->recv_cq) {
1490                 pr_err("create_cq failed (cq_recv)\n");
1491                 goto out_free;
1492         }
1493
1494         pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1495                                      pr->eq->fw_handle,
1496                                      port->logical_port_id);
1497         if (!pr->send_cq) {
1498                 pr_err("create_cq failed (cq_send)\n");
1499                 goto out_free;
1500         }
1501
1502         if (netif_msg_ifup(port))
1503                 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1504                         pr->send_cq->attr.act_nr_of_cqes,
1505                         pr->recv_cq->attr.act_nr_of_cqes);
1506
1507         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1508         if (!init_attr) {
1509                 ret = -ENOMEM;
1510                 pr_err("no mem for ehea_qp_init_attr\n");
1511                 goto out_free;
1512         }
1513
1514         init_attr->low_lat_rq1 = 1;
1515         init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
1516         init_attr->rq_count = 3;
1517         init_attr->qp_token = queue_token;
1518         init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1519         init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1520         init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1521         init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1522         init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1523         init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1524         init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1525         init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1526         init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1527         init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1528         init_attr->port_nr = port->logical_port_id;
1529         init_attr->send_cq_handle = pr->send_cq->fw_handle;
1530         init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1531         init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1532
1533         pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1534         if (!pr->qp) {
1535                 pr_err("create_qp failed\n");
1536                 ret = -EIO;
1537                 goto out_free;
1538         }
1539
1540         if (netif_msg_ifup(port))
1541                 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1542                         init_attr->qp_nr,
1543                         init_attr->act_nr_send_wqes,
1544                         init_attr->act_nr_rwqes_rq1,
1545                         init_attr->act_nr_rwqes_rq2,
1546                         init_attr->act_nr_rwqes_rq3);
1547
1548         pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1549
1550         ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1551         ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1552         ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1553         ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1554         if (ret)
1555                 goto out_free;
1556
1557         pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1558         if (ehea_gen_smrs(pr) != 0) {
1559                 ret = -EIO;
1560                 goto out_free;
1561         }
1562
1563         atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1564
1565         kfree(init_attr);
1566
1567         netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1568
1569         ret = 0;
1570         goto out;
1571
1572 out_free:
1573         kfree(init_attr);
1574         vfree(pr->sq_skba.arr);
1575         vfree(pr->rq1_skba.arr);
1576         vfree(pr->rq2_skba.arr);
1577         vfree(pr->rq3_skba.arr);
1578         ehea_destroy_qp(pr->qp);
1579         ehea_destroy_cq(pr->send_cq);
1580         ehea_destroy_cq(pr->recv_cq);
1581         ehea_destroy_eq(pr->eq);
1582 out:
1583         return ret;
1584 }
1585
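/*
 * Bring-up order in ehea_init_port_res() is strict: EQ first, then the
 * receive and send CQs bound to that EQ, then the QP bound to both CQs,
 * and finally the skb rings and shared memory regions.  The single
 * out_free label can serve every failure point because pr was zeroed
 * above and the destroy helpers accept a NULL argument, so tearing down
 * objects that were never created is a no-op.
 */
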
1586 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1587 {
1588         int ret, i;
1589
1590         if (pr->qp)
1591                 netif_napi_del(&pr->napi);
1592
1593         ret = ehea_destroy_qp(pr->qp);
1594
1595         if (!ret) {
1596                 ehea_destroy_cq(pr->send_cq);
1597                 ehea_destroy_cq(pr->recv_cq);
1598                 ehea_destroy_eq(pr->eq);
1599
1600                 for (i = 0; i < pr->rq1_skba.len; i++)
1601                         if (pr->rq1_skba.arr[i])
1602                                 dev_kfree_skb(pr->rq1_skba.arr[i]);
1603
1604                 for (i = 0; i < pr->rq2_skba.len; i++)
1605                         if (pr->rq2_skba.arr[i])
1606                                 dev_kfree_skb(pr->rq2_skba.arr[i]);
1607
1608                 for (i = 0; i < pr->rq3_skba.len; i++)
1609                         if (pr->rq3_skba.arr[i])
1610                                 dev_kfree_skb(pr->rq3_skba.arr[i]);
1611
1612                 for (i = 0; i < pr->sq_skba.len; i++)
1613                         if (pr->sq_skba.arr[i])
1614                                 dev_kfree_skb(pr->sq_skba.arr[i]);
1615
1616                 vfree(pr->rq1_skba.arr);
1617                 vfree(pr->rq2_skba.arr);
1618                 vfree(pr->rq3_skba.arr);
1619                 vfree(pr->sq_skba.arr);
1620                 ret = ehea_rem_smrs(pr);
1621         }
1622         return ret;
1623 }
1624
1625 static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
1626                                   u32 lkey)
1627 {
1628         int skb_data_size = skb_headlen(skb);
1629         u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1630         struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1631         unsigned int immediate_len = SWQE2_MAX_IMM;
1632
1633         swqe->descriptors = 0;
1634
1635         if (skb_is_gso(skb)) {
1636                 swqe->tx_control |= EHEA_SWQE_TSO;
1637                 swqe->mss = skb_shinfo(skb)->gso_size;
1638                 /*
1639                  * For TSO packets we only copy the headers into the
1640                  * immediate area.
1641                  */
1642                 immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1643         }
1644
1645         if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
1646                 skb_copy_from_linear_data(skb, imm_data, immediate_len);
1647                 swqe->immediate_data_length = immediate_len;
1648
1649                 if (skb_data_size > immediate_len) {
1650                         sg1entry->l_key = lkey;
1651                         sg1entry->len = skb_data_size - immediate_len;
1652                         sg1entry->vaddr =
1653                                 ehea_map_vaddr(skb->data + immediate_len);
1654                         swqe->descriptors++;
1655                 }
1656         } else {
1657                 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1658                 swqe->immediate_data_length = skb_data_size;
1659         }
1660 }
1661
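/*
 * Worked example for the TSO branch above: with no VLAN header, a
 * 20-byte IPv4 header and a 20-byte TCP header, immediate_len is
 * ETH_HLEN (14) + 20 + 20 = 54 bytes.  Only the protocol headers are
 * copied into the immediate area; the rest of the linear region is
 * described by sg1entry and DMA'd directly.
 */
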
1662 static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1663                                     struct ehea_swqe *swqe, u32 lkey)
1664 {
1665         struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1666         skb_frag_t *frag;
1667         int nfrags, sg1entry_contains_frag_data, i;
1668
1669         nfrags = skb_shinfo(skb)->nr_frags;
1670         sg1entry = &swqe->u.immdata_desc.sg_entry;
1671         sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1672         sg1entry_contains_frag_data = 0;
1673
1674         write_swqe2_immediate(skb, swqe, lkey);
1675
1676         /* write descriptors */
1677         if (nfrags > 0) {
1678                 if (swqe->descriptors == 0) {
1679                         /* sg1entry not yet used */
1680                         frag = &skb_shinfo(skb)->frags[0];
1681
1682                         /* copy sg1entry data */
1683                         sg1entry->l_key = lkey;
1684                         sg1entry->len = frag->size;
1685                         sg1entry->vaddr =
1686                                 ehea_map_vaddr(skb_frag_address(frag));
1687                         swqe->descriptors++;
1688                         sg1entry_contains_frag_data = 1;
1689                 }
1690
1691                 for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1692
1693                         frag = &skb_shinfo(skb)->frags[i];
1694                         sgentry = &sg_list[i - sg1entry_contains_frag_data];
1695
1696                         sgentry->l_key = lkey;
1697                         sgentry->len = frag->size;
1698                         sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
1699                         swqe->descriptors++;
1700                 }
1701         }
1702 }
1703
1704 static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1705 {
1706         int ret = 0;
1707         u64 hret;
1708         u8 reg_type;
1709
1710         /* De/Register untagged packets */
1711         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1712         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1713                                      port->logical_port_id,
1714                                      reg_type, port->mac_addr, 0, hcallid);
1715         if (hret != H_SUCCESS) {
1716                 pr_err("%sregistering bc address failed (untagged)\n",
1717                        hcallid == H_REG_BCMC ? "" : "de");
1718                 ret = -EIO;
1719                 goto out_herr;
1720         }
1721
1722         /* De/Register VLAN packets */
1723         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1724         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1725                                      port->logical_port_id,
1726                                      reg_type, port->mac_addr, 0, hcallid);
1727         if (hret != H_SUCCESS) {
1728                 pr_err("%sregistering bc address failed (vlan)\n",
1729                        hcallid == H_REG_BCMC ? "" : "de");
1730                 ret = -EIO;
1731         }
1732 out_herr:
1733         return ret;
1734 }
1735
1736 static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1737 {
1738         struct ehea_port *port = netdev_priv(dev);
1739         struct sockaddr *mac_addr = sa;
1740         struct hcp_ehea_port_cb0 *cb0;
1741         int ret;
1742         u64 hret;
1743
1744         if (!is_valid_ether_addr(mac_addr->sa_data)) {
1745                 ret = -EADDRNOTAVAIL;
1746                 goto out;
1747         }
1748
1749         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1750         if (!cb0) {
1751                 pr_err("no mem for cb0\n");
1752                 ret = -ENOMEM;
1753                 goto out;
1754         }
1755
1756         memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1757
1758         cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1759
1760         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1761                                        port->logical_port_id, H_PORT_CB0,
1762                                        EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1763         if (hret != H_SUCCESS) {
1764                 ret = -EIO;
1765                 goto out_free;
1766         }
1767
1768         memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1769
1770         /* Deregister old MAC in pHYP */
1771         if (port->state == EHEA_PORT_UP) {
1772                 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1773                 if (ret)
1774                         goto out_upregs;
1775         }
1776
1777         port->mac_addr = cb0->port_mac_addr << 16;
1778
1779         /* Register new MAC in pHYP */
1780         if (port->state == EHEA_PORT_UP) {
1781                 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1782                 if (ret)
1783                         goto out_upregs;
1784         }
1785
1786         ret = 0;
1787
1788 out_upregs:
1789         ehea_update_bcmc_registrations();
1790 out_free:
1791         free_page((unsigned long)cb0);
1792 out:
1793         return ret;
1794 }
1795
1796 static void ehea_promiscuous_error(u64 hret, int enable)
1797 {
1798         if (hret == H_AUTHORITY)
1799                 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1800                         enable == 1 ? "en" : "dis");
1801         else
1802                 pr_err("failed %sabling promiscuous mode\n",
1803                        enable == 1 ? "en" : "dis");
1804 }
1805
1806 static void ehea_promiscuous(struct net_device *dev, int enable)
1807 {
1808         struct ehea_port *port = netdev_priv(dev);
1809         struct hcp_ehea_port_cb7 *cb7;
1810         u64 hret;
1811
1812         if (enable == port->promisc)
1813                 return;
1814
1815         cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1816         if (!cb7) {
1817                 pr_err("no mem for cb7\n");
1818                 goto out;
1819         }
1820
1821         /* Modify Pxs_DUCQPN in CB7 */
1822         cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1823
1824         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1825                                        port->logical_port_id,
1826                                        H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1827         if (hret) {
1828                 ehea_promiscuous_error(hret, enable);
1829                 goto out;
1830         }
1831
1832         port->promisc = enable;
1833 out:
1834         free_page((unsigned long)cb7);
1835 }
1836
1837 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1838                                      u32 hcallid)
1839 {
1840         u64 hret;
1841         u8 reg_type;
1842
1843         reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1844                  | EHEA_BCMC_UNTAGGED;
1845
1846         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1847                                      port->logical_port_id,
1848                                      reg_type, mc_mac_addr, 0, hcallid);
1849         if (hret)
1850                 goto out;
1851
1852         reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1853                  | EHEA_BCMC_VLANID_ALL;
1854
1855         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1856                                      port->logical_port_id,
1857                                      reg_type, mc_mac_addr, 0, hcallid);
1858 out:
1859         return hret;
1860 }
1861
1862 static int ehea_drop_multicast_list(struct net_device *dev)
1863 {
1864         struct ehea_port *port = netdev_priv(dev);
1865         struct ehea_mc_list *mc_entry = port->mc_list;
1866         struct list_head *pos;
1867         struct list_head *temp;
1868         int ret = 0;
1869         u64 hret;
1870
1871         list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1872                 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1873
1874                 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1875                                                  H_DEREG_BCMC);
1876                 if (hret) {
1877                         pr_err("failed deregistering mcast MAC\n");
1878                         ret = -EIO;
1879                 }
1880
1881                 list_del(pos);
1882                 kfree(mc_entry);
1883         }
1884         return ret;
1885 }
1886
1887 static void ehea_allmulti(struct net_device *dev, int enable)
1888 {
1889         struct ehea_port *port = netdev_priv(dev);
1890         u64 hret;
1891
1892         if (!port->allmulti) {
1893                 if (enable) {
1894                         /* Enable ALLMULTI */
1895                         ehea_drop_multicast_list(dev);
1896                         hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1897                         if (!hret)
1898                                 port->allmulti = 1;
1899                         else
1900                                 netdev_err(dev,
1901                                            "failed enabling IFF_ALLMULTI\n");
1902                 }
1903         } else if (!enable) {
1904                 /* Disable ALLMULTI */
1905                 hret = ehea_multicast_reg_helper(port, 0,
1906                                                  H_DEREG_BCMC);
1907                 if (!hret)
1908                         port->allmulti = 0;
1909                 else
1910                         netdev_err(dev,
1911                                    "failed disabling IFF_ALLMULTI\n");
1912         }
1913 }
1914
1915 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1916 {
1917         struct ehea_mc_list *ehea_mcl_entry;
1918         u64 hret;
1919
1920         ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1921         if (!ehea_mcl_entry) {
1922                 pr_err("no mem for mcl_entry\n");
1923                 return;
1924         }
1925
1926         INIT_LIST_HEAD(&ehea_mcl_entry->list);
1927
1928         memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1929
1930         hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1931                                          H_REG_BCMC);
1932         if (!hret)
1933                 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1934         else {
1935                 pr_err("failed registering mcast MAC\n");
1936                 kfree(ehea_mcl_entry);
1937         }
1938 }
1939
1940 static void ehea_set_multicast_list(struct net_device *dev)
1941 {
1942         struct ehea_port *port = netdev_priv(dev);
1943         struct netdev_hw_addr *ha;
1944         int ret;
1945
1946         if (dev->flags & IFF_PROMISC) {
1947                 ehea_promiscuous(dev, 1);
1948                 return;
1949         }
1950         ehea_promiscuous(dev, 0);
1951
1952         if (dev->flags & IFF_ALLMULTI) {
1953                 ehea_allmulti(dev, 1);
1954                 goto out;
1955         }
1956         ehea_allmulti(dev, 0);
1957
1958         if (!netdev_mc_empty(dev)) {
1959                 ret = ehea_drop_multicast_list(dev);
1960                 if (ret) {
1961                         /* Dropping the current multicast list failed.
1962                          * Enabling ALL_MULTI is the best we can do.
1963                          */
1964                         ehea_allmulti(dev, 1);
1965                 }
1966
1967                 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1968                         pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
1969                                 port->adapter->max_mc_mac);
1970                         goto out;
1971                 }
1972
1973                 netdev_for_each_mc_addr(ha, dev)
1974                         ehea_add_multicast_entry(port, ha->addr);
1975
1976         }
1977 out:
1978         ehea_update_bcmc_registrations();
1979 }
1980
1981 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1982 {
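        /* 68 is the minimum IPv4 MTU (RFC 791: 60-byte header + 8-byte fragment) */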
1983         if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1984                 return -EINVAL;
1985         dev->mtu = new_mtu;
1986         return 0;
1987 }
1988
1989 static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1990 {
1991         swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1992
1993         if (skb->protocol != htons(ETH_P_IP))
1994                 return;
1995
1996         if (skb->ip_summed == CHECKSUM_PARTIAL)
1997                 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
1998
1999         swqe->ip_start = skb_network_offset(skb);
2000         swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
2001
2002         switch (ip_hdr(skb)->protocol) {
2003         case IPPROTO_UDP:
2004                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2005                         swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2006
2007                 swqe->tcp_offset = swqe->ip_end + 1 +
2008                                    offsetof(struct udphdr, check);
2009                 swqe->tcp_end = skb->len - 1;
2010                 break;
2011
2012         case IPPROTO_TCP:
2013                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2014                         swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2015
2016                 swqe->tcp_offset = swqe->ip_end + 1 +
2017                                    offsetof(struct tcphdr, check);
2018                 swqe->tcp_end = skb->len - 1;
2019                 break;
2020         }
2021 }
2022
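/*
 * Offset arithmetic in xmit_common(), worked through for the common case
 * of an untagged frame with a 20-byte IPv4 header: ip_start = 14
 * (ETH_HLEN), ip_end = 14 + 20 - 1 = 33, and for TCP
 * tcp_offset = 33 + 1 + offsetof(struct tcphdr, check) = 34 + 16 = 50,
 * the absolute byte offset of the checksum field the hardware fills in.
 * UDP reuses the same descriptor slot with its own checksum offset
 * (offsetof(struct udphdr, check) = 6), which is why the UDP case also
 * sets EHEA_SWQE_TCP_CHECKSUM.
 */
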
2023 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2024                        struct ehea_swqe *swqe, u32 lkey)
2025 {
2026         swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
2027
2028         xmit_common(skb, swqe);
2029
2030         write_swqe2_data(skb, dev, swqe, lkey);
2031 }
2032
2033 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2034                        struct ehea_swqe *swqe)
2035 {
2036         u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2037
2038         xmit_common(skb, swqe);
2039
2040         if (!skb->data_len)
2041                 skb_copy_from_linear_data(skb, imm_data, skb->len);
2042         else
2043                 skb_copy_bits(skb, 0, imm_data, skb->len);
2044
2045         swqe->immediate_data_length = skb->len;
2046         dev_kfree_skb(skb);
2047 }
2048
2049 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2050 {
2051         struct ehea_port *port = netdev_priv(dev);
2052         struct ehea_swqe *swqe;
2053         u32 lkey;
2054         int swqe_index;
2055         struct ehea_port_res *pr;
2056         struct netdev_queue *txq;
2057
2058         pr = &port->port_res[skb_get_queue_mapping(skb)];
2059         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2060
2061         swqe = ehea_get_swqe(pr->qp, &swqe_index);
2062         memset(swqe, 0, SWQE_HEADER_SIZE);
2063         atomic_dec(&pr->swqe_avail);
2064
2065         if (vlan_tx_tag_present(skb)) {
2066                 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2067                 swqe->vlan_tag = vlan_tx_tag_get(skb);
2068         }
2069
2070         pr->tx_packets++;
2071         pr->tx_bytes += skb->len;
2072
2073         if (skb->len <= SWQE3_MAX_IMM) {
2074                 u32 sig_iv = port->sig_comp_iv;
2075                 u32 swqe_num = pr->swqe_id_counter;
2076                 ehea_xmit3(skb, dev, swqe);
2077                 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2078                         | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2079                 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2080                         swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2081                                                       sig_iv);
2082                         swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2083                         pr->swqe_ll_count = 0;
2084                 } else
2085                         pr->swqe_ll_count += 1;
2086         } else {
2087                 swqe->wr_id =
2088                         EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2089                       | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2090                       | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2091                       | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2092                 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2093
2094                 pr->sq_skba.index++;
2095                 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2096
2097                 lkey = pr->send_mr.lkey;
2098                 ehea_xmit2(skb, dev, swqe, lkey);
2099                 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2100         }
2101         pr->swqe_id_counter += 1;
2102
2103         netif_info(port, tx_queued, dev,
2104                    "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2105         if (netif_msg_tx_queued(port))
2106                 ehea_dump(swqe, 512, "swqe");
2107
2108         if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2109                 netif_tx_stop_queue(txq);
2110                 swqe->tx_control |= EHEA_SWQE_PURGE;
2111         }
2112
2113         ehea_post_swqe(pr->qp, swqe);
2114
2115         if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2116                 pr->p_stats.queue_stopped++;
2117                 netif_tx_stop_queue(txq);
2118         }
2119
2120         return NETDEV_TX_OK;
2121 }
2122
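/*
 * Completion moderation in ehea_start_xmit(): a fully-immediate SWQE3
 * send leaves nothing to free afterwards, so a signalled completion is
 * requested only every sig_comp_iv-th descriptor (sig_comp_iv is set to
 * sq_entries / 10 in ehea_setup_single_port()).  An SWQE2 send keeps the
 * skb mapped for DMA and therefore always sets
 * EHEA_SWQE_SIGNALLED_COMPLETION so the buffer can be released when its
 * CQE arrives.
 */
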
2123 static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2124 {
2125         struct ehea_port *port = netdev_priv(dev);
2126         struct ehea_adapter *adapter = port->adapter;
2127         struct hcp_ehea_port_cb1 *cb1;
2128         int index;
2129         u64 hret;
2130
2131         cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2132         if (!cb1) {
2133                 pr_err("no mem for cb1\n");
2134                 goto out;
2135         }
2136
2137         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2138                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2139         if (hret != H_SUCCESS) {
2140                 pr_err("query_ehea_port failed\n");
2141                 goto out;
2142         }
2143
2144         index = (vid / 64);
2145         cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2146
2147         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2148                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2149         if (hret != H_SUCCESS)
2150                 pr_err("modify_ehea_port failed\n");
2151 out:
2152         free_page((unsigned long)cb1);
2153         return;
2154 }
2155
2156 static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2157 {
2158         struct ehea_port *port = netdev_priv(dev);
2159         struct ehea_adapter *adapter = port->adapter;
2160         struct hcp_ehea_port_cb1 *cb1;
2161         int index;
2162         u64 hret;
2163
2164         cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2165         if (!cb1) {
2166                 pr_err("no mem for cb1\n");
2167                 goto out;
2168         }
2169
2170         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2171                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2172         if (hret != H_SUCCESS) {
2173                 pr_err("query_ehea_port failed\n");
2174                 goto out;
2175         }
2176
2177         index = (vid / 64);
2178         cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2179
2180         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2181                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2182         if (hret != H_SUCCESS)
2183                 pr_err("modify_ehea_port failed\n");
2184 out:
2185         free_page((unsigned long)cb1);
2186 }
2187
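/*
 * The VLAN filter is an array of u64 words indexed MSB-first, so for
 * example vid 100 maps to vlan_filter[100 / 64] = vlan_filter[1], bit
 * 0x8000000000000000 >> (100 & 0x3F), i.e. bit 36 counted from the most
 * significant end.  ehea_vlan_rx_add_vid() sets that bit and
 * ehea_vlan_rx_kill_vid() clears it again with the inverted mask.
 */
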
2188 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2189 {
2190         int ret = -EIO;
2191         u64 hret;
2192         u16 dummy16 = 0;
2193         u64 dummy64 = 0;
2194         struct hcp_modify_qp_cb0 *cb0;
2195
2196         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2197         if (!cb0) {
2198                 ret = -ENOMEM;
2199                 goto out;
2200         }
2201
2202         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2203                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2204         if (hret != H_SUCCESS) {
2205                 pr_err("query_ehea_qp failed (1)\n");
2206                 goto out;
2207         }
2208
2209         cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2210         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2211                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2212                                      &dummy64, &dummy64, &dummy16, &dummy16);
2213         if (hret != H_SUCCESS) {
2214                 pr_err("modify_ehea_qp failed (1)\n");
2215                 goto out;
2216         }
2217
2218         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2219                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2220         if (hret != H_SUCCESS) {
2221                 pr_err("query_ehea_qp failed (2)\n");
2222                 goto out;
2223         }
2224
2225         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2226         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2227                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2228                                      &dummy64, &dummy64, &dummy16, &dummy16);
2229         if (hret != H_SUCCESS) {
2230                 pr_err("modify_ehea_qp failed (2)\n");
2231                 goto out;
2232         }
2233
2234         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2235                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2236         if (hret != H_SUCCESS) {
2237                 pr_err("query_ehea_qp failed (3)\n");
2238                 goto out;
2239         }
2240
2241         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2242         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2243                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2244                                      &dummy64, &dummy64, &dummy16, &dummy16);
2245         if (hret != H_SUCCESS) {
2246                 pr_err("modify_ehea_qp failed (3)\n");
2247                 goto out;
2248         }
2249
2250         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2251                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2252         if (hret != H_SUCCESS) {
2253                 pr_err("query_ehea_qp failed (4)\n");
2254                 goto out;
2255         }
2256
2257         ret = 0;
2258 out:
2259         free_page((unsigned long)cb0);
2260         return ret;
2261 }
2262
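/*
 * ehea_activate_qp() steps the firmware QP through its states one
 * transition at a time -- INITIALIZED, then ENABLED, then RDY2SND --
 * re-reading the control block before each step because
 * ehea_h_modify_ehea_qp() takes the full qp_ctl_reg value rather than
 * individual flag updates.  The final query is a consistency check only.
 */
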
2263 static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2264 {
2265         int ret, i;
2266         struct port_res_cfg pr_cfg;
2267         enum ehea_eq_type eq_type = EHEA_EQ;
2268
2269         port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2270                                    EHEA_MAX_ENTRIES_EQ, 1);
2271         if (!port->qp_eq) {
2272                 ret = -EINVAL;
2273                 pr_err("ehea_create_eq failed (qp_eq)\n");
2274                 goto out_kill_eq;
2275         }
2276
2277         pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2278         pr_cfg.max_entries_scq = sq_entries * 2;
2279         pr_cfg.max_entries_sq = sq_entries;
2280         pr_cfg.max_entries_rq1 = rq1_entries;
2281         pr_cfg.max_entries_rq2 = rq2_entries;
2282         pr_cfg.max_entries_rq3 = rq3_entries;
2283
2291         for (i = 0; i < def_qps; i++) {
2292                 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2293                 if (ret)
2294                         goto out_clean_pr;
2295         }
2302
2303         return 0;
2304
2305 out_clean_pr:
2306         while (--i >= 0)
2307                 ehea_clean_portres(port, &port->port_res[i]);
2308
2309 out_kill_eq:
2310         ehea_destroy_eq(port->qp_eq);
2311         return ret;
2312 }
2313
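/*
 * Queue sizing above: the receive CQ must be able to hold one completion
 * for every receive buffer posted across RQ1-RQ3, hence
 * rq1 + rq2 + rq3 entries, while the send CQ is sized at twice the send
 * queue so that completion processing can lag a full window of in-flight
 * sends without overflowing.
 */
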
2314 static int ehea_clean_all_portres(struct ehea_port *port)
2315 {
2316         int ret = 0;
2317         int i;
2318
2319         for (i = 0; i < port->num_def_qps; i++)
2320                 ret |= ehea_clean_portres(port, &port->port_res[i]);
2321
2322         ret |= ehea_destroy_eq(port->qp_eq);
2323
2324         return ret;
2325 }
2326
2327 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2328 {
2329         if (adapter->active_ports)
2330                 return;
2331
2332         ehea_rem_mr(&adapter->mr);
2333 }
2334
2335 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2336 {
2337         if (adapter->active_ports)
2338                 return 0;
2339
2340         return ehea_reg_kernel_mr(adapter, &adapter->mr);
2341 }
2342
2343 static int ehea_up(struct net_device *dev)
2344 {
2345         int ret, i;
2346         struct ehea_port *port = netdev_priv(dev);
2347
2348         if (port->state == EHEA_PORT_UP)
2349                 return 0;
2350
2351         ret = ehea_port_res_setup(port, port->num_def_qps);
2352         if (ret) {
2353                 netdev_err(dev, "ehea_port_res_setup failed\n");
2354                 goto out;
2355         }
2356
2357         /* Set default QP for this port */
2358         ret = ehea_configure_port(port);
2359         if (ret) {
2360                 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2361                 goto out_clean_pr;
2362         }
2363
2364         ret = ehea_reg_interrupts(dev);
2365         if (ret) {
2366                 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2367                 goto out_clean_pr;
2368         }
2369
2370         for (i = 0; i < port->num_def_qps; i++) {
2371                 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2372                 if (ret) {
2373                         netdev_err(dev, "activate_qp failed\n");
2374                         goto out_free_irqs;
2375                 }
2376         }
2377
2378         for (i = 0; i < port->num_def_qps; i++) {
2379                 ret = ehea_fill_port_res(&port->port_res[i]);
2380                 if (ret) {
2381                         netdev_err(dev, "ehea_fill_port_res failed\n");
2382                         goto out_free_irqs;
2383                 }
2384         }
2385
2386         ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2387         if (ret) {
2388                 ret = -EIO;
2389                 goto out_free_irqs;
2390         }
2391
2392         port->state = EHEA_PORT_UP;
2393
2394         ret = 0;
2395         goto out;
2396
2397 out_free_irqs:
2398         ehea_free_interrupts(dev);
2399
2400 out_clean_pr:
2401         ehea_clean_all_portres(port);
2402 out:
2403         if (ret)
2404                 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2405
2406         ehea_update_bcmc_registrations();
2407         ehea_update_firmware_handles();
2408
2409         return ret;
2410 }
2411
2412 static void port_napi_disable(struct ehea_port *port)
2413 {
2414         int i;
2415
2416         for (i = 0; i < port->num_def_qps; i++)
2417                 napi_disable(&port->port_res[i].napi);
2418 }
2419
2420 static void port_napi_enable(struct ehea_port *port)
2421 {
2422         int i;
2423
2424         for (i = 0; i < port->num_def_qps; i++)
2425                 napi_enable(&port->port_res[i].napi);
2426 }
2427
2428 static int ehea_open(struct net_device *dev)
2429 {
2430         int ret;
2431         struct ehea_port *port = netdev_priv(dev);
2432
2433         mutex_lock(&port->port_lock);
2434
2435         netif_info(port, ifup, dev, "enabling port\n");
2436
2437         ret = ehea_up(dev);
2438         if (!ret) {
2439                 port_napi_enable(port);
2440                 netif_tx_start_all_queues(dev);
2441         }
2442
2443         mutex_unlock(&port->port_lock);
2444         schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
2445
2446         return ret;
2447 }
2448
2449 static int ehea_down(struct net_device *dev)
2450 {
2451         int ret;
2452         struct ehea_port *port = netdev_priv(dev);
2453
2454         if (port->state == EHEA_PORT_DOWN)
2455                 return 0;
2456
2457         ehea_drop_multicast_list(dev);
2458         ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2459
2460         ehea_free_interrupts(dev);
2461
2462         port->state = EHEA_PORT_DOWN;
2463
2464         ehea_update_bcmc_registrations();
2465
2466         ret = ehea_clean_all_portres(port);
2467         if (ret)
2468                 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2469
2470         ehea_update_firmware_handles();
2471
2472         return ret;
2473 }
2474
2475 static int ehea_stop(struct net_device *dev)
2476 {
2477         int ret;
2478         struct ehea_port *port = netdev_priv(dev);
2479
2480         netif_info(port, ifdown, dev, "disabling port\n");
2481
2482         set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2483         cancel_work_sync(&port->reset_task);
2484         cancel_delayed_work_sync(&port->stats_work);
2485         mutex_lock(&port->port_lock);
2486         netif_tx_stop_all_queues(dev);
2487         port_napi_disable(port);
2488         ret = ehea_down(dev);
2489         mutex_unlock(&port->port_lock);
2490         clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2491         return ret;
2492 }
2493
2494 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2495 {
2496         struct ehea_qp qp = *orig_qp;
2497         struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2498         struct ehea_swqe *swqe;
2499         int wqe_index;
2500         int i;
2501
2502         for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2503                 swqe = ehea_get_swqe(&qp, &wqe_index);
2504                 swqe->tx_control |= EHEA_SWQE_PURGE;
2505         }
2506 }
2507
2508 static void ehea_flush_sq(struct ehea_port *port)
2509 {
2510         int i;
2511
2512         for (i = 0; i < port->num_def_qps; i++) {
2513                 struct ehea_port_res *pr = &port->port_res[i];
2514                 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2515                 int ret;
2516
2517                 ret = wait_event_timeout(port->swqe_avail_wq,
2518                          atomic_read(&pr->swqe_avail) >= swqe_max,
2519                          msecs_to_jiffies(100));
2520
2521                 if (!ret) {
2522                         pr_err("WARNING: sq not flushed completely\n");
2523                         break;
2524                 }
2525         }
2526 }
2527
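/*
 * The flush threshold in ehea_flush_sq() follows from the accounting in
 * ehea_init_port_res(): swqe_avail starts at act_nr_send_wqes - 1, which
 * equals sq_skba_size - 2, so waiting for
 * swqe_avail >= sq_skba_size - 2 - swqe_ll_count means "everything has
 * completed except the low-latency sends that never request a CQE".
 * Note that ehea_purge_sq() iterates over a by-value copy of the QP so
 * that walking the ring does not disturb the live queue pointers.
 */
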
2528 int ehea_stop_qps(struct net_device *dev)
2529 {
2530         struct ehea_port *port = netdev_priv(dev);
2531         struct ehea_adapter *adapter = port->adapter;
2532         struct hcp_modify_qp_cb0 *cb0;
2533         int ret = -EIO;
2534         int dret;
2535         int i;
2536         u64 hret;
2537         u64 dummy64 = 0;
2538         u16 dummy16 = 0;
2539
2540         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2541         if (!cb0) {
2542                 ret = -ENOMEM;
2543                 goto out;
2544         }
2545
2546         for (i = 0; i < (port->num_def_qps); i++) {
2547                 struct ehea_port_res *pr =  &port->port_res[i];
2548                 struct ehea_qp *qp = pr->qp;
2549
2550                 /* Purge send queue */
2551                 ehea_purge_sq(qp);
2552
2553                 /* Disable queue pair */
2554                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2555                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2556                                             cb0);
2557                 if (hret != H_SUCCESS) {
2558                         pr_err("query_ehea_qp failed (1)\n");
2559                         goto out;
2560                 }
2561
2562                 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2563                 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2564
2565                 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2566                                              EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2567                                                             1), cb0, &dummy64,
2568                                              &dummy64, &dummy16, &dummy16);
2569                 if (hret != H_SUCCESS) {
2570                         pr_err("modify_ehea_qp failed (1)\n");
2571                         goto out;
2572                 }
2573
2574                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2575                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2576                                             cb0);
2577                 if (hret != H_SUCCESS) {
2578                         pr_err("query_ehea_qp failed (2)\n");
2579                         goto out;
2580                 }
2581
2582                 /* deregister shared memory regions */
2583                 dret = ehea_rem_smrs(pr);
2584                 if (dret) {
2585                         pr_err("unreg shared memory region failed\n");
2586                         goto out;
2587                 }
2588         }
2589
2590         ret = 0;
2591 out:
2592         free_page((unsigned long)cb0);
2593
2594         return ret;
2595 }
2596
2597 void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2598 {
2599         struct ehea_qp qp = *orig_qp;
2600         struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2601         struct ehea_rwqe *rwqe;
2602         struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2603         struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2604         struct sk_buff *skb;
2605         u32 lkey = pr->recv_mr.lkey;
2606
2608         int i;
2609         int index;
2610
2611         for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2612                 rwqe = ehea_get_next_rwqe(&qp, 2);
2613                 rwqe->sg_list[0].l_key = lkey;
2614                 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2615                 skb = skba_rq2[index];
2616                 if (skb)
2617                         rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2618         }
2619
2620         for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2621                 rwqe = ehea_get_next_rwqe(&qp, 3);
2622                 rwqe->sg_list[0].l_key = lkey;
2623                 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2624                 skb = skba_rq3[index];
2625                 if (skb)
2626                         rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2627         }
2628 }
2629
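/*
 * ehea_update_rqs() belongs to the DLPAR memory-change path: once the
 * kernel memory region has been re-registered, the old lkey and the
 * virtual-to-DMA translations of every pre-posted RQ2/RQ3 buffer are
 * stale, so the work requests are rewritten in place with the new lkey
 * and freshly translated addresses before ehea_restart_qps() re-enables
 * the QP and refills the queues.
 */
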
2630 int ehea_restart_qps(struct net_device *dev)
2631 {
2632         struct ehea_port *port = netdev_priv(dev);
2633         struct ehea_adapter *adapter = port->adapter;
2634         int ret = 0;
2635         int i;
2636
2637         struct hcp_modify_qp_cb0 *cb0;
2638         u64 hret;
2639         u64 dummy64 = 0;
2640         u16 dummy16 = 0;
2641
2642         cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2643         if (!cb0) {
2644                 ret = -ENOMEM;
2645                 goto out;
2646         }
2647
2648         for (i = 0; i < (port->num_def_qps); i++) {
2649                 struct ehea_port_res *pr =  &port->port_res[i];
2650                 struct ehea_qp *qp = pr->qp;
2651
2652                 ret = ehea_gen_smrs(pr);
2653                 if (ret) {
2654                         netdev_err(dev, "creation of shared memory regions failed\n");
2655                         goto out;
2656                 }
2657
2658                 ehea_update_rqs(qp, pr);
2659
2660                 /* Enable queue pair */
2661                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2662                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2663                                             cb0);
2664                 if (hret != H_SUCCESS) {
2665                         netdev_err(dev, "query_ehea_qp failed (1)\n");
2666                         goto out;
2667                 }
2668
2669                 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2670                 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2671
2672                 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2673                                              EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2674                                                             1), cb0, &dummy64,
2675                                              &dummy64, &dummy16, &dummy16);
2676                 if (hret != H_SUCCESS) {
2677                         netdev_err(dev, "modify_ehea_qp failed (1)\n");
2678                         goto out;
2679                 }
2680
2681                 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2682                                             EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2683                                             cb0);
2684                 if (hret != H_SUCCESS) {
2685                         netdev_err(dev, "query_ehea_qp failed (2)\n");
2686                         goto out;
2687                 }
2688
2689                 /* refill entire queue */
2690                 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2691                 ehea_refill_rq2(pr, 0);
2692                 ehea_refill_rq3(pr, 0);
2693         }
2694 out:
2695         free_page((unsigned long)cb0);
2696
2697         return ret;
2698 }
2699
2700 static void ehea_reset_port(struct work_struct *work)
2701 {
2702         int ret;
2703         struct ehea_port *port =
2704                 container_of(work, struct ehea_port, reset_task);
2705         struct net_device *dev = port->netdev;
2706
2707         mutex_lock(&dlpar_mem_lock);
2708         port->resets++;
2709         mutex_lock(&port->port_lock);
2710         netif_tx_disable(dev);
2711
2712         port_napi_disable(port);
2713
2714         ehea_down(dev);
2715
2716         ret = ehea_up(dev);
2717         if (ret)
2718                 goto out;
2719
2720         ehea_set_multicast_list(dev);
2721
2722         netif_info(port, timer, dev, "reset successful\n");
2723
2724         port_napi_enable(port);
2725
2726         netif_tx_wake_all_queues(dev);
2727 out:
2728         mutex_unlock(&port->port_lock);
2729         mutex_unlock(&dlpar_mem_lock);
2730 }
2731
2732 static void ehea_rereg_mrs(void)
2733 {
2734         int ret, i;
2735         struct ehea_adapter *adapter;
2736
2737         pr_info("LPAR memory changed - re-initializing driver\n");
2738
2739         list_for_each_entry(adapter, &adapter_list, list)
2740                 if (adapter->active_ports) {
2741                         /* Shutdown all ports */
2742                         for (i = 0; i < EHEA_MAX_PORTS; i++) {
2743                                 struct ehea_port *port = adapter->port[i];
2744                                 struct net_device *dev;
2745
2746                                 if (!port)
2747                                         continue;
2748
2749                                 dev = port->netdev;
2750
2751                                 if (dev->flags & IFF_UP) {
2752                                         mutex_lock(&port->port_lock);
2753                                         netif_tx_disable(dev);
2754                                         ehea_flush_sq(port);
2755                                         ret = ehea_stop_qps(dev);
2756                                         if (ret) {
2757                                                 mutex_unlock(&port->port_lock);
2758                                                 goto out;
2759                                         }
2760                                         port_napi_disable(port);
2761                                         mutex_unlock(&port->port_lock);
2762                                 }
2763                                 reset_sq_restart_flag(port);
2764                         }
2765
2766                         /* Unregister old memory region */
2767                         ret = ehea_rem_mr(&adapter->mr);
2768                         if (ret) {
2769                                 pr_err("unregister MR failed - driver inoperable!\n");
2770                                 goto out;
2771                         }
2772                 }
2773
2774         clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2775
2776         list_for_each_entry(adapter, &adapter_list, list)
2777                 if (adapter->active_ports) {
2778                         /* Register new memory region */
2779                         ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2780                         if (ret) {
2781                                 pr_err("register MR failed - driver inoperable!\n");
2782                                 goto out;
2783                         }
2784
2785                         /* Restart all ports */
2786                         for (i = 0; i < EHEA_MAX_PORTS; i++) {
2787                                 struct ehea_port *port = adapter->port[i];
2788
2789                                 if (port) {
2790                                         struct net_device *dev = port->netdev;
2791
2792                                         if (dev->flags & IFF_UP) {
2793                                                 mutex_lock(&port->port_lock);
2794                                                 ret = ehea_restart_qps(dev);
2795                                                 if (!ret) {
2796                                                         check_sqs(port);
2797                                                         port_napi_enable(port);
2798                                                         netif_tx_wake_all_queues(dev);
2799                                                 } else {
2800                                                         netdev_err(dev, "Unable to restart QPS\n");
2801                                                 }
2802                                                 mutex_unlock(&port->port_lock);
2803                                         }
2804                                 }
2805                         }
2806                 }
2807         pr_info("re-initializing driver complete\n");
2808 out:
2809         return;
2810 }
2811
2812 static void ehea_tx_watchdog(struct net_device *dev)
2813 {
2814         struct ehea_port *port = netdev_priv(dev);
2815
2816         if (netif_carrier_ok(dev) &&
2817             !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2818                 ehea_schedule_port_reset(port);
2819 }
2820
2821 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2822 {
2823         struct hcp_query_ehea *cb;
2824         u64 hret;
2825         int ret;
2826
2827         cb = (void *)get_zeroed_page(GFP_KERNEL);
2828         if (!cb) {
2829                 ret = -ENOMEM;
2830                 goto out;
2831         }
2832
2833         hret = ehea_h_query_ehea(adapter->handle, cb);
2834
2835         if (hret != H_SUCCESS) {
2836                 ret = -EIO;
2837                 goto out_herr;
2838         }
2839
2840         adapter->max_mc_mac = cb->max_mc_mac - 1;
2841         ret = 0;
2842
2843 out_herr:
2844         free_page((unsigned long)cb);
2845 out:
2846         return ret;
2847 }
2848
2849 int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2850 {
2851         struct hcp_ehea_port_cb4 *cb4;
2852         u64 hret;
2853         int ret = 0;
2854
2855         *jumbo = 0;
2856
2857         /* (Try to) enable jumbo frames */
2858         cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2859         if (!cb4) {
2860                 pr_err("no mem for cb4\n");
2861                 ret = -ENOMEM;
2862                 goto out;
2863         } else {
2864                 hret = ehea_h_query_ehea_port(port->adapter->handle,
2865                                               port->logical_port_id,
2866                                               H_PORT_CB4,
2867                                               H_PORT_CB4_JUMBO, cb4);
2868                 if (hret == H_SUCCESS) {
2869                         if (cb4->jumbo_frame)
2870                                 *jumbo = 1;
2871                         else {
2872                                 cb4->jumbo_frame = 1;
2873                                 hret = ehea_h_modify_ehea_port(port->adapter->
2874                                                                handle,
2875                                                                port->
2876                                                                logical_port_id,
2877                                                                H_PORT_CB4,
2878                                                                H_PORT_CB4_JUMBO,
2879                                                                cb4);
2880                                 if (hret == H_SUCCESS)
2881                                         *jumbo = 1;
2882                         }
2883                 } else
2884                         ret = -EINVAL;
2885
2886                 free_page((unsigned long)cb4);
2887         }
2888 out:
2889         return ret;
2890 }
2891
2892 static ssize_t ehea_show_port_id(struct device *dev,
2893                                  struct device_attribute *attr, char *buf)
2894 {
2895         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2896         return sprintf(buf, "%d", port->logical_port_id);
2897 }
2898
2899 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2900                    NULL);
2901
2902 static void logical_port_release(struct device *dev)
2903 {
2904         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2905         of_node_put(port->ofdev.dev.of_node);
2906 }
2907
2908 static struct device *ehea_register_port(struct ehea_port *port,
2909                                          struct device_node *dn)
2910 {
2911         int ret;
2912
2913         port->ofdev.dev.of_node = of_node_get(dn);
2914         port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2915         port->ofdev.dev.bus = &ibmebus_bus_type;
2916
2917         dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2918         port->ofdev.dev.release = logical_port_release;
2919
2920         ret = of_device_register(&port->ofdev);
2921         if (ret) {
2922                 pr_err("failed to register device. ret=%d\n", ret);
2923                 goto out;
2924         }
2925
2926         ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2927         if (ret) {
2928                 pr_err("failed to register attributes, ret=%d\n", ret);
2929                 goto out_unreg_of_dev;
2930         }
2931
2932         return &port->ofdev.dev;
2933
2934 out_unreg_of_dev:
2935         of_device_unregister(&port->ofdev);
2936 out:
2937         return NULL;
2938 }
2939
2940 static void ehea_unregister_port(struct ehea_port *port)
2941 {
2942         device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2943         of_device_unregister(&port->ofdev);
2944 }
2945
2946 static const struct net_device_ops ehea_netdev_ops = {
2947         .ndo_open               = ehea_open,
2948         .ndo_stop               = ehea_stop,
2949         .ndo_start_xmit         = ehea_start_xmit,
2950 #ifdef CONFIG_NET_POLL_CONTROLLER
2951         .ndo_poll_controller    = ehea_netpoll,
2952 #endif
2953         .ndo_get_stats64        = ehea_get_stats64,
2954         .ndo_set_mac_address    = ehea_set_mac_addr,
2955         .ndo_validate_addr      = eth_validate_addr,
2956         .ndo_set_rx_mode        = ehea_set_multicast_list,
2957         .ndo_change_mtu         = ehea_change_mtu,
2958         .ndo_vlan_rx_add_vid    = ehea_vlan_rx_add_vid,
2959         .ndo_vlan_rx_kill_vid   = ehea_vlan_rx_kill_vid,
2960         .ndo_tx_timeout         = ehea_tx_watchdog,
2961 };
2962
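     /*
      * Set up one logical port: allocate a multiqueue net_device, sense
      * the port attributes from firmware, register the port device and
      * finally the netdev itself.  Failure paths unwind in reverse order
      * and return NULL so the caller can release the adapter MR.
      */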
2963 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2964                                          u32 logical_port_id,
2965                                          struct device_node *dn)
2966 {
2967         int ret;
2968         struct net_device *dev;
2969         struct ehea_port *port;
2970         struct device *port_dev;
2971         int jumbo;
2972
2973         /* allocate memory for the port structures */
2974         dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2975
2976         if (!dev) {
2977                 pr_err("no mem for net_device\n");
2978                 ret = -ENOMEM;
2979                 goto out_err;
2980         }
2981
2982         port = netdev_priv(dev);
2983
2984         mutex_init(&port->port_lock);
2985         port->state = EHEA_PORT_DOWN;
2986         port->sig_comp_iv = sq_entries / 10;
2987
2988         port->adapter = adapter;
2989         port->netdev = dev;
2990         port->logical_port_id = logical_port_id;
2991
2992         port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2993
2994         port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2995         if (!port->mc_list) {
2996                 ret = -ENOMEM;
2997                 goto out_free_ethdev;
2998         }
2999
3000         INIT_LIST_HEAD(&port->mc_list->list);
3001
3002         ret = ehea_sense_port_attr(port);
3003         if (ret)
3004                 goto out_free_mc_list;
3005
3006         netif_set_real_num_rx_queues(dev, port->num_def_qps);
3007         netif_set_real_num_tx_queues(dev, port->num_def_qps);
3008
3009         port_dev = ehea_register_port(port, dn);
3010         if (!port_dev) {
3011                 ret = -EIO;
                     goto out_free_mc_list;
             }
3012
3013         SET_NETDEV_DEV(dev, port_dev);
3014
3015         /* initialize net_device structure */
3016         memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3017
3018         dev->netdev_ops = &ehea_netdev_ops;
3019         ehea_set_ethtool_ops(dev);
3020
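             /*
              * hw_features may be toggled by the user via ethtool; with
              * the LRO code gone, receive aggregation is left to the
              * stack's GRO.
              */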
3021         dev->hw_features = NETIF_F_SG | NETIF_F_TSO
3022                       | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
3023         dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3024                       | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3025                       | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3026                       | NETIF_F_RXCSUM;
3027         dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3028                         NETIF_F_IP_CSUM;
3029         dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3030
3031         INIT_WORK(&port->reset_task, ehea_reset_port);
3032         INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3033
3034         init_waitqueue_head(&port->swqe_avail_wq);
3035         init_waitqueue_head(&port->restart_wq);
3036
3037         memset(&port->stats, 0, sizeof(struct net_device_stats));
3038         ret = register_netdev(dev);
3039         if (ret) {
3040                 pr_err("register_netdev failed. ret=%d\n", ret);
3041                 goto out_unreg_port;
3042         }
3043
3044         ret = ehea_get_jumboframe_status(port, &jumbo);
3045         if (ret)
3046                 netdev_err(dev, "failed determining jumbo frame status\n");
3047
3048         netdev_info(dev, "Jumbo frames are %sabled\n",
3049                     jumbo == 1 ? "en" : "dis");
3050
3051         adapter->active_ports++;
3052
3053         return port;
3054
3055 out_unreg_port:
3056         ehea_unregister_port(port);
3057
3058 out_free_mc_list:
3059         kfree(port->mc_list);
3060
3061 out_free_ethdev:
3062         free_netdev(dev);
3063
3064 out_err:
3065         pr_err("setting up logical port with id=%d failed, ret=%d\n",
3066                logical_port_id, ret);
3067         return NULL;
3068 }
3069
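     /*
      * Tear down a single port in reverse order of
      * ehea_setup_single_port(); pending reset and stats work must be
      * cancelled before the netdev is unregistered and freed.
      */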
3070 static void ehea_shutdown_single_port(struct ehea_port *port)
3071 {
3072         struct ehea_adapter *adapter = port->adapter;
3073
3074         cancel_work_sync(&port->reset_task);
3075         cancel_delayed_work_sync(&port->stats_work);
3076         unregister_netdev(port->netdev);
3077         ehea_unregister_port(port);
3078         kfree(port->mc_list);
3079         free_netdev(port->netdev);
3080         adapter->active_ports--;
3081 }
3082
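     /*
      * Walk the adapter's device-tree node and create a port for every
      * child that carries an "ibm,hea-port-no" property.  Illustrative
      * layout (node names and unit addresses vary by machine):
      *
      *   lhea@23c00000 {
      *           ethernet@23e00000 {
      *                   ibm,hea-port-no = <1>;
      *           };
      *   };
      */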
3083 static int ehea_setup_ports(struct ehea_adapter *adapter)
3084 {
3085         struct device_node *lhea_dn;
3086         struct device_node *eth_dn = NULL;
3087
3088         const u32 *dn_log_port_id;
3089         int i = 0;
3090
3091         lhea_dn = adapter->ofdev->dev.of_node;
3092         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3093
3094                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3095                                                  NULL);
3096                 if (!dn_log_port_id) {
3097                         pr_err("bad device node: eth_dn name=%s\n",
3098                                eth_dn->full_name);
3099                         continue;
3100                 }
3101
3102                 if (ehea_add_adapter_mr(adapter)) {
3103                         pr_err("creating MR failed\n");
3104                         of_node_put(eth_dn);
3105                         return -EIO;
3106                 }
3107
3108                 adapter->port[i] = ehea_setup_single_port(adapter,
3109                                                           *dn_log_port_id,
3110                                                           eth_dn);
3111                 if (adapter->port[i])
3112                         netdev_info(adapter->port[i]->netdev,
3113                                     "logical port id #%d\n", *dn_log_port_id);
3114                 else
3115                         ehea_remove_adapter_mr(adapter);
3116
3117                 i++;
3118         }
3119         return 0;
3120 }
3121
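     /*
      * Look up the device-tree child whose "ibm,hea-port-no" matches the
      * given logical port id.  The reference taken by of_get_next_child()
      * is still held on the returned node; the caller must put it.
      */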
3122 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3123                                            u32 logical_port_id)
3124 {
3125         struct device_node *lhea_dn;
3126         struct device_node *eth_dn = NULL;
3127         const u32 *dn_log_port_id;
3128
3129         lhea_dn = adapter->ofdev->dev.of_node;
3130         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3131
3132                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3133                                                  NULL);
3134                 if (dn_log_port_id)
3135                         if (*dn_log_port_id == logical_port_id)
3136                                 return eth_dn;
3137         }
3138
3139         return NULL;
3140 }
3141
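     /*
      * Store handler for the write-only "probe_port" attribute, which
      * hot-adds a logical port by id.  Illustrative use from user space
      * (the exact device name under ibmebus varies):
      *
      *   echo 2 > /sys/bus/ibmebus/devices/<adapter>/probe_port
      */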
3142 static ssize_t ehea_probe_port(struct device *dev,
3143                                struct device_attribute *attr,
3144                                const char *buf, size_t count)
3145 {
3146         struct ehea_adapter *adapter = dev_get_drvdata(dev);
3147         struct ehea_port *port;
3148         struct device_node *eth_dn = NULL;
3149         int i;
3150
3151         u32 logical_port_id;
3152
3153         sscanf(buf, "%u", &logical_port_id);
3154
3155         port = ehea_get_port(adapter, logical_port_id);
3156
3157         if (port) {
3158                 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3159                             logical_port_id);
3160                 return -EINVAL;
3161         }
3162
3163         eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3164
3165         if (!eth_dn) {
3166                 pr_info("no logical port with id %d found\n", logical_port_id);
3167                 return -EINVAL;
3168         }
3169
3170         if (ehea_add_adapter_mr(adapter)) {
3171                 pr_err("creating MR failed\n");
3172                 return -EIO;
3173         }
3174
3175         port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3176
3177         of_node_put(eth_dn);
3178
3179         if (port) {
3180                 for (i = 0; i < EHEA_MAX_PORTS; i++)
3181                         if (!adapter->port[i]) {
3182                                 adapter->port[i] = port;
3183                                 break;
3184                         }
3185
3186                 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3187                             logical_port_id);
3188         } else {
3189                 ehea_remove_adapter_mr(adapter);
3190                 return -EIO;
3191         }
3192
3193         return (ssize_t) count;
3194 }
3195
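     /*
      * Counterpart to ehea_probe_port(): shuts down and frees the port,
      * e.g. echo 2 > /sys/bus/ibmebus/devices/<adapter>/remove_port
      */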
3196 static ssize_t ehea_remove_port(struct device *dev,
3197                                 struct device_attribute *attr,
3198                                 const char *buf, size_t count)
3199 {
3200         struct ehea_adapter *adapter = dev_get_drvdata(dev);
3201         struct ehea_port *port;
3202         int i;
3203         u32 logical_port_id;
3204
3205         sscanf(buf, "%u", &logical_port_id);
3206
3207         port = ehea_get_port(adapter, logical_port_id);
3208
3209         if (port) {
3210                 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3211                             logical_port_id);
3212
3213                 ehea_shutdown_single_port(port);
3214
3215                 for (i = 0; i < EHEA_MAX_PORTS; i++)
3216                         if (adapter->port[i] == port) {
3217                                 adapter->port[i] = NULL;
3218                                 break;
3219                         }
3220         } else {
3221                 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3222                        logical_port_id);
3223                 return -EINVAL;
3224         }
3225
3226         ehea_remove_adapter_mr(adapter);
3227
3228         return (ssize_t) count;
3229 }
3230
3231 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3232 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3233
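     /* Create/remove the per-adapter probe_port and remove_port files. */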
3234 int ehea_create_device_sysfs(struct platform_device *dev)
3235 {
3236         int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3237         if (ret)
3238                 goto out;
3239
3240         ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3241 out:
3242         return ret;
3243 }
3244
3245 void ehea_remove_device_sysfs(struct platform_device *dev)
3246 {
3247         device_remove_file(&dev->dev, &dev_attr_probe_port);
3248         device_remove_file(&dev->dev, &dev_attr_remove_port);
3249 }
3250
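     /*
      * Adapter probe: read the firmware handle from the "ibm,hea-handle"
      * property, create the notification event queue and request its
      * interrupt, then expose the sysfs files and set up all ports found
      * in the device tree.  Errors unwind in reverse order.
      */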
3251 static int __devinit ehea_probe_adapter(struct platform_device *dev,
3252                                         const struct of_device_id *id)
3253 {
3254         struct ehea_adapter *adapter;
3255         const u64 *adapter_handle;
3256         int ret;
3257
3258         if (!dev || !dev->dev.of_node) {
3259                 pr_err("Invalid ibmebus device probed\n");
3260                 return -EINVAL;
3261         }
3262
3263         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3264         if (!adapter) {
3265                 ret = -ENOMEM;
3266                 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3267                 goto out;
3268         }
3269
3270         list_add(&adapter->list, &adapter_list);
3271
3272         adapter->ofdev = dev;
3273
3274         adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3275                                          NULL);
3276         if (adapter_handle)
3277                 adapter->handle = *adapter_handle;
3278
3279         if (!adapter->handle) {
3280                 dev_err(&dev->dev, "failed getting handle for adapter"
3281                         " '%s'\n", dev->dev.of_node->full_name);
3282                 ret = -ENODEV;
3283                 goto out_free_ad;
3284         }
3285
3286         adapter->pd = EHEA_PD_ID;
3287
3288         dev_set_drvdata(&dev->dev, adapter);
3289
3291         /* initialize adapter and ports */
3292         /* get adapter properties */
3293         ret = ehea_sense_adapter_attr(adapter);
3294         if (ret) {
3295                 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3296                 goto out_free_ad;
3297         }
3298
3299         adapter->neq = ehea_create_eq(adapter,
3300                                       EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3301         if (!adapter->neq) {
3302                 ret = -EIO;
3303                 dev_err(&dev->dev, "NEQ creation failed\n");
3304                 goto out_free_ad;
3305         }
3306
3307         tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3308                      (unsigned long)adapter);
3309
3310         ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3311                                   ehea_interrupt_neq, IRQF_DISABLED,
3312                                   "ehea_neq", adapter);
3313         if (ret) {
3314                 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3315                 goto out_kill_eq;
3316         }
3317
3318         ret = ehea_create_device_sysfs(dev);
3319         if (ret)
3320                 goto out_free_irq;
3321
3322         ret = ehea_setup_ports(adapter);
3323         if (ret) {
3324                 dev_err(&dev->dev, "setup_ports failed\n");
3325                 goto out_rem_dev_sysfs;
3326         }
3327
3328         ret = 0;
3329         goto out;
3330
3331 out_rem_dev_sysfs:
3332         ehea_remove_device_sysfs(dev);
3333
3334 out_free_irq:
3335         ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3336
3337 out_kill_eq:
3338         ehea_destroy_eq(adapter->neq);
3339
3340 out_free_ad:
3341         list_del(&adapter->list);
3342         kfree(adapter);
3343
3344 out:
3345         ehea_update_firmware_handles();
3346
3347         return ret;
3348 }
3349
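     /* Adapter removal: shut down all ports, then free IRQ, NEQ and MR. */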
3350 static int __devexit ehea_remove(struct platform_device *dev)
3351 {
3352         struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3353         int i;
3354
3355         for (i = 0; i < EHEA_MAX_PORTS; i++)
3356                 if (adapter->port[i]) {
3357                         ehea_shutdown_single_port(adapter->port[i]);
3358                         adapter->port[i] = NULL;
3359                 }
3360
3361         ehea_remove_device_sysfs(dev);
3362
3363         ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3364         tasklet_kill(&adapter->neq_tasklet);
3365
3366         ehea_destroy_eq(adapter->neq);
3367         ehea_remove_adapter_mr(adapter);
3368         list_del(&adapter->list);
3369         kfree(adapter);
3370
3371         ehea_update_firmware_handles();
3372
3373         return 0;
3374 }
3375
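     /*
      * kexec crash hook: force-free all firmware resource handles and
      * deregister broadcast/multicast registrations so the kdump kernel
      * finds the HEA in a clean state.
      */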
3376 void ehea_crash_handler(void)
3377 {
3378         int i;
3379
3380         if (ehea_fw_handles.arr)
3381                 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3382                         ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3383                                              ehea_fw_handles.arr[i].fwh,
3384                                              FORCE_FREE);
3385
3386         if (ehea_bcmc_regs.arr)
3387                 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3388                         ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3389                                               ehea_bcmc_regs.arr[i].port_id,
3390                                               ehea_bcmc_regs.arr[i].reg_type,
3391                                               ehea_bcmc_regs.arr[i].macaddr,
3392                                               0, H_DEREG_BCMC);
3393 }
3394
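     /*
      * Memory hotplug (DLPAR) notifier: data transfer is stopped and the
      * memory regions are re-registered whenever sections enter or leave
      * the busmap, since the HEA accesses kernel memory via its own MRs.
      */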
3395 static int ehea_mem_notifier(struct notifier_block *nb,
3396                              unsigned long action, void *data)
3397 {
3398         int ret = NOTIFY_BAD;
3399         struct memory_notify *arg = data;
3400
3401         mutex_lock(&dlpar_mem_lock);
3402
3403         switch (action) {
3404         case MEM_CANCEL_OFFLINE:
3405                 pr_info("memory offlining canceled\n");
3406                 /* Re-add canceled memory block; fall through */
3407         case MEM_ONLINE:
3408                 pr_info("memory is going online\n");
3409                 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3410                 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3411                         goto out_unlock;
3412                 ehea_rereg_mrs();
3413                 break;
3414         case MEM_GOING_OFFLINE:
3415                 pr_info("memory is going offline\n");
3416                 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3417                 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3418                         goto out_unlock;
3419                 ehea_rereg_mrs();
3420                 break;
3421         default:
3422                 break;
3423         }
3424
3425         ehea_update_firmware_handles();
3426         ret = NOTIFY_OK;
3427
3428 out_unlock:
3429         mutex_unlock(&dlpar_mem_lock);
3430         return ret;
3431 }
3432
3433 static struct notifier_block ehea_mem_nb = {
3434         .notifier_call = ehea_mem_notifier,
3435 };
3436
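     /* On restart, unbind the driver so all firmware resources are freed. */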
3437 static int ehea_reboot_notifier(struct notifier_block *nb,
3438                                 unsigned long action, void *unused)
3439 {
3440         if (action == SYS_RESTART) {
3441                 pr_info("Reboot: freeing all eHEA resources\n");
3442                 ibmebus_unregister_driver(&ehea_driver);
3443         }
3444         return NOTIFY_DONE;
3445 }
3446
3447 static struct notifier_block ehea_reboot_nb = {
3448         .notifier_call = ehea_reboot_notifier,
3449 };
3450
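     /*
      * Validate the queue size module parameters.  Legal values have the
      * form 2^x - 1 with x in [6..14]; only the min/max range is enforced
      * here.
      */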
3451 static int check_module_parm(void)
3452 {
3453         int ret = 0;
3454
3455         if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3456             (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3457                 pr_info("Bad parameter: rq1_entries\n");
3458                 ret = -EINVAL;
3459         }
3460         if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3461             (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3462                 pr_info("Bad parameter: rq2_entries\n");
3463                 ret = -EINVAL;
3464         }
3465         if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3466             (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3467                 pr_info("Bad parameter: rq3_entries\n");
3468                 ret = -EINVAL;
3469         }
3470         if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3471             (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3472                 pr_info("Bad parameter: sq_entries\n");
3473                 ret = -EINVAL;
3474         }
3475
3476         return ret;
3477 }
3478
3479 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3480                                       char *buf)
3481 {
3482         return sprintf(buf, "%d\n", EHEA_CAPABILITIES);
3483 }
3484
3485 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3486                    ehea_show_capabilities, NULL);
3487
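     /*
      * Module init: check parameters and build the busmap, register the
      * reboot/memory/crash notifiers, then register the ibmebus driver
      * and its capabilities attribute.
      */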
3488 int __init ehea_module_init(void)
3489 {
3490         int ret;
3491
3492         pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3493
3494         memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3495         memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3496
3497         mutex_init(&ehea_fw_handles.lock);
3498         spin_lock_init(&ehea_bcmc_regs.lock);
3499
3500         ret = check_module_parm();
3501         if (ret)
3502                 goto out;
3503
3504         ret = ehea_create_busmap();
3505         if (ret)
3506                 goto out;
3507
3508         ret = register_reboot_notifier(&ehea_reboot_nb);
3509         if (ret)
3510                 pr_info("failed registering reboot notifier\n");
3511
3512         ret = register_memory_notifier(&ehea_mem_nb);
3513         if (ret)
3514                 pr_info("failed registering memory remove notifier\n");
3515
3516         ret = crash_shutdown_register(ehea_crash_handler);
3517         if (ret)
3518                 pr_info("failed registering crash handler\n");
3519
3520         ret = ibmebus_register_driver(&ehea_driver);
3521         if (ret) {
3522                 pr_err("failed registering eHEA device driver on ebus\n");
3523                 goto out2;
3524         }
3525
3526         ret = driver_create_file(&ehea_driver.driver,
3527                                  &driver_attr_capabilities);
3528         if (ret) {
3529                 pr_err("failed to register capabilities attribute, ret=%d\n",
3530                        ret);
3531                 goto out3;
3532         }
3533
3534         return ret;
3535
3536 out3:
3537         ibmebus_unregister_driver(&ehea_driver);
3538 out2:
3539         unregister_memory_notifier(&ehea_mem_nb);
3540         unregister_reboot_notifier(&ehea_reboot_nb);
3541         crash_shutdown_unregister(ehea_crash_handler);
3542 out:
3543         return ret;
3544 }
3545
3546 static void __exit ehea_module_exit(void)
3547 {
3548         int ret;
3549
3550         driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3551         ibmebus_unregister_driver(&ehea_driver);
3552         unregister_reboot_notifier(&ehea_reboot_nb);
3553         ret = crash_shutdown_unregister(ehea_crash_handler);
3554         if (ret)
3555                 pr_info("failed unregistering crash handler\n");
3556         unregister_memory_notifier(&ehea_mem_nb);
3557         kfree(ehea_fw_handles.arr);
3558         kfree(ehea_bcmc_regs.arr);
3559         ehea_destroy_busmap();
3560 }
3561
3562 module_init(ehea_module_init);
3563 module_exit(ehea_module_exit);