target: Fix bug in handling of FILEIO + block_device resize ops
[linux-flexiantxendom0-3.2.10.git] / drivers / infiniband / core / mad.c
1 /*
2  * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
5  * Copyright (c) 2009 HNR Consulting. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  *
35  */
36 #include <linux/dma-mapping.h>
37 #include <linux/slab.h>
38 #include <linux/module.h>
39 #include <rdma/ib_cache.h>
40
41 #include "mad_priv.h"
42 #include "mad_rmpp.h"
43 #include "smi.h"
44 #include "agent.h"
45
46 MODULE_LICENSE("Dual BSD/GPL");
47 MODULE_DESCRIPTION("kernel IB MAD API");
48 MODULE_AUTHOR("Hal Rosenstock");
49 MODULE_AUTHOR("Sean Hefty");
50
51 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
52 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
53
54 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
55 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
56 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
57 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
58
59 static struct kmem_cache *ib_mad_cache;
60
61 static struct list_head ib_mad_port_list;
62 static u32 ib_mad_client_id = 0;
63
64 /* Port list lock */
65 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
66
67 /* Forward declarations */
68 static int method_in_use(struct ib_mad_mgmt_method_table **method,
69                          struct ib_mad_reg_req *mad_reg_req);
70 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
71 static struct ib_mad_agent_private *find_mad_agent(
72                                         struct ib_mad_port_private *port_priv,
73                                         struct ib_mad *mad);
74 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
75                                     struct ib_mad_private *mad);
76 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
77 static void timeout_sends(struct work_struct *work);
78 static void local_completions(struct work_struct *work);
79 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
80                               struct ib_mad_agent_private *agent_priv,
81                               u8 mgmt_class);
82 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
83                            struct ib_mad_agent_private *agent_priv);
84
85 /*
86  * Returns a ib_mad_port_private structure or NULL for a device/port
87  * Assumes ib_mad_port_list_lock is being held
88  */
89 static inline struct ib_mad_port_private *
90 __ib_get_mad_port(struct ib_device *device, int port_num)
91 {
92         struct ib_mad_port_private *entry;
93
94         list_for_each_entry(entry, &ib_mad_port_list, port_list) {
95                 if (entry->device == device && entry->port_num == port_num)
96                         return entry;
97         }
98         return NULL;
99 }
100
101 /*
102  * Wrapper function to return a ib_mad_port_private structure or NULL
103  * for a device/port
104  */
105 static inline struct ib_mad_port_private *
106 ib_get_mad_port(struct ib_device *device, int port_num)
107 {
108         struct ib_mad_port_private *entry;
109         unsigned long flags;
110
111         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
112         entry = __ib_get_mad_port(device, port_num);
113         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
114
115         return entry;
116 }
117
118 static inline u8 convert_mgmt_class(u8 mgmt_class)
119 {
120         /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
121         return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
122                 0 : mgmt_class;
123 }
124
125 static int get_spl_qp_index(enum ib_qp_type qp_type)
126 {
127         switch (qp_type)
128         {
129         case IB_QPT_SMI:
130                 return 0;
131         case IB_QPT_GSI:
132                 return 1;
133         default:
134                 return -1;
135         }
136 }
137
138 static int vendor_class_index(u8 mgmt_class)
139 {
140         return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
141 }
142
143 static int is_vendor_class(u8 mgmt_class)
144 {
145         if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
146             (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
147                 return 0;
148         return 1;
149 }
150
151 static int is_vendor_oui(char *oui)
152 {
153         if (oui[0] || oui[1] || oui[2])
154                 return 1;
155         return 0;
156 }
157
158 static int is_vendor_method_in_use(
159                 struct ib_mad_mgmt_vendor_class *vendor_class,
160                 struct ib_mad_reg_req *mad_reg_req)
161 {
162         struct ib_mad_mgmt_method_table *method;
163         int i;
164
165         for (i = 0; i < MAX_MGMT_OUI; i++) {
166                 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
167                         method = vendor_class->method_table[i];
168                         if (method) {
169                                 if (method_in_use(&method, mad_reg_req))
170                                         return 1;
171                                 else
172                                         break;
173                         }
174                 }
175         }
176         return 0;
177 }
178
179 int ib_response_mad(struct ib_mad *mad)
180 {
181         return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
182                 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
183                 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
184                  (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
185 }
186 EXPORT_SYMBOL(ib_response_mad);
187
188 /*
189  * ib_register_mad_agent - Register to send/receive MADs
190  */
191 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
192                                            u8 port_num,
193                                            enum ib_qp_type qp_type,
194                                            struct ib_mad_reg_req *mad_reg_req,
195                                            u8 rmpp_version,
196                                            ib_mad_send_handler send_handler,
197                                            ib_mad_recv_handler recv_handler,
198                                            void *context)
199 {
200         struct ib_mad_port_private *port_priv;
201         struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
202         struct ib_mad_agent_private *mad_agent_priv;
203         struct ib_mad_reg_req *reg_req = NULL;
204         struct ib_mad_mgmt_class_table *class;
205         struct ib_mad_mgmt_vendor_class_table *vendor;
206         struct ib_mad_mgmt_vendor_class *vendor_class;
207         struct ib_mad_mgmt_method_table *method;
208         int ret2, qpn;
209         unsigned long flags;
210         u8 mgmt_class, vclass;
211
212         /* Validate parameters */
213         qpn = get_spl_qp_index(qp_type);
214         if (qpn == -1)
215                 goto error1;
216
217         if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
218                 goto error1;
219
220         /* Validate MAD registration request if supplied */
221         if (mad_reg_req) {
222                 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
223                         goto error1;
224                 if (!recv_handler)
225                         goto error1;
226                 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
227                         /*
228                          * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
229                          * one in this range currently allowed
230                          */
231                         if (mad_reg_req->mgmt_class !=
232                             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
233                                 goto error1;
234                 } else if (mad_reg_req->mgmt_class == 0) {
235                         /*
236                          * Class 0 is reserved in IBA and is used for
237                          * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
238                          */
239                         goto error1;
240                 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
241                         /*
242                          * If class is in "new" vendor range,
243                          * ensure supplied OUI is not zero
244                          */
245                         if (!is_vendor_oui(mad_reg_req->oui))
246                                 goto error1;
247                 }
248                 /* Make sure class supplied is consistent with RMPP */
249                 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
250                         if (rmpp_version)
251                                 goto error1;
252                 }
253                 /* Make sure class supplied is consistent with QP type */
254                 if (qp_type == IB_QPT_SMI) {
255                         if ((mad_reg_req->mgmt_class !=
256                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
257                             (mad_reg_req->mgmt_class !=
258                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
259                                 goto error1;
260                 } else {
261                         if ((mad_reg_req->mgmt_class ==
262                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
263                             (mad_reg_req->mgmt_class ==
264                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
265                                 goto error1;
266                 }
267         } else {
268                 /* No registration request supplied */
269                 if (!send_handler)
270                         goto error1;
271         }
272
273         /* Validate device and port */
274         port_priv = ib_get_mad_port(device, port_num);
275         if (!port_priv) {
276                 ret = ERR_PTR(-ENODEV);
277                 goto error1;
278         }
279
280         /* Verify the QP requested is supported.  For example, Ethernet devices
281          * will not have QP0 */
282         if (!port_priv->qp_info[qpn].qp) {
283                 ret = ERR_PTR(-EPROTONOSUPPORT);
284                 goto error1;
285         }
286
287         /* Allocate structures */
288         mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
289         if (!mad_agent_priv) {
290                 ret = ERR_PTR(-ENOMEM);
291                 goto error1;
292         }
293
294         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
295                                                  IB_ACCESS_LOCAL_WRITE);
296         if (IS_ERR(mad_agent_priv->agent.mr)) {
297                 ret = ERR_PTR(-ENOMEM);
298                 goto error2;
299         }
300
301         if (mad_reg_req) {
302                 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
303                 if (!reg_req) {
304                         ret = ERR_PTR(-ENOMEM);
305                         goto error3;
306                 }
307         }
308
309         /* Now, fill in the various structures */
310         mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
311         mad_agent_priv->reg_req = reg_req;
312         mad_agent_priv->agent.rmpp_version = rmpp_version;
313         mad_agent_priv->agent.device = device;
314         mad_agent_priv->agent.recv_handler = recv_handler;
315         mad_agent_priv->agent.send_handler = send_handler;
316         mad_agent_priv->agent.context = context;
317         mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
318         mad_agent_priv->agent.port_num = port_num;
319         spin_lock_init(&mad_agent_priv->lock);
320         INIT_LIST_HEAD(&mad_agent_priv->send_list);
321         INIT_LIST_HEAD(&mad_agent_priv->wait_list);
322         INIT_LIST_HEAD(&mad_agent_priv->done_list);
323         INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
324         INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
325         INIT_LIST_HEAD(&mad_agent_priv->local_list);
326         INIT_WORK(&mad_agent_priv->local_work, local_completions);
327         atomic_set(&mad_agent_priv->refcount, 1);
328         init_completion(&mad_agent_priv->comp);
329
330         spin_lock_irqsave(&port_priv->reg_lock, flags);
331         mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
332
333         /*
334          * Make sure MAD registration (if supplied)
335          * is non overlapping with any existing ones
336          */
337         if (mad_reg_req) {
338                 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
339                 if (!is_vendor_class(mgmt_class)) {
340                         class = port_priv->version[mad_reg_req->
341                                                    mgmt_class_version].class;
342                         if (class) {
343                                 method = class->method_table[mgmt_class];
344                                 if (method) {
345                                         if (method_in_use(&method,
346                                                            mad_reg_req))
347                                                 goto error4;
348                                 }
349                         }
350                         ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
351                                                   mgmt_class);
352                 } else {
353                         /* "New" vendor class range */
354                         vendor = port_priv->version[mad_reg_req->
355                                                     mgmt_class_version].vendor;
356                         if (vendor) {
357                                 vclass = vendor_class_index(mgmt_class);
358                                 vendor_class = vendor->vendor_class[vclass];
359                                 if (vendor_class) {
360                                         if (is_vendor_method_in_use(
361                                                         vendor_class,
362                                                         mad_reg_req))
363                                                 goto error4;
364                                 }
365                         }
366                         ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
367                 }
368                 if (ret2) {
369                         ret = ERR_PTR(ret2);
370                         goto error4;
371                 }
372         }
373
374         /* Add mad agent into port's agent list */
375         list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
376         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
377
378         return &mad_agent_priv->agent;
379
380 error4:
381         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
382         kfree(reg_req);
383 error3:
384         ib_dereg_mr(mad_agent_priv->agent.mr);
385 error2:
386         kfree(mad_agent_priv);
387 error1:
388         return ret;
389 }
390 EXPORT_SYMBOL(ib_register_mad_agent);
391
392 static inline int is_snooping_sends(int mad_snoop_flags)
393 {
394         return (mad_snoop_flags &
395                 (/*IB_MAD_SNOOP_POSTED_SENDS |
396                  IB_MAD_SNOOP_RMPP_SENDS |*/
397                  IB_MAD_SNOOP_SEND_COMPLETIONS /*|
398                  IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
399 }
400
401 static inline int is_snooping_recvs(int mad_snoop_flags)
402 {
403         return (mad_snoop_flags &
404                 (IB_MAD_SNOOP_RECVS /*|
405                  IB_MAD_SNOOP_RMPP_RECVS*/));
406 }
407
408 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
409                                 struct ib_mad_snoop_private *mad_snoop_priv)
410 {
411         struct ib_mad_snoop_private **new_snoop_table;
412         unsigned long flags;
413         int i;
414
415         spin_lock_irqsave(&qp_info->snoop_lock, flags);
416         /* Check for empty slot in array. */
417         for (i = 0; i < qp_info->snoop_table_size; i++)
418                 if (!qp_info->snoop_table[i])
419                         break;
420
421         if (i == qp_info->snoop_table_size) {
422                 /* Grow table. */
423                 new_snoop_table = krealloc(qp_info->snoop_table,
424                                            sizeof mad_snoop_priv *
425                                            (qp_info->snoop_table_size + 1),
426                                            GFP_ATOMIC);
427                 if (!new_snoop_table) {
428                         i = -ENOMEM;
429                         goto out;
430                 }
431
432                 qp_info->snoop_table = new_snoop_table;
433                 qp_info->snoop_table_size++;
434         }
435         qp_info->snoop_table[i] = mad_snoop_priv;
436         atomic_inc(&qp_info->snoop_count);
437 out:
438         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
439         return i;
440 }
441
442 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
443                                            u8 port_num,
444                                            enum ib_qp_type qp_type,
445                                            int mad_snoop_flags,
446                                            ib_mad_snoop_handler snoop_handler,
447                                            ib_mad_recv_handler recv_handler,
448                                            void *context)
449 {
450         struct ib_mad_port_private *port_priv;
451         struct ib_mad_agent *ret;
452         struct ib_mad_snoop_private *mad_snoop_priv;
453         int qpn;
454
455         /* Validate parameters */
456         if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
457             (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
458                 ret = ERR_PTR(-EINVAL);
459                 goto error1;
460         }
461         qpn = get_spl_qp_index(qp_type);
462         if (qpn == -1) {
463                 ret = ERR_PTR(-EINVAL);
464                 goto error1;
465         }
466         port_priv = ib_get_mad_port(device, port_num);
467         if (!port_priv) {
468                 ret = ERR_PTR(-ENODEV);
469                 goto error1;
470         }
471         /* Allocate structures */
472         mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
473         if (!mad_snoop_priv) {
474                 ret = ERR_PTR(-ENOMEM);
475                 goto error1;
476         }
477
478         /* Now, fill in the various structures */
479         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
480         mad_snoop_priv->agent.device = device;
481         mad_snoop_priv->agent.recv_handler = recv_handler;
482         mad_snoop_priv->agent.snoop_handler = snoop_handler;
483         mad_snoop_priv->agent.context = context;
484         mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
485         mad_snoop_priv->agent.port_num = port_num;
486         mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
487         init_completion(&mad_snoop_priv->comp);
488         mad_snoop_priv->snoop_index = register_snoop_agent(
489                                                 &port_priv->qp_info[qpn],
490                                                 mad_snoop_priv);
491         if (mad_snoop_priv->snoop_index < 0) {
492                 ret = ERR_PTR(mad_snoop_priv->snoop_index);
493                 goto error2;
494         }
495
496         atomic_set(&mad_snoop_priv->refcount, 1);
497         return &mad_snoop_priv->agent;
498
499 error2:
500         kfree(mad_snoop_priv);
501 error1:
502         return ret;
503 }
504 EXPORT_SYMBOL(ib_register_mad_snoop);
505
506 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
507 {
508         if (atomic_dec_and_test(&mad_agent_priv->refcount))
509                 complete(&mad_agent_priv->comp);
510 }
511
512 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
513 {
514         if (atomic_dec_and_test(&mad_snoop_priv->refcount))
515                 complete(&mad_snoop_priv->comp);
516 }
517
518 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
519 {
520         struct ib_mad_port_private *port_priv;
521         unsigned long flags;
522
523         /* Note that we could still be handling received MADs */
524
525         /*
526          * Canceling all sends results in dropping received response
527          * MADs, preventing us from queuing additional work
528          */
529         cancel_mads(mad_agent_priv);
530         port_priv = mad_agent_priv->qp_info->port_priv;
531         cancel_delayed_work(&mad_agent_priv->timed_work);
532
533         spin_lock_irqsave(&port_priv->reg_lock, flags);
534         remove_mad_reg_req(mad_agent_priv);
535         list_del(&mad_agent_priv->agent_list);
536         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
537
538         flush_workqueue(port_priv->wq);
539         ib_cancel_rmpp_recvs(mad_agent_priv);
540
541         deref_mad_agent(mad_agent_priv);
542         wait_for_completion(&mad_agent_priv->comp);
543
544         kfree(mad_agent_priv->reg_req);
545         ib_dereg_mr(mad_agent_priv->agent.mr);
546         kfree(mad_agent_priv);
547 }
548
549 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
550 {
551         struct ib_mad_qp_info *qp_info;
552         unsigned long flags;
553
554         qp_info = mad_snoop_priv->qp_info;
555         spin_lock_irqsave(&qp_info->snoop_lock, flags);
556         qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
557         atomic_dec(&qp_info->snoop_count);
558         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
559
560         deref_snoop_agent(mad_snoop_priv);
561         wait_for_completion(&mad_snoop_priv->comp);
562
563         kfree(mad_snoop_priv);
564 }
565
566 /*
567  * ib_unregister_mad_agent - Unregisters a client from using MAD services
568  */
569 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
570 {
571         struct ib_mad_agent_private *mad_agent_priv;
572         struct ib_mad_snoop_private *mad_snoop_priv;
573
574         /* If the TID is zero, the agent can only snoop. */
575         if (mad_agent->hi_tid) {
576                 mad_agent_priv = container_of(mad_agent,
577                                               struct ib_mad_agent_private,
578                                               agent);
579                 unregister_mad_agent(mad_agent_priv);
580         } else {
581                 mad_snoop_priv = container_of(mad_agent,
582                                               struct ib_mad_snoop_private,
583                                               agent);
584                 unregister_mad_snoop(mad_snoop_priv);
585         }
586         return 0;
587 }
588 EXPORT_SYMBOL(ib_unregister_mad_agent);
589
590 static void dequeue_mad(struct ib_mad_list_head *mad_list)
591 {
592         struct ib_mad_queue *mad_queue;
593         unsigned long flags;
594
595         BUG_ON(!mad_list->mad_queue);
596         mad_queue = mad_list->mad_queue;
597         spin_lock_irqsave(&mad_queue->lock, flags);
598         list_del(&mad_list->list);
599         mad_queue->count--;
600         spin_unlock_irqrestore(&mad_queue->lock, flags);
601 }
602
603 static void snoop_send(struct ib_mad_qp_info *qp_info,
604                        struct ib_mad_send_buf *send_buf,
605                        struct ib_mad_send_wc *mad_send_wc,
606                        int mad_snoop_flags)
607 {
608         struct ib_mad_snoop_private *mad_snoop_priv;
609         unsigned long flags;
610         int i;
611
612         spin_lock_irqsave(&qp_info->snoop_lock, flags);
613         for (i = 0; i < qp_info->snoop_table_size; i++) {
614                 mad_snoop_priv = qp_info->snoop_table[i];
615                 if (!mad_snoop_priv ||
616                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
617                         continue;
618
619                 atomic_inc(&mad_snoop_priv->refcount);
620                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
621                 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
622                                                     send_buf, mad_send_wc);
623                 deref_snoop_agent(mad_snoop_priv);
624                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
625         }
626         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
627 }
628
629 static void snoop_recv(struct ib_mad_qp_info *qp_info,
630                        struct ib_mad_recv_wc *mad_recv_wc,
631                        int mad_snoop_flags)
632 {
633         struct ib_mad_snoop_private *mad_snoop_priv;
634         unsigned long flags;
635         int i;
636
637         spin_lock_irqsave(&qp_info->snoop_lock, flags);
638         for (i = 0; i < qp_info->snoop_table_size; i++) {
639                 mad_snoop_priv = qp_info->snoop_table[i];
640                 if (!mad_snoop_priv ||
641                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
642                         continue;
643
644                 atomic_inc(&mad_snoop_priv->refcount);
645                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
646                 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
647                                                    mad_recv_wc);
648                 deref_snoop_agent(mad_snoop_priv);
649                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
650         }
651         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
652 }
653
654 static void build_smp_wc(struct ib_qp *qp,
655                          u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
656                          struct ib_wc *wc)
657 {
658         memset(wc, 0, sizeof *wc);
659         wc->wr_id = wr_id;
660         wc->status = IB_WC_SUCCESS;
661         wc->opcode = IB_WC_RECV;
662         wc->pkey_index = pkey_index;
663         wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
664         wc->src_qp = IB_QP0;
665         wc->qp = qp;
666         wc->slid = slid;
667         wc->sl = 0;
668         wc->dlid_path_bits = 0;
669         wc->port_num = port_num;
670 }
671
672 /*
673  * Return 0 if SMP is to be sent
674  * Return 1 if SMP was consumed locally (whether or not solicited)
675  * Return < 0 if error
676  */
677 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
678                                   struct ib_mad_send_wr_private *mad_send_wr)
679 {
680         int ret = 0;
681         struct ib_smp *smp = mad_send_wr->send_buf.mad;
682         unsigned long flags;
683         struct ib_mad_local_private *local;
684         struct ib_mad_private *mad_priv;
685         struct ib_mad_port_private *port_priv;
686         struct ib_mad_agent_private *recv_mad_agent = NULL;
687         struct ib_device *device = mad_agent_priv->agent.device;
688         u8 port_num;
689         struct ib_wc mad_wc;
690         struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
691
692         if (device->node_type == RDMA_NODE_IB_SWITCH &&
693             smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
694                 port_num = send_wr->wr.ud.port_num;
695         else
696                 port_num = mad_agent_priv->agent.port_num;
697
698         /*
699          * Directed route handling starts if the initial LID routed part of
700          * a request or the ending LID routed part of a response is empty.
701          * If we are at the start of the LID routed part, don't update the
702          * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
703          */
704         if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
705              IB_LID_PERMISSIVE &&
706              smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
707              IB_SMI_DISCARD) {
708                 ret = -EINVAL;
709                 printk(KERN_ERR PFX "Invalid directed route\n");
710                 goto out;
711         }
712
713         /* Check to post send on QP or process locally */
714         if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
715             smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
716                 goto out;
717
718         local = kmalloc(sizeof *local, GFP_ATOMIC);
719         if (!local) {
720                 ret = -ENOMEM;
721                 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
722                 goto out;
723         }
724         local->mad_priv = NULL;
725         local->recv_mad_agent = NULL;
726         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
727         if (!mad_priv) {
728                 ret = -ENOMEM;
729                 printk(KERN_ERR PFX "No memory for local response MAD\n");
730                 kfree(local);
731                 goto out;
732         }
733
734         build_smp_wc(mad_agent_priv->agent.qp,
735                      send_wr->wr_id, be16_to_cpu(smp->dr_slid),
736                      send_wr->wr.ud.pkey_index,
737                      send_wr->wr.ud.port_num, &mad_wc);
738
739         /* No GRH for DR SMP */
740         ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
741                                   (struct ib_mad *)smp,
742                                   (struct ib_mad *)&mad_priv->mad);
743         switch (ret)
744         {
745         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
746                 if (ib_response_mad(&mad_priv->mad.mad) &&
747                     mad_agent_priv->agent.recv_handler) {
748                         local->mad_priv = mad_priv;
749                         local->recv_mad_agent = mad_agent_priv;
750                         /*
751                          * Reference MAD agent until receive
752                          * side of local completion handled
753                          */
754                         atomic_inc(&mad_agent_priv->refcount);
755                 } else
756                         kmem_cache_free(ib_mad_cache, mad_priv);
757                 break;
758         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
759                 kmem_cache_free(ib_mad_cache, mad_priv);
760                 break;
761         case IB_MAD_RESULT_SUCCESS:
762                 /* Treat like an incoming receive MAD */
763                 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
764                                             mad_agent_priv->agent.port_num);
765                 if (port_priv) {
766                         memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
767                         recv_mad_agent = find_mad_agent(port_priv,
768                                                         &mad_priv->mad.mad);
769                 }
770                 if (!port_priv || !recv_mad_agent) {
771                         /*
772                          * No receiving agent so drop packet and
773                          * generate send completion.
774                          */
775                         kmem_cache_free(ib_mad_cache, mad_priv);
776                         break;
777                 }
778                 local->mad_priv = mad_priv;
779                 local->recv_mad_agent = recv_mad_agent;
780                 break;
781         default:
782                 kmem_cache_free(ib_mad_cache, mad_priv);
783                 kfree(local);
784                 ret = -EINVAL;
785                 goto out;
786         }
787
788         local->mad_send_wr = mad_send_wr;
789         /* Reference MAD agent until send side of local completion handled */
790         atomic_inc(&mad_agent_priv->refcount);
791         /* Queue local completion to local list */
792         spin_lock_irqsave(&mad_agent_priv->lock, flags);
793         list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
794         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
795         queue_work(mad_agent_priv->qp_info->port_priv->wq,
796                    &mad_agent_priv->local_work);
797         ret = 1;
798 out:
799         return ret;
800 }
801
802 static int get_pad_size(int hdr_len, int data_len)
803 {
804         int seg_size, pad;
805
806         seg_size = sizeof(struct ib_mad) - hdr_len;
807         if (data_len && seg_size) {
808                 pad = seg_size - data_len % seg_size;
809                 return pad == seg_size ? 0 : pad;
810         } else
811                 return seg_size;
812 }
813
814 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
815 {
816         struct ib_rmpp_segment *s, *t;
817
818         list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
819                 list_del(&s->list);
820                 kfree(s);
821         }
822 }
823
824 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
825                                 gfp_t gfp_mask)
826 {
827         struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
828         struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
829         struct ib_rmpp_segment *seg = NULL;
830         int left, seg_size, pad;
831
832         send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
833         seg_size = send_buf->seg_size;
834         pad = send_wr->pad;
835
836         /* Allocate data segments. */
837         for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
838                 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
839                 if (!seg) {
840                         printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
841                                "alloc failed for len %zd, gfp %#x\n",
842                                sizeof (*seg) + seg_size, gfp_mask);
843                         free_send_rmpp_list(send_wr);
844                         return -ENOMEM;
845                 }
846                 seg->num = ++send_buf->seg_count;
847                 list_add_tail(&seg->list, &send_wr->rmpp_list);
848         }
849
850         /* Zero any padding */
851         if (pad)
852                 memset(seg->data + seg_size - pad, 0, pad);
853
854         rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
855                                           agent.rmpp_version;
856         rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
857         ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
858
859         send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
860                                         struct ib_rmpp_segment, list);
861         send_wr->last_ack_seg = send_wr->cur_seg;
862         return 0;
863 }
864
865 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
866                                             u32 remote_qpn, u16 pkey_index,
867                                             int rmpp_active,
868                                             int hdr_len, int data_len,
869                                             gfp_t gfp_mask)
870 {
871         struct ib_mad_agent_private *mad_agent_priv;
872         struct ib_mad_send_wr_private *mad_send_wr;
873         int pad, message_size, ret, size;
874         void *buf;
875
876         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
877                                       agent);
878         pad = get_pad_size(hdr_len, data_len);
879         message_size = hdr_len + data_len + pad;
880
881         if ((!mad_agent->rmpp_version &&
882              (rmpp_active || message_size > sizeof(struct ib_mad))) ||
883             (!rmpp_active && message_size > sizeof(struct ib_mad)))
884                 return ERR_PTR(-EINVAL);
885
886         size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
887         buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
888         if (!buf)
889                 return ERR_PTR(-ENOMEM);
890
891         mad_send_wr = buf + size;
892         INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
893         mad_send_wr->send_buf.mad = buf;
894         mad_send_wr->send_buf.hdr_len = hdr_len;
895         mad_send_wr->send_buf.data_len = data_len;
896         mad_send_wr->pad = pad;
897
898         mad_send_wr->mad_agent_priv = mad_agent_priv;
899         mad_send_wr->sg_list[0].length = hdr_len;
900         mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
901         mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
902         mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
903
904         mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
905         mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
906         mad_send_wr->send_wr.num_sge = 2;
907         mad_send_wr->send_wr.opcode = IB_WR_SEND;
908         mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
909         mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
910         mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
911         mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
912
913         if (rmpp_active) {
914                 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
915                 if (ret) {
916                         kfree(buf);
917                         return ERR_PTR(ret);
918                 }
919         }
920
921         mad_send_wr->send_buf.mad_agent = mad_agent;
922         atomic_inc(&mad_agent_priv->refcount);
923         return &mad_send_wr->send_buf;
924 }
925 EXPORT_SYMBOL(ib_create_send_mad);
926
927 int ib_get_mad_data_offset(u8 mgmt_class)
928 {
929         if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
930                 return IB_MGMT_SA_HDR;
931         else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
932                  (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
933                  (mgmt_class == IB_MGMT_CLASS_BIS))
934                 return IB_MGMT_DEVICE_HDR;
935         else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
936                  (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
937                 return IB_MGMT_VENDOR_HDR;
938         else
939                 return IB_MGMT_MAD_HDR;
940 }
941 EXPORT_SYMBOL(ib_get_mad_data_offset);
942
943 int ib_is_mad_class_rmpp(u8 mgmt_class)
944 {
945         if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
946             (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
947             (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
948             (mgmt_class == IB_MGMT_CLASS_BIS) ||
949             ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
950              (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
951                 return 1;
952         return 0;
953 }
954 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
955
956 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
957 {
958         struct ib_mad_send_wr_private *mad_send_wr;
959         struct list_head *list;
960
961         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
962                                    send_buf);
963         list = &mad_send_wr->cur_seg->list;
964
965         if (mad_send_wr->cur_seg->num < seg_num) {
966                 list_for_each_entry(mad_send_wr->cur_seg, list, list)
967                         if (mad_send_wr->cur_seg->num == seg_num)
968                                 break;
969         } else if (mad_send_wr->cur_seg->num > seg_num) {
970                 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
971                         if (mad_send_wr->cur_seg->num == seg_num)
972                                 break;
973         }
974         return mad_send_wr->cur_seg->data;
975 }
976 EXPORT_SYMBOL(ib_get_rmpp_segment);
977
978 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
979 {
980         if (mad_send_wr->send_buf.seg_count)
981                 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
982                                            mad_send_wr->seg_num);
983         else
984                 return mad_send_wr->send_buf.mad +
985                        mad_send_wr->send_buf.hdr_len;
986 }
987
988 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
989 {
990         struct ib_mad_agent_private *mad_agent_priv;
991         struct ib_mad_send_wr_private *mad_send_wr;
992
993         mad_agent_priv = container_of(send_buf->mad_agent,
994                                       struct ib_mad_agent_private, agent);
995         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
996                                    send_buf);
997
998         free_send_rmpp_list(mad_send_wr);
999         kfree(send_buf->mad);
1000         deref_mad_agent(mad_agent_priv);
1001 }
1002 EXPORT_SYMBOL(ib_free_send_mad);
1003
1004 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1005 {
1006         struct ib_mad_qp_info *qp_info;
1007         struct list_head *list;
1008         struct ib_send_wr *bad_send_wr;
1009         struct ib_mad_agent *mad_agent;
1010         struct ib_sge *sge;
1011         unsigned long flags;
1012         int ret;
1013
1014         /* Set WR ID to find mad_send_wr upon completion */
1015         qp_info = mad_send_wr->mad_agent_priv->qp_info;
1016         mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1017         mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1018
1019         mad_agent = mad_send_wr->send_buf.mad_agent;
1020         sge = mad_send_wr->sg_list;
1021         sge[0].addr = ib_dma_map_single(mad_agent->device,
1022                                         mad_send_wr->send_buf.mad,
1023                                         sge[0].length,
1024                                         DMA_TO_DEVICE);
1025         mad_send_wr->header_mapping = sge[0].addr;
1026
1027         sge[1].addr = ib_dma_map_single(mad_agent->device,
1028                                         ib_get_payload(mad_send_wr),
1029                                         sge[1].length,
1030                                         DMA_TO_DEVICE);
1031         mad_send_wr->payload_mapping = sge[1].addr;
1032
1033         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1034         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1035                 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1036                                    &bad_send_wr);
1037                 list = &qp_info->send_queue.list;
1038         } else {
1039                 ret = 0;
1040                 list = &qp_info->overflow_list;
1041         }
1042
1043         if (!ret) {
1044                 qp_info->send_queue.count++;
1045                 list_add_tail(&mad_send_wr->mad_list.list, list);
1046         }
1047         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1048         if (ret) {
1049                 ib_dma_unmap_single(mad_agent->device,
1050                                     mad_send_wr->header_mapping,
1051                                     sge[0].length, DMA_TO_DEVICE);
1052                 ib_dma_unmap_single(mad_agent->device,
1053                                     mad_send_wr->payload_mapping,
1054                                     sge[1].length, DMA_TO_DEVICE);
1055         }
1056         return ret;
1057 }
1058
1059 /*
1060  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1061  *  with the registered client
1062  */
1063 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1064                      struct ib_mad_send_buf **bad_send_buf)
1065 {
1066         struct ib_mad_agent_private *mad_agent_priv;
1067         struct ib_mad_send_buf *next_send_buf;
1068         struct ib_mad_send_wr_private *mad_send_wr;
1069         unsigned long flags;
1070         int ret = -EINVAL;
1071
1072         /* Walk list of send WRs and post each on send list */
1073         for (; send_buf; send_buf = next_send_buf) {
1074
1075                 mad_send_wr = container_of(send_buf,
1076                                            struct ib_mad_send_wr_private,
1077                                            send_buf);
1078                 mad_agent_priv = mad_send_wr->mad_agent_priv;
1079
1080                 if (!send_buf->mad_agent->send_handler ||
1081                     (send_buf->timeout_ms &&
1082                      !send_buf->mad_agent->recv_handler)) {
1083                         ret = -EINVAL;
1084                         goto error;
1085                 }
1086
1087                 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1088                         if (mad_agent_priv->agent.rmpp_version) {
1089                                 ret = -EINVAL;
1090                                 goto error;
1091                         }
1092                 }
1093
1094                 /*
1095                  * Save pointer to next work request to post in case the
1096                  * current one completes, and the user modifies the work
1097                  * request associated with the completion
1098                  */
1099                 next_send_buf = send_buf->next;
1100                 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1101
1102                 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1103                     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1104                         ret = handle_outgoing_dr_smp(mad_agent_priv,
1105                                                      mad_send_wr);
1106                         if (ret < 0)            /* error */
1107                                 goto error;
1108                         else if (ret == 1)      /* locally consumed */
1109                                 continue;
1110                 }
1111
1112                 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1113                 /* Timeout will be updated after send completes */
1114                 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1115                 mad_send_wr->max_retries = send_buf->retries;
1116                 mad_send_wr->retries_left = send_buf->retries;
1117                 send_buf->retries = 0;
1118                 /* Reference for work request to QP + response */
1119                 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1120                 mad_send_wr->status = IB_WC_SUCCESS;
1121
1122                 /* Reference MAD agent until send completes */
1123                 atomic_inc(&mad_agent_priv->refcount);
1124                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1125                 list_add_tail(&mad_send_wr->agent_list,
1126                               &mad_agent_priv->send_list);
1127                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1128
1129                 if (mad_agent_priv->agent.rmpp_version) {
1130                         ret = ib_send_rmpp_mad(mad_send_wr);
1131                         if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1132                                 ret = ib_send_mad(mad_send_wr);
1133                 } else
1134                         ret = ib_send_mad(mad_send_wr);
1135                 if (ret < 0) {
1136                         /* Fail send request */
1137                         spin_lock_irqsave(&mad_agent_priv->lock, flags);
1138                         list_del(&mad_send_wr->agent_list);
1139                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1140                         atomic_dec(&mad_agent_priv->refcount);
1141                         goto error;
1142                 }
1143         }
1144         return 0;
1145 error:
1146         if (bad_send_buf)
1147                 *bad_send_buf = send_buf;
1148         return ret;
1149 }
1150 EXPORT_SYMBOL(ib_post_send_mad);
1151
1152 /*
1153  * ib_free_recv_mad - Returns data buffers used to receive
1154  *  a MAD to the access layer
1155  */
1156 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1157 {
1158         struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1159         struct ib_mad_private_header *mad_priv_hdr;
1160         struct ib_mad_private *priv;
1161         struct list_head free_list;
1162
1163         INIT_LIST_HEAD(&free_list);
1164         list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1165
1166         list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1167                                         &free_list, list) {
1168                 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1169                                            recv_buf);
1170                 mad_priv_hdr = container_of(mad_recv_wc,
1171                                             struct ib_mad_private_header,
1172                                             recv_wc);
1173                 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1174                                     header);
1175                 kmem_cache_free(ib_mad_cache, priv);
1176         }
1177 }
1178 EXPORT_SYMBOL(ib_free_recv_mad);
1179
1180 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1181                                         u8 rmpp_version,
1182                                         ib_mad_send_handler send_handler,
1183                                         ib_mad_recv_handler recv_handler,
1184                                         void *context)
1185 {
1186         return ERR_PTR(-EINVAL);        /* XXX: for now */
1187 }
1188 EXPORT_SYMBOL(ib_redirect_mad_qp);
1189
1190 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1191                       struct ib_wc *wc)
1192 {
1193         printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1194         return 0;
1195 }
1196 EXPORT_SYMBOL(ib_process_mad_wc);
1197
1198 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1199                          struct ib_mad_reg_req *mad_reg_req)
1200 {
1201         int i;
1202
1203         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1204                 if ((*method)->agent[i]) {
1205                         printk(KERN_ERR PFX "Method %d already in use\n", i);
1206                         return -EINVAL;
1207                 }
1208         }
1209         return 0;
1210 }
1211
1212 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1213 {
1214         /* Allocate management method table */
1215         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1216         if (!*method) {
1217                 printk(KERN_ERR PFX "No memory for "
1218                        "ib_mad_mgmt_method_table\n");
1219                 return -ENOMEM;
1220         }
1221
1222         return 0;
1223 }
1224
1225 /*
1226  * Check to see if there are any methods still in use
1227  */
1228 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1229 {
1230         int i;
1231
1232         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1233                 if (method->agent[i])
1234                         return 1;
1235         return 0;
1236 }
1237
1238 /*
1239  * Check to see if there are any method tables for this class still in use
1240  */
1241 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1242 {
1243         int i;
1244
1245         for (i = 0; i < MAX_MGMT_CLASS; i++)
1246                 if (class->method_table[i])
1247                         return 1;
1248         return 0;
1249 }
1250
1251 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1252 {
1253         int i;
1254
1255         for (i = 0; i < MAX_MGMT_OUI; i++)
1256                 if (vendor_class->method_table[i])
1257                         return 1;
1258         return 0;
1259 }
1260
1261 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1262                            char *oui)
1263 {
1264         int i;
1265
1266         for (i = 0; i < MAX_MGMT_OUI; i++)
1267                 /* Is there matching OUI for this vendor class ? */
1268                 if (!memcmp(vendor_class->oui[i], oui, 3))
1269                         return i;
1270
1271         return -1;
1272 }
1273
1274 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1275 {
1276         int i;
1277
1278         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1279                 if (vendor->vendor_class[i])
1280                         return 1;
1281
1282         return 0;
1283 }
1284
1285 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1286                                      struct ib_mad_agent_private *agent)
1287 {
1288         int i;
1289
1290         /* Remove any methods for this mad agent */
1291         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1292                 if (method->agent[i] == agent) {
1293                         method->agent[i] = NULL;
1294                 }
1295         }
1296 }
1297
1298 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1299                               struct ib_mad_agent_private *agent_priv,
1300                               u8 mgmt_class)
1301 {
1302         struct ib_mad_port_private *port_priv;
1303         struct ib_mad_mgmt_class_table **class;
1304         struct ib_mad_mgmt_method_table **method;
1305         int i, ret;
1306
1307         port_priv = agent_priv->qp_info->port_priv;
1308         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1309         if (!*class) {
1310                 /* Allocate management class table for "new" class version */
1311                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1312                 if (!*class) {
1313                         printk(KERN_ERR PFX "No memory for "
1314                                "ib_mad_mgmt_class_table\n");
1315                         ret = -ENOMEM;
1316                         goto error1;
1317                 }
1318
1319                 /* Allocate method table for this management class */
1320                 method = &(*class)->method_table[mgmt_class];
1321                 if ((ret = allocate_method_table(method)))
1322                         goto error2;
1323         } else {
1324                 method = &(*class)->method_table[mgmt_class];
1325                 if (!*method) {
1326                         /* Allocate method table for this management class */
1327                         if ((ret = allocate_method_table(method)))
1328                                 goto error1;
1329                 }
1330         }
1331
1332         /* Now, make sure methods are not already in use */
1333         if (method_in_use(method, mad_reg_req))
1334                 goto error3;
1335
1336         /* Finally, add in methods being registered */
1337         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1338                 (*method)->agent[i] = agent_priv;
1339
1340         return 0;
1341
1342 error3:
1343         /* Remove any methods for this mad agent */
1344         remove_methods_mad_agent(*method, agent_priv);
1345         /* Now, check to see if there are any methods in use */
1346         if (!check_method_table(*method)) {
1347                 /* If not, release management method table */
1348                 kfree(*method);
1349                 *method = NULL;
1350         }
1351         ret = -EINVAL;
1352         goto error1;
1353 error2:
1354         kfree(*class);
1355         *class = NULL;
1356 error1:
1357         return ret;
1358 }
1359
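/*
 * Add the methods from a registration request for a vendor management class
 * that carries an OUI (vendor range 2).  Allocates the vendor class table,
 * vendor class, and per-OUI method table as needed, then records the
 * registering agent for each requested method.
 */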
1360 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1361                            struct ib_mad_agent_private *agent_priv)
1362 {
1363         struct ib_mad_port_private *port_priv;
1364         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1365         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1366         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1367         struct ib_mad_mgmt_method_table **method;
1368         int i, ret = -ENOMEM;
1369         u8 vclass;
1370
1371         /* "New" vendor (with OUI) class */
1372         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1373         port_priv = agent_priv->qp_info->port_priv;
1374         vendor_table = &port_priv->version[
1375                                 mad_reg_req->mgmt_class_version].vendor;
1376         if (!*vendor_table) {
1377                 /* Allocate mgmt vendor class table for "new" class version */
1378                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1379                 if (!vendor) {
1380                         printk(KERN_ERR PFX "No memory for "
1381                                "ib_mad_mgmt_vendor_class_table\n");
1382                         goto error1;
1383                 }
1384
1385                 *vendor_table = vendor;
1386         }
1387         if (!(*vendor_table)->vendor_class[vclass]) {
1388                 /* Allocate table for this management vendor class */
1389                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1390                 if (!vendor_class) {
1391                         printk(KERN_ERR PFX "No memory for "
1392                                "ib_mad_mgmt_vendor_class\n");
1393                         goto error2;
1394                 }
1395
1396                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1397         }
1398         for (i = 0; i < MAX_MGMT_OUI; i++) {
1399                 /* Is there a matching OUI for this vendor class? */
1400                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1401                             mad_reg_req->oui, 3)) {
1402                         method = &(*vendor_table)->vendor_class[
1403                                                 vclass]->method_table[i];
1404                         BUG_ON(!*method);
1405                         goto check_in_use;
1406                 }
1407         }
1408         for (i = 0; i < MAX_MGMT_OUI; i++) {
1409                 /* OUI slot available? */
1410                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1411                                 vclass]->oui[i])) {
1412                         method = &(*vendor_table)->vendor_class[
1413                                 vclass]->method_table[i];
1414                         BUG_ON(*method);
1415                         /* Allocate method table for this OUI */
1416                         if ((ret = allocate_method_table(method)))
1417                                 goto error3;
1418                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1419                                mad_reg_req->oui, 3);
1420                         goto check_in_use;
1421                 }
1422         }
1423         printk(KERN_ERR PFX "All OUI slots in use\n");
1424         goto error3;
1425
1426 check_in_use:
1427         /* Now, make sure methods are not already in use */
1428         if (method_in_use(method, mad_reg_req))
1429                 goto error4;
1430
1431         /* Finally, add in methods being registered */
1432         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1433                 (*method)->agent[i] = agent_priv;
1434
1435         return 0;
1436
1437 error4:
1438         /* Remove any methods for this mad agent */
1439         remove_methods_mad_agent(*method, agent_priv);
1440         /* Now, check to see if there are any methods in use */
1441         if (!check_method_table(*method)) {
1442                 /* If not, release management method table */
1443                 kfree(*method);
1444                 *method = NULL;
1445         }
1446         ret = -EINVAL;
1447 error3:
1448         if (vendor_class) {
1449                 (*vendor_table)->vendor_class[vclass] = NULL;
1450                 kfree(vendor_class);
1451         }
1452 error2:
1453         if (vendor) {
1454                 *vendor_table = NULL;
1455                 kfree(vendor);
1456         }
1457 error1:
1458         return ret;
1459 }
1460
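/*
 * Undo an agent's MAD registration: remove its methods from the class or
 * vendor tables and free any tables that become empty as a result.
 */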
1461 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1462 {
1463         struct ib_mad_port_private *port_priv;
1464         struct ib_mad_mgmt_class_table *class;
1465         struct ib_mad_mgmt_method_table *method;
1466         struct ib_mad_mgmt_vendor_class_table *vendor;
1467         struct ib_mad_mgmt_vendor_class *vendor_class;
1468         int index;
1469         u8 mgmt_class;
1470
1471         /*
1472          * Was a MAD registration request supplied
1473          * with the original registration?
1474          */
1475         if (!agent_priv->reg_req) {
1476                 goto out;
1477         }
1478
1479         port_priv = agent_priv->qp_info->port_priv;
1480         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1481         class = port_priv->version[
1482                         agent_priv->reg_req->mgmt_class_version].class;
1483         if (!class)
1484                 goto vendor_check;
1485
1486         method = class->method_table[mgmt_class];
1487         if (method) {
1488                 /* Remove any methods for this mad agent */
1489                 remove_methods_mad_agent(method, agent_priv);
1490                 /* Now, check to see if there are any methods still in use */
1491                 if (!check_method_table(method)) {
1492                         /* If not, release management method table */
1493                         kfree(method);
1494                         class->method_table[mgmt_class] = NULL;
1495                         /* Any management classes left? */
1496                         if (!check_class_table(class)) {
1497                                 /* If not, release management class table */
1498                                 kfree(class);
1499                                 port_priv->version[
1500                                         agent_priv->reg_req->
1501                                         mgmt_class_version].class = NULL;
1502                         }
1503                 }
1504         }
1505
1506 vendor_check:
1507         if (!is_vendor_class(mgmt_class))
1508                 goto out;
1509
1510         /* normalize mgmt_class to vendor range 2 */
1511         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1512         vendor = port_priv->version[
1513                         agent_priv->reg_req->mgmt_class_version].vendor;
1514
1515         if (!vendor)
1516                 goto out;
1517
1518         vendor_class = vendor->vendor_class[mgmt_class];
1519         if (vendor_class) {
1520                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1521                 if (index < 0)
1522                         goto out;
1523                 method = vendor_class->method_table[index];
1524                 if (method) {
1525                         /* Remove any methods for this mad agent */
1526                         remove_methods_mad_agent(method, agent_priv);
1527                         /*
1528                          * Now, check to see if there are
1529                          * any methods still in use
1530                          */
1531                         if (!check_method_table(method)) {
1532                                 /* If not, release management method table */
1533                                 kfree(method);
1534                                 vendor_class->method_table[index] = NULL;
1535                                 memset(vendor_class->oui[index], 0, 3);
1536                                 /* Any OUIs left? */
1537                                 if (!check_vendor_class(vendor_class)) {
1538                                         /* If not, release vendor class table */
1539                                         kfree(vendor_class);
1540                                         vendor->vendor_class[mgmt_class] = NULL;
1541                                         /* Any other vendor classes left? */
1542                                         if (!check_vendor_table(vendor)) {
1543                                                 kfree(vendor);
1544                                                 port_priv->version[
1545                                                         agent_priv->reg_req->
1546                                                         mgmt_class_version].
1547                                                         vendor = NULL;
1548                                         }
1549                                 }
1550                         }
1551                 }
1552         }
1553
1554 out:
1555         return;
1556 }
1557
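/*
 * Locate the MAD agent that should receive an incoming MAD.  Responses are
 * routed by the high 32 bits of the transaction ID; other MADs are routed
 * by class version, management class, method, and (for vendor classes) OUI.
 * A reference is taken on the returned agent.
 */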
1558 static struct ib_mad_agent_private *
1559 find_mad_agent(struct ib_mad_port_private *port_priv,
1560                struct ib_mad *mad)
1561 {
1562         struct ib_mad_agent_private *mad_agent = NULL;
1563         unsigned long flags;
1564
1565         spin_lock_irqsave(&port_priv->reg_lock, flags);
1566         if (ib_response_mad(mad)) {
1567                 u32 hi_tid;
1568                 struct ib_mad_agent_private *entry;
1569
1570                 /*
1571                  * Routing is based on high 32 bits of transaction ID
1572                  * of MAD.
1573                  */
1574                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1575                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1576                         if (entry->agent.hi_tid == hi_tid) {
1577                                 mad_agent = entry;
1578                                 break;
1579                         }
1580                 }
1581         } else {
1582                 struct ib_mad_mgmt_class_table *class;
1583                 struct ib_mad_mgmt_method_table *method;
1584                 struct ib_mad_mgmt_vendor_class_table *vendor;
1585                 struct ib_mad_mgmt_vendor_class *vendor_class;
1586                 struct ib_vendor_mad *vendor_mad;
1587                 int index;
1588
1589                 /*
1590                  * Routing is based on version, class, and method
1591                  * For "newer" vendor MADs, also based on OUI
1592                  */
1593                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1594                         goto out;
1595                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1596                         class = port_priv->version[
1597                                         mad->mad_hdr.class_version].class;
1598                         if (!class)
1599                                 goto out;
1600                         if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1601                             IB_MGMT_MAX_METHODS)
1602                                 goto out;
1603                         method = class->method_table[convert_mgmt_class(
1604                                                         mad->mad_hdr.mgmt_class)];
1605                         if (method)
1606                                 mad_agent = method->agent[mad->mad_hdr.method &
1607                                                           ~IB_MGMT_METHOD_RESP];
1608                 } else {
1609                         vendor = port_priv->version[
1610                                         mad->mad_hdr.class_version].vendor;
1611                         if (!vendor)
1612                                 goto out;
1613                         vendor_class = vendor->vendor_class[vendor_class_index(
1614                                                 mad->mad_hdr.mgmt_class)];
1615                         if (!vendor_class)
1616                                 goto out;
1617                         /* Find matching OUI */
1618                         vendor_mad = (struct ib_vendor_mad *)mad;
1619                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1620                         if (index == -1)
1621                                 goto out;
1622                         method = vendor_class->method_table[index];
1623                         if (method) {
1624                                 mad_agent = method->agent[mad->mad_hdr.method &
1625                                                           ~IB_MGMT_METHOD_RESP];
1626                         }
1627                 }
1628         }
1629
1630         if (mad_agent) {
1631                 if (mad_agent->agent.recv_handler)
1632                         atomic_inc(&mad_agent->refcount);
1633                 else {
1634                         printk(KERN_NOTICE PFX "No receive handler for client "
1635                                "%p on port %d\n",
1636                                &mad_agent->agent, port_priv->port_num);
1637                         mad_agent = NULL;
1638                 }
1639         }
1640 out:
1641         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1642
1643         return mad_agent;
1644 }
1645
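/*
 * Basic sanity checks on a received MAD: the base version must be
 * supported, SMP classes are only accepted on QP0, and all other classes
 * only on a QP other than QP0.  Returns 1 if the MAD should be processed.
 */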
1646 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1647 {
1648         int valid = 0;
1649
1650         /* Make sure MAD base version is understood */
1651         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1652                 printk(KERN_ERR PFX "MAD received with unsupported base "
1653                        "version %d\n", mad->mad_hdr.base_version);
1654                 goto out;
1655         }
1656
1657         /* Filter SMI packets sent to other than QP0 */
1658         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1659             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1660                 if (qp_num == 0)
1661                         valid = 1;
1662         } else {
1663                 /* Filter GSI packets sent to QP0 */
1664                 if (qp_num != 0)
1665                         valid = 1;
1666         }
1667
1668 out:
1669         return valid;
1670 }
1671
1672 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1673                        struct ib_mad_hdr *mad_hdr)
1674 {
1675         struct ib_rmpp_mad *rmpp_mad;
1676
1677         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1678         return !mad_agent_priv->agent.rmpp_version ||
1679                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1680                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1681                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1682 }
1683
1684 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1685                                      struct ib_mad_recv_wc *rwc)
1686 {
1687         return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1688                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1689 }
1690
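/*
 * For a request/response pair, check that the address an outstanding send
 * was addressed to matches the address a received MAD arrived on, using
 * GRH GIDs when present and LIDs/path bits otherwise.  Returns 0 whenever
 * the comparison cannot be made reliably.
 */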
1691 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1692                                    struct ib_mad_send_wr_private *wr,
1693                                    struct ib_mad_recv_wc *rwc)
1694 {
1695         struct ib_ah_attr attr;
1696         u8 send_resp, rcv_resp;
1697         union ib_gid sgid;
1698         struct ib_device *device = mad_agent_priv->agent.device;
1699         u8 port_num = mad_agent_priv->agent.port_num;
1700         u8 lmc;
1701
1702         send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1703         rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1704
1705         if (send_resp == rcv_resp)
1706                 /* both requests or both responses: treat as not matching */
1707                 return 0;
1708
1709         if (ib_query_ah(wr->send_buf.ah, &attr))
1710                 /* Assume not equal, to avoid false positives. */
1711                 return 0;
1712
1713         if (!!(attr.ah_flags & IB_AH_GRH) !=
1714             !!(rwc->wc->wc_flags & IB_WC_GRH))
1715                 /* one has GID, other does not.  Assume different */
1716                 return 0;
1717
1718         if (!send_resp && rcv_resp) {
1719                 /* we sent a request and received the response */
1720                 if (!(attr.ah_flags & IB_AH_GRH)) {
1721                         if (ib_get_cached_lmc(device, port_num, &lmc))
1722                                 return 0;
1723                         return (!lmc || !((attr.src_path_bits ^
1724                                            rwc->wc->dlid_path_bits) &
1725                                           ((1 << lmc) - 1)));
1726                 } else {
1727                         if (ib_get_cached_gid(device, port_num,
1728                                               attr.grh.sgid_index, &sgid))
1729                                 return 0;
1730                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1731                                        16);
1732                 }
1733         }
1734
1735         if (!(attr.ah_flags & IB_AH_GRH))
1736                 return attr.dlid == rwc->wc->slid;
1737         else
1738                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1739                                16);
1740 }
1741
1742 static inline int is_direct(u8 class)
1743 {
1744         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1745 }
1746
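/*
 * Find the outstanding send that a received response matches: first on the
 * wait list, then on the send list in case the response arrived before the
 * send completion.  Matching uses the TID, the management class, and,
 * except for directed-route SMPs, the remote GID/LID.  Returns NULL if no
 * match is found or the request was canceled.
 */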
1747 struct ib_mad_send_wr_private*
1748 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1749                  struct ib_mad_recv_wc *wc)
1750 {
1751         struct ib_mad_send_wr_private *wr;
1752         struct ib_mad *mad;
1753
1754         mad = (struct ib_mad *)wc->recv_buf.mad;
1755
1756         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1757                 if ((wr->tid == mad->mad_hdr.tid) &&
1758                     rcv_has_same_class(wr, wc) &&
1759                     /*
1760                      * Don't check GID for direct routed MADs.
1761                      * These might have permissive LIDs.
1762                      */
1763                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1764                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1765                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1766         }
1767
1768         /*
1769          * It's possible to receive the response before we've
1770          * been notified that the send has completed
1771          */
1772         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1773                 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1774                     wr->tid == mad->mad_hdr.tid &&
1775                     wr->timeout &&
1776                     rcv_has_same_class(wr, wc) &&
1777                     /*
1778                      * Don't check GID for direct routed MADs.
1779                      * These might have permissive LIDs.
1780                      */
1781                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1782                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1783                         /* Verify request has not been canceled */
1784                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1785         }
1786         return NULL;
1787 }
1788
1789 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1790 {
1791         mad_send_wr->timeout = 0;
1792         if (mad_send_wr->refcount == 1)
1793                 list_move_tail(&mad_send_wr->agent_list,
1794                               &mad_send_wr->mad_agent_priv->done_list);
1795 }
1796
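/*
 * Hand a received MAD to its agent.  RMPP receives are reassembled first.
 * A response is matched to its outstanding send and, per the defined
 * ordering, delivered to the recv_handler before the send is completed.
 * The caller's reference on the agent is dropped on all paths.
 */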
1797 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1798                                  struct ib_mad_recv_wc *mad_recv_wc)
1799 {
1800         struct ib_mad_send_wr_private *mad_send_wr;
1801         struct ib_mad_send_wc mad_send_wc;
1802         unsigned long flags;
1803
1804         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1805         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1806         if (mad_agent_priv->agent.rmpp_version) {
1807                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1808                                                       mad_recv_wc);
1809                 if (!mad_recv_wc) {
1810                         deref_mad_agent(mad_agent_priv);
1811                         return;
1812                 }
1813         }
1814
1815         /* Complete corresponding request */
1816         if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1817                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1818                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1819                 if (!mad_send_wr) {
1820                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1821                         ib_free_recv_mad(mad_recv_wc);
1822                         deref_mad_agent(mad_agent_priv);
1823                         return;
1824                 }
1825                 ib_mark_mad_done(mad_send_wr);
1826                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1827
1828                 /* Defined behavior is to complete response before request */
1829                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1830                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1831                                                    mad_recv_wc);
1832                 atomic_dec(&mad_agent_priv->refcount);
1833
1834                 mad_send_wc.status = IB_WC_SUCCESS;
1835                 mad_send_wc.vendor_err = 0;
1836                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1837                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1838         } else {
1839                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1840                                                    mad_recv_wc);
1841                 deref_mad_agent(mad_agent_priv);
1842         }
1843 }
1844
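/*
 * If no agent claimed a GET or SET request, build a GET_RESP in the
 * response buffer reporting the method/attribute combination as
 * unsupported, so the sender is not left waiting.  Returns true if a
 * response was built.
 */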
1845 static bool generate_unmatched_resp(struct ib_mad_private *recv,
1846                                     struct ib_mad_private *response)
1847 {
1848         if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1849             recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1850                 memcpy(response, recv, sizeof *response);
1851                 response->header.recv_wc.wc = &response->header.wc;
1852                 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1853                 response->header.recv_wc.recv_buf.grh = &response->grh;
1854                 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1855                 response->mad.mad.mad_hdr.status =
1856                         cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1857
1858                 return true;
1859         } else {
1860                 return false;
1861         }
1862 }
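
/*
 * Receive completion handler: unmap the receive buffer, validate the MAD,
 * handle directed-route SMP processing/forwarding, offer the MAD to the
 * driver's process_mad hook, and finally dispatch it to the matching agent
 * or generate an "unsupported" response.  A receive buffer is always
 * reposted to the QP before returning.
 */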
1863 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1864                                      struct ib_wc *wc)
1865 {
1866         struct ib_mad_qp_info *qp_info;
1867         struct ib_mad_private_header *mad_priv_hdr;
1868         struct ib_mad_private *recv, *response = NULL;
1869         struct ib_mad_list_head *mad_list;
1870         struct ib_mad_agent_private *mad_agent;
1871         int port_num;
1872
1873         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1874         qp_info = mad_list->mad_queue->qp_info;
1875         dequeue_mad(mad_list);
1876
1877         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1878                                     mad_list);
1879         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1880         ib_dma_unmap_single(port_priv->device,
1881                             recv->header.mapping,
1882                             sizeof(struct ib_mad_private) -
1883                               sizeof(struct ib_mad_private_header),
1884                             DMA_FROM_DEVICE);
1885
1886         /* Setup MAD receive work completion from "normal" work completion */
1887         recv->header.wc = *wc;
1888         recv->header.recv_wc.wc = &recv->header.wc;
1889         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1890         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1891         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1892
1893         if (atomic_read(&qp_info->snoop_count))
1894                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1895
1896         /* Validate MAD */
1897         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1898                 goto out;
1899
1900         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1901         if (!response) {
1902                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1903                        "for response buffer\n");
1904                 goto out;
1905         }
1906
1907         if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1908                 port_num = wc->port_num;
1909         else
1910                 port_num = port_priv->port_num;
1911
1912         if (recv->mad.mad.mad_hdr.mgmt_class ==
1913             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1914                 enum smi_forward_action retsmi;
1915
1916                 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1917                                            port_priv->device->node_type,
1918                                            port_num,
1919                                            port_priv->device->phys_port_cnt) ==
1920                                            IB_SMI_DISCARD)
1921                         goto out;
1922
1923                 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1924                 if (retsmi == IB_SMI_LOCAL)
1925                         goto local;
1926
1927                 if (retsmi == IB_SMI_SEND) { /* don't forward */
1928                         if (smi_handle_dr_smp_send(&recv->mad.smp,
1929                                                    port_priv->device->node_type,
1930                                                    port_num) == IB_SMI_DISCARD)
1931                                 goto out;
1932
1933                         if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1934                                 goto out;
1935                 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1936                         /* forward case for switches */
1937                         memcpy(response, recv, sizeof(*response));
1938                         response->header.recv_wc.wc = &response->header.wc;
1939                         response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1940                         response->header.recv_wc.recv_buf.grh = &response->grh;
1941
1942                         agent_send_response(&response->mad.mad,
1943                                             &response->grh, wc,
1944                                             port_priv->device,
1945                                             smi_get_fwd_port(&recv->mad.smp),
1946                                             qp_info->qp->qp_num);
1947
1948                         goto out;
1949                 }
1950         }
1951
1952 local:
1953         /* Give driver "right of first refusal" on incoming MAD */
1954         if (port_priv->device->process_mad) {
1955                 int ret;
1956
1957                 ret = port_priv->device->process_mad(port_priv->device, 0,
1958                                                      port_priv->port_num,
1959                                                      wc, &recv->grh,
1960                                                      &recv->mad.mad,
1961                                                      &response->mad.mad);
1962                 if (ret & IB_MAD_RESULT_SUCCESS) {
1963                         if (ret & IB_MAD_RESULT_CONSUMED)
1964                                 goto out;
1965                         if (ret & IB_MAD_RESULT_REPLY) {
1966                                 agent_send_response(&response->mad.mad,
1967                                                     &recv->grh, wc,
1968                                                     port_priv->device,
1969                                                     port_num,
1970                                                     qp_info->qp->qp_num);
1971                                 goto out;
1972                         }
1973                 }
1974         }
1975
1976         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1977         if (mad_agent) {
1978                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1979                 /*
1980                  * recv is freed by ib_mad_complete_recv(), either in its
1981                  * error paths or by the client's recv_handler
1982                  */
1983                 recv = NULL;
1984         } else if (generate_unmatched_resp(recv, response)) {
1985                 agent_send_response(&response->mad.mad, &recv->grh, wc,
1986                                     port_priv->device, port_num, qp_info->qp->qp_num);
1987         }
1988
1989 out:
1990         /* Post another receive request for this QP */
1991         if (response) {
1992                 ib_mad_post_receive_mads(qp_info, response);
1993                 if (recv)
1994                         kmem_cache_free(ib_mad_cache, recv);
1995         } else
1996                 ib_mad_post_receive_mads(qp_info, recv);
1997 }
1998
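/*
 * Re-arm (or cancel) the agent's delayed timeout work so that it fires when
 * the earliest entry on the wait list expires.
 */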
1999 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2000 {
2001         struct ib_mad_send_wr_private *mad_send_wr;
2002         unsigned long delay;
2003
2004         if (list_empty(&mad_agent_priv->wait_list)) {
2005                 __cancel_delayed_work(&mad_agent_priv->timed_work);
2006         } else {
2007                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2008                                          struct ib_mad_send_wr_private,
2009                                          agent_list);
2010
2011                 if (time_after(mad_agent_priv->timeout,
2012                                mad_send_wr->timeout)) {
2013                         mad_agent_priv->timeout = mad_send_wr->timeout;
2014                         __cancel_delayed_work(&mad_agent_priv->timed_work);
2015                         delay = mad_send_wr->timeout - jiffies;
2016                         if ((long)delay <= 0)
2017                                 delay = 1;
2018                         queue_delayed_work(mad_agent_priv->qp_info->
2019                                            port_priv->wq,
2020                                            &mad_agent_priv->timed_work, delay);
2021                 }
2022         }
2023 }
2024
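/*
 * Move a sent MAD onto the agent's wait list, keeping the list ordered by
 * absolute timeout, and reschedule the timeout work if this send now has
 * the earliest deadline.
 */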
2025 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2026 {
2027         struct ib_mad_agent_private *mad_agent_priv;
2028         struct ib_mad_send_wr_private *temp_mad_send_wr;
2029         struct list_head *list_item;
2030         unsigned long delay;
2031
2032         mad_agent_priv = mad_send_wr->mad_agent_priv;
2033         list_del(&mad_send_wr->agent_list);
2034
2035         delay = mad_send_wr->timeout;
2036         mad_send_wr->timeout += jiffies;
2037
2038         if (delay) {
2039                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2040                         temp_mad_send_wr = list_entry(list_item,
2041                                                 struct ib_mad_send_wr_private,
2042                                                 agent_list);
2043                         if (time_after(mad_send_wr->timeout,
2044                                        temp_mad_send_wr->timeout))
2045                                 break;
2046                 }
2047         } else
2049                 list_item = &mad_agent_priv->wait_list;
2050         list_add(&mad_send_wr->agent_list, list_item);
2051
2052         /* Reschedule a work item if we have a shorter timeout */
2053         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2054                 __cancel_delayed_work(&mad_agent_priv->timed_work);
2055                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2056                                    &mad_agent_priv->timed_work, delay);
2057         }
2058 }
2059
2060 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2061                           int timeout_ms)
2062 {
2063         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2064         wait_for_response(mad_send_wr);
2065 }
2066
2067 /*
2068  * Process a send work completion
2069  */
2070 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2071                              struct ib_mad_send_wc *mad_send_wc)
2072 {
2073         struct ib_mad_agent_private     *mad_agent_priv;
2074         unsigned long                   flags;
2075         int                             ret;
2076
2077         mad_agent_priv = mad_send_wr->mad_agent_priv;
2078         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2079         if (mad_agent_priv->agent.rmpp_version) {
2080                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2081                 if (ret == IB_RMPP_RESULT_CONSUMED)
2082                         goto done;
2083         } else
2084                 ret = IB_RMPP_RESULT_UNHANDLED;
2085
2086         if (mad_send_wc->status != IB_WC_SUCCESS &&
2087             mad_send_wr->status == IB_WC_SUCCESS) {
2088                 mad_send_wr->status = mad_send_wc->status;
2089                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2090         }
2091
2092         if (--mad_send_wr->refcount > 0) {
2093                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2094                     mad_send_wr->status == IB_WC_SUCCESS) {
2095                         wait_for_response(mad_send_wr);
2096                 }
2097                 goto done;
2098         }
2099
2100         /* Remove send from MAD agent and notify client of completion */
2101         list_del(&mad_send_wr->agent_list);
2102         adjust_timeout(mad_agent_priv);
2103         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2104
2105         if (mad_send_wr->status != IB_WC_SUCCESS)
2106                 mad_send_wc->status = mad_send_wr->status;
2107         if (ret == IB_RMPP_RESULT_INTERNAL)
2108                 ib_rmpp_send_handler(mad_send_wc);
2109         else
2110                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2111                                                    mad_send_wc);
2112
2113         /* Release reference on agent taken when sending */
2114         deref_mad_agent(mad_agent_priv);
2115         return;
2116 done:
2117         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2118 }
2119
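/*
 * Send completion handler: unmap the send buffers, promote any send queued
 * on the overflow list to the hardware send queue, and complete the work
 * request.  A queued send that fails to post is completed with
 * IB_WC_LOC_QP_OP_ERR.
 */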
2120 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2121                                      struct ib_wc *wc)
2122 {
2123         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2124         struct ib_mad_list_head         *mad_list;
2125         struct ib_mad_qp_info           *qp_info;
2126         struct ib_mad_queue             *send_queue;
2127         struct ib_send_wr               *bad_send_wr;
2128         struct ib_mad_send_wc           mad_send_wc;
2129         unsigned long flags;
2130         int ret;
2131
2132         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2133         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2134                                    mad_list);
2135         send_queue = mad_list->mad_queue;
2136         qp_info = send_queue->qp_info;
2137
2138 retry:
2139         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2140                             mad_send_wr->header_mapping,
2141                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2142         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2143                             mad_send_wr->payload_mapping,
2144                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2145         queued_send_wr = NULL;
2146         spin_lock_irqsave(&send_queue->lock, flags);
2147         list_del(&mad_list->list);
2148
2149         /* Move queued send to the send queue */
2150         if (send_queue->count-- > send_queue->max_active) {
2151                 mad_list = container_of(qp_info->overflow_list.next,
2152                                         struct ib_mad_list_head, list);
2153                 queued_send_wr = container_of(mad_list,
2154                                         struct ib_mad_send_wr_private,
2155                                         mad_list);
2156                 list_move_tail(&mad_list->list, &send_queue->list);
2157         }
2158         spin_unlock_irqrestore(&send_queue->lock, flags);
2159
2160         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2161         mad_send_wc.status = wc->status;
2162         mad_send_wc.vendor_err = wc->vendor_err;
2163         if (atomic_read(&qp_info->snoop_count))
2164                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2165                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2166         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2167
2168         if (queued_send_wr) {
2169                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2170                                    &bad_send_wr);
2171                 if (ret) {
2172                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2173                         mad_send_wr = queued_send_wr;
2174                         wc->status = IB_WC_LOC_QP_OP_ERR;
2175                         goto retry;
2176                 }
2177         }
2178 }
2179
2180 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2181 {
2182         struct ib_mad_send_wr_private *mad_send_wr;
2183         struct ib_mad_list_head *mad_list;
2184         unsigned long flags;
2185
2186         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2187         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2188                 mad_send_wr = container_of(mad_list,
2189                                            struct ib_mad_send_wr_private,
2190                                            mad_list);
2191                 mad_send_wr->retry = 1;
2192         }
2193         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2194 }
2195
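/*
 * Handle a completion with error status.  Receive errors are left to the
 * QP teardown path.  A flushed send is reposted once if it was marked for
 * retry; other send errors move the QP from SQE back to RTS and mark the
 * remaining sends for retry before completing the failed send.
 */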
2196 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2197                               struct ib_wc *wc)
2198 {
2199         struct ib_mad_list_head *mad_list;
2200         struct ib_mad_qp_info *qp_info;
2201         struct ib_mad_send_wr_private *mad_send_wr;
2202         int ret;
2203
2204         /* Determine if failure was a send or receive */
2205         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2206         qp_info = mad_list->mad_queue->qp_info;
2207         if (mad_list->mad_queue == &qp_info->recv_queue)
2208                 /*
2209                  * Receive errors indicate that the QP has entered the error
2210                  * state - error handling/shutdown code will cleanup
2211                  */
2212                 return;
2213
2214         /*
2215          * Send errors will transition the QP to SQE - move
2216          * QP to RTS and repost flushed work requests
2217          */
2218         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2219                                    mad_list);
2220         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2221                 if (mad_send_wr->retry) {
2222                         /* Repost send */
2223                         struct ib_send_wr *bad_send_wr;
2224
2225                         mad_send_wr->retry = 0;
2226                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2227                                         &bad_send_wr);
2228                         if (ret)
2229                                 ib_mad_send_done_handler(port_priv, wc);
2230                 } else
2231                         ib_mad_send_done_handler(port_priv, wc);
2232         } else {
2233                 struct ib_qp_attr *attr;
2234
2235                 /* Transition QP to RTS and fail offending send */
2236                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2237                 if (attr) {
2238                         attr->qp_state = IB_QPS_RTS;
2239                         attr->cur_qp_state = IB_QPS_SQE;
2240                         ret = ib_modify_qp(qp_info->qp, attr,
2241                                            IB_QP_STATE | IB_QP_CUR_STATE);
2242                         kfree(attr);
2243                         if (ret)
2244                                 printk(KERN_ERR PFX "mad_error_handler - "
2245                                        "ib_modify_qp to RTS : %d\n", ret);
2246                         else
2247                                 mark_sends_for_retry(qp_info);
2248                 }
2249                 ib_mad_send_done_handler(port_priv, wc);
2250         }
2251 }
2252
2253 /*
2254  * IB MAD completion callback
2255  */
2256 static void ib_mad_completion_handler(struct work_struct *work)
2257 {
2258         struct ib_mad_port_private *port_priv;
2259         struct ib_wc wc;
2260
2261         port_priv = container_of(work, struct ib_mad_port_private, work);
2262         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2263
2264         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2265                 if (wc.status == IB_WC_SUCCESS) {
2266                         switch (wc.opcode) {
2267                         case IB_WC_SEND:
2268                                 ib_mad_send_done_handler(port_priv, &wc);
2269                                 break;
2270                         case IB_WC_RECV:
2271                                 ib_mad_recv_done_handler(port_priv, &wc);
2272                                 break;
2273                         default:
2274                                 BUG_ON(1);
2275                                 break;
2276                         }
2277                 } else
2278                         mad_error_handler(port_priv, &wc);
2279         }
2280 }
2281
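/*
 * Cancel every outstanding send for an agent (used when the agent
 * unregisters): flush the send list, empty the wait list, and report each
 * canceled request to the client with IB_WC_WR_FLUSH_ERR.
 */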
2282 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2283 {
2284         unsigned long flags;
2285         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2286         struct ib_mad_send_wc mad_send_wc;
2287         struct list_head cancel_list;
2288
2289         INIT_LIST_HEAD(&cancel_list);
2290
2291         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2292         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2293                                  &mad_agent_priv->send_list, agent_list) {
2294                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2295                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2296                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2297                 }
2298         }
2299
2300         /* Empty wait list to prevent receives from finding a request */
2301         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2302         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2303
2304         /* Report all cancelled requests */
2305         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2306         mad_send_wc.vendor_err = 0;
2307
2308         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2309                                  &cancel_list, agent_list) {
2310                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2311                 list_del(&mad_send_wr->agent_list);
2312                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2313                                                    &mad_send_wc);
2314                 atomic_dec(&mad_agent_priv->refcount);
2315         }
2316 }
2317
2318 static struct ib_mad_send_wr_private*
2319 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2320              struct ib_mad_send_buf *send_buf)
2321 {
2322         struct ib_mad_send_wr_private *mad_send_wr;
2323
2324         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2325                             agent_list) {
2326                 if (&mad_send_wr->send_buf == send_buf)
2327                         return mad_send_wr;
2328         }
2329
2330         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2331                             agent_list) {
2332                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2333                     &mad_send_wr->send_buf == send_buf)
2334                         return mad_send_wr;
2335         }
2336         return NULL;
2337 }
2338
2339 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2340                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2341 {
2342         struct ib_mad_agent_private *mad_agent_priv;
2343         struct ib_mad_send_wr_private *mad_send_wr;
2344         unsigned long flags;
2345         int active;
2346
2347         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2348                                       agent);
2349         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2350         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2351         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2352                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2353                 return -EINVAL;
2354         }
2355
2356         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2357         if (!timeout_ms) {
2358                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2359                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2360         }
2361
2362         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2363         if (active)
2364                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2365         else
2366                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2367
2368         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2369         return 0;
2370 }
2371 EXPORT_SYMBOL(ib_modify_mad);
2372
2373 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2374                    struct ib_mad_send_buf *send_buf)
2375 {
2376         ib_modify_mad(mad_agent, send_buf, 0);
2377 }
2378 EXPORT_SYMBOL(ib_cancel_mad);
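
/*
 * Illustrative sketch (not part of this driver): a client that posted a MAD
 * with ib_post_send_mad() and kept its struct ib_mad_send_buf pointer can
 * shorten the timeout or cancel the send.  The "agent" and "send_buf" names
 * below are placeholders for whatever the caller obtained from
 * ib_register_mad_agent() and ib_create_send_mad():
 *
 *	if (ib_modify_mad(agent, send_buf, 500))	// wait another 500 ms
 *		pr_debug("MAD already completed or canceled\n");
 *
 *	ib_cancel_mad(agent, send_buf);	// or abort it; the send_handler then
 *					// sees IB_WC_WR_FLUSH_ERR for this buf
 */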
2379
2380 static void local_completions(struct work_struct *work)
2381 {
2382         struct ib_mad_agent_private *mad_agent_priv;
2383         struct ib_mad_local_private *local;
2384         struct ib_mad_agent_private *recv_mad_agent;
2385         unsigned long flags;
2386         int free_mad;
2387         struct ib_wc wc;
2388         struct ib_mad_send_wc mad_send_wc;
2389
2390         mad_agent_priv =
2391                 container_of(work, struct ib_mad_agent_private, local_work);
2392
2393         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2394         while (!list_empty(&mad_agent_priv->local_list)) {
2395                 local = list_entry(mad_agent_priv->local_list.next,
2396                                    struct ib_mad_local_private,
2397                                    completion_list);
2398                 list_del(&local->completion_list);
2399                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2400                 free_mad = 0;
2401                 if (local->mad_priv) {
2402                         recv_mad_agent = local->recv_mad_agent;
2403                         if (!recv_mad_agent) {
2404                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2405                                 free_mad = 1;
2406                                 goto local_send_completion;
2407                         }
2408
2409                         /*
2410                          * Defined behavior is to complete response
2411                          * before request
2412                          */
2413                         build_smp_wc(recv_mad_agent->agent.qp,
2414                                      (unsigned long) local->mad_send_wr,
2415                                      be16_to_cpu(IB_LID_PERMISSIVE),
2416                                      0, recv_mad_agent->agent.port_num, &wc);
2417
2418                         local->mad_priv->header.recv_wc.wc = &wc;
2419                         local->mad_priv->header.recv_wc.mad_len =
2420                                                 sizeof(struct ib_mad);
2421                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2422                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2423                                  &local->mad_priv->header.recv_wc.rmpp_list);
2424                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2425                         local->mad_priv->header.recv_wc.recv_buf.mad =
2426                                                 &local->mad_priv->mad.mad;
2427                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2428                                 snoop_recv(recv_mad_agent->qp_info,
2429                                           &local->mad_priv->header.recv_wc,
2430                                            IB_MAD_SNOOP_RECVS);
2431                         recv_mad_agent->agent.recv_handler(
2432                                                 &recv_mad_agent->agent,
2433                                                 &local->mad_priv->header.recv_wc);
2434                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2435                         atomic_dec(&recv_mad_agent->refcount);
2436                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2437                 }
2438
2439 local_send_completion:
2440                 /* Complete send */
2441                 mad_send_wc.status = IB_WC_SUCCESS;
2442                 mad_send_wc.vendor_err = 0;
2443                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2444                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2445                         snoop_send(mad_agent_priv->qp_info,
2446                                    &local->mad_send_wr->send_buf,
2447                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2448                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2449                                                    &mad_send_wc);
2450
2451                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2452                 atomic_dec(&mad_agent_priv->refcount);
2453                 if (free_mad)
2454                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2455                 kfree(local);
2456         }
2457         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2458 }
2459
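/*
 * Resend a timed-out MAD if it has retries left, re-arming its timeout and
 * putting it back on the send list.  Returns -ETIMEDOUT once the retry
 * budget is exhausted.
 */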
2460 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2461 {
2462         int ret;
2463
2464         if (!mad_send_wr->retries_left)
2465                 return -ETIMEDOUT;
2466
2467         mad_send_wr->retries_left--;
2468         mad_send_wr->send_buf.retries++;
2469
2470         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2471
2472         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2473                 ret = ib_retry_rmpp(mad_send_wr);
2474                 switch (ret) {
2475                 case IB_RMPP_RESULT_UNHANDLED:
2476                         ret = ib_send_mad(mad_send_wr);
2477                         break;
2478                 case IB_RMPP_RESULT_CONSUMED:
2479                         ret = 0;
2480                         break;
2481                 default:
2482                         ret = -ECOMM;
2483                         break;
2484                 }
2485         } else
2486                 ret = ib_send_mad(mad_send_wr);
2487
2488         if (!ret) {
2489                 mad_send_wr->refcount++;
2490                 list_add_tail(&mad_send_wr->agent_list,
2491                               &mad_send_wr->mad_agent_priv->send_list);
2492         }
2493         return ret;
2494 }
2495
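/*
 * Delayed-work handler for MAD timeouts: retry or complete each expired
 * entry on the wait list, then reschedule itself for the next deadline.
 */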
2496 static void timeout_sends(struct work_struct *work)
2497 {
2498         struct ib_mad_agent_private *mad_agent_priv;
2499         struct ib_mad_send_wr_private *mad_send_wr;
2500         struct ib_mad_send_wc mad_send_wc;
2501         unsigned long flags, delay;
2502
2503         mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2504                                       timed_work.work);
2505         mad_send_wc.vendor_err = 0;
2506
2507         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2508         while (!list_empty(&mad_agent_priv->wait_list)) {
2509                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2510                                          struct ib_mad_send_wr_private,
2511                                          agent_list);
2512
2513                 if (time_after(mad_send_wr->timeout, jiffies)) {
2514                         delay = mad_send_wr->timeout - jiffies;
2515                         if ((long)delay <= 0)
2516                                 delay = 1;
2517                         queue_delayed_work(mad_agent_priv->qp_info->
2518                                            port_priv->wq,
2519                                            &mad_agent_priv->timed_work, delay);
2520                         break;
2521                 }
2522
2523                 list_del(&mad_send_wr->agent_list);
2524                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2525                     !retry_send(mad_send_wr))
2526                         continue;
2527
2528                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2529
2530                 if (mad_send_wr->status == IB_WC_SUCCESS)
2531                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2532                 else
2533                         mad_send_wc.status = mad_send_wr->status;
2534                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2535                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2536                                                    &mad_send_wc);
2537
2538                 atomic_dec(&mad_agent_priv->refcount);
2539                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2540         }
2541         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2542 }
2543
2544 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2545 {
2546         struct ib_mad_port_private *port_priv = cq->cq_context;
2547         unsigned long flags;
2548
2549         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2550         if (!list_empty(&port_priv->port_list))
2551                 queue_work(port_priv->wq, &port_priv->work);
2552         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2553 }
2554
2555 /*
2556  * Allocate receive MADs and post receive WRs for them
2557  */
2558 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2559                                     struct ib_mad_private *mad)
2560 {
2561         unsigned long flags;
2562         int post, ret;
2563         struct ib_mad_private *mad_priv;
2564         struct ib_sge sg_list;
2565         struct ib_recv_wr recv_wr, *bad_recv_wr;
2566         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2567
2568         /* Initialize common scatter list fields */
2569         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2570         sg_list.lkey = qp_info->port_priv->mr->lkey;
2571
2572         /* Initialize common receive WR fields */
2573         recv_wr.next = NULL;
2574         recv_wr.sg_list = &sg_list;
2575         recv_wr.num_sge = 1;
2576
        do {
                /* Allocate and map receive buffer */
                if (mad) {
                        mad_priv = mad;
                        mad = NULL;
                } else {
                        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
                        if (!mad_priv) {
                                printk(KERN_ERR PFX "No memory for receive buffer\n");
                                ret = -ENOMEM;
                                break;
                        }
                }
                sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                                 &mad_priv->grh,
                                                 sizeof *mad_priv -
                                                   sizeof mad_priv->header,
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        ret = -ENOMEM;
                        break;
                }
                mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;

                /* Post receive WR */
                spin_lock_irqsave(&recv_queue->lock, flags);
                post = (++recv_queue->count < recv_queue->max_active);
                list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
                spin_unlock_irqrestore(&recv_queue->lock, flags);
                ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
                if (ret) {
                        spin_lock_irqsave(&recv_queue->lock, flags);
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
                        ib_dma_unmap_single(qp_info->port_priv->device,
                                            mad_priv->header.mapping,
                                            sizeof *mad_priv -
                                              sizeof mad_priv->header,
                                            DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                        break;
                }
        } while (post);

        return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;

        if (!qp_info->qp)
                return;

        while (!list_empty(&qp_info->recv_queue.list)) {

                mad_list = list_entry(qp_info->recv_queue.list.next,
                                      struct ib_mad_list_head, list);
                mad_priv_hdr = container_of(mad_list,
                                            struct ib_mad_private_header,
                                            mad_list);
                recv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);

                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);

                ib_dma_unmap_single(qp_info->port_priv->device,
                                    recv->header.mapping,
                                    sizeof(struct ib_mad_private) -
                                      sizeof(struct ib_mad_private_header),
                                    DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }

        qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }

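        /* Transition each MAD QP through INIT and RTR to RTS */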
        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                if (!qp)
                        continue;

                /*
                 * PKey index for QP1 is irrelevant but
                 * one is needed for the Reset to Init transition
                 */
                attr->qp_state = IB_QPS_INIT;
                attr->pkey_index = 0;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "INIT: %d\n", i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTR;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "RTR: %d\n", i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTS;
                attr->sq_psn = IB_MAD_SEND_Q_PSN;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "RTS: %d\n", i, ret);
                        goto out;
                }
        }

        ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                printk(KERN_ERR PFX "Failed to request completion "
                       "notification: %d\n", ret);
                goto out;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                if (!port_priv->qp_info[i].qp)
                        continue;

                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't post receive WRs\n");
                        goto out;
                }
        }
out:
        kfree(attr);
        return ret;
}

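/*
 * Async event handler for the MAD QPs: fatal errors are only logged;
 * no recovery is attempted here.
 */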
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct ib_mad_qp_info   *qp_info = qp_context;

        /* It's worse than that! He's dead, Jim! */
        printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
                event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
                           struct ib_mad_queue *mad_queue)
{
        mad_queue->qp_info = qp_info;
        mad_queue->count = 0;
        spin_lock_init(&mad_queue->lock);
        INIT_LIST_HEAD(&mad_queue->list);
}

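/* Initialize the queues and snoop state for one MAD QP */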
static void init_mad_qp(struct ib_mad_port_private *port_priv,
                        struct ib_mad_qp_info *qp_info)
{
        qp_info->port_priv = port_priv;
        init_mad_queue(qp_info, &qp_info->send_queue);
        init_mad_queue(qp_info, &qp_info->recv_queue);
        INIT_LIST_HEAD(&qp_info->overflow_list);
        spin_lock_init(&qp_info->snoop_lock);
        qp_info->snoop_table = NULL;
        qp_info->snoop_table_size = 0;
        atomic_set(&qp_info->snoop_count, 0);
}

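/*
 * Create an SMI or GSI special QP for the port, sharing the port's CQ
 * for both send and receive completions.
 */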
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
        struct ib_qp_init_attr  qp_init_attr;
        int ret;

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.cap.max_send_wr = mad_sendq_size;
        qp_init_attr.cap.max_recv_wr = mad_recvq_size;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
        qp_init_attr.port_num = qp_info->port_priv->port_num;
        qp_init_attr.qp_context = qp_info;
        qp_init_attr.event_handler = qp_event_handler;
        qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
        if (IS_ERR(qp_info->qp)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
                       get_spl_qp_index(qp_type));
                ret = PTR_ERR(qp_info->qp);
                goto error;
        }
        /* Use minimum queue sizes unless the CQ is resized */
        qp_info->send_queue.max_active = mad_sendq_size;
        qp_info->recv_queue.max_active = mad_recvq_size;
        return 0;

error:
        return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
        if (!qp_info->qp)
                return;

        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
                            int port_num)
{
        int ret, cq_size;
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];
        int has_smi;

        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
                return -ENOMEM;
        }

        port_priv->device = device;
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->reg_lock);
        INIT_LIST_HEAD(&port_priv->agent_list);
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);

        cq_size = mad_sendq_size + mad_recvq_size;
        has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
        if (has_smi)
                cq_size *= 2;

        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, cq_size, 0);
        if (IS_ERR(port_priv->cq)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
                goto error3;
        }

        port_priv->pd = ib_alloc_pd(device);
        if (IS_ERR(port_priv->pd)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
                goto error4;
        }

        port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(port_priv->mr)) {
                printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
                ret = PTR_ERR(port_priv->mr);
                goto error5;
        }

        if (has_smi) {
                ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
                if (ret)
                        goto error6;
        }
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;

        snprintf(name, sizeof name, "ib_mad%d", port_num);
        port_priv->wq = create_singlethread_workqueue(name);
        if (!port_priv->wq) {
                ret = -ENOMEM;
                goto error8;
        }
        INIT_WORK(&port_priv->work, ib_mad_completion_handler);

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        ret = ib_mad_port_start(port_priv);
        if (ret) {
                printk(KERN_ERR PFX "Couldn't start port\n");
                goto error9;
        }

        return 0;

error9:
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
error8:
        destroy_mad_qp(&port_priv->qp_info[1]);
error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
error6:
        ib_dereg_mr(port_priv->mr);
error5:
        ib_dealloc_pd(port_priv->pd);
error4:
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
        kfree(port_priv);

        return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        port_priv = __ib_get_mad_port(device, port_num);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
                printk(KERN_ERR PFX "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
        ib_dereg_mr(port_priv->mr);
        ib_dealloc_pd(port_priv->pd);
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */

        kfree(port_priv);

        return 0;
}

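/*
 * Per-device client "add" callback: bring up MAD and agent services on
 * every port (switches use port 0 only), tearing down everything opened
 * so far if any port fails.
 */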
static void ib_mad_init_device(struct ib_device *device)
{
        int start, end, i;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (ib_mad_port_open(device, i)) {
                        printk(KERN_ERR PFX "Couldn't open %s port %d\n",
                               device->name, i);
                        goto error;
                }
                if (ib_agent_port_open(device, i)) {
                        printk(KERN_ERR PFX "Couldn't open %s port %d "
                               "for agents\n",
                               device->name, i);
                        goto error_agent;
                }
        }
        return;

error_agent:
        if (ib_mad_port_close(device, i))
                printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                       device->name, i);

error:
        i--;

        while (i >= start) {
                if (ib_agent_port_close(device, i))
                        printk(KERN_ERR PFX "Couldn't close %s port %d "
                               "for agents\n",
                               device->name, i);
                if (ib_mad_port_close(device, i))
                        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                               device->name, i);
                i--;
        }
}

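/*
 * Per-device client "remove" callback: close the agent and MAD services
 * on every port of the device.
 */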
static void ib_mad_remove_device(struct ib_device *device)
{
        int i, num_ports, cur_port;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                num_ports = 1;
                cur_port = 0;
        } else {
                num_ports = device->phys_port_cnt;
                cur_port = 1;
        }
        for (i = 0; i < num_ports; i++, cur_port++) {
                if (ib_agent_port_close(device, cur_port))
                        printk(KERN_ERR PFX "Couldn't close %s port %d "
                               "for agents\n",
                               device->name, cur_port);
                if (ib_mad_port_close(device, cur_port))
                        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                               device->name, cur_port);
        }
}

static struct ib_client mad_client = {
        .name   = "mad",
        .add = ib_mad_init_device,
        .remove = ib_mad_remove_device
};

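/*
 * Module initialization: clamp the queue size parameters, create the
 * slab cache used for MAD buffers and register as an IB client.
 */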
static int __init ib_mad_init_module(void)
{
        int ret;

        mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
        mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
                                         NULL);
        if (!ib_mad_cache) {
                printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
                ret = -ENOMEM;
                goto error1;
        }

        INIT_LIST_HEAD(&ib_mad_port_list);

        if (ib_register_client(&mad_client)) {
                printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
                ret = -EINVAL;
                goto error2;
        }

        return 0;

error2:
        kmem_cache_destroy(ib_mad_cache);
error1:
        return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
        ib_unregister_client(&mad_client);
        kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);