drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
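/*
 * qla2x00_bsg_job_done() - SRB completion callback for BSG pass-through
 * commands: record the result in the bsg reply, complete the job and
 * release the SRB.
 */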
14 void
15 qla2x00_bsg_job_done(void *data, void *ptr, int res)
16 {
17         srb_t *sp = (srb_t *)ptr;
18         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21         bsg_job->reply->result = res;
22         bsg_job->job_done(bsg_job);
23         sp->free(vha, sp);
24 }
25
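/*
 * qla2x00_bsg_sp_free() - release a BSG SRB: unmap the request/reply
 * scatter-gather lists, free the dummy fcport used for host-based CT/ELS
 * commands and return the SRB to the mempool.
 */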
26 void
27 qla2x00_bsg_sp_free(void *data, void *ptr)
28 {
29         srb_t *sp = (srb_t *)ptr;
30         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
31         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
32         struct qla_hw_data *ha = vha->hw;
33
34         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
35             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
36
37         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
38             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
39
40         if (sp->type == SRB_CT_CMD ||
41             sp->type == SRB_ELS_CMD_HST)
42                 kfree(sp->fcport);
43         mempool_free(sp, vha->hw->srb_mempool);
44 }
45
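/*
 * qla24xx_fcp_prio_cfg_valid() - validate an FCP priority configuration
 * blob: check for the 'HQOS' signature and, when @flag is set, require at
 * least one entry with a valid tag.  Returns 1 if the data is usable,
 * 0 otherwise.
 */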
46 int
47 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
48         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
49 {
50         int i, ret, num_valid;
51         uint8_t *bcode;
52         struct qla_fcp_prio_entry *pri_entry;
53         uint32_t *bcode_val_ptr, bcode_val;
54
55         ret = 1;
56         num_valid = 0;
57         bcode = (uint8_t *)pri_cfg;
58         bcode_val_ptr = (uint32_t *)pri_cfg;
59         bcode_val = (uint32_t)(*bcode_val_ptr);
60
61         if (bcode_val == 0xFFFFFFFF) {
62                 /* No FCP Priority config data in flash */
63                 ql_dbg(ql_dbg_user, vha, 0x7051,
64                     "No FCP Priority config data.\n");
65                 return 0;
66         }
67
68         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
69                         bcode[3] != 'S') {
70                 /* Invalid FCP priority data header */
71                 ql_dbg(ql_dbg_user, vha, 0x7052,
72                     "Invalid FCP Priority data header. bcode=0x%x.\n",
73                     bcode_val);
74                 return 0;
75         }
76         if (flag != 1)
77                 return ret;
78
79         pri_entry = &pri_cfg->entry[0];
80         for (i = 0; i < pri_cfg->num_entries; i++) {
81                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
82                         num_valid++;
83                 pri_entry++;
84         }
85
86         if (num_valid == 0) {
87                 /* No valid FCP priority data entries */
88                 ql_dbg(ql_dbg_user, vha, 0x7053,
89                     "No valid FCP Priority data entries.\n");
90                 ret = 0;
91         } else {
92                 /* FCP priority data is valid */
93                 ql_dbg(ql_dbg_user, vha, 0x7054,
94                     "Valid FCP priority data. num entries = %d.\n",
95                     num_valid);
96         }
97
98         return ret;
99 }
100
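/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - vendor BSG command to enable/disable
 * FCP priority handling and to read or update the cached configuration
 * (ha->fcp_prio_cfg).
 */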
101 static int
102 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
103 {
104         struct Scsi_Host *host = bsg_job->shost;
105         scsi_qla_host_t *vha = shost_priv(host);
106         struct qla_hw_data *ha = vha->hw;
107         int ret = 0;
108         uint32_t len;
109         uint32_t oper;
110
111         if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
112                 ret = -EINVAL;
113                 goto exit_fcp_prio_cfg;
114         }
115
116         /* Get the sub command */
117         oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
118
119         /* Only set config is allowed if config memory is not allocated */
120         if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
121                 ret = -EINVAL;
122                 goto exit_fcp_prio_cfg;
123         }
124         switch (oper) {
125         case QLFC_FCP_PRIO_DISABLE:
126                 if (ha->flags.fcp_prio_enabled) {
127                         ha->flags.fcp_prio_enabled = 0;
128                         ha->fcp_prio_cfg->attributes &=
129                                 ~FCP_PRIO_ATTR_ENABLE;
130                         qla24xx_update_all_fcp_prio(vha);
131                         bsg_job->reply->result = DID_OK;
132                 } else {
133                         ret = -EINVAL;
134                         bsg_job->reply->result = (DID_ERROR << 16);
135                         goto exit_fcp_prio_cfg;
136                 }
137                 break;
138
139         case QLFC_FCP_PRIO_ENABLE:
140                 if (!ha->flags.fcp_prio_enabled) {
141                         if (ha->fcp_prio_cfg) {
142                                 ha->flags.fcp_prio_enabled = 1;
143                                 ha->fcp_prio_cfg->attributes |=
144                                     FCP_PRIO_ATTR_ENABLE;
145                                 qla24xx_update_all_fcp_prio(vha);
146                                 bsg_job->reply->result = DID_OK;
147                         } else {
148                                 ret = -EINVAL;
149                                 bsg_job->reply->result = (DID_ERROR << 16);
150                                 goto exit_fcp_prio_cfg;
151                         }
152                 }
153                 break;
154
155         case QLFC_FCP_PRIO_GET_CONFIG:
156                 len = bsg_job->reply_payload.payload_len;
157                 if (!len || len > FCP_PRIO_CFG_SIZE) {
158                         ret = -EINVAL;
159                         bsg_job->reply->result = (DID_ERROR << 16);
160                         goto exit_fcp_prio_cfg;
161                 }
162
163                 bsg_job->reply->result = DID_OK;
164                 bsg_job->reply->reply_payload_rcv_len =
165                         sg_copy_from_buffer(
166                         bsg_job->reply_payload.sg_list,
167                         bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
168                         len);
169
170                 break;
171
172         case QLFC_FCP_PRIO_SET_CONFIG:
173                 len = bsg_job->request_payload.payload_len;
174                 if (!len || len > FCP_PRIO_CFG_SIZE) {
175                         bsg_job->reply->result = (DID_ERROR << 16);
176                         ret = -EINVAL;
177                         goto exit_fcp_prio_cfg;
178                 }
179
180                 if (!ha->fcp_prio_cfg) {
181                         ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
182                         if (!ha->fcp_prio_cfg) {
183                                 ql_log(ql_log_warn, vha, 0x7050,
184                                     "Unable to allocate memory for fcp prio "
185                                     "config data (%x).\n", FCP_PRIO_CFG_SIZE);
186                                 bsg_job->reply->result = (DID_ERROR << 16);
187                                 ret = -ENOMEM;
188                                 goto exit_fcp_prio_cfg;
189                         }
190                 }
191
192                 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
193                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
194                 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
195                         FCP_PRIO_CFG_SIZE);
196
197                 /* validate fcp priority data */
198
199                 if (!qla24xx_fcp_prio_cfg_valid(vha,
200                     (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
201                         bsg_job->reply->result = (DID_ERROR << 16);
202                         ret = -EINVAL;
203                         /* If the buffer was invalid,
204                          * fcp_prio_cfg is of no use
205                          */
206                         vfree(ha->fcp_prio_cfg);
207                         ha->fcp_prio_cfg = NULL;
208                         goto exit_fcp_prio_cfg;
209                 }
210
211                 ha->flags.fcp_prio_enabled = 0;
212                 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
213                         ha->flags.fcp_prio_enabled = 1;
214                 qla24xx_update_all_fcp_prio(vha);
215                 bsg_job->reply->result = DID_OK;
216                 break;
217         default:
218                 ret = -EINVAL;
219                 break;
220         }
221 exit_fcp_prio_cfg:
222         bsg_job->job_done(bsg_job);
223         return ret;
224 }
225
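/*
 * qla2x00_process_els() - handle an ELS pass-through request
 * (FC_BSG_RPT_ELS or FC_BSG_HST_ELS_NOLOGIN): map the request/reply
 * payloads, build an SRB (using a dummy fcport for host-based requests)
 * and hand it to the firmware via qla2x00_start_sp().
 */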
226 static int
227 qla2x00_process_els(struct fc_bsg_job *bsg_job)
228 {
229         struct fc_rport *rport;
230         fc_port_t *fcport = NULL;
231         struct Scsi_Host *host;
232         scsi_qla_host_t *vha;
233         struct qla_hw_data *ha;
234         srb_t *sp;
235         const char *type;
236         int req_sg_cnt, rsp_sg_cnt;
237         int rval =  (DRIVER_ERROR << 16);
238         uint16_t nextlid = 0;
239
240         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
241                 rport = bsg_job->rport;
242                 fcport = *(fc_port_t **) rport->dd_data;
243                 host = rport_to_shost(rport);
244                 vha = shost_priv(host);
245                 ha = vha->hw;
246                 type = "FC_BSG_RPT_ELS";
247         } else {
248                 host = bsg_job->shost;
249                 vha = shost_priv(host);
250                 ha = vha->hw;
251                 type = "FC_BSG_HST_ELS_NOLOGIN";
252         }
253
254         /* pass through is supported only for ISP 4Gb or higher */
255         if (!IS_FWI2_CAPABLE(ha)) {
256                 ql_dbg(ql_dbg_user, vha, 0x7001,
257                     "ELS passthru not supported for ISP23xx based adapters.\n");
258                 rval = -EPERM;
259                 goto done;
260         }
261
262         /*  Multiple SG's are not supported for ELS requests */
263         if (bsg_job->request_payload.sg_cnt > 1 ||
264                 bsg_job->reply_payload.sg_cnt > 1) {
265                 ql_dbg(ql_dbg_user, vha, 0x7002,
266                     "Multiple SG's are not supported for ELS requests, "
267                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
268                     bsg_job->request_payload.sg_cnt,
269                     bsg_job->reply_payload.sg_cnt);
270                 rval = -EPERM;
271                 goto done;
272         }
273
274         /* ELS request for rport */
275         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
276                 /* make sure the rport is logged in,
277                  * if not perform fabric login
278                  */
279                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
280                         ql_dbg(ql_dbg_user, vha, 0x7003,
281                             "Failed to login port %06X for ELS passthru.\n",
282                             fcport->d_id.b24);
283                         rval = -EIO;
284                         goto done;
285                 }
286         } else {
287                 /* Allocate a dummy fcport structure, since the functions
288                  * preparing the IOCB and mailbox command retrieve port
289                  * specific information from the fcport structure. For host
290                  * based ELS commands no fcport structure is allocated.
291                  */
292                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
293                 if (!fcport) {
294                         rval = -ENOMEM;
295                         goto done;
296                 }
297
298                 /* Initialize all required fields of fcport */
299                 fcport->vha = vha;
300                 fcport->vp_idx = vha->vp_idx;
301                 fcport->d_id.b.al_pa =
302                         bsg_job->request->rqst_data.h_els.port_id[0];
303                 fcport->d_id.b.area =
304                         bsg_job->request->rqst_data.h_els.port_id[1];
305                 fcport->d_id.b.domain =
306                         bsg_job->request->rqst_data.h_els.port_id[2];
307                 fcport->loop_id =
308                         (fcport->d_id.b.al_pa == 0xFD) ?
309                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
310         }
311
312         if (!vha->flags.online) {
313                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
314                 rval = -EIO;
315                 goto done;
316         }
317
318         req_sg_cnt =
319                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
320                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
321         if (!req_sg_cnt) {
322                 rval = -ENOMEM;
323                 goto done_free_fcport;
324         }
325
326         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
327                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
328         if (!rsp_sg_cnt) {
329                 rval = -ENOMEM;
330                 goto done_free_fcport;
331         }
332
333         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
334                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
335                 ql_log(ql_log_warn, vha, 0x7008,
336                     "dma mapping resulted in different sg counts, "
337                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
338                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
339                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
340                 rval = -EAGAIN;
341                 goto done_unmap_sg;
342         }
343
344         /* Alloc SRB structure */
345         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
346         if (!sp) {
347                 rval = -ENOMEM;
348                 goto done_unmap_sg;
349         }
350
351         sp->type =
352                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
353                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
354         sp->name =
355                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
356                 "bsg_els_rpt" : "bsg_els_hst");
357         sp->u.bsg_job = bsg_job;
358         sp->free = qla2x00_bsg_sp_free;
359         sp->done = qla2x00_bsg_job_done;
360
361         ql_dbg(ql_dbg_user, vha, 0x700a,
362             "bsg rqst type: %s els type: %x - loop-id=%x "
363             "portid=%02x%02x%02x.\n", type,
364             bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
365             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
366
367         rval = qla2x00_start_sp(sp);
368         if (rval != QLA_SUCCESS) {
369                 ql_log(ql_log_warn, vha, 0x700e,
370                     "qla2x00_start_sp failed = %d\n", rval);
371                 mempool_free(sp, ha->srb_mempool);
372                 rval = -EIO;
373                 goto done_unmap_sg;
374         }
375         return rval;
376
377 done_unmap_sg:
378         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
379                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
380         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
381                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
382         goto done_free_fcport;
383
384 done_free_fcport:
385         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
386                 kfree(fcport);
387 done:
388         return rval;
389 }
390
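/*
 * qla24xx_calc_ct_iocbs() - IOCB count for a CT pass-through: the command
 * IOCB carries two data segment descriptors, each continuation IOCB
 * carries five more.
 */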
391 inline uint16_t
392 qla24xx_calc_ct_iocbs(uint16_t dsds)
393 {
394         uint16_t iocbs;
395
396         iocbs = 1;
397         if (dsds > 2) {
398                 iocbs += (dsds - 2) / 5;
399                 if ((dsds - 2) % 5)
400                         iocbs++;
401         }
402         return iocbs;
403 }
404
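/*
 * qla2x00_process_ct() - handle a CT pass-through request: map the
 * payloads, derive the loop ID from the CT preamble (0xFC = SNS,
 * 0xFA = management server), allocate a dummy fcport and issue the SRB.
 */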
405 static int
406 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
407 {
408         srb_t *sp;
409         struct Scsi_Host *host = bsg_job->shost;
410         scsi_qla_host_t *vha = shost_priv(host);
411         struct qla_hw_data *ha = vha->hw;
412         int rval = (DRIVER_ERROR << 16);
413         int req_sg_cnt, rsp_sg_cnt;
414         uint16_t loop_id;
415         struct fc_port *fcport;
416         char  *type = "FC_BSG_HST_CT";
417
418         req_sg_cnt =
419                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
420                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
421         if (!req_sg_cnt) {
422                 ql_log(ql_log_warn, vha, 0x700f,
423                     "dma_map_sg returned %d for request.\n", req_sg_cnt);
424                 rval = -ENOMEM;
425                 goto done;
426         }
427
428         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
429                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
430         if (!rsp_sg_cnt) {
431                 ql_log(ql_log_warn, vha, 0x7010,
432                     "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
433                 rval = -ENOMEM;
434                 goto done;
435         }
436
437         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
438             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
439                 ql_log(ql_log_warn, vha, 0x7011,
440                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
441                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
442                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
443                 rval = -EAGAIN;
444                 goto done_unmap_sg;
445         }
446
447         if (!vha->flags.online) {
448                 ql_log(ql_log_warn, vha, 0x7012,
449                     "Host is not online.\n");
450                 rval = -EIO;
451                 goto done_unmap_sg;
452         }
453
454         loop_id =
455                 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
456                         >> 24;
457         switch (loop_id) {
458         case 0xFC:
459                 loop_id = cpu_to_le16(NPH_SNS);
460                 break;
461         case 0xFA:
462                 loop_id = vha->mgmt_svr_loop_id;
463                 break;
464         default:
465                 ql_dbg(ql_dbg_user, vha, 0x7013,
466                     "Unknown loop id: %x.\n", loop_id);
467                 rval = -EINVAL;
468                 goto done_unmap_sg;
469         }
470
471         /* Allocate a dummy fcport structure, since the functions preparing
472          * the IOCB and mailbox command retrieve port specific information
473          * from the fcport structure. For host based CT commands no fcport
474          * structure is allocated.
475          */
476         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
477         if (!fcport) {
478                 ql_log(ql_log_warn, vha, 0x7014,
479                     "Failed to allocate fcport.\n");
480                 rval = -ENOMEM;
481                 goto done_unmap_sg;
482         }
483
484         /* Initialize all required fields of fcport */
485         fcport->vha = vha;
486         fcport->vp_idx = vha->vp_idx;
487         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
488         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
489         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
490         fcport->loop_id = loop_id;
491
492         /* Alloc SRB structure */
493         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
494         if (!sp) {
495                 ql_log(ql_log_warn, vha, 0x7015,
496                     "qla2x00_get_sp failed.\n");
497                 rval = -ENOMEM;
498                 goto done_free_fcport;
499         }
500
501         sp->type = SRB_CT_CMD;
502         sp->name = "bsg_ct";
503         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
504         sp->u.bsg_job = bsg_job;
505         sp->free = qla2x00_bsg_sp_free;
506         sp->done = qla2x00_bsg_job_done;
507
508         ql_dbg(ql_dbg_user, vha, 0x7016,
509             "bsg rqst type: %s ct type: %x - "
510             "loop-id=%x portid=%02x%02x%02x.\n", type,
511             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
512             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
513             fcport->d_id.b.al_pa);
514
515         rval = qla2x00_start_sp(sp);
516         if (rval != QLA_SUCCESS) {
517                 ql_log(ql_log_warn, vha, 0x7017,
518                     "qla2x00_start_sp failed=%d.\n", rval);
519                 mempool_free(sp, ha->srb_mempool);
520                 rval = -EIO;
521                 goto done_free_fcport;
522         }
523         return rval;
524
525 done_free_fcport:
526         kfree(fcport);
527 done_unmap_sg:
528         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
529                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
530         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
531                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
532 done:
533         return rval;
534 }
535
536 /* Set the port configuration to enable the
537  * internal loopback on ISP81XX
538  */
539 static inline int
540 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
541     uint16_t *new_config)
542 {
543         int ret = 0;
544         int rval = 0;
545         struct qla_hw_data *ha = vha->hw;
546
547         if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
548                 goto done_set_internal;
549
550         new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
551         memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
552
553         ha->notify_dcbx_comp = 1;
554         ret = qla81xx_set_port_config(vha, new_config);
555         if (ret != QLA_SUCCESS) {
556                 ql_log(ql_log_warn, vha, 0x7021,
557                     "set port config failed.\n");
558                 ha->notify_dcbx_comp = 0;
559                 rval = -EINVAL;
560                 goto done_set_internal;
561         }
562
563         /* Wait for DCBX complete event */
564         if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
565                 ql_dbg(ql_dbg_user, vha, 0x7022,
566                     "State change notification not received.\n");
567         } else
568                 ql_dbg(ql_dbg_user, vha, 0x7023,
569                     "State change received.\n");
570
571         ha->notify_dcbx_comp = 0;
572
573 done_set_internal:
574         return rval;
575 }
576
577 /* Set the port configuration to disable the
578  * internal loopback on ISP81XX
579  */
580 static inline int
581 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
582     int wait)
583 {
584         int ret = 0;
585         int rval = 0;
586         uint16_t new_config[4];
587         struct qla_hw_data *ha = vha->hw;
588
589         if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
590                 goto done_reset_internal;
591
592         memset(new_config, 0, sizeof(new_config));
593         if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
594                         ENABLE_INTERNAL_LOOPBACK) {
595                 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
596                 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
597
598                 ha->notify_dcbx_comp = wait;
599                 ret = qla81xx_set_port_config(vha, new_config);
600                 if (ret != QLA_SUCCESS) {
601                         ql_log(ql_log_warn, vha, 0x7025,
602                             "Set port config failed.\n");
603                         ha->notify_dcbx_comp = 0;
604                         rval = -EINVAL;
605                         goto done_reset_internal;
606                 }
607
608                 /* Wait for DCBX complete event */
609                 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
610                         (20 * HZ))) {
611                         ql_dbg(ql_dbg_user, vha, 0x7026,
612                             "State change notification not received.\n");
613                         ha->notify_dcbx_comp = 0;
614                         rval = -EINVAL;
615                         goto done_reset_internal;
616                 } else
617                         ql_dbg(ql_dbg_user, vha, 0x7027,
618                             "State change received.\n");
619
620                 ha->notify_dcbx_comp = 0;
621         }
622 done_reset_internal:
623         return rval;
624 }
625
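/*
 * qla2x00_process_loopback() - vendor echo/loopback diagnostic: run an
 * ECHO test or a loopback test (toggling internal loopback on
 * ISP81xx/8031 as needed) and copy the firmware mailbox status after the
 * fc_bsg_reply in the sense buffer.
 */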
626 static int
627 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
628 {
629         struct Scsi_Host *host = bsg_job->shost;
630         scsi_qla_host_t *vha = shost_priv(host);
631         struct qla_hw_data *ha = vha->hw;
632         int rval;
633         uint8_t command_sent;
634         char *type;
635         struct msg_echo_lb elreq;
636         uint16_t response[MAILBOX_REGISTER_COUNT];
637         uint16_t config[4], new_config[4];
638         uint8_t *fw_sts_ptr;
639         uint8_t *req_data = NULL;
640         dma_addr_t req_data_dma;
641         uint32_t req_data_len;
642         uint8_t *rsp_data = NULL;
643         dma_addr_t rsp_data_dma;
644         uint32_t rsp_data_len;
645
646         if (!vha->flags.online) {
647                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
648                 return -EIO;
649         }
650
651         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
652                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
653                 DMA_TO_DEVICE);
654
655         if (!elreq.req_sg_cnt) {
656                 ql_log(ql_log_warn, vha, 0x701a,
657                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
658                 return -ENOMEM;
659         }
660
661         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
662                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
663                 DMA_FROM_DEVICE);
664
665         if (!elreq.rsp_sg_cnt) {
666                 ql_log(ql_log_warn, vha, 0x701b,
667                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
668                 rval = -ENOMEM;
669                 goto done_unmap_req_sg;
670         }
671
672         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
673                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
674                 ql_log(ql_log_warn, vha, 0x701c,
675                     "dma mapping resulted in different sg counts, "
676                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
677                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
678                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
679                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
680                 rval = -EAGAIN;
681                 goto done_unmap_sg;
682         }
683         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
684         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
685                 &req_data_dma, GFP_KERNEL);
686         if (!req_data) {
687                 ql_log(ql_log_warn, vha, 0x701d,
688                     "dma alloc failed for req_data.\n");
689                 rval = -ENOMEM;
690                 goto done_unmap_sg;
691         }
692
693         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
694                 &rsp_data_dma, GFP_KERNEL);
695         if (!rsp_data) {
696                 ql_log(ql_log_warn, vha, 0x7004,
697                     "dma alloc failed for rsp_data.\n");
698                 rval = -ENOMEM;
699                 goto done_free_dma_req;
700         }
701
702         /* Copy the request buffer in req_data now */
703         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
704                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
705
706         elreq.send_dma = req_data_dma;
707         elreq.rcv_dma = rsp_data_dma;
708         elreq.transfer_size = req_data_len;
709
710         elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
711
712         if ((ha->current_topology == ISP_CFG_F ||
713             (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
714             ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
715             le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
716             && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
717                 elreq.options == EXTERNAL_LOOPBACK) {
718                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
719                 ql_dbg(ql_dbg_user, vha, 0x701e,
720                     "BSG request type: %s.\n", type);
721                 command_sent = INT_DEF_LB_ECHO_CMD;
722                 rval = qla2x00_echo_test(vha, &elreq, response);
723         } else {
724                 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
725                         memset(config, 0, sizeof(config));
726                         memset(new_config, 0, sizeof(new_config));
727                         if (qla81xx_get_port_config(vha, config)) {
728                                 ql_log(ql_log_warn, vha, 0x701f,
729                                     "Get port config failed.\n");
730                                 bsg_job->reply->result = (DID_ERROR << 16);
731                                 rval = -EPERM;
732                                 goto done_free_dma_req;
733                         }
734
735                         if (elreq.options != EXTERNAL_LOOPBACK) {
736                                 ql_dbg(ql_dbg_user, vha, 0x7020,
737                                     "Internal: current port config = %x\n",
738                                     config[0]);
739                                 if (qla81xx_set_internal_loopback(vha, config,
740                                         new_config)) {
741                                         ql_log(ql_log_warn, vha, 0x7024,
742                                             "Internal loopback failed.\n");
743                                         bsg_job->reply->result =
744                                                 (DID_ERROR << 16);
745                                         rval = -EPERM;
746                                         goto done_free_dma_req;
747                                 }
748                         } else {
749                                 /* For external loopback to work
750                                  * ensure internal loopback is disabled
751                                  */
752                                 if (qla81xx_reset_internal_loopback(vha,
753                                         config, 1)) {
754                                         bsg_job->reply->result =
755                                                 (DID_ERROR << 16);
756                                         rval = -EPERM;
757                                         goto done_free_dma_req;
758                                 }
759                         }
760
761                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
762                         ql_dbg(ql_dbg_user, vha, 0x7028,
763                             "BSG request type: %s.\n", type);
764
765                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
766                         rval = qla2x00_loopback_test(vha, &elreq, response);
767
768                         if (new_config[0]) {
769                                 /* Revert back to original port config
770                                  * Also clear internal loopback
771                                  */
772                                 qla81xx_reset_internal_loopback(vha,
773                                     new_config, 0);
774                         }
775
776                         if (response[0] == MBS_COMMAND_ERROR &&
777                                         response[1] == MBS_LB_RESET) {
778                                 ql_log(ql_log_warn, vha, 0x7029,
779                                     "MBX command error, Aborting ISP.\n");
780                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
781                                 qla2xxx_wake_dpc(vha);
782                                 qla2x00_wait_for_chip_reset(vha);
783                                 /* Also reset the MPI */
784                                 if (qla81xx_restart_mpi_firmware(vha) !=
785                                     QLA_SUCCESS) {
786                                         ql_log(ql_log_warn, vha, 0x702a,
787                                             "MPI reset failed.\n");
788                                 }
789
790                                 bsg_job->reply->result = (DID_ERROR << 16);
791                                 rval = -EIO;
792                                 goto done_free_dma_req;
793                         }
794                 } else {
795                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
796                         ql_dbg(ql_dbg_user, vha, 0x702b,
797                             "BSG request type: %s.\n", type);
798                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
799                         rval = qla2x00_loopback_test(vha, &elreq, response);
800                 }
801         }
802
803         if (rval) {
804                 ql_log(ql_log_warn, vha, 0x702c,
805                     "Vendor request %s failed.\n", type);
806
807                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
808                     sizeof(struct fc_bsg_reply);
809
810                 memcpy(fw_sts_ptr, response, sizeof(response));
811                 fw_sts_ptr += sizeof(response);
812                 *fw_sts_ptr = command_sent;
813                 rval = 0;
814                 bsg_job->reply->result = (DID_ERROR << 16);
815         } else {
816                 ql_dbg(ql_dbg_user, vha, 0x702d,
817                     "Vendor request %s completed.\n", type);
818
819                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
820                         sizeof(response) + sizeof(uint8_t);
821                 bsg_job->reply->reply_payload_rcv_len =
822                         bsg_job->reply_payload.payload_len;
823                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
824                         sizeof(struct fc_bsg_reply);
825                 memcpy(fw_sts_ptr, response, sizeof(response));
826                 fw_sts_ptr += sizeof(response);
827                 *fw_sts_ptr = command_sent;
828                 bsg_job->reply->result = DID_OK;
829                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
830                         bsg_job->reply_payload.sg_cnt, rsp_data,
831                         rsp_data_len);
832         }
833         bsg_job->job_done(bsg_job);
834
835         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
836                 rsp_data, rsp_data_dma);
837 done_free_dma_req:
838         dma_free_coherent(&ha->pdev->dev, req_data_len,
839                 req_data, req_data_dma);
840 done_unmap_sg:
841         dma_unmap_sg(&ha->pdev->dev,
842             bsg_job->reply_payload.sg_list,
843             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
844 done_unmap_req_sg:
845         dma_unmap_sg(&ha->pdev->dev,
846             bsg_job->request_payload.sg_list,
847             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
848         return rval;
849 }
850
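/*
 * qla84xx_reset() - vendor request to reset the ISP84xx, optionally
 * leaving it in the diagnostic firmware (A84_ISSUE_RESET_DIAG_FW).
 */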
851 static int
852 qla84xx_reset(struct fc_bsg_job *bsg_job)
853 {
854         struct Scsi_Host *host = bsg_job->shost;
855         scsi_qla_host_t *vha = shost_priv(host);
856         struct qla_hw_data *ha = vha->hw;
857         int rval = 0;
858         uint32_t flag;
859
860         if (!IS_QLA84XX(ha)) {
861                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
862                 return -EINVAL;
863         }
864
865         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
866
867         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
868
869         if (rval) {
870                 ql_log(ql_log_warn, vha, 0x7030,
871                     "Vendor request 84xx reset failed.\n");
872                 rval = 0;
873                 bsg_job->reply->result = (DID_ERROR << 16);
874
875         } else {
876                 ql_dbg(ql_dbg_user, vha, 0x7031,
877                     "Vendor request 84xx reset completed.\n");
878                 bsg_job->reply->result = DID_OK;
879         }
880
881         bsg_job->job_done(bsg_job);
882         return rval;
883 }
884
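/*
 * qla84xx_updatefw() - vendor request to update ISP84xx firmware: stage
 * the image in a coherent DMA buffer and submit a VERIFY CHIP IOCB with a
 * 120 second timeout.
 */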
885 static int
886 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
887 {
888         struct Scsi_Host *host = bsg_job->shost;
889         scsi_qla_host_t *vha = shost_priv(host);
890         struct qla_hw_data *ha = vha->hw;
891         struct verify_chip_entry_84xx *mn = NULL;
892         dma_addr_t mn_dma, fw_dma;
893         void *fw_buf = NULL;
894         int rval = 0;
895         uint32_t sg_cnt;
896         uint32_t data_len;
897         uint16_t options;
898         uint32_t flag;
899         uint32_t fw_ver;
900
901         if (!IS_QLA84XX(ha)) {
902                 ql_dbg(ql_dbg_user, vha, 0x7032,
903                     "Not 84xx, exiting.\n");
904                 return -EINVAL;
905         }
906
907         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
908                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
909         if (!sg_cnt) {
910                 ql_log(ql_log_warn, vha, 0x7033,
911                     "dma_map_sg returned %d for request.\n", sg_cnt);
912                 return -ENOMEM;
913         }
914
915         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
916                 ql_log(ql_log_warn, vha, 0x7034,
917                     "DMA mapping resulted in different sg counts, "
918                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
919                     bsg_job->request_payload.sg_cnt, sg_cnt);
920                 rval = -EAGAIN;
921                 goto done_unmap_sg;
922         }
923
924         data_len = bsg_job->request_payload.payload_len;
925         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
926                 &fw_dma, GFP_KERNEL);
927         if (!fw_buf) {
928                 ql_log(ql_log_warn, vha, 0x7035,
929                     "DMA alloc failed for fw_buf.\n");
930                 rval = -ENOMEM;
931                 goto done_unmap_sg;
932         }
933
934         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
935                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
936
937         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
938         if (!mn) {
939                 ql_log(ql_log_warn, vha, 0x7036,
940                     "DMA alloc failed for fw buffer.\n");
941                 rval = -ENOMEM;
942                 goto done_free_fw_buf;
943         }
944
945         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
946         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
947
948         memset(mn, 0, sizeof(struct verify_chip_entry_84xx));
949         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
950         mn->entry_count = 1;
951
952         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
953         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
954                 options |= VCO_DIAG_FW;
955
956         mn->options = cpu_to_le16(options);
957         mn->fw_ver =  cpu_to_le32(fw_ver);
958         mn->fw_size =  cpu_to_le32(data_len);
959         mn->fw_seq_size =  cpu_to_le32(data_len);
960         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
961         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
962         mn->dseg_length = cpu_to_le32(data_len);
963         mn->data_seg_cnt = cpu_to_le16(1);
964
965         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
966
967         if (rval) {
968                 ql_log(ql_log_warn, vha, 0x7037,
969                     "Vendor request 84xx updatefw failed.\n");
970
971                 rval = 0;
972                 bsg_job->reply->result = (DID_ERROR << 16);
973         } else {
974                 ql_dbg(ql_dbg_user, vha, 0x7038,
975                     "Vendor request 84xx updatefw completed.\n");
976
977                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
978                 bsg_job->reply->result = DID_OK;
979         }
980
981         bsg_job->job_done(bsg_job);
982         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
983
984 done_free_fw_buf:
985         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
986
987 done_unmap_sg:
988         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
989                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
990
991         return rval;
992 }
993
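/*
 * qla84xx_mgmt_cmd() - vendor request for ISP84xx management (read/write
 * memory, get info, change config) issued through an ACCESS CHIP IOCB and
 * a coherent bounce buffer.
 */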
994 static int
995 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
996 {
997         struct Scsi_Host *host = bsg_job->shost;
998         scsi_qla_host_t *vha = shost_priv(host);
999         struct qla_hw_data *ha = vha->hw;
1000         struct access_chip_84xx *mn = NULL;
1001         dma_addr_t mn_dma, mgmt_dma;
1002         void *mgmt_b = NULL;
1003         int rval = 0;
1004         struct qla_bsg_a84_mgmt *ql84_mgmt;
1005         uint32_t sg_cnt;
1006         uint32_t data_len = 0;
1007         uint32_t dma_direction = DMA_NONE;
1008
1009         if (!IS_QLA84XX(ha)) {
1010                 ql_log(ql_log_warn, vha, 0x703a,
1011                     "Not 84xx, exiting.\n");
1012                 return -EINVAL;
1013         }
1014
1015         ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1016                 sizeof(struct fc_bsg_request));
1017         if (!ql84_mgmt) {
1018                 ql_log(ql_log_warn, vha, 0x703b,
1019                     "MGMT header not provided, exiting.\n");
1020                 return -EINVAL;
1021         }
1022
1023         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1024         if (!mn) {
1025                 ql_log(ql_log_warn, vha, 0x703c,
1026                     "DMA alloc failed for fw buffer.\n");
1027                 return -ENOMEM;
1028         }
1029
1030         memset(mn, 0, sizeof(struct access_chip_84xx));
1031         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1032         mn->entry_count = 1;
1033
1034         switch (ql84_mgmt->mgmt.cmd) {
1035         case QLA84_MGMT_READ_MEM:
1036         case QLA84_MGMT_GET_INFO:
1037                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1038                         bsg_job->reply_payload.sg_list,
1039                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1040                 if (!sg_cnt) {
1041                         ql_log(ql_log_warn, vha, 0x703d,
1042                             "dma_map_sg returned %d for reply.\n", sg_cnt);
1043                         rval = -ENOMEM;
1044                         goto exit_mgmt;
1045                 }
1046
1047                 dma_direction = DMA_FROM_DEVICE;
1048
1049                 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1050                         ql_log(ql_log_warn, vha, 0x703e,
1051                             "DMA mapping resulted in different sg counts, "
1052                             "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1053                             bsg_job->reply_payload.sg_cnt, sg_cnt);
1054                         rval = -EAGAIN;
1055                         goto done_unmap_sg;
1056                 }
1057
1058                 data_len = bsg_job->reply_payload.payload_len;
1059
1060                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1061                     &mgmt_dma, GFP_KERNEL);
1062                 if (!mgmt_b) {
1063                         ql_log(ql_log_warn, vha, 0x703f,
1064                             "DMA alloc failed for mgmt_b.\n");
1065                         rval = -ENOMEM;
1066                         goto done_unmap_sg;
1067                 }
1068
1069                 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1070                         mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1071                         mn->parameter1 =
1072                                 cpu_to_le32(
1073                                 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1074
1075                 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1076                         mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1077                         mn->parameter1 =
1078                                 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1079
1080                         mn->parameter2 =
1081                                 cpu_to_le32(
1082                                 ql84_mgmt->mgmt.mgmtp.u.info.context);
1083                 }
1084                 break;
1085
1086         case QLA84_MGMT_WRITE_MEM:
1087                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1088                         bsg_job->request_payload.sg_list,
1089                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1090
1091                 if (!sg_cnt) {
1092                         ql_log(ql_log_warn, vha, 0x7040,
1093                             "dma_map_sg returned %d.\n", sg_cnt);
1094                         rval = -ENOMEM;
1095                         goto exit_mgmt;
1096                 }
1097
1098                 dma_direction = DMA_TO_DEVICE;
1099
1100                 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1101                         ql_log(ql_log_warn, vha, 0x7041,
1102                             "DMA mapping resulted in different sg counts, "
1103                             "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1104                             bsg_job->request_payload.sg_cnt, sg_cnt);
1105                         rval = -EAGAIN;
1106                         goto done_unmap_sg;
1107                 }
1108
1109                 data_len = bsg_job->request_payload.payload_len;
1110                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1111                         &mgmt_dma, GFP_KERNEL);
1112                 if (!mgmt_b) {
1113                         ql_log(ql_log_warn, vha, 0x7042,
1114                             "DMA alloc failed for mgmt_b.\n");
1115                         rval = -ENOMEM;
1116                         goto done_unmap_sg;
1117                 }
1118
1119                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1120                         bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1121
1122                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1123                 mn->parameter1 =
1124                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1125                 break;
1126
1127         case QLA84_MGMT_CHNG_CONFIG:
1128                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1129                 mn->parameter1 =
1130                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1131
1132                 mn->parameter2 =
1133                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1134
1135                 mn->parameter3 =
1136                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1137                 break;
1138
1139         default:
1140                 rval = -EIO;
1141                 goto exit_mgmt;
1142         }
1143
1144         if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1145                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1146                 mn->dseg_count = cpu_to_le16(1);
1147                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1148                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1149                 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1150         }
1151
1152         rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1153
1154         if (rval) {
1155                 ql_log(ql_log_warn, vha, 0x7043,
1156                     "Vendor request 84xx mgmt failed.\n");
1157
1158                 rval = 0;
1159                 bsg_job->reply->result = (DID_ERROR << 16);
1160
1161         } else {
1162                 ql_dbg(ql_dbg_user, vha, 0x7044,
1163                     "Vendor request 84xx mgmt completed.\n");
1164
1165                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1166                 bsg_job->reply->result = DID_OK;
1167
1168                 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1169                         (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1170                         bsg_job->reply->reply_payload_rcv_len =
1171                                 bsg_job->reply_payload.payload_len;
1172
1173                         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1174                                 bsg_job->reply_payload.sg_cnt, mgmt_b,
1175                                 data_len);
1176                 }
1177         }
1178
1179         bsg_job->job_done(bsg_job);
1180
1181 done_unmap_sg:
1182         if (mgmt_b)
1183                 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1184
1185         if (dma_direction == DMA_TO_DEVICE)
1186                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1187                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1188         else if (dma_direction == DMA_FROM_DEVICE)
1189                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1190                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1191
1192 exit_mgmt:
1193         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1194
1195         return rval;
1196 }
1197
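/*
 * qla24xx_iidma() - vendor request to get or set the iIDMA port speed of
 * a logged-in target port, looked up by WWPN.
 */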
1198 static int
1199 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1200 {
1201         struct Scsi_Host *host = bsg_job->shost;
1202         scsi_qla_host_t *vha = shost_priv(host);
1203         int rval = 0;
1204         struct qla_port_param *port_param = NULL;
1205         fc_port_t *fcport = NULL;
1206         uint16_t mb[MAILBOX_REGISTER_COUNT];
1207         uint8_t *rsp_ptr = NULL;
1208
1209         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1210                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1211                 return -EINVAL;
1212         }
1213
1214         port_param = (struct qla_port_param *)((char *)bsg_job->request +
1215                 sizeof(struct fc_bsg_request));
1216         if (!port_param) {
1217                 ql_log(ql_log_warn, vha, 0x7047,
1218                     "port_param header not provided.\n");
1219                 return -EINVAL;
1220         }
1221
1222         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1223                 ql_log(ql_log_warn, vha, 0x7048,
1224                     "Invalid destination type.\n");
1225                 return -EINVAL;
1226         }
1227
1228         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1229                 if (fcport->port_type != FCT_TARGET)
1230                         continue;
1231
1232                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1233                         fcport->port_name, sizeof(fcport->port_name)))
1234                         continue;
1235                 break;
1236         }
1237
1238         if (&fcport->list == &vha->vp_fcports) {
1239                 ql_log(ql_log_warn, vha, 0x7049,
1240                     "Failed to find port.\n");
1241                 return -EINVAL;
1242         }
1243
1244         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1245                 ql_log(ql_log_warn, vha, 0x704a,
1246                     "Port is not online.\n");
1247                 return -EINVAL;
1248         }
1249
1250         if (fcport->flags & FCF_LOGIN_NEEDED) {
1251                 ql_log(ql_log_warn, vha, 0x704b,
1252                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1253                 return -EINVAL;
1254         }
1255
1256         if (port_param->mode)
1257                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1258                         port_param->speed, mb);
1259         else
1260                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1261                         &port_param->speed, mb);
1262
1263         if (rval) {
1264                 ql_log(ql_log_warn, vha, 0x704c,
1265                     "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1266                     "%04x %x %04x %04x.\n", fcport->port_name[0],
1267                     fcport->port_name[1], fcport->port_name[2],
1268                     fcport->port_name[3], fcport->port_name[4],
1269                     fcport->port_name[5], fcport->port_name[6],
1270                     fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1271                 rval = 0;
1272                 bsg_job->reply->result = (DID_ERROR << 16);
1273
1274         } else {
1275                 if (!port_param->mode) {
1276                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1277                                 sizeof(struct qla_port_param);
1278
1279                         rsp_ptr = ((uint8_t *)bsg_job->reply) +
1280                                 sizeof(struct fc_bsg_reply);
1281
1282                         memcpy(rsp_ptr, port_param,
1283                                 sizeof(struct qla_port_param));
1284                 }
1285
1286                 bsg_job->reply->result = DID_OK;
1287         }
1288
1289         bsg_job->job_done(bsg_job);
1290         return rval;
1291 }
1292
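/*
 * qla2x00_optrom_setup() - common setup for option ROM access: validate
 * the start offset, compute the region size, allocate the staging buffer
 * and move optrom_state to QLA_SREADING/QLA_SWRITING.
 */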
1293 static int
1294 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1295         uint8_t is_update)
1296 {
1297         uint32_t start = 0;
1298         int valid = 0;
1299         struct qla_hw_data *ha = vha->hw;
1300
1301         if (unlikely(pci_channel_offline(ha->pdev)))
1302                 return -EINVAL;
1303
1304         start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1305         if (start > ha->optrom_size) {
1306                 ql_log(ql_log_warn, vha, 0x7055,
1307                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1308                 return -EINVAL;
1309         }
1310
1311         if (ha->optrom_state != QLA_SWAITING) {
1312                 ql_log(ql_log_info, vha, 0x7056,
1313                     "optrom_state %d.\n", ha->optrom_state);
1314                 return -EBUSY;
1315         }
1316
1317         ha->optrom_region_start = start;
1318         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1319         if (is_update) {
1320                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1321                         valid = 1;
1322                 else if (start == (ha->flt_region_boot * 4) ||
1323                     start == (ha->flt_region_fw * 4))
1324                         valid = 1;
1325                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1326                     IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
1327                         valid = 1;
1328                 if (!valid) {
1329                         ql_log(ql_log_warn, vha, 0x7058,
1330                             "Invalid start region 0x%x/0x%x.\n", start,
1331                             bsg_job->request_payload.payload_len);
1332                         return -EINVAL;
1333                 }
1334
1335                 ha->optrom_region_size = start +
1336                     bsg_job->request_payload.payload_len > ha->optrom_size ?
1337                     ha->optrom_size - start :
1338                     bsg_job->request_payload.payload_len;
1339                 ha->optrom_state = QLA_SWRITING;
1340         } else {
1341                 ha->optrom_region_size = start +
1342                     bsg_job->reply_payload.payload_len > ha->optrom_size ?
1343                     ha->optrom_size - start :
1344                     bsg_job->reply_payload.payload_len;
1345                 ha->optrom_state = QLA_SREADING;
1346         }
1347
1348         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1349         if (!ha->optrom_buffer) {
1350                 ql_log(ql_log_warn, vha, 0x7059,
1351                     "Read: Unable to allocate memory for optrom retrieval "
1352                     "(%x)\n", ha->optrom_region_size);
1353
1354                 ha->optrom_state = QLA_SWAITING;
1355                 return -ENOMEM;
1356         }
1357
1358         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1359         return 0;
1360 }
1361
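/*
 * QL_VND_READ_FLASH handler: read the requested option-ROM region into
 * the staging buffer set up by qla2x00_optrom_setup(), copy it into the
 * BSG reply payload scatter-gather list, then release the buffer and
 * return optrom_state to QLA_SWAITING.
 */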
1362 static int
1363 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1364 {
1365         struct Scsi_Host *host = bsg_job->shost;
1366         scsi_qla_host_t *vha = shost_priv(host);
1367         struct qla_hw_data *ha = vha->hw;
1368         int rval = 0;
1369
1370         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1371         if (rval)
1372                 return rval;
1373
1374         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1375             ha->optrom_region_start, ha->optrom_region_size);
1376
1377         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1378             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1379             ha->optrom_region_size);
1380
1381         bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1382         bsg_job->reply->result = DID_OK;
1383         vfree(ha->optrom_buffer);
1384         ha->optrom_buffer = NULL;
1385         ha->optrom_state = QLA_SWAITING;
1386         bsg_job->job_done(bsg_job);
1387         return rval;
1388 }
1389
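/*
 * QL_VND_UPDATE_FLASH handler: copy the BSG request payload into the
 * staging buffer and program it into the selected option-ROM region via
 * the ISP-specific write_optrom() method, then release the buffer and
 * return optrom_state to QLA_SWAITING.
 */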
1390 static int
1391 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1392 {
1393         struct Scsi_Host *host = bsg_job->shost;
1394         scsi_qla_host_t *vha = shost_priv(host);
1395         struct qla_hw_data *ha = vha->hw;
1396         int rval = 0;
1397
1398         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1399         if (rval)
1400                 return rval;
1401
1402         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1403             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1404             ha->optrom_region_size);
1405
1406         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1407             ha->optrom_region_start, ha->optrom_region_size);
1408
1409         bsg_job->reply->result = DID_OK;
1410         vfree(ha->optrom_buffer);
1411         ha->optrom_buffer = NULL;
1412         ha->optrom_state = QLA_SWAITING;
1413         bsg_job->job_done(bsg_job);
1414         return rval;
1415 }
1416
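/*
 * QL_VND_SET_FRU_VERSION handler: walk the qla_image_version_list
 * supplied in the request payload and write each entry's version field
 * to its FRU location through the SFP write mailbox interface.  A
 * mailbox failure aborts the walk and is reported to the caller via
 * vendor_rsp[0] (EXT_STATUS_MAILBOX).
 */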
1417 static int
1418 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1419 {
1420         struct Scsi_Host *host = bsg_job->shost;
1421         scsi_qla_host_t *vha = shost_priv(host);
1422         struct qla_hw_data *ha = vha->hw;
1423         int rval = 0;
1424         uint8_t bsg[DMA_POOL_SIZE];
1425         struct qla_image_version_list *list = (void *)bsg;
1426         struct qla_image_version *image;
1427         uint32_t count;
1428         dma_addr_t sfp_dma;
1429         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1430         if (!sfp) {
1431                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1432                     EXT_STATUS_NO_MEMORY;
1433                 goto done;
1434         }
1435
1436         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1437             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1438
1439         image = list->version;
1440         count = list->count;
1441         while (count--) {
1442                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1443                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1444                     image->field_address.device, image->field_address.offset,
1445                     sizeof(image->field_info), image->field_address.option);
1446                 if (rval) {
1447                         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1448                             EXT_STATUS_MAILBOX;
1449                         goto dealloc;
1450                 }
1451                 image++;
1452         }
1453
1454         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1455
1456 dealloc:
1457         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1458
1459 done:
1460         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1461         bsg_job->reply->result = DID_OK << 16;
1462         bsg_job->job_done(bsg_job);
1463
1464         return 0;
1465 }
1466
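/*
 * QL_VND_READ_FRU_STATUS handler: read a single status register from
 * the FRU location described by the qla_status_reg in the request
 * payload and return the updated structure in the reply payload.
 */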
1467 static int
1468 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1469 {
1470         struct Scsi_Host *host = bsg_job->shost;
1471         scsi_qla_host_t *vha = shost_priv(host);
1472         struct qla_hw_data *ha = vha->hw;
1473         int rval = 0;
1474         uint8_t bsg[DMA_POOL_SIZE];
1475         struct qla_status_reg *sr = (void *)bsg;
1476         dma_addr_t sfp_dma;
1477         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1478         if (!sfp) {
1479                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1480                     EXT_STATUS_NO_MEMORY;
1481                 goto done;
1482         }
1483
1484         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1485             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1486
1487         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1488             sr->field_address.device, sr->field_address.offset,
1489             sizeof(sr->status_reg), sr->field_address.option);
1490         sr->status_reg = *sfp;
1491
1492         if (rval) {
1493                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1494                     EXT_STATUS_MAILBOX;
1495                 goto dealloc;
1496         }
1497
1498         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1499             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1500
1501         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1502
1503 dealloc:
1504         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1505
1506 done:
1507         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1508         bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1509         bsg_job->reply->result = DID_OK << 16;
1510         bsg_job->job_done(bsg_job);
1511
1512         return 0;
1513 }
1514
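/*
 * QL_VND_WRITE_FRU_STATUS handler: write the status register value
 * carried in the request payload to the FRU location it describes,
 * reporting mailbox failures via vendor_rsp[0].
 */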
1515 static int
1516 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1517 {
1518         struct Scsi_Host *host = bsg_job->shost;
1519         scsi_qla_host_t *vha = shost_priv(host);
1520         struct qla_hw_data *ha = vha->hw;
1521         int rval = 0;
1522         uint8_t bsg[DMA_POOL_SIZE];
1523         struct qla_status_reg *sr = (void *)bsg;
1524         dma_addr_t sfp_dma;
1525         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1526         if (!sfp) {
1527                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1528                     EXT_STATUS_NO_MEMORY;
1529                 goto done;
1530         }
1531
1532         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1533             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1534
1535         *sfp = sr->status_reg;
1536         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1537             sr->field_address.device, sr->field_address.offset,
1538             sizeof(sr->status_reg), sr->field_address.option);
1539
1540         if (rval) {
1541                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1542                     EXT_STATUS_MAILBOX;
1543                 goto dealloc;
1544         }
1545
1546         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1547
1548 dealloc:
1549         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1550
1551 done:
1552         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1553         bsg_job->reply->result = DID_OK << 16;
1554         bsg_job->job_done(bsg_job);
1555
1556         return 0;
1557 }
1558
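/*
 * Dispatch an FC_BSG_HST_VENDOR request on the first vendor command
 * word (rqst_data.h_vendor.vendor_cmd[0]).  Unknown commands complete
 * the job with DID_ERROR and return -ENOSYS.
 *
 * Rough user-space sketch (illustrative only, not driver code): allocate
 * a request buffer large enough for struct fc_bsg_request plus the
 * vendor command words, set msgcode = FC_BSG_HST_VENDOR,
 * vendor_cmd[0] = QL_VND_READ_FLASH and vendor_cmd[1] = <flash start
 * offset>, then submit it through the FC transport bsg node for this
 * host together with a reply payload buffer that receives the data.
 */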
1559 static int
1560 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1561 {
1562         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1563         case QL_VND_LOOPBACK:
1564                 return qla2x00_process_loopback(bsg_job);
1565
1566         case QL_VND_A84_RESET:
1567                 return qla84xx_reset(bsg_job);
1568
1569         case QL_VND_A84_UPDATE_FW:
1570                 return qla84xx_updatefw(bsg_job);
1571
1572         case QL_VND_A84_MGMT_CMD:
1573                 return qla84xx_mgmt_cmd(bsg_job);
1574
1575         case QL_VND_IIDMA:
1576                 return qla24xx_iidma(bsg_job);
1577
1578         case QL_VND_FCP_PRIO_CFG_CMD:
1579                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1580
1581         case QL_VND_READ_FLASH:
1582                 return qla2x00_read_optrom(bsg_job);
1583
1584         case QL_VND_UPDATE_FLASH:
1585                 return qla2x00_update_optrom(bsg_job);
1586
1587         case QL_VND_SET_FRU_VERSION:
1588                 return qla2x00_update_fru_versions(bsg_job);
1589
1590         case QL_VND_READ_FRU_STATUS:
1591                 return qla2x00_read_fru_status(bsg_job);
1592
1593         case QL_VND_WRITE_FRU_STATUS:
1594                 return qla2x00_write_fru_status(bsg_job);
1595
1596         default:
1597                 bsg_job->reply->result = (DID_ERROR << 16);
1598                 bsg_job->job_done(bsg_job);
1599                 return -ENOSYS;
1600         }
1601 }
1602
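/*
 * Entry point for all BSG requests routed to this driver by the FC
 * transport.  Resolves the owning scsi_qla_host (via the rport for
 * FC_BSG_RPT_ELS, via the shost otherwise), rejects requests while an
 * ISP reset is active or pending, and then fans out to the ELS, CT and
 * vendor-specific handlers.
 */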
1603 int
1604 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1605 {
1606         int ret = -EINVAL;
1607         struct fc_rport *rport;
1608         fc_port_t *fcport = NULL;
1609         struct Scsi_Host *host;
1610         scsi_qla_host_t *vha;
1611
1612         /* In case no data transferred. */
1613         bsg_job->reply->reply_payload_rcv_len = 0;
1614
1615         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1616                 rport = bsg_job->rport;
1617                 fcport = *(fc_port_t **) rport->dd_data;
1618                 host = rport_to_shost(rport);
1619                 vha = shost_priv(host);
1620         } else {
1621                 host = bsg_job->shost;
1622                 vha = shost_priv(host);
1623         }
1624
1625         if (qla2x00_reset_active(vha)) {
1626                 ql_dbg(ql_dbg_user, vha, 0x709f,
1627                     "BSG: ISP abort active/needed -- cmd=%d.\n",
1628                     bsg_job->request->msgcode);
1629                 bsg_job->reply->result = (DID_ERROR << 16);
1630                 bsg_job->job_done(bsg_job);
1631                 return -EBUSY;
1632         }
1633
1634         ql_dbg(ql_dbg_user, vha, 0x7000,
1635             "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1636
1637         switch (bsg_job->request->msgcode) {
1638         case FC_BSG_RPT_ELS:
1639         case FC_BSG_HST_ELS_NOLOGIN:
1640                 ret = qla2x00_process_els(bsg_job);
1641                 break;
1642         case FC_BSG_HST_CT:
1643                 ret = qla2x00_process_ct(bsg_job);
1644                 break;
1645         case FC_BSG_HST_VENDOR:
1646                 ret = qla2x00_process_vendor_specific(bsg_job);
1647                 break;
1648         case FC_BSG_HST_ADD_RPORT:
1649         case FC_BSG_HST_DEL_RPORT:
1650         case FC_BSG_RPT_CT:
1651         default:
1652                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1653                 bsg_job->reply->result = ret;
1654                 break;
1655         }
1656         return ret;
1657 }
1658
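/*
 * BSG timeout handler: scan every request queue's outstanding-command
 * array for the srb that owns this bsg_job.  If found, the firmware is
 * asked to abort the command (the hardware lock is dropped around the
 * mailbox call and re-taken afterwards) and the srb resources are
 * released here; otherwise the job's result is set to -ENXIO.
 */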
1659 int
1660 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1661 {
1662         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1663         struct qla_hw_data *ha = vha->hw;
1664         srb_t *sp;
1665         int cnt, que;
1666         unsigned long flags;
1667         struct req_que *req;
1668
1669         /* find the bsg job from the active list of commands */
1670         spin_lock_irqsave(&ha->hardware_lock, flags);
1671         for (que = 0; que < ha->max_req_queues; que++) {
1672                 req = ha->req_q_map[que];
1673                 if (!req)
1674                         continue;
1675
1676                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1677                         sp = req->outstanding_cmds[cnt];
1678                         if (sp) {
1679                                 if (((sp->type == SRB_CT_CMD) ||
1680                                         (sp->type == SRB_ELS_CMD_HST))
1681                                         && (sp->u.bsg_job == bsg_job)) {
1682                                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1683                                         if (ha->isp_ops->abort_command(sp)) {
1684                                                 ql_log(ql_log_warn, vha, 0x7089,
1685                                                     "mbx abort_command "
1686                                                     "failed.\n");
1687                                                 bsg_job->req->errors =
1688                                                 bsg_job->reply->result = -EIO;
1689                                         } else {
1690                                                 ql_dbg(ql_dbg_user, vha, 0x708a,
1691                                                     "mbx abort_command "
1692                                                     "success.\n");
1693                                                 bsg_job->req->errors =
1694                                                 bsg_job->reply->result = 0;
1695                                         }
1696                                         spin_lock_irqsave(&ha->hardware_lock, flags);
1697                                         goto done;
1698                                 }
1699                         }
1700                 }
1701         }
1702         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1703         ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1704         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1705         return 0;
1706
1707 done:
1708         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1709         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1710                 kfree(sp->fcport);
1711         mempool_free(sp, ha->srb_mempool);
1712         return 0;
1713 }