drivers/scsi/qla2xxx/qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <scsi/scsi_tcq.h>
12 #include <scsi/scsi_bsg_fc.h>
13 #include <scsi/scsi_eh.h>
14
15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
16 static void qla2x00_process_completed_request(struct scsi_qla_host *,
17         struct req_que *, uint32_t);
18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21         sts_entry_t *);
22
23 /**
24  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
25  * @irq: Interrupt number
26  * @dev_id: SCSI driver HA context
27  *
28  * Called by system whenever the host adapter generates an interrupt.
29  *
30  * Returns handled flag.
31  */
32 irqreturn_t
33 qla2100_intr_handler(int irq, void *dev_id)
34 {
35         scsi_qla_host_t *vha;
36         struct qla_hw_data *ha;
37         struct device_reg_2xxx __iomem *reg;
38         int             status;
39         unsigned long   iter;
40         uint16_t        hccr;
41         uint16_t        mb[4];
42         struct rsp_que *rsp;
43         unsigned long   flags;
44
45         rsp = (struct rsp_que *) dev_id;
46         if (!rsp) {
47                 printk(KERN_INFO
48                     "%s(): NULL response queue pointer\n", __func__);
49                 return (IRQ_NONE);
50         }
51
52         ha = rsp->hw;
53         reg = &ha->iobase->isp;
54         status = 0;
55
56         spin_lock_irqsave(&ha->hardware_lock, flags);
57         vha = pci_get_drvdata(ha->pdev);
58         for (iter = 50; iter--; ) {
59                 hccr = RD_REG_WORD(&reg->hccr);
60                 if (hccr & HCCR_RISC_PAUSE) {
61                         if (pci_channel_offline(ha->pdev))
62                                 break;
63
64                         /*
65                          * Issue a "HARD" reset in order for the RISC interrupt
66  * bit to be cleared.  Schedule a big hammer to get
67                          * out of the RISC PAUSED state.
68                          */
69                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
70                         RD_REG_WORD(&reg->hccr);
71
72                         ha->isp_ops->fw_dump(vha, 1);
73                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
74                         break;
75                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
76                         break;
77
78                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
79                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
80                         RD_REG_WORD(&reg->hccr);
81
82                         /* Get mailbox data. */
83                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
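                        /*
                         * Mailbox register 0 identifies the interrupt:
                         * 0x4000-0x7fff is a mailbox command completion,
                         * 0x8000-0xbfff is an asynchronous event that
                         * carries additional data in mailboxes 1-3.
                         */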
84                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
85                                 qla2x00_mbx_completion(vha, mb[0]);
86                                 status |= MBX_INTERRUPT;
87                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
88                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
89                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
90                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
91                                 qla2x00_async_event(vha, rsp, mb);
92                         } else {
93                                 /*EMPTY*/
94                                 DEBUG2(printk("scsi(%ld): Unrecognized "
95                                     "interrupt type (%d).\n",
96                                     vha->host_no, mb[0]));
97                         }
98                         /* Release mailbox registers. */
99                         WRT_REG_WORD(&reg->semaphore, 0);
100                         RD_REG_WORD(&reg->semaphore);
101                 } else {
102                         qla2x00_process_response_queue(rsp);
103
104                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
105                         RD_REG_WORD(&reg->hccr);
106                 }
107         }
108         spin_unlock_irqrestore(&ha->hardware_lock, flags);
109
110         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
111             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
112                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
113                 complete(&ha->mbx_intr_comp);
114         }
115
116         return (IRQ_HANDLED);
117 }
118
119 /**
120  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
121  * @irq: Interrupt number
122  * @dev_id: SCSI driver HA context
123  *
124  * Called by system whenever the host adapter generates an interrupt.
125  *
126  * Returns handled flag.
127  */
128 irqreturn_t
129 qla2300_intr_handler(int irq, void *dev_id)
130 {
131         scsi_qla_host_t *vha;
132         struct device_reg_2xxx __iomem *reg;
133         int             status;
134         unsigned long   iter;
135         uint32_t        stat;
136         uint16_t        hccr;
137         uint16_t        mb[4];
138         struct rsp_que *rsp;
139         struct qla_hw_data *ha;
140         unsigned long   flags;
141
142         rsp = (struct rsp_que *) dev_id;
143         if (!rsp) {
144                 printk(KERN_INFO
145                     "%s(): NULL response queue pointer\n", __func__);
146                 return (IRQ_NONE);
147         }
148
149         ha = rsp->hw;
150         reg = &ha->iobase->isp;
151         status = 0;
152
153         spin_lock_irqsave(&ha->hardware_lock, flags);
154         vha = pci_get_drvdata(ha->pdev);
155         for (iter = 50; iter--; ) {
156                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
157                 if (stat & HSR_RISC_PAUSED) {
158                         if (unlikely(pci_channel_offline(ha->pdev)))
159                                 break;
160
161                         hccr = RD_REG_WORD(&reg->hccr);
162                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163                                 qla_printk(KERN_INFO, ha, "Parity error -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165                         else
166                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
167                                     "HCCR=%x, Dumping firmware!\n", hccr);
168
169                         /*
170                          * Issue a "HARD" reset in order for the RISC
171                          * interrupt bit to be cleared.  Schedule a big
172  * hammer to get out of the RISC PAUSED state.
173                          */
174                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
175                         RD_REG_WORD(&reg->hccr);
176
177                         ha->isp_ops->fw_dump(vha, 1);
178                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
179                         break;
180                 } else if ((stat & HSR_RISC_INT) == 0)
181                         break;
182
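                /*
                 * The low byte of the host status register selects the
                 * event type: 0x01/0x02/0x10/0x11 are mailbox command
                 * completions, 0x12 is a full asynchronous event, 0x13
                 * indicates pending response queue entries, and 0x15/0x16
                 * are fast-post completions carried in the status itself.
                 */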
183                 switch (stat & 0xff) {
184                 case 0x1:
185                 case 0x2:
186                 case 0x10:
187                 case 0x11:
188                         qla2x00_mbx_completion(vha, MSW(stat));
189                         status |= MBX_INTERRUPT;
190
191                         /* Release mailbox registers. */
192                         WRT_REG_WORD(&reg->semaphore, 0);
193                         break;
194                 case 0x12:
195                         mb[0] = MSW(stat);
196                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
197                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
199                         qla2x00_async_event(vha, rsp, mb);
200                         break;
201                 case 0x13:
202                         qla2x00_process_response_queue(rsp);
203                         break;
204                 case 0x15:
205                         mb[0] = MBA_CMPLT_1_16BIT;
206                         mb[1] = MSW(stat);
207                         qla2x00_async_event(vha, rsp, mb);
208                         break;
209                 case 0x16:
210                         mb[0] = MBA_SCSI_COMPLETION;
211                         mb[1] = MSW(stat);
212                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
213                         qla2x00_async_event(vha, rsp, mb);
214                         break;
215                 default:
216                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
217                             "(%d).\n",
218                             vha->host_no, stat & 0xff));
219                         break;
220                 }
221                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222                 RD_REG_WORD_RELAXED(&reg->hccr);
223         }
224         spin_unlock_irqrestore(&ha->hardware_lock, flags);
225
226         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229                 complete(&ha->mbx_intr_comp);
230         }
231
232         return (IRQ_HANDLED);
233 }
234
235 /**
236  * qla2x00_mbx_completion() - Process mailbox command completions.
237  * @vha: SCSI driver HA context
238  * @mb0: Mailbox0 register
239  */
240 static void
241 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
242 {
243         uint16_t        cnt;
244         uint16_t __iomem *wptr;
245         struct qla_hw_data *ha = vha->hw;
246         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
247
248         /* Load return mailbox registers. */
249         ha->flags.mbox_int = 1;
250         ha->mailbox_out[0] = mb0;
251         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
252
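        /*
         * Copy the remaining out-mailbox registers.  On the ISP2200 the
         * registers from mailbox 8 upward live at a different offset, and
         * mailboxes 4 and 5 are read through the debounce helper to guard
         * against values that are still settling.
         */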
253         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
254                 if (IS_QLA2200(ha) && cnt == 8)
255                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
256                 if (cnt == 4 || cnt == 5)
257                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
258                 else
259                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
260
261                 wptr++;
262         }
263
264         if (ha->mcp) {
265                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
266                     __func__, vha->host_no, ha->mcp->mb[0]));
267         } else {
268                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
269                     __func__, vha->host_no));
270         }
271 }
272
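/*
 * qla81xx_idc_event() - Handle an Inter-Driver Communication (IDC) event.
 *
 * Captures the IDC mailbox registers (mailbox1 onward) for the event, and
 * posts an acknowledgement work item when a notification arrives with a
 * non-zero ACK timeout.
 */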
273 static void
274 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
275 {
276         static char *event[] =
277                 { "Complete", "Request Notification", "Time Extension" };
278         int rval;
279         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
280         uint16_t __iomem *wptr;
281         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
282
283         /* Seed data -- mailbox1 -> mailbox7. */
284         wptr = (uint16_t __iomem *)&reg24->mailbox1;
285         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286                 mb[cnt] = RD_REG_WORD(wptr);
287
288         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
289             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
290             event[aen & 0xff],
291             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
292
293         /* Acknowledgement needed? [Notify && non-zero timeout]. */
294         timeout = (descr >> 8) & 0xf;
295         if (aen != MBA_IDC_NOTIFY || !timeout)
296                 return;
297
298         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
299             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
300
301         rval = qla2x00_post_idc_ack_work(vha, mb);
302         if (rval != QLA_SUCCESS)
303                 qla_printk(KERN_WARNING, vha->hw,
304                     "IDC failed to post ACK.\n");
305 }
306
307 /**
308  * qla2x00_async_event() - Process asynchronous events.
309  * @vha: SCSI driver HA context
310  * @mb: Mailbox registers (0 - 3)
311  */
312 void
313 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
314 {
315 #define LS_UNKNOWN      2
316         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
317         char            *link_speed;
318         uint16_t        handle_cnt;
319         uint16_t        cnt, mbx;
320         uint32_t        handles[5];
321         struct qla_hw_data *ha = vha->hw;
322         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
324         uint32_t        rscn_entry, host_pid;
325         uint8_t         rscn_queue_index;
326         unsigned long   flags;
327
328         /* Setup to process RIO completion. */
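        /*
         * RIO events pack several SCSI command completion handles into a
         * single mailbox event; the switch below unpacks them into
         * handles[] and normalizes mb[0] to MBA_SCSI_COMPLETION before the
         * main event switch runs.
         */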
329         handle_cnt = 0;
330         if (IS_QLA8XXX_TYPE(ha))
331                 goto skip_rio;
332         switch (mb[0]) {
333         case MBA_SCSI_COMPLETION:
334                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
335                 handle_cnt = 1;
336                 break;
337         case MBA_CMPLT_1_16BIT:
338                 handles[0] = mb[1];
339                 handle_cnt = 1;
340                 mb[0] = MBA_SCSI_COMPLETION;
341                 break;
342         case MBA_CMPLT_2_16BIT:
343                 handles[0] = mb[1];
344                 handles[1] = mb[2];
345                 handle_cnt = 2;
346                 mb[0] = MBA_SCSI_COMPLETION;
347                 break;
348         case MBA_CMPLT_3_16BIT:
349                 handles[0] = mb[1];
350                 handles[1] = mb[2];
351                 handles[2] = mb[3];
352                 handle_cnt = 3;
353                 mb[0] = MBA_SCSI_COMPLETION;
354                 break;
355         case MBA_CMPLT_4_16BIT:
356                 handles[0] = mb[1];
357                 handles[1] = mb[2];
358                 handles[2] = mb[3];
359                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
360                 handle_cnt = 4;
361                 mb[0] = MBA_SCSI_COMPLETION;
362                 break;
363         case MBA_CMPLT_5_16BIT:
364                 handles[0] = mb[1];
365                 handles[1] = mb[2];
366                 handles[2] = mb[3];
367                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
368                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
369                 handle_cnt = 5;
370                 mb[0] = MBA_SCSI_COMPLETION;
371                 break;
372         case MBA_CMPLT_2_32BIT:
373                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
374                 handles[1] = le32_to_cpu(
375                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
376                     RD_MAILBOX_REG(ha, reg, 6));
377                 handle_cnt = 2;
378                 mb[0] = MBA_SCSI_COMPLETION;
379                 break;
380         default:
381                 break;
382         }
383 skip_rio:
384         switch (mb[0]) {
385         case MBA_SCSI_COMPLETION:       /* Fast Post */
386                 if (!vha->flags.online)
387                         break;
388
389                 for (cnt = 0; cnt < handle_cnt; cnt++)
390                         qla2x00_process_completed_request(vha, rsp->req,
391                                 handles[cnt]);
392                 break;
393
394         case MBA_RESET:                 /* Reset */
395                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
396                         vha->host_no));
397
398                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
399                 break;
400
401         case MBA_SYSTEM_ERR:            /* System Error */
402                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
403                 qla_printk(KERN_INFO, ha,
404                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
405                     "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
406
407                 ha->isp_ops->fw_dump(vha, 1);
408
409                 if (IS_FWI2_CAPABLE(ha)) {
410                         if (mb[1] == 0 && mb[2] == 0) {
411                                 qla_printk(KERN_ERR, ha,
412                                     "Unrecoverable Hardware Error: adapter "
413                                     "marked OFFLINE!\n");
414                                 vha->flags.online = 0;
415                         } else
416                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
417                 } else if (mb[1] == 0) {
418                         qla_printk(KERN_INFO, ha,
419                             "Unrecoverable Hardware Error: adapter marked "
420                             "OFFLINE!\n");
421                         vha->flags.online = 0;
422                 } else
423                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
424                 break;
425
426         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
427                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
428                     vha->host_no, mb[1]));
429                 qla_printk(KERN_WARNING, ha,
430                     "ISP Request Transfer Error (%x).\n", mb[1]);
431
432                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
433                 break;
434
435         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
436                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
437                     vha->host_no));
438                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
439
440                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
441                 break;
442
443         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
444                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
445                     vha->host_no));
446                 break;
447
448         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
449                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
450                     mb[1]));
451                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
452
453                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
454                         atomic_set(&vha->loop_state, LOOP_DOWN);
455                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
456                         qla2x00_mark_all_devices_lost(vha, 1);
457                 }
458
459                 if (vha->vp_idx) {
460                         atomic_set(&vha->vp_state, VP_FAILED);
461                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
462                 }
463
464                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
465                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
466
467                 vha->flags.management_server_logged_in = 0;
468                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
469                 break;
470
471         case MBA_LOOP_UP:               /* Loop Up Event */
472                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
473                         link_speed = link_speeds[0];
474                         ha->link_data_rate = PORT_SPEED_1GB;
475                 } else {
476                         link_speed = link_speeds[LS_UNKNOWN];
477                         if (mb[1] < 5)
478                                 link_speed = link_speeds[mb[1]];
479                         else if (mb[1] == 0x13)
480                                 link_speed = link_speeds[5];
481                         ha->link_data_rate = mb[1];
482                 }
483
484                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
485                     vha->host_no, link_speed));
486                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
487                     link_speed);
488
489                 vha->flags.management_server_logged_in = 0;
490                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
491                 break;
492
493         case MBA_LOOP_DOWN:             /* Loop Down Event */
494                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
495                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
496                     "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
497                     mbx));
498                 qla_printk(KERN_INFO, ha,
499                     "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
500                     mbx);
501
502                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
503                         atomic_set(&vha->loop_state, LOOP_DOWN);
504                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
505                         vha->device_flags |= DFLG_NO_CABLE;
506                         qla2x00_mark_all_devices_lost(vha, 1);
507                 }
508
509                 if (vha->vp_idx) {
510                         atomic_set(&vha->vp_state, VP_FAILED);
511                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
512                 }
513
514                 vha->flags.management_server_logged_in = 0;
515                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
516                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
517                 break;
518
519         case MBA_LIP_RESET:             /* LIP reset occurred */
520                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
521                     vha->host_no, mb[1]));
522                 qla_printk(KERN_INFO, ha,
523                     "LIP reset occurred (%x).\n", mb[1]);
524
525                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
526                         atomic_set(&vha->loop_state, LOOP_DOWN);
527                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
528                         qla2x00_mark_all_devices_lost(vha, 1);
529                 }
530
531                 if (vha->vp_idx) {
532                         atomic_set(&vha->vp_state, VP_FAILED);
533                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
534                 }
535
536                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
537
538                 ha->operating_mode = LOOP;
539                 vha->flags.management_server_logged_in = 0;
540                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
541                 break;
542
543         /* case MBA_DCBX_COMPLETE: */
544         case MBA_POINT_TO_POINT:        /* Point-to-Point */
545                 if (IS_QLA2100(ha))
546                         break;
547
548                 if (IS_QLA8XXX_TYPE(ha))
549                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
550                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
551                 else
552                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
553                             "received.\n", vha->host_no));
554
555                 /*
556                  * Until there's a transition from loop down to loop up, treat
557                  * this as loop down only.
558                  */
559                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
560                         atomic_set(&vha->loop_state, LOOP_DOWN);
561                         if (!atomic_read(&vha->loop_down_timer))
562                                 atomic_set(&vha->loop_down_timer,
563                                     LOOP_DOWN_TIME);
564                         qla2x00_mark_all_devices_lost(vha, 1);
565                 }
566
567                 if (vha->vp_idx) {
568                         atomic_set(&vha->vp_state, VP_FAILED);
569                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
570                 }
571
572                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
573                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
574
575                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
576                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
577
578                 ha->flags.gpsc_supported = 1;
579                 vha->flags.management_server_logged_in = 0;
580                 break;
581
582         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
583                 if (IS_QLA2100(ha))
584                         break;
585
586                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
587                     "received.\n",
588                     vha->host_no));
589                 qla_printk(KERN_INFO, ha,
590                     "Configuration change detected: value=%x.\n", mb[1]);
591
592                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
593                         atomic_set(&vha->loop_state, LOOP_DOWN);
594                         if (!atomic_read(&vha->loop_down_timer))
595                                 atomic_set(&vha->loop_down_timer,
596                                     LOOP_DOWN_TIME);
597                         qla2x00_mark_all_devices_lost(vha, 1);
598                 }
599
600                 if (vha->vp_idx) {
601                         atomic_set(&vha->vp_state, VP_FAILED);
602                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
603                 }
604
605                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
606                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
607                 break;
608
609         case MBA_PORT_UPDATE:           /* Port database update */
610                 /*
611                  * Handle only global and vn-port update events
612                  *
613                  * Relevant inputs:
614                  * mb[1] = N_Port handle of changed port
615                  * OR 0xffff for global event
616                  * mb[2] = New login state
617                  * 7 = Port logged out
618                  * mb[3] = LSB is vp_idx, 0xff = all vps
619                  *
620                  * Skip processing if:
621                  *       Event is global, vp_idx is NOT all vps,
622                  *           vp_idx does not match
623                  *       Event is not global, vp_idx does not match
624                  */
625                 if (IS_QLA2XXX_MIDTYPE(ha) &&
626                     ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
627                         (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
628                         break;
629
630                 /* Global event -- port logout or port unavailable. */
631                 if (mb[1] == 0xffff && mb[2] == 0x7) {
632                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
633                             vha->host_no));
634                         DEBUG(printk(KERN_INFO
635                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
636                             vha->host_no, mb[1], mb[2], mb[3]));
637
638                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
639                                 atomic_set(&vha->loop_state, LOOP_DOWN);
640                                 atomic_set(&vha->loop_down_timer,
641                                     LOOP_DOWN_TIME);
642                                 vha->device_flags |= DFLG_NO_CABLE;
643                                 qla2x00_mark_all_devices_lost(vha, 1);
644                         }
645
646                         if (vha->vp_idx) {
647                                 atomic_set(&vha->vp_state, VP_FAILED);
648                                 fc_vport_set_state(vha->fc_vport,
649                                     FC_VPORT_FAILED);
650                                 qla2x00_mark_all_devices_lost(vha, 1);
651                         }
652
653                         vha->flags.management_server_logged_in = 0;
654                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
655                         break;
656                 }
657
658                 /*
659                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
660                  * event etc. earlier indicating loop is down) then process
661                  * it.  Otherwise ignore it and wait for RSCN to come in.
662                  */
663                 atomic_set(&vha->loop_down_timer, 0);
664                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
665                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
666                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
667                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
668                             mb[2], mb[3]));
669                         break;
670                 }
671
672                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
673                     vha->host_no));
674                 DEBUG(printk(KERN_INFO
675                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
676                     vha->host_no, mb[1], mb[2], mb[3]));
677
678                 /*
679                  * Mark all devices as missing so we will login again.
680                  */
681                 atomic_set(&vha->loop_state, LOOP_UP);
682
683                 qla2x00_mark_all_devices_lost(vha, 1);
684
685                 vha->flags.rscn_queue_overflow = 1;
686
687                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
688                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
689                 break;
690
691         case MBA_RSCN_UPDATE:           /* State Change Registration */
692                 /* Check if the Vport has issued a SCR */
693                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
694                         break;
695                 /* Only handle SCNs for our Vport index. */
696                 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
697                         break;
698
699                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
700                     vha->host_no));
701                 DEBUG(printk(KERN_INFO
702                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
703                     vha->host_no, mb[1], mb[2], mb[3]));
704
705                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
706                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
707                                 | vha->d_id.b.al_pa;
708                 if (rscn_entry == host_pid) {
709                         DEBUG(printk(KERN_INFO
710                             "scsi(%ld): Ignoring RSCN update to local host "
711                             "port ID (%06x)\n",
712                             vha->host_no, host_pid));
713                         break;
714                 }
715
716                 /* Ignore reserved bits from RSCN-payload. */
717                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
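                /* Queue the RSCN entry in the driver's circular RSCN list;
                 * if the ring is full, flag an overflow rather than
                 * overwrite unconsumed entries. */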
718                 rscn_queue_index = vha->rscn_in_ptr + 1;
719                 if (rscn_queue_index == MAX_RSCN_COUNT)
720                         rscn_queue_index = 0;
721                 if (rscn_queue_index != vha->rscn_out_ptr) {
722                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
723                         vha->rscn_in_ptr = rscn_queue_index;
724                 } else {
725                         vha->flags.rscn_queue_overflow = 1;
726                 }
727
728                 atomic_set(&vha->loop_state, LOOP_UPDATE);
729                 atomic_set(&vha->loop_down_timer, 0);
730                 vha->flags.management_server_logged_in = 0;
731
732                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
733                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
734                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
735                 break;
736
737         /* case MBA_RIO_RESPONSE: */
738         case MBA_ZIO_RESPONSE:
739                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
740                     vha->host_no));
741
742                 if (IS_FWI2_CAPABLE(ha))
743                         qla24xx_process_response_queue(vha, rsp);
744                 else
745                         qla2x00_process_response_queue(rsp);
746                 break;
747
748         case MBA_DISCARD_RND_FRAME:
749                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
750                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
751                 break;
752
753         case MBA_TRACE_NOTIFICATION:
754                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
755                 vha->host_no, mb[1], mb[2]));
756                 break;
757
758         case MBA_ISP84XX_ALERT:
759                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
760                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
761
762                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
763                 switch (mb[1]) {
764                 case A84_PANIC_RECOVERY:
765                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
766                             "%04x %04x\n", mb[2], mb[3]);
767                         break;
768                 case A84_OP_LOGIN_COMPLETE:
769                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
770                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
771                             "firmware version %x\n", ha->cs84xx->op_fw_version));
772                         break;
773                 case A84_DIAG_LOGIN_COMPLETE:
774                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
775                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
776                             "diagnostic firmware version %x\n",
777                             ha->cs84xx->diag_fw_version));
778                         break;
779                 case A84_GOLD_LOGIN_COMPLETE:
780                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
781                         ha->cs84xx->fw_update = 1;
782                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
783                             "firmware version %x\n",
784                             ha->cs84xx->gold_fw_version));
785                         break;
786                 default:
787                         qla_printk(KERN_ERR, ha,
788                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
789                             mb[1], mb[2], mb[3]);
790                 }
791                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
792                 break;
793         case MBA_DCBX_START:
794                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
795                     vha->host_no, mb[1], mb[2], mb[3]));
796                 break;
797         case MBA_DCBX_PARAM_UPDATE:
798                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
799                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
800                 break;
801         case MBA_FCF_CONF_ERR:
802                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
803                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
804                 break;
805         case MBA_IDC_COMPLETE:
806         case MBA_IDC_NOTIFY:
807         case MBA_IDC_TIME_EXT:
808                 qla81xx_idc_event(vha, mb[0], mb[1]);
809                 break;
810         }
811
812         if (!vha->vp_idx && ha->num_vhosts)
813                 qla2x00_alert_all_vps(rsp, mb);
814 }
815
816 /**
817  * qla2x00_process_completed_request() - Process a Fast Post response.
818  * @vha: SCSI driver HA context
819  * @index: SRB index
820  */
821 static void
822 qla2x00_process_completed_request(struct scsi_qla_host *vha,
823                                 struct req_que *req, uint32_t index)
824 {
825         srb_t *sp;
826         struct qla_hw_data *ha = vha->hw;
827
828         /* Validate handle. */
829         if (index >= MAX_OUTSTANDING_COMMANDS) {
830                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
831                     vha->host_no, index));
832                 qla_printk(KERN_WARNING, ha,
833                     "Invalid SCSI completion handle %d.\n", index);
834
835                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
836                 return;
837         }
838
839         sp = req->outstanding_cmds[index];
840         if (sp) {
841                 /* Free outstanding command slot. */
842                 req->outstanding_cmds[index] = NULL;
843
844                 /* Save ISP completion status */
845                 sp->cmd->result = DID_OK << 16;
846                 qla2x00_sp_compl(ha, sp);
847         } else {
848                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
849                         " handle(0x%x)\n", vha->host_no, req->id, index));
850                 qla_printk(KERN_WARNING, ha,
851                     "Invalid ISP SCSI completion handle\n");
852
853                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
854         }
855 }
856
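/*
 * qla2x00_get_sp_from_handle() - Validate an IOCB handle and look up its SRB.
 *
 * Clears the outstanding-command slot on a valid match.  An out-of-range
 * handle schedules an ISP abort; a stale or mismatched handle returns NULL.
 */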
857 static srb_t *
858 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
859     struct req_que *req, void *iocb)
860 {
861         struct qla_hw_data *ha = vha->hw;
862         sts_entry_t *pkt = iocb;
863         srb_t *sp = NULL;
864         uint16_t index;
865
866         index = LSW(pkt->handle);
867         if (index >= MAX_OUTSTANDING_COMMANDS) {
868                 qla_printk(KERN_WARNING, ha,
869                     "%s: Invalid completion handle (%x).\n", func, index);
870                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
871                 goto done;
872         }
873         sp = req->outstanding_cmds[index];
874         if (!sp) {
875                 qla_printk(KERN_WARNING, ha,
876                     "%s: Invalid completion handle (%x) -- timed-out.\n", func,
877                     index);
878                 return sp;
879         }
880         if (sp->handle != index) {
881                 qla_printk(KERN_WARNING, ha,
882                     "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
883                     index);
884                 return NULL;
885         }
886
887         req->outstanding_cmds[index] = NULL;
888
889 done:
890         return sp;
891 }
892
893 static void
894 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
895     struct mbx_entry *mbx)
896 {
897         const char func[] = "MBX-IOCB";
898         const char *type;
899         fc_port_t *fcport;
900         srb_t *sp;
901         struct srb_iocb *lio;
902         struct srb_ctx *ctx;
903         uint16_t *data;
904         uint16_t status;
905
906         sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
907         if (!sp)
908                 return;
909
910         ctx = sp->ctx;
911         lio = ctx->u.iocb_cmd;
912         type = ctx->name;
913         fcport = sp->fcport;
914         data = lio->u.logio.data;
915
916         data[0] = MBS_COMMAND_ERROR;
917         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
918             QLA_LOGIO_LOGIN_RETRIED : 0;
919         if (mbx->entry_status) {
920                 DEBUG2(printk(KERN_WARNING
921                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
922                     "status=%x state-flag=%x status-flags=%x.\n",
923                     fcport->vha->host_no, sp->handle, type,
924                     mbx->entry_status, le16_to_cpu(mbx->status),
925                     le16_to_cpu(mbx->state_flags),
926                     le16_to_cpu(mbx->status_flags)));
927                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
928
929                 goto logio_done;
930         }
931
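        /* A raw status of 0x30 is treated as success for login IOCBs when
         * mailbox 0 reports MBS_COMMAND_COMPLETE. */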
932         status = le16_to_cpu(mbx->status);
933         if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
934             le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
935                 status = 0;
936         if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
937                 DEBUG2(printk(KERN_DEBUG
938                     "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
939                     fcport->vha->host_no, sp->handle, type,
940                     le16_to_cpu(mbx->mb1)));
941
942                 data[0] = MBS_COMMAND_COMPLETE;
943                 if (ctx->type == SRB_LOGIN_CMD) {
944                         fcport->port_type = FCT_TARGET;
945                         if (le16_to_cpu(mbx->mb1) & BIT_0)
946                                 fcport->port_type = FCT_INITIATOR;
947                         if (le16_to_cpu(mbx->mb1) & BIT_1)
948                                 fcport->flags |= FCF_FCP2_DEVICE;
949                 }
950                 goto logio_done;
951         }
952
953         data[0] = le16_to_cpu(mbx->mb0);
954         switch (data[0]) {
955         case MBS_PORT_ID_USED:
956                 data[1] = le16_to_cpu(mbx->mb1);
957                 break;
958         case MBS_LOOP_ID_USED:
959                 break;
960         default:
961                 data[0] = MBS_COMMAND_ERROR;
962                 break;
963         }
964
965         DEBUG2(printk(KERN_WARNING
966             "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
967             "mb6=%x mb7=%x.\n",
968             fcport->vha->host_no, sp->handle, type, status,
969             le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
970             le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
971             le16_to_cpu(mbx->mb7)));
972
973 logio_done:
974         lio->done(sp);
975 }
976
977 static void
978 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
979     struct sts_entry_24xx *pkt, int iocb_type)
980 {
981         const char func[] = "ELS_CT_IOCB";
982         const char *type;
983         struct qla_hw_data *ha = vha->hw;
984         srb_t *sp;
985         struct srb_ctx *sp_bsg;
986         struct fc_bsg_job *bsg_job;
987         uint16_t comp_status;
988         uint32_t fw_status[3];
989         uint8_t* fw_sts_ptr;
990
991         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
992         if (!sp)
993                 return;
994         sp_bsg = sp->ctx;
995         bsg_job = sp_bsg->u.bsg_job;
996
997         type = NULL;
998         switch (sp_bsg->type) {
999         case SRB_ELS_CMD_RPT:
1000         case SRB_ELS_CMD_HST:
1001                 type = "els";
1002                 break;
1003         case SRB_CT_CMD:
1004                 type = "ct pass-through";
1005                 break;
1006         default:
1007                 qla_printk(KERN_WARNING, ha,
1008                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1009                     sp_bsg->type);
1010                 return;
1011         }
1012
1013         comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1014         fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1015         fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1016
1017         /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1018          * FC payload to the caller.
1019          */
1020         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1021         bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1022
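        /*
         * On the error paths below the firmware status words (completion
         * status plus both ELS error subcodes) are copied just past the
         * fc_bsg_reply in the job's sense buffer so the caller can decode
         * the failure.
         */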
1023         if (comp_status != CS_COMPLETE) {
1024                 if (comp_status == CS_DATA_UNDERRUN) {
1025                         bsg_job->reply->result = DID_OK << 16;
1026                         bsg_job->reply->reply_payload_rcv_len =
1027                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1028
1029                         DEBUG2(qla_printk(KERN_WARNING, ha,
1030                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1031                             "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1032                                 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1033                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
1034                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1035                         memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1036                 }
1037                 else {
1038                         DEBUG2(qla_printk(KERN_WARNING, ha,
1039                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1040                             "error subcode 1=0x%x error subcode 2=0x%x.\n",
1041                                 vha->host_no, sp->handle, type, comp_status,
1042                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
1043                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
1044                         bsg_job->reply->result = DID_ERROR << 16;
1045                         bsg_job->reply->reply_payload_rcv_len = 0;
1046                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1047                         memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1048                 }
1049                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1050         }
1051         else {
1052                 bsg_job->reply->result = DID_OK << 16;
1053                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1054                 bsg_job->reply_len = 0;
1055         }
1056
1057         dma_unmap_sg(&ha->pdev->dev,
1058             bsg_job->request_payload.sg_list,
1059             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1060         dma_unmap_sg(&ha->pdev->dev,
1061             bsg_job->reply_payload.sg_list,
1062             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1063         if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1064             (sp_bsg->type == SRB_CT_CMD))
1065                 kfree(sp->fcport);
1066         kfree(sp->ctx);
1067         mempool_free(sp, ha->srb_mempool);
1068         bsg_job->job_done(bsg_job);
1069 }
1070
1071 static void
1072 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1073     struct logio_entry_24xx *logio)
1074 {
1075         const char func[] = "LOGIO-IOCB";
1076         const char *type;
1077         fc_port_t *fcport;
1078         srb_t *sp;
1079         struct srb_iocb *lio;
1080         struct srb_ctx *ctx;
1081         uint16_t *data;
1082         uint32_t iop[2];
1083
1084         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1085         if (!sp)
1086                 return;
1087
1088         ctx = sp->ctx;
1089         lio = ctx->u.iocb_cmd;
1090         type = ctx->name;
1091         fcport = sp->fcport;
1092         data = lio->u.logio.data;
1093
1094         data[0] = MBS_COMMAND_ERROR;
1095         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1096                 QLA_LOGIO_LOGIN_RETRIED : 0;
1097         if (logio->entry_status) {
1098                 DEBUG2(printk(KERN_WARNING
1099                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1100                     fcport->vha->host_no, sp->handle, type,
1101                     logio->entry_status));
1102                 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1103
1104                 goto logio_done;
1105         }
1106
1107         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1108                 DEBUG2(printk(KERN_DEBUG
1109                     "scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
1110                     fcport->vha->host_no, sp->handle, type,
1111                     le32_to_cpu(logio->io_parameter[0])));
1112
1113                 data[0] = MBS_COMMAND_COMPLETE;
1114                 if (ctx->type != SRB_LOGIN_CMD)
1115                         goto logio_done;
1116
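                /* Decode PLOGI IOCB parameter 0: BIT_4 = target role,
                 * BIT_5 = initiator role, BIT_8 = FCP-2 capable device;
                 * parameters 7-10 advertise class 2/3 service support. */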
1117                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1118                 if (iop[0] & BIT_4) {
1119                         fcport->port_type = FCT_TARGET;
1120                         if (iop[0] & BIT_8)
1121                                 fcport->flags |= FCF_FCP2_DEVICE;
1122                 }
1123                 if (iop[0] & BIT_5)
1124                         fcport->port_type = FCT_INITIATOR;
1125                 if (logio->io_parameter[7] || logio->io_parameter[8])
1126                         fcport->supported_classes |= FC_COS_CLASS2;
1127                 if (logio->io_parameter[9] || logio->io_parameter[10])
1128                         fcport->supported_classes |= FC_COS_CLASS3;
1129
1130                 goto logio_done;
1131         }
1132
1133         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1134         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1135         switch (iop[0]) {
1136         case LSC_SCODE_PORTID_USED:
1137                 data[0] = MBS_PORT_ID_USED;
1138                 data[1] = LSW(iop[1]);
1139                 break;
1140         case LSC_SCODE_NPORT_USED:
1141                 data[0] = MBS_LOOP_ID_USED;
1142                 break;
1143         case LSC_SCODE_CMD_FAILED:
1144                 if ((iop[1] & 0xff) == 0x05) {
1145                         data[0] = MBS_NOT_LOGGED_IN;
1146                         break;
1147                 }
1148                 /* Fall through. */
1149         default:
1150                 data[0] = MBS_COMMAND_ERROR;
1151                 break;
1152         }
1153
1154         DEBUG2(printk(KERN_WARNING
1155             "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
1156             fcport->vha->host_no, sp->handle, type,
1157             le16_to_cpu(logio->comp_status),
1158             le32_to_cpu(logio->io_parameter[0]),
1159             le32_to_cpu(logio->io_parameter[1])));
1160
1161 logio_done:
1162         lio->done(sp);
1163 }
1164
1165 static void
1166 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1167     struct tsk_mgmt_entry *tsk)
1168 {
1169         const char func[] = "TMF-IOCB";
1170         const char *type;
1171         fc_port_t *fcport;
1172         srb_t *sp;
1173         struct srb_iocb *iocb;
1174         struct srb_ctx *ctx;
1175         struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1176         int error = 1;
1177
1178         sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1179         if (!sp)
1180                 return;
1181
1182         ctx = sp->ctx;
1183         iocb = ctx->u.iocb_cmd;
1184         type = ctx->name;
1185         fcport = sp->fcport;
1186
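        /*
         * A task-management IOCB is successful only if the entry and
         * completion status are clean, valid response info is present,
         * and the response code byte (data[3]) is zero.
         */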
1187         if (sts->entry_status) {
1188                 DEBUG2(printk(KERN_WARNING
1189                     "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
1190                     fcport->vha->host_no, sp->handle, type,
1191                     sts->entry_status));
1192         } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1193                 DEBUG2(printk(KERN_WARNING
1194                     "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
1195                     fcport->vha->host_no, sp->handle, type,
1196                     sts->comp_status));
1197         } else if (!(le16_to_cpu(sts->scsi_status) &
1198             SS_RESPONSE_INFO_LEN_VALID)) {
1199                 DEBUG2(printk(KERN_WARNING
1200                     "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
1201                     fcport->vha->host_no, sp->handle, type,
1202                     sts->scsi_status));
1203         } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1204                 DEBUG2(printk(KERN_WARNING
1205                     "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
1206                     fcport->vha->host_no, sp->handle, type,
1207                     sts->rsp_data_len));
1208         } else if (sts->data[3]) {
1209                 DEBUG2(printk(KERN_WARNING
1210                     "scsi(%ld:%x): Async-%s error - response(%x).\n",
1211                     fcport->vha->host_no, sp->handle, type,
1212                     sts->data[3]));
1213         } else {
1214                 error = 0;
1215         }
1216
1217         if (error) {
1218                 iocb->u.tmf.data = error;
1219                 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
1220         }
1221
1222         iocb->done(sp);
1223 }
1224
1225 static void
1226 qla24xx_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1227     struct mrk_entry_24xx *mrk)
1228 {
1229         const char func[] = "MRK-IOCB";
1230         const char *type;
1231         fc_port_t *fcport;
1232         srb_t *sp;
1233         struct srb_iocb *iocb;
1234         struct srb_ctx *ctx;
1235         struct sts_entry_24xx *sts = (struct sts_entry_24xx *)mrk;
1236
1237         sp = qla2x00_get_sp_from_handle(vha, func, req, mrk);
1238         if (!sp)
1239                 return;
1240
1241         ctx = sp->ctx;
1242         iocb = ctx->u.iocb_cmd;
1243         type = ctx->name;
1244         fcport = sp->fcport;
1245
1246         if (sts->entry_status) {
1247                 iocb->u.marker.data = 1;
1248                 DEBUG2(printk(KERN_WARNING
1249                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1250                     fcport->vha->host_no, sp->handle, type,
1251                     sts->entry_status));
1252                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mrk, sizeof(*sts)));
1253         }
1254
1255         iocb->done(sp);
1256 }
1257
1258 /**
1259  * qla2x00_process_response_queue() - Process response queue entries.
1260  * @rsp: response queue
1261  */
1262 void
1263 qla2x00_process_response_queue(struct rsp_que *rsp)
1264 {
1265         struct scsi_qla_host *vha;
1266         struct qla_hw_data *ha = rsp->hw;
1267         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1268         sts_entry_t     *pkt;
1269         uint16_t        handle_cnt;
1270         uint16_t        cnt;
1271
1272         vha = pci_get_drvdata(ha->pdev);
1273
1274         if (!vha->flags.online)
1275                 return;
1276
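        /*
         * Walk the response ring until an entry still stamped
         * RESPONSE_PROCESSED is reached; each consumed entry is re-stamped
         * below so it is never handled twice.
         */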
1277         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1278                 pkt = (sts_entry_t *)rsp->ring_ptr;
1279
1280                 rsp->ring_index++;
1281                 if (rsp->ring_index == rsp->length) {
1282                         rsp->ring_index = 0;
1283                         rsp->ring_ptr = rsp->ring;
1284                 } else {
1285                         rsp->ring_ptr++;
1286                 }
1287
1288                 if (pkt->entry_status != 0) {
1289                         DEBUG3(printk(KERN_INFO
1290                             "scsi(%ld): Process error entry.\n", vha->host_no));
1291
1292                         qla2x00_error_entry(vha, rsp, pkt);
1293                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1294                         wmb();
1295                         continue;
1296                 }
1297
1298                 switch (pkt->entry_type) {
1299                 case STATUS_TYPE:
1300                         qla2x00_status_entry(vha, rsp, pkt);
1301                         break;
1302                 case STATUS_TYPE_21:
1303                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1304                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1305                                 qla2x00_process_completed_request(vha, rsp->req,
1306                                     ((sts21_entry_t *)pkt)->handle[cnt]);
1307                         }
1308                         break;
1309                 case STATUS_TYPE_22:
1310                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1311                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1312                                 qla2x00_process_completed_request(vha, rsp->req,
1313                                     ((sts22_entry_t *)pkt)->handle[cnt]);
1314                         }
1315                         break;
1316                 case STATUS_CONT_TYPE:
1317                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1318                         break;
1319                 case MBX_IOCB_TYPE:
1320                         qla2x00_mbx_iocb_entry(vha, rsp->req,
1321                             (struct mbx_entry *)pkt);
1322                         break;
1323                 default:
1324                         /* Type Not Supported. */
1325                         DEBUG4(printk(KERN_WARNING
1326                             "scsi(%ld): Received unknown response pkt type %x "
1327                             "entry status=%x.\n",
1328                             vha->host_no, pkt->entry_type, pkt->entry_status));
1329                         break;
1330                 }
1331                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1332                 wmb();
1333         }
1334
1335         /* Adjust ring index */
1336         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1337 }
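
/*
 * Illustrative sketch, not part of the driver: the loop above consumes
 * response-ring entries by advancing the ring index with wrap-around,
 * stamping each consumed entry RESPONSE_PROCESSED, and only then
 * publishing the new out-pointer to the chip. The minimal helper below
 * mirrors just the index arithmetic; the example_rsp_ring type and
 * example_ring_advance name are invented for illustration.
 */
struct example_rsp_ring {
	uint16_t ring_index;	/* next entry to consume */
	uint16_t length;	/* total entries in the ring */
};

static inline uint16_t
example_ring_advance(struct example_rsp_ring *r)
{
	/* Step to the next entry, wrapping to slot 0 at the end. */
	r->ring_index++;
	if (r->ring_index == r->length)
		r->ring_index = 0;
	return r->ring_index;
}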
1338
1339 static inline void
1340 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1341         struct rsp_que *rsp)
1342 {
1343         struct scsi_cmnd *cp = sp->cmd;
1344
1345         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1346                 sense_len = SCSI_SENSE_BUFFERSIZE;
1347
1348         sp->request_sense_length = sense_len;
1349         sp->request_sense_ptr = cp->sense_buffer;
1350         if (sp->request_sense_length > 32)
1351                 sense_len = 32;
1352
1353         memcpy(cp->sense_buffer, sense_data, sense_len);
1354
1355         sp->request_sense_ptr += sense_len;
1356         sp->request_sense_length -= sense_len;
1357         if (sp->request_sense_length != 0)
1358                 rsp->status_srb = sp;
1359
1360         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1361             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
1362             cp->device->channel, cp->device->id, cp->device->lun, cp,
1363             cp->serial_number));
1364         if (sense_len)
1365                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1366 }
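
/*
 * Illustrative sketch, not part of the driver: a status IOCB carries at
 * most 32 bytes of sense data here, so qla2x00_handle_sense() leaves a
 * cursor (request_sense_ptr/request_sense_length) behind and the
 * remainder arrives via status-continuation entries. The helper below
 * shows that cursor bookkeeping in isolation; example_sense_cursor and
 * example_copy_sense_chunk are invented names.
 */
struct example_sense_cursor {
	uint8_t		*dst;		/* next byte to fill */
	uint32_t	remaining;	/* bytes still expected */
};

static inline void
example_copy_sense_chunk(struct example_sense_cursor *cur,
    const uint8_t *src, uint32_t chunk)
{
	if (chunk > cur->remaining)
		chunk = cur->remaining;
	memcpy(cur->dst, src, chunk);
	cur->dst += chunk;
	cur->remaining -= chunk;	/* 0 means the command can complete */
}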
1367
1368 struct scsi_dif_tuple {
1369         __be16 guard;       /* Checksum */
1370         __be16 app_tag;         /* APPL identifier */
1371         __be32 ref_tag;         /* Target LBA or indirect LBA */
1372 };
1373
1374 /*
1375  * Checks the guard, application tag and reference tag for the type of
1376  * error detected by the HBA. On a mismatch, the ASC/ASCQ fields in the
1377  * sense buffer are set with ILLEGAL_REQUEST to indicate to the kernel
1378  * that the HBA detected the error.
1379  */
1380 static inline void
1381 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1382 {
1383         struct scsi_cmnd *cmd = sp->cmd;
1384         struct scsi_dif_tuple   *ep =
1385                         (struct scsi_dif_tuple *)&sts24->data[20];
1386         struct scsi_dif_tuple   *ap =
1387                         (struct scsi_dif_tuple *)&sts24->data[12];
1388         uint32_t        e_ref_tag, a_ref_tag;
1389         uint16_t        e_app_tag, a_app_tag;
1390         uint16_t        e_guard, a_guard;
1391
1392         e_ref_tag = be32_to_cpu(ep->ref_tag);
1393         a_ref_tag = be32_to_cpu(ap->ref_tag);
1394         e_app_tag = be16_to_cpu(ep->app_tag);
1395         a_app_tag = be16_to_cpu(ap->app_tag);
1396         e_guard = be16_to_cpu(ep->guard);
1397         a_guard = be16_to_cpu(ap->guard);
1398
1399         DEBUG18(printk(KERN_DEBUG
1400             "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
1401
1402         DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1403             " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1404             " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
1405             cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1406             a_app_tag, e_app_tag, a_guard, e_guard));
1407
1408
1409         /* check guard */
1410         if (e_guard != a_guard) {
1411                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1412                     0x10, 0x1);
1413                 set_driver_byte(cmd, DRIVER_SENSE);
1414                 set_host_byte(cmd, DID_ABORT);
1415                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1416                 return;
1417         }
1418
1419         /* check appl tag */
1420         if (e_app_tag != a_app_tag) {
1421                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1422                     0x10, 0x2);
1423                 set_driver_byte(cmd, DRIVER_SENSE);
1424                 set_host_byte(cmd, DID_ABORT);
1425                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1426                 return;
1427         }
1428
1429         /* check ref tag */
1430         if (e_ref_tag != a_ref_tag) {
1431                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1432                     0x10, 0x3);
1433                 set_driver_byte(cmd, DRIVER_SENSE);
1434                 set_host_byte(cmd, DID_ABORT);
1435                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1436                 return;
1437         }
1438 }
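
/*
 * Illustrative sketch, not part of the driver: a DIF protection tuple is
 * eight big-endian bytes on the wire -- a 16-bit guard (CRC), a 16-bit
 * application tag and a 32-bit reference tag (normally the low LBA
 * bits). The helper below restates the expected-vs-actual comparison
 * order used above (guard, then app tag, then ref tag) and returns the
 * ASCQ value the driver places in the sense data; example_dif_compare
 * is an invented name.
 */
static inline int
example_dif_compare(const struct scsi_dif_tuple *exp,
    const struct scsi_dif_tuple *act)
{
	if (be16_to_cpu(exp->guard) != be16_to_cpu(act->guard))
		return 0x1;	/* guard check failed */
	if (be16_to_cpu(exp->app_tag) != be16_to_cpu(act->app_tag))
		return 0x2;	/* application tag check failed */
	if (be32_to_cpu(exp->ref_tag) != be32_to_cpu(act->ref_tag))
		return 0x3;	/* reference tag check failed */
	return 0;		/* tuples match */
}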
1439
1440 /**
1441  * qla2x00_status_entry() - Process a Status IOCB entry.
1442  * @vha: SCSI driver HA context
1443  * @pkt: Entry pointer
1444  */
1445 static void
1446 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1447 {
1448         srb_t           *sp;
1449         fc_port_t       *fcport;
1450         struct scsi_cmnd *cp;
1451         sts_entry_t *sts;
1452         struct sts_entry_24xx *sts24;
1453         uint16_t        comp_status;
1454         uint16_t        scsi_status;
1455         uint16_t        ox_id;
1456         uint8_t         lscsi_status;
1457         int32_t         resid;
1458         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
1459         uint8_t         *rsp_info, *sense_data;
1460         struct qla_hw_data *ha = vha->hw;
1461         uint32_t handle;
1462         uint16_t que;
1463         struct req_que *req;
1464         int logit = 1;
1465
1466         sts = (sts_entry_t *) pkt;
1467         sts24 = (struct sts_entry_24xx *) pkt;
1468         if (IS_FWI2_CAPABLE(ha)) {
1469                 comp_status = le16_to_cpu(sts24->comp_status);
1470                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1471         } else {
1472                 comp_status = le16_to_cpu(sts->comp_status);
1473                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1474         }
1475         handle = (uint32_t) LSW(sts->handle);
1476         que = MSW(sts->handle);
1477         req = ha->req_q_map[que];
1478
1479         /* Fast path completion. */
1480         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1481                 qla2x00_process_completed_request(vha, req, handle);
1482
1483                 return;
1484         }
1485
1486         /* Validate handle. */
1487         if (handle < MAX_OUTSTANDING_COMMANDS) {
1488                 sp = req->outstanding_cmds[handle];
1489                 req->outstanding_cmds[handle] = NULL;
1490         } else
1491                 sp = NULL;
1492
1493         if (sp == NULL) {
1494                 qla_printk(KERN_WARNING, ha,
1495                     "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1496                     sts->handle);
1497
1498                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1499                 qla2xxx_wake_dpc(vha);
1500                 return;
1501         }
1502         cp = sp->cmd;
1503         if (cp == NULL) {
1504                 qla_printk(KERN_WARNING, ha,
1505                     "scsi(%ld): Command already returned (0x%x/%p).\n",
1506                     vha->host_no, sts->handle, sp);
1507
1508                 return;
1509         }
1510
1511         lscsi_status = scsi_status & STATUS_MASK;
1512
1513         fcport = sp->fcport;
1514
1515         ox_id = 0;
1516         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1517         if (IS_FWI2_CAPABLE(ha)) {
1518                 if (scsi_status & SS_SENSE_LEN_VALID)
1519                         sense_len = le32_to_cpu(sts24->sense_len);
1520                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1521                         rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1522                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1523                         resid_len = le32_to_cpu(sts24->rsp_residual_count);
1524                 if (comp_status == CS_DATA_UNDERRUN)
1525                         fw_resid_len = le32_to_cpu(sts24->residual_len);
1526                 rsp_info = sts24->data;
1527                 sense_data = sts24->data;
1528                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1529                 ox_id = le16_to_cpu(sts24->ox_id);
1530         } else {
1531                 if (scsi_status & SS_SENSE_LEN_VALID)
1532                         sense_len = le16_to_cpu(sts->req_sense_length);
1533                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1534                         rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1535                 resid_len = le32_to_cpu(sts->residual_length);
1536                 rsp_info = sts->rsp_info;
1537                 sense_data = sts->req_sense_data;
1538         }
1539
1540         /* Check for any FCP transport errors. */
1541         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1542                 /* Sense data lies beyond any FCP RESPONSE data. */
1543                 if (IS_FWI2_CAPABLE(ha))
1544                         sense_data += rsp_info_len;
1545                 if (rsp_info_len > 3 && rsp_info[3]) {
1546                         DEBUG2(qla_printk(KERN_INFO, ha,
1547                             "scsi(%ld:%d:%d): FCP I/O protocol failure "
1548                             "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
1549                             cp->device->lun, rsp_info_len, rsp_info[3]));
1550
1551                         cp->result = DID_BUS_BUSY << 16;
1552                         goto out;
1553                 }
1554         }
1555
1556         /* Check for overrun. */
1557         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1558             scsi_status & SS_RESIDUAL_OVER)
1559                 comp_status = CS_DATA_OVERRUN;
1560
1561         /*
1562          * Based on Host and scsi status generate status code for Linux
1563          */
1564         switch (comp_status) {
1565         case CS_COMPLETE:
1566         case CS_QUEUE_FULL:
1567                 if (scsi_status == 0) {
1568                         cp->result = DID_OK << 16;
1569                         break;
1570                 }
1571                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1572                         resid = resid_len;
1573                         scsi_set_resid(cp, resid);
1574
1575                         if (!lscsi_status &&
1576                             ((unsigned)(scsi_bufflen(cp) - resid) <
1577                              cp->underflow)) {
1578                                 qla_printk(KERN_INFO, ha,
1579                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1580                                     "detected (0x%x of 0x%x bytes).\n",
1581                                     vha->host_no, cp->device->id,
1582                                     cp->device->lun, resid, scsi_bufflen(cp));
1583
1584                                 cp->result = DID_ERROR << 16;
1585                                 break;
1586                         }
1587                 }
1588                 cp->result = DID_OK << 16 | lscsi_status;
1589
1590                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1591                         DEBUG2(qla_printk(KERN_INFO, ha,
1592                             "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1593                             vha->host_no, cp->device->id, cp->device->lun));
1594                         break;
1595                 }
1596                 logit = 0;
1597                 if (lscsi_status != SS_CHECK_CONDITION)
1598                         break;
1599
1600                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1601                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1602                         break;
1603
1604                 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1605                 break;
1606
1607         case CS_DATA_UNDERRUN:
1608                 /* Use F/W calculated residual length. */
1609                 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1610                 scsi_set_resid(cp, resid);
1611                 if (scsi_status & SS_RESIDUAL_UNDER) {
1612                         if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1613                                 DEBUG2(qla_printk(KERN_INFO, ha,
1614                                     "scsi(%ld:%d:%d) Dropped frame(s) detected "
1615                                     "(0x%x of 0x%x bytes).\n", vha->host_no,
1616                                     cp->device->id, cp->device->lun, resid,
1617                                     scsi_bufflen(cp)));
1618
1619                                 cp->result = DID_ERROR << 16 | lscsi_status;
1620                                 break;
1621                         }
1622
1623                         if (!lscsi_status &&
1624                             ((unsigned)(scsi_bufflen(cp) - resid) <
1625                             cp->underflow)) {
1626                                 qla_printk(KERN_INFO, ha,
1627                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1628                                     "detected (0x%x of 0x%x bytes).\n",
1629                                     vha->host_no, cp->device->id,
1630                                     cp->device->lun, resid, scsi_bufflen(cp));
1631
1632                                 cp->result = DID_ERROR << 16;
1633                                 break;
1634                         }
1635                 } else if (!lscsi_status) {
1636                         DEBUG2(qla_printk(KERN_INFO, ha,
1637                             "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1638                             "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1639                             cp->device->lun, resid, scsi_bufflen(cp)));
1640
1641                         cp->result = DID_ERROR << 16;
1642                         break;
1643                 }
1644
1645                 cp->result = DID_OK << 16 | lscsi_status;
1646                 logit = 0;
1647
1648                 /*
1649                  * Check to see if SCSI Status is non zero. If so report SCSI
1650                  * Status.
1651                  */
1652                 if (lscsi_status != 0) {
1653                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1654                                 DEBUG2(qla_printk(KERN_INFO, ha,
1655                                     "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1656                                     vha->host_no, cp->device->id,
1657                                     cp->device->lun));
1658                                 logit = 1;
1659                                 break;
1660                         }
1661                         if (lscsi_status != SS_CHECK_CONDITION)
1662                                 break;
1663
1664                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1665                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1666                                 break;
1667
1668                         qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1669                 }
1670                 break;
1671
1672         case CS_PORT_LOGGED_OUT:
1673         case CS_PORT_CONFIG_CHG:
1674         case CS_PORT_BUSY:
1675         case CS_INCOMPLETE:
1676         case CS_PORT_UNAVAILABLE:
1677         case CS_TIMEOUT:
1678                 /*
1679                  * We are going to have the fc class block the rport
1680                  * while we try to recover so instruct the mid layer
1681                  * to requeue until the class decides how to handle this.
1682                  */
1683                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1684
1685                 if (comp_status == CS_TIMEOUT) {
1686                         if (IS_FWI2_CAPABLE(ha))
1687                                 break;
1688                         else if ((le16_to_cpu(sts->status_flags) &
1689                             SF_LOGOUT_SENT) == 0)
1690                                 break;
1691                 }
1692
1693                 DEBUG2(qla_printk(KERN_INFO, ha,
1694                         "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
1695                         vha->host_no, cp->device->id, cp->device->lun,
1696                         atomic_read(&fcport->state)));
1697
1698                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1699                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1700                 break;
1701
1702         case CS_RESET:
1703         case CS_ABORTED:
1704                 cp->result = DID_RESET << 16;
1705                 break;
1706
1707         case CS_DIF_ERROR:
1708                 qla2x00_handle_dif_error(sp, sts24);
1709                 break;
1710         default:
1711                 cp->result = DID_ERROR << 16;
1712                 break;
1713         }
1714
1715 out:
1716         if (logit)
1717                 DEBUG2(qla_printk(KERN_INFO, ha,
1718                     "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1719                     "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
1720                     "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1721                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1722                     cp->result, ox_id, cp->serial_number, cp->cmnd[0],
1723                     cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1724                     resid_len, fw_resid_len));
1725
1726         if (rsp->status_srb == NULL)
1727                 qla2x00_sp_compl(ha, sp);
1728 }
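
/*
 * Illustrative sketch, not part of the driver: the 32-bit completion
 * handle split above with LSW()/MSW() packs the outstanding-command
 * slot in the low word and the request-queue number in the high word.
 * The helpers below (invented example_* names) show that packing with
 * no driver state involved.
 */
static inline uint32_t
example_pack_handle(uint16_t que, uint16_t index)
{
	return ((uint32_t)que << 16) | index;
}

static inline void
example_unpack_handle(uint32_t handle, uint16_t *que, uint16_t *index)
{
	*que = (uint16_t)(handle >> 16);	/* MSW: request queue id  */
	*index = (uint16_t)(handle & 0xffff);	/* LSW: command slot index */
}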
1729
1730 /**
1731  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1732  * @rsp: response queue pointer
1733  * @pkt: Entry pointer
1734  *
1735  * Extended sense data.
1736  */
1737 static void
1738 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1739 {
1740         uint8_t         sense_sz = 0;
1741         struct qla_hw_data *ha = rsp->hw;
1742         srb_t           *sp = rsp->status_srb;
1743         struct scsi_cmnd *cp;
1744
1745         if (sp != NULL && sp->request_sense_length != 0) {
1746                 cp = sp->cmd;
1747                 if (cp == NULL) {
1748                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1749                             "sp=%p.\n", __func__, sp));
1750                         qla_printk(KERN_INFO, ha,
1751                             "cmd is NULL: already returned to OS (sp=%p)\n",
1752                             sp);
1753
1754                         rsp->status_srb = NULL;
1755                         return;
1756                 }
1757
1758                 if (sp->request_sense_length > sizeof(pkt->data)) {
1759                         sense_sz = sizeof(pkt->data);
1760                 } else {
1761                         sense_sz = sp->request_sense_length;
1762                 }
1763
1764                 /* Move sense data. */
1765                 if (IS_FWI2_CAPABLE(ha))
1766                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1767                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1768                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1769
1770                 sp->request_sense_ptr += sense_sz;
1771                 sp->request_sense_length -= sense_sz;
1772
1773                 /* Place command on done queue. */
1774                 if (sp->request_sense_length == 0) {
1775                         rsp->status_srb = NULL;
1776                         qla2x00_sp_compl(ha, sp);
1777                 }
1778         }
1779 }
1780
1781 /**
1782  * qla2x00_error_entry() - Process an error entry.
1783  * @vha: SCSI driver HA context
1784  * @pkt: Entry pointer
1785  */
1786 static void
1787 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1788 {
1789         srb_t *sp;
1790         struct qla_hw_data *ha = vha->hw;
1791         uint32_t handle = LSW(pkt->handle);
1792         uint16_t que = MSW(pkt->handle);
1793         struct req_que *req = ha->req_q_map[que];
1794 #if defined(QL_DEBUG_LEVEL_2)
1795         if (pkt->entry_status & RF_INV_E_ORDER)
1796                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1797         else if (pkt->entry_status & RF_INV_E_COUNT)
1798                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1799         else if (pkt->entry_status & RF_INV_E_PARAM)
1800                 qla_printk(KERN_ERR, ha,
1801                     "%s: Invalid Entry Parameter\n", __func__);
1802         else if (pkt->entry_status & RF_INV_E_TYPE)
1803                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1804         else if (pkt->entry_status & RF_BUSY)
1805                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1806         else
1807                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1808 #endif
1809
1810         /* Validate handle. */
1811         if (handle < MAX_OUTSTANDING_COMMANDS)
1812                 sp = req->outstanding_cmds[handle];
1813         else
1814                 sp = NULL;
1815
1816         if (sp) {
1817                 /* Free outstanding command slot. */
1818                 req->outstanding_cmds[handle] = NULL;
1819
1820                 /* Bad payload or header */
1821                 if (pkt->entry_status &
1822                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1823                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1824                         sp->cmd->result = DID_ERROR << 16;
1825                 } else if (pkt->entry_status & RF_BUSY) {
1826                         sp->cmd->result = DID_BUS_BUSY << 16;
1827                 } else {
1828                         sp->cmd->result = DID_ERROR << 16;
1829                 }
1830                 qla2x00_sp_compl(ha, sp);
1831
1832         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1833             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1834                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1835                     vha->host_no));
1836                 qla_printk(KERN_WARNING, ha,
1837                     "Error entry - invalid handle\n");
1838
1839                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1840                 qla2xxx_wake_dpc(vha);
1841         }
1842 }
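
/*
 * Illustrative sketch, not part of the driver: qla2x00_error_entry()
 * collapses the RF_* entry-status flags into two midlayer results --
 * malformed IOCBs become DID_ERROR and a busy indication becomes the
 * retryable DID_BUS_BUSY. The helper below (invented name) states that
 * mapping in the same order as the code above.
 */
static inline int
example_error_entry_result(uint8_t entry_status)
{
	if (entry_status & (RF_INV_E_ORDER | RF_INV_E_COUNT |
	    RF_INV_E_PARAM | RF_INV_E_TYPE))
		return DID_ERROR << 16;		/* bad header or payload */
	if (entry_status & RF_BUSY)
		return DID_BUS_BUSY << 16;	/* firmware busy, retry */
	return DID_ERROR << 16;			/* anything else */
}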
1843
1844 /**
1845  * qla24xx_mbx_completion() - Process mailbox command completions.
1846  * @vha: SCSI driver HA context
1847  * @mb0: Mailbox0 register
1848  */
1849 static void
1850 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1851 {
1852         uint16_t        cnt;
1853         uint16_t __iomem *wptr;
1854         struct qla_hw_data *ha = vha->hw;
1855         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1856
1857         /* Load return mailbox registers. */
1858         ha->flags.mbox_int = 1;
1859         ha->mailbox_out[0] = mb0;
1860         wptr = (uint16_t __iomem *)&reg->mailbox1;
1861
1862         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1863                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1864                 wptr++;
1865         }
1866
1867         if (ha->mcp) {
1868                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1869                     __func__, vha->host_no, ha->mcp->mb[0]));
1870         } else {
1871                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1872                     __func__, vha->host_no));
1873         }
1874 }
1875
1876 /**
1877  * qla24xx_process_response_queue() - Process response queue entries.
1878  * @vha: SCSI driver HA context
1879  */
1880 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1881         struct rsp_que *rsp)
1882 {
1883         struct sts_entry_24xx *pkt;
1884         struct qla_hw_data *ha = vha->hw;
1885
1886         if (!vha->flags.online)
1887                 return;
1888
1889         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1890                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1891
1892                 rsp->ring_index++;
1893                 if (rsp->ring_index == rsp->length) {
1894                         rsp->ring_index = 0;
1895                         rsp->ring_ptr = rsp->ring;
1896                 } else {
1897                         rsp->ring_ptr++;
1898                 }
1899
1900                 if (pkt->entry_status != 0) {
1901                         DEBUG3(printk(KERN_INFO
1902                             "scsi(%ld): Process error entry.\n", vha->host_no));
1903
1904                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1905                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1906                         wmb();
1907                         continue;
1908                 }
1909
1910                 switch (pkt->entry_type) {
1911                 case STATUS_TYPE:
1912                         qla2x00_status_entry(vha, rsp, pkt);
1913                         break;
1914                 case STATUS_CONT_TYPE:
1915                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1916                         break;
1917                 case VP_RPT_ID_IOCB_TYPE:
1918                         qla24xx_report_id_acquisition(vha,
1919                             (struct vp_rpt_id_entry_24xx *)pkt);
1920                         break;
1921                 case LOGINOUT_PORT_IOCB_TYPE:
1922                         qla24xx_logio_entry(vha, rsp->req,
1923                             (struct logio_entry_24xx *)pkt);
1924                         break;
1925                 case TSK_MGMT_IOCB_TYPE:
1926                         qla24xx_tm_iocb_entry(vha, rsp->req,
1927                             (struct tsk_mgmt_entry *)pkt);
1928                         break;
1929                 case MARKER_TYPE:
1930                         qla24xx_marker_iocb_entry(vha, rsp->req,
1931                             (struct mrk_entry_24xx *)pkt);
1932                         break;
1933                 case CT_IOCB_TYPE:
1934                         qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1935                         clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
1936                         break;
1937                 case ELS_IOCB_TYPE:
1938                         qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
1939                         break;
1940                 default:
1941                         /* Type Not Supported. */
1942                         DEBUG4(printk(KERN_WARNING
1943                             "scsi(%ld): Received unknown response pkt type %x "
1944                             "entry status=%x.\n",
1945                             vha->host_no, pkt->entry_type, pkt->entry_status));
1946                         break;
1947                 }
1948                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1949                 wmb();
1950         }
1951
1952         /* Adjust ring index */
1953         if (IS_QLA82XX(ha)) {
1954                 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1955                 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
1956         } else
1957                 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
1958 }
1959
1960 static void
1961 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1962 {
1963         int rval;
1964         uint32_t cnt;
1965         struct qla_hw_data *ha = vha->hw;
1966         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1967
1968         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1969                 return;
1970
1971         rval = QLA_SUCCESS;
1972         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1973         RD_REG_DWORD(&reg->iobase_addr);
1974         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1975         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1976             rval == QLA_SUCCESS; cnt--) {
1977                 if (cnt) {
1978                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1979                         udelay(10);
1980                 } else
1981                         rval = QLA_FUNCTION_TIMEOUT;
1982         }
1983         if (rval == QLA_SUCCESS)
1984                 goto next_test;
1985
1986         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1987         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1988             rval == QLA_SUCCESS; cnt--) {
1989                 if (cnt) {
1990                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1991                         udelay(10);
1992                 } else
1993                         rval = QLA_FUNCTION_TIMEOUT;
1994         }
1995         if (rval != QLA_SUCCESS)
1996                 goto done;
1997
1998 next_test:
1999         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2000                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
2001
2002 done:
2003         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2004         RD_REG_DWORD(&reg->iobase_window);
2005 }
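
/*
 * Illustrative sketch, not part of the driver: qla2xxx_check_risc_status()
 * uses a bounded poll -- re-arm the window register, wait for BIT_0 to
 * come back, and give up after a fixed number of 10us delays. The
 * generic helper below (invented name) captures that pattern with a
 * caller-supplied predicate standing in for the register read.
 */
static inline int
example_poll_ready(int (*ready)(void *arg), void *arg, uint32_t attempts)
{
	while (attempts--) {
		if (ready(arg))
			return QLA_SUCCESS;
		udelay(10);	/* same 10us spacing as the loops above */
	}
	return QLA_FUNCTION_TIMEOUT;
}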
2006
2007 /**
2008  * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
2009  * @irq: interrupt number
2010  * @dev_id: SCSI driver HA context
2011  *
2012  * Called by system whenever the host adapter generates an interrupt.
2013  *
2014  * Returns handled flag.
2015  */
2016 irqreturn_t
2017 qla24xx_intr_handler(int irq, void *dev_id)
2018 {
2019         scsi_qla_host_t *vha;
2020         struct qla_hw_data *ha;
2021         struct device_reg_24xx __iomem *reg;
2022         int             status;
2023         unsigned long   iter;
2024         uint32_t        stat;
2025         uint32_t        hccr;
2026         uint16_t        mb[4];
2027         struct rsp_que *rsp;
2028         unsigned long   flags;
2029
2030         rsp = (struct rsp_que *) dev_id;
2031         if (!rsp) {
2032                 printk(KERN_INFO
2033                     "%s(): NULL response queue pointer\n", __func__);
2034                 return IRQ_NONE;
2035         }
2036
2037         ha = rsp->hw;
2038         reg = &ha->iobase->isp24;
2039         status = 0;
2040
2041         if (unlikely(pci_channel_offline(ha->pdev)))
2042                 return IRQ_HANDLED;
2043
2044         spin_lock_irqsave(&ha->hardware_lock, flags);
2045         vha = pci_get_drvdata(ha->pdev);
2046         for (iter = 50; iter--; ) {
2047                 stat = RD_REG_DWORD(&reg->host_status);
2048                 if (stat & HSRX_RISC_PAUSED) {
2049                         if (unlikely(pci_channel_offline(ha->pdev)))
2050                                 break;
2051
2052                         hccr = RD_REG_DWORD(&reg->hccr);
2053
2054                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2055                             "Dumping firmware!\n", hccr);
2056
2057                         qla2xxx_check_risc_status(vha);
2058
2059                         ha->isp_ops->fw_dump(vha, 1);
2060                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2061                         break;
2062                 } else if ((stat & HSRX_RISC_INT) == 0)
2063                         break;
2064
2065                 switch (stat & 0xff) {
2066                 case 0x1:
2067                 case 0x2:
2068                 case 0x10:
2069                 case 0x11:
2070                         qla24xx_mbx_completion(vha, MSW(stat));
2071                         status |= MBX_INTERRUPT;
2072
2073                         break;
2074                 case 0x12:
2075                         mb[0] = MSW(stat);
2076                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2077                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2078                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2079                         qla2x00_async_event(vha, rsp, mb);
2080                         break;
2081                 case 0x13:
2082                 case 0x14:
2083                         qla24xx_process_response_queue(vha, rsp);
2084                         break;
2085                 default:
2086                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2087                             "(%d).\n",
2088                             vha->host_no, stat & 0xff));
2089                         break;
2090                 }
2091                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2092                 RD_REG_DWORD_RELAXED(&reg->hccr);
2093         }
2094         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2095
2096         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2097             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2098                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2099                 complete(&ha->mbx_intr_comp);
2100         }
2101
2102         return IRQ_HANDLED;
2103 }
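
/*
 * Illustrative sketch, not part of the driver: on FWI2-capable parts the
 * 32-bit host_status value read above multiplexes two fields -- the low
 * byte is the interrupt source code driving the switch (0x1/0x2/0x10/
 * 0x11 mailbox completion, 0x12 async event, 0x13/0x14 response queue)
 * and the high word carries mailbox 0 for the mailbox/async cases. The
 * helpers below (invented names) make that split explicit.
 */
static inline uint8_t
example_intr_source(uint32_t stat)
{
	return (uint8_t)(stat & 0xff);		/* interrupt source code */
}

static inline uint16_t
example_intr_mb0(uint32_t stat)
{
	return (uint16_t)(stat >> 16);		/* MSW(stat): mailbox 0 */
}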
2104
2105 static irqreturn_t
2106 qla24xx_msix_rsp_q(int irq, void *dev_id)
2107 {
2108         struct qla_hw_data *ha;
2109         struct rsp_que *rsp;
2110         struct device_reg_24xx __iomem *reg;
2111         struct scsi_qla_host *vha;
2112         unsigned long flags;
2113
2114         rsp = (struct rsp_que *) dev_id;
2115         if (!rsp) {
2116                 printk(KERN_INFO
2117                 "%s(): NULL response queue pointer\n", __func__);
2118                 return IRQ_NONE;
2119         }
2120         ha = rsp->hw;
2121         reg = &ha->iobase->isp24;
2122
2123         spin_lock_irqsave(&ha->hardware_lock, flags);
2124
2125         vha = pci_get_drvdata(ha->pdev);
2126         qla24xx_process_response_queue(vha, rsp);
2127         if (!ha->flags.disable_msix_handshake) {
2128                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2129                 RD_REG_DWORD_RELAXED(&reg->hccr);
2130         }
2131         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2132
2133         return IRQ_HANDLED;
2134 }
2135
2136 static irqreturn_t
2137 qla25xx_msix_rsp_q(int irq, void *dev_id)
2138 {
2139         struct qla_hw_data *ha;
2140         struct rsp_que *rsp;
2141         struct device_reg_24xx __iomem *reg;
2142         unsigned long flags;
2143
2144         rsp = (struct rsp_que *) dev_id;
2145         if (!rsp) {
2146                 printk(KERN_INFO
2147                         "%s(): NULL response queue pointer\n", __func__);
2148                 return IRQ_NONE;
2149         }
2150         ha = rsp->hw;
2151
2152         /* Clear the interrupt, if enabled, for this response queue */
2153         if (rsp->options & ~BIT_6) {
2154                 reg = &ha->iobase->isp24;
2155                 spin_lock_irqsave(&ha->hardware_lock, flags);
2156                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2157                 RD_REG_DWORD_RELAXED(&reg->hccr);
2158                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2159         }
2160         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2161
2162         return IRQ_HANDLED;
2163 }
2164
2165 static irqreturn_t
2166 qla24xx_msix_default(int irq, void *dev_id)
2167 {
2168         scsi_qla_host_t *vha;
2169         struct qla_hw_data *ha;
2170         struct rsp_que *rsp;
2171         struct device_reg_24xx __iomem *reg;
2172         int             status;
2173         uint32_t        stat;
2174         uint32_t        hccr;
2175         uint16_t        mb[4];
2176         unsigned long flags;
2177
2178         rsp = (struct rsp_que *) dev_id;
2179         if (!rsp) {
2180                 DEBUG(printk(
2181                 "%s(): NULL response queue pointer\n", __func__));
2182                 return IRQ_NONE;
2183         }
2184         ha = rsp->hw;
2185         reg = &ha->iobase->isp24;
2186         status = 0;
2187
2188         spin_lock_irqsave(&ha->hardware_lock, flags);
2189         vha = pci_get_drvdata(ha->pdev);
2190         do {
2191                 stat = RD_REG_DWORD(&reg->host_status);
2192                 if (stat & HSRX_RISC_PAUSED) {
2193                         if (unlikely(pci_channel_offline(ha->pdev)))
2194                                 break;
2195
2196                         hccr = RD_REG_DWORD(&reg->hccr);
2197
2198                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2199                             "Dumping firmware!\n", hccr);
2200
2201                         qla2xxx_check_risc_status(vha);
2202
2203                         ha->isp_ops->fw_dump(vha, 1);
2204                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2205                         break;
2206                 } else if ((stat & HSRX_RISC_INT) == 0)
2207                         break;
2208
2209                 switch (stat & 0xff) {
2210                 case 0x1:
2211                 case 0x2:
2212                 case 0x10:
2213                 case 0x11:
2214                         qla24xx_mbx_completion(vha, MSW(stat));
2215                         status |= MBX_INTERRUPT;
2216
2217                         break;
2218                 case 0x12:
2219                         mb[0] = MSW(stat);
2220                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2221                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2222                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2223                         qla2x00_async_event(vha, rsp, mb);
2224                         break;
2225                 case 0x13:
2226                 case 0x14:
2227                         qla24xx_process_response_queue(vha, rsp);
2228                         break;
2229                 default:
2230                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2231                             "(%d).\n",
2232                             vha->host_no, stat & 0xff));
2233                         break;
2234                 }
2235                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2236         } while (0);
2237         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2238
2239         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2240             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2241                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2242                 complete(&ha->mbx_intr_comp);
2243         }
2244         return IRQ_HANDLED;
2245 }
2246
2247 /* Interrupt handling helpers. */
2248
2249 struct qla_init_msix_entry {
2250         const char *name;
2251         irq_handler_t handler;
2252 };
2253
2254 static struct qla_init_msix_entry msix_entries[3] = {
2255         { "qla2xxx (default)", qla24xx_msix_default },
2256         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2257         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2258 };
2259
2260 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2261         { "qla2xxx (default)", qla82xx_msix_default },
2262         { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2263 };
2264
2265 static void
2266 qla24xx_disable_msix(struct qla_hw_data *ha)
2267 {
2268         int i;
2269         struct qla_msix_entry *qentry;
2270
2271         for (i = 0; i < ha->msix_count; i++) {
2272                 qentry = &ha->msix_entries[i];
2273                 if (qentry->have_irq)
2274                         free_irq(qentry->vector, qentry->rsp);
2275         }
2276         pci_disable_msix(ha->pdev);
2277         kfree(ha->msix_entries);
2278         ha->msix_entries = NULL;
2279         ha->flags.msix_enabled = 0;
2280 }
2281
2282 static int
2283 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2284 {
2285 #define MIN_MSIX_COUNT  2
2286         int i, ret;
2287         struct msix_entry *entries;
2288         struct qla_msix_entry *qentry;
2289
2290         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2291                         GFP_KERNEL);
2292         if (!entries)
2293                 return -ENOMEM;
2294
2295         for (i = 0; i < ha->msix_count; i++)
2296                 entries[i].entry = i;
2297
2298         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2299         if (ret) {
2300                 if (ret < MIN_MSIX_COUNT)
2301                         goto msix_failed;
2302
2303                 qla_printk(KERN_WARNING, ha,
2304                         "MSI-X: Failed to enable support -- %d/%d\n"
2305                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
2306                 ha->msix_count = ret;
2307                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2308                 if (ret) {
2309 msix_failed:
2310                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
2311                                 " support, giving up -- %d/%d\n",
2312                                 ha->msix_count, ret);
2313                         goto msix_out;
2314                 }
2315                 ha->max_rsp_queues = ha->msix_count - 1;
2316         }
2317         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2318                                 ha->msix_count, GFP_KERNEL);
2319         if (!ha->msix_entries) {
2320                 ret = -ENOMEM;
2321                 goto msix_out;
2322         }
2323         ha->flags.msix_enabled = 1;
2324
2325         for (i = 0; i < ha->msix_count; i++) {
2326                 qentry = &ha->msix_entries[i];
2327                 qentry->vector = entries[i].vector;
2328                 qentry->entry = entries[i].entry;
2329                 qentry->have_irq = 0;
2330                 qentry->rsp = NULL;
2331         }
2332
2333         /* Enable MSI-X vectors for the base queue */
2334         for (i = 0; i < 2; i++) {
2335                 qentry = &ha->msix_entries[i];
2336                 if (IS_QLA82XX(ha)) {
2337                         ret = request_irq(qentry->vector,
2338                                 qla82xx_msix_entries[i].handler,
2339                                 0, qla82xx_msix_entries[i].name, rsp);
2340                 } else {
2341                         ret = request_irq(qentry->vector,
2342                                 msix_entries[i].handler,
2343                                 0, msix_entries[i].name, rsp);
2344                 }
2345                 if (ret) {
2346                         qla_printk(KERN_WARNING, ha,
2347                         "MSI-X: Unable to register handler -- %x/%d.\n",
2348                         qentry->vector, ret);
2349                         qla24xx_disable_msix(ha);
2350                         ha->mqenable = 0;
2351                         goto msix_out;
2352                 }
2353                 qentry->have_irq = 1;
2354                 qentry->rsp = rsp;
2355                 rsp->msix = qentry;
2356         }
2357
2358         /* Enable MSI-X vector for response queue update for queue 0 */
2359         if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2360                 ha->mqenable = 1;
2361
2362 msix_out:
2363         kfree(entries);
2364         return ret;
2365 }
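
/*
 * Illustrative sketch, not part of the driver: pci_enable_msix() of this
 * era returns a positive count when fewer vectors are available than
 * requested, and the code above retries once with that smaller count as
 * long as it still meets MIN_MSIX_COUNT. The helper below (invented
 * name) isolates that shrink-and-retry decision.
 */
static inline int
example_msix_retry_count(int rc, int minimum)
{
	/* rc > 0: only 'rc' vectors available; rc < 0: hard failure. */
	if (rc >= minimum)
		return rc;	/* retry once with the reduced vector count */
	return 0;		/* below the floor -- give up on MSI-X */
}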
2366
2367 int
2368 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2369 {
2370         int ret;
2371         device_reg_t __iomem *reg = ha->iobase;
2372
2373         /* If possible, enable MSI-X. */
2374         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2375                 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
2376                 goto skip_msi;
2377
2378         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2379                 (ha->pdev->subsystem_device == 0x7040 ||
2380                 ha->pdev->subsystem_device == 0x7041 ||
2381                 ha->pdev->subsystem_device == 0x1705)) {
2382                 DEBUG2(qla_printk(KERN_WARNING, ha,
2383                         "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
2384                         ha->pdev->subsystem_vendor,
2385                         ha->pdev->subsystem_device));
2386                 goto skip_msi;
2387         }
2388
2389         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2390                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2391                 DEBUG2(qla_printk(KERN_WARNING, ha,
2392                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2393                         ha->pdev->revision, ha->fw_attributes));
2394                 goto skip_msix;
2395         }
2396
2397         ret = qla24xx_enable_msix(ha, rsp);
2398         if (!ret) {
2399                 DEBUG2(qla_printk(KERN_INFO, ha,
2400                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2401                     ha->fw_attributes));
2402                 goto clear_risc_ints;
2403         }
2404         qla_printk(KERN_WARNING, ha,
2405             "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2406 skip_msix:
2407
2408         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2409             !IS_QLA8001(ha))
2410                 goto skip_msi;
2411
2412         ret = pci_enable_msi(ha->pdev);
2413         if (!ret) {
2414                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2415                 ha->flags.msi_enabled = 1;
2416         } else
2417                 qla_printk(KERN_WARNING, ha,
2418                     "MSI-X: Falling back to INTa mode -- %d.\n", ret);
2419 skip_msi:
2420
2421         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2422             IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2423         if (ret) {
2424                 qla_printk(KERN_WARNING, ha,
2425                     "Failed to reserve interrupt %d -- already in use.\n",
2426                     ha->pdev->irq);
2427                 goto fail;
2428         }
2429         ha->flags.inta_enabled = 1;
2430 clear_risc_ints:
2431
2432         /*
2433          * FIXME: Noted that 8014s were being dropped during NK testing.
2434          * Timing deltas during MSI-X/INTa transitions?
2435          */
2436         if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2437                 goto fail;
2438         spin_lock_irq(&ha->hardware_lock);
2439         if (IS_FWI2_CAPABLE(ha)) {
2440                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2441                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2442         } else {
2443                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2444                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2445                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2446         }
2447         spin_unlock_irq(&ha->hardware_lock);
2448
2449 fail:
2450         return ret;
2451 }
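
/*
 * Illustrative sketch, not part of the driver: qla2x00_request_irqs()
 * tries interrupt modes in a fixed order -- MSI-X where the chip and
 * firmware support it, then plain MSI, then a shared legacy INTa line.
 * The helper below (invented names, including the example_irq_mode
 * enum) simply states that preference given two capability flags.
 */
enum example_irq_mode { EXAMPLE_IRQ_MSIX, EXAMPLE_IRQ_MSI, EXAMPLE_IRQ_INTA };

static inline enum example_irq_mode
example_pick_irq_mode(int msix_ok, int msi_ok)
{
	if (msix_ok)
		return EXAMPLE_IRQ_MSIX;	/* per-queue vectors */
	if (msi_ok)
		return EXAMPLE_IRQ_MSI;		/* single message interrupt */
	return EXAMPLE_IRQ_INTA;		/* shared legacy interrupt */
}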
2452
2453 void
2454 qla2x00_free_irqs(scsi_qla_host_t *vha)
2455 {
2456         struct qla_hw_data *ha = vha->hw;
2457         struct rsp_que *rsp = ha->rsp_q_map[0];
2458
2459         if (ha->flags.msix_enabled)
2460                 qla24xx_disable_msix(ha);
2461         else if (ha->flags.msi_enabled) {
2462                 free_irq(ha->pdev->irq, rsp);
2463                 pci_disable_msi(ha->pdev);
2464         } else
2465                 free_irq(ha->pdev->irq, rsp);
2466 }
2467
2468
2469 int qla25xx_request_irq(struct rsp_que *rsp)
2470 {
2471         struct qla_hw_data *ha = rsp->hw;
2472         struct qla_init_msix_entry *intr = &msix_entries[2];
2473         struct qla_msix_entry *msix = rsp->msix;
2474         int ret;
2475
2476         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2477         if (ret) {
2478                 qla_printk(KERN_WARNING, ha,
2479                         "MSI-X: Unable to register handler -- %x/%d.\n",
2480                         msix->vector, ret);
2481                 return ret;
2482         }
2483         msix->have_irq = 1;
2484         msix->rsp = rsp;
2485         return ret;
2486 }