drivers/staging/crystalhd/crystalhd_hw.c
1 /***************************************************************************
2  * Copyright (c) 2005-2009, Broadcom Corporation.
3  *
4  *  Name: crystalhd_hw.c
5  *
6  *  Description:
7  *              BCM70010 Linux driver HW layer.
8  *
9  **********************************************************************
10  * This file is part of the crystalhd device driver.
11  *
12  * This driver is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation, version 2 of the License.
15  *
16  * This driver is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this driver.  If not, see <http://www.gnu.org/licenses/>.
23  **********************************************************************/
24
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include "crystalhd_hw.h"
28
29 /* Functions internal to this file */
30
31 static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
32 {
33         bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
34         bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
35 }
36
37
38 static void crystalhd_start_dram(struct crystalhd_adp *adp)
39 {
40         bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) <<  0) |
41                       /* tras: (40ns tras)/(5ns period) - 1; trcd term "((15/5 - 1) << 4)" not applied */
42                       ((15 / 5 - 1) <<  7) |    /* trp */
43                       ((10 / 5 - 1) << 10) |    /* trrd */
44                       ((15 / 5 + 1) << 12) |    /* twr */
45                       ((2 + 1) << 16) |         /* twtr */
46                       ((70 / 5 - 2) << 19) |    /* trfc */
47                       (0 << 23));
48
49         bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
50         bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
51         bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
52         bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
53         bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
54         bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
55         bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
56         /* setting the refresh rate here */
57         bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
58 }
59
60
61 static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
62 {
63         link_misc_perst_deco_ctrl rst_deco_cntrl;
64         link_misc_perst_clk_ctrl rst_clk_cntrl;
65         uint32_t temp;
66
67         /*
68          * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
69          * delay to allow PLL to lock Clear alternate clock, stop clock bits
70          */
71         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
72         rst_clk_cntrl.pll_pwr_dn = 0;
73         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
74         msleep_interruptible(50);
75
76         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
77         rst_clk_cntrl.stop_core_clk = 0;
78         rst_clk_cntrl.sel_alt_clk = 0;
79
80         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
81         msleep_interruptible(50);
82
83         /*
84          * Bus Arbiter Timeout: GISB_ARBITER_TIMER
85          * Set internal bus arbiter timeout to 40us based on core clock speed
86          * (63MHz * 40us = 0x9D8)
87          */
88         crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
89
90         /*
91          * Decoder clocks: MISC_PERST_DECODER_CTRL
92          * Enable clocks while 7412 reset is asserted, delay
93          * De-assert 7412 reset
94          */
95         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
96         rst_deco_cntrl.stop_bcm_7412_clk = 0;
97         rst_deco_cntrl.bcm7412_rst = 1;
98         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
99         msleep_interruptible(10);
100
101         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
102         rst_deco_cntrl.bcm7412_rst = 0;
103         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
104         msleep_interruptible(50);
105
106         /* Write 0 to OTP_CONTENT_MISC to disable all secure modes */
107         crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
108
109         /* Clear bit 29 of 0x404 */
110         temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
111         temp &= ~BC_BIT(29);
112         crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
113
114         /* 2.5V regulator must be set to 2.6 volts (+6%) */
115         /* FIXME: jarod: what's the point of this reg read? */
116         temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
117         crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
118
119         return true;
120 }
121
122 static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
123 {
124         link_misc_perst_deco_ctrl rst_deco_cntrl;
125         link_misc_perst_clk_ctrl  rst_clk_cntrl;
126         uint32_t                  temp;
127
128         /*
129          * Decoder clocks: MISC_PERST_DECODER_CTRL
130          * Assert 7412 reset, delay
131          * Assert 7412 stop clock
132          */
133         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
134         rst_deco_cntrl.stop_bcm_7412_clk = 1;
135         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
136         msleep_interruptible(50);
137
138         /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
139          * Set internal bus arbiter timeout to 40us based on core clock speed
140          * (6.75MHz * 40us = 0x10E)
141          */
142         crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
143
144         /* Link clocks: MISC_PERST_CLOCK_CTRL
145          * Stop core clk, delay
146          * Set alternate clk, delay, set PLL power down
147          */
148         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
149         rst_clk_cntrl.stop_core_clk = 1;
150         rst_clk_cntrl.sel_alt_clk = 1;
151         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
152         msleep_interruptible(50);
153
154         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
155         rst_clk_cntrl.pll_pwr_dn = 1;
156         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
157
158         /*
159          * Read and restore the Transaction Configuration Register
160          * after core reset
161          */
162         temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
163
164         /*
165          * Link core soft reset: MISC3_RESET_CTRL
166          * - Write BIT[0]=1 and read it back for core reset to take place
167          */
168         crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
169         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
170         msleep_interruptible(50);
171
172         /* restore the transaction configuration register */
173         crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
174
175         return true;
176 }
177
178 static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
179 {
180         intr_mask_reg   intr_mask;
181         intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
182         intr_mask.mask_pcie_err = 1;
183         intr_mask.mask_pcie_rbusmast_err = 1;
184         intr_mask.mask_pcie_rgr_bridge   = 1;
185         intr_mask.mask_rx_done = 1;
186         intr_mask.mask_rx_err  = 1;
187         intr_mask.mask_tx_done = 1;
188         intr_mask.mask_tx_err  = 1;
189         crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
190
191         return;
192 }
193
194 static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
195 {
196         intr_mask_reg   intr_mask;
197         intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
198         intr_mask.mask_pcie_err = 1;
199         intr_mask.mask_pcie_rbusmast_err = 1;
200         intr_mask.mask_pcie_rgr_bridge   = 1;
201         intr_mask.mask_rx_done = 1;
202         intr_mask.mask_rx_err  = 1;
203         intr_mask.mask_tx_done = 1;
204         intr_mask.mask_tx_err  = 1;
205         crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
206
207         return;
208 }
209
210 static void crystalhd_clear_errors(struct crystalhd_adp *adp)
211 {
212         uint32_t reg;
213
214         /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */
215         reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
216         if (reg)
217                 crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
218
219         reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
220         if (reg)
221                 crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
222
223         reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
224         if (reg)
225                 crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
226 }
227
228 static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
229 {
230         uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
231
232         if (intr_sts) {
233                 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
234
235                 /* Write End Of Interrupt for PCIE */
236                 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
237         }
238 }
239
240 static void crystalhd_soft_rst(struct crystalhd_adp *adp)
241 {
242         uint32_t val;
243
244         /* Assert c011 soft reset*/
245         bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
246         msleep_interruptible(50);
247
248         /* Release c011 soft reset*/
249         bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
250
251         /* Disable Stuffing..*/
252         val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
253         val |= BC_BIT(8);
254         crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
255 }
256
257 static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
258 {
259         uint32_t i = 0, reg;
260
261         crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
262
263         crystalhd_reg_wr(adp, AES_CMD, 0);
264         crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
265         crystalhd_reg_wr(adp, AES_CMD, 0x1);
266
267         /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
268         for (i = 0; i < 100; ++i) {
269                 reg = crystalhd_reg_rd(adp, AES_STATUS);
270                 if (reg & 0x1)
271                         return true;
272                 msleep_interruptible(10);
273         }
274
275         return false;
276 }
277
278
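/*
 * Device bring-up sequence (summarizing the steps below): disable ASPM L1
 * on the PCIe link, bring the link block out of reset, clear stale errors
 * and interrupts, enable interrupts, turn on the RXDMA DWORD-count debug
 * option and the MISC2 global control bits, then assert/release the c011
 * soft reset, initialize DRAM and enable the UARTs.
 */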
279 static bool crystalhd_start_device(struct crystalhd_adp *adp)
280 {
281         uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
282
283         BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
284
285         reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
286         reg_pwrmgmt &= ~ASPM_L1_ENABLE;
287
288         crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
289
290         if (!crystalhd_bring_out_of_rst(adp)) {
291                 BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
292                 return false;
293         }
294
295         crystalhd_disable_interrupts(adp);
296
297         crystalhd_clear_errors(adp);
298
299         crystalhd_clear_interrupts(adp);
300
301         crystalhd_enable_interrupts(adp);
302
303         /* Enable the option for getting the total no. of DWORDS
304          * that have been transferred by the RXDMA engine
305          */
306         dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
307         dbg_options |= 0x10;
308         crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
309
310         /* Enable PCI Global Control options */
311         glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
312         glb_cntrl |= 0x100;
313         glb_cntrl |= 0x8000;
314         crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
315
316         crystalhd_enable_interrupts(adp);
317
318         crystalhd_soft_rst(adp);
319         crystalhd_start_dram(adp);
320         crystalhd_enable_uarts(adp);
321
322         return true;
323 }
324
325 static bool crystalhd_stop_device(struct crystalhd_adp *adp)
326 {
327         uint32_t reg;
328
329         BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
330         /* Clear and disable interrupts */
331         crystalhd_disable_interrupts(adp);
332         crystalhd_clear_errors(adp);
333         crystalhd_clear_interrupts(adp);
334
335         if (!crystalhd_put_in_reset(adp))
336                 BCMLOG_ERR("Failed to Put Link To Reset State\n");
337
338         reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
339         reg |= ASPM_L1_ENABLE;
340         crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
341
342         /* Set PCI Clk Req */
343         reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
344         reg |= PCI_CLK_REQ_ENABLE;
345         crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
346
347         return true;
348 }
349
350 static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
351 {
352         unsigned long flags = 0;
353         crystalhd_rx_dma_pkt *temp = NULL;
354
355         if (!hw)
356                 return NULL;
357
358         spin_lock_irqsave(&hw->lock, flags);
359         temp = hw->rx_pkt_pool_head;
360         if (temp) {
361                 hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
362                 temp->dio_req = NULL;
363                 temp->pkt_tag = 0;
364                 temp->flags = 0;
365         }
366         spin_unlock_irqrestore(&hw->lock, flags);
367
368         return temp;
369 }
370
371 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
372                                    crystalhd_rx_dma_pkt *pkt)
373 {
374         unsigned long flags = 0;
375
376         if (!hw || !pkt)
377                 return;
378
379         spin_lock_irqsave(&hw->lock, flags);
380         pkt->next = hw->rx_pkt_pool_head;
381         hw->rx_pkt_pool_head = pkt;
382         spin_unlock_irqrestore(&hw->lock, flags);
383 }
384
385 /*
386  * Callback from TX - IOQ deletion.
387  *
388  * This routine will release the TX DMA rings allocated
389  * during the setup_dma rings interface.
390  *
391  * Memory is allocated on a per-DMA-ring basis, so this is just
392  * a placeholder to be able to create the dio queues.
393  */
394 static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
395 {
396 }
397
398 /*
399  * Rx Packet release callback..
400  *
401  * Release All user mapped capture buffers and Our DMA packets
402  * back to our free pool. The actual cleanup of the DMA
403  * ring descriptors happen during dma ring release.
404  */
405 static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
406 {
407         struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
408         crystalhd_rx_dma_pkt *pkt = (crystalhd_rx_dma_pkt *)data;
409
410         if (!pkt || !hw) {
411                 BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
412                 return;
413         }
414
415         if (pkt->dio_req)
416                 crystalhd_unmap_dio(hw->adp, pkt->dio_req);
417         else
418                 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
419
420         crystalhd_hw_free_rx_pkt(hw, pkt);
421 }
422
423 #define crystalhd_hw_delete_ioq(adp, q)         \
424         if (q) {                                \
425                 crystalhd_delete_dioq(adp, q);  \
426                 q = NULL;                       \
427         }
428
429 static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
430 {
431         if (!hw)
432                 return;
433
434         BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
435         crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
436         crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
437         crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
438         crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
439         crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
440 }
441
442 #define crystalhd_hw_create_ioq(sts, hw, q, cb)                 \
443 do {                                                            \
444         sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);       \
445         if (sts != BC_STS_SUCCESS)                              \
446                 goto hw_create_ioq_err;                         \
447 } while (0)
448
449 /*
450  * Create IOQs..
451  *
452  * TX - Active & Free
453  * RX - Active, Ready and Free.
454  */
455 static BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw   *hw)
456 {
457         BC_STATUS   sts = BC_STS_SUCCESS;
458
459         if (!hw) {
460                 BCMLOG_ERR("Invalid Arg!!\n");
461                 return BC_STS_INV_ARG;
462         }
463
464         crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
465                               crystalhd_tx_desc_rel_call_back);
466         crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
467                               crystalhd_tx_desc_rel_call_back);
468
469         crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
470                               crystalhd_rx_pkt_rel_call_back);
471         crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
472                               crystalhd_rx_pkt_rel_call_back);
473         crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
474                               crystalhd_rx_pkt_rel_call_back);
475
476         return sts;
477
478 hw_create_ioq_err:
479         crystalhd_hw_delete_ioqs(hw);
480
481         return sts;
482 }
483
484
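/*
 * crystalhd_code_in_full() reports whether the selected coded picture
 * buffer (CPB) cannot accept 'needed_sz' more bytes.  Fullness is derived
 * from the hardware base/end/read/write pointers, handling write-pointer
 * wrap-around, and BC_INFIFO_THRESHOLD bytes are always kept in reserve.
 * Worked example with base=0x1000, end=0x2000, readp=0x1800, writep=0x1200:
 *   cpbFullness = (end - base) - (readp - writep) = 0x1000 - 0x600 = 0xA00
 *   fifoSize    = cpbSize - cpbFullness           = 0x1000 - 0xA00 = 0x600
 * so the request is refused unless needed_sz <= fifoSize - BC_INFIFO_THRESHOLD.
 */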
485 static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
486                                  bool b_188_byte_pkts,  uint8_t flags)
487 {
488         uint32_t base, end, writep, readp;
489         uint32_t cpbSize, cpbFullness, fifoSize;
490
491         if (flags & 0x02) { /* ASF Bit is set */
492                 base   = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
493                 end    = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
494                 writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
495                 readp  = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
496         } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
497                 base   = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
498                 end    = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
499                 writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
500                 readp  = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
501         } else {
502                 base   = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
503                 end    = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
504                 writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
505                 readp  = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
506         }
507
508         cpbSize = end - base;
509         if (writep >= readp)
510                 cpbFullness = writep - readp;
511         else
512                 cpbFullness = (end - base) - (readp - writep);
513
514         fifoSize = cpbSize - cpbFullness;
515
516         if (fifoSize < BC_INFIFO_THRESHOLD)
517                 return true;
518
519         if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
520                 return true;
521
522         return false;
523 }
524
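/*
 * Completion path for a posted TX list: decrement hw->pwr_lock, pull the
 * matching tx_dma_pkt off the active queue by its list tag, fire the
 * caller's completion callback with status 'cs', then return the packet
 * to the TX free queue.
 */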
525 static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
526                                             uint32_t list_id, BC_STATUS cs)
527 {
528         tx_dma_pkt *tx_req;
529
530         if (!hw || !list_id) {
531                 BCMLOG_ERR("Invalid Arg..\n");
532                 return BC_STS_INV_ARG;
533         }
534
535         hw->pwr_lock--;
536
537         tx_req = (tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
538         if (!tx_req) {
539                 if (cs != BC_STS_IO_USER_ABORT)
540                         BCMLOG_ERR("Find and Fetch Did not find req\n");
541                 return BC_STS_NO_DATA;
542         }
543
544         if (tx_req->call_back) {
545                 tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
546                 tx_req->dio_req   = NULL;
547                 tx_req->cb_event  = NULL;
548                 tx_req->call_back = NULL;
549         } else {
550                 BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
551                        tx_req->list_tag);
552         }
553
554         /* Now put the tx_list back in the FreeQ */
555         tx_req->list_tag = 0;
556
557         return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
558 }
559
560 static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
561 {
562         uint32_t err_mask, tmp;
563         unsigned long flags = 0;
564
565         err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
566                 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
567                 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
568
569         if (!(err_sts & err_mask))
570                 return false;
571
572         BCMLOG_ERR("Error on Tx-L0 %x \n", err_sts);
573
574         tmp = err_mask;
575
576         if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
577                 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
578
579         if (tmp) {
580                 spin_lock_irqsave(&hw->lock, flags);
581                 /* reset list index.*/
582                 hw->tx_list_post_index = 0;
583                 spin_unlock_irqrestore(&hw->lock, flags);
584         }
585
586         tmp = err_sts & err_mask;
587         crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
588
589         return true;
590 }
591
592 static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
593 {
594         uint32_t err_mask, tmp;
595         unsigned long flags = 0;
596
597         err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
598                 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
599                 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
600
601         if (!(err_sts & err_mask))
602                 return false;
603
604         BCMLOG_ERR("Error on Tx-L1 %x \n", err_sts);
605
606         tmp = err_mask;
607
608         if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
609                 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
610
611         if (tmp) {
612                 spin_lock_irqsave(&hw->lock, flags);
613                 /* reset list index.*/
614                 hw->tx_list_post_index = 0;
615                 spin_unlock_irqrestore(&hw->lock, flags);
616         }
617
618         tmp = err_sts & err_mask;
619         crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
620
621         return true;
622 }
623
624 static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
625 {
626         uint32_t err_sts;
627
628         if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
629                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
630                                            BC_STS_SUCCESS);
631
632         if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
633                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
634                                            BC_STS_SUCCESS);
635
636         if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
637                          INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
638                          /* No error mask set.. */
639                          return;
640         }
641
642         /* Handle Tx errors. */
643         err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
644
645         if (crystalhd_tx_list0_handler(hw, err_sts))
646                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
647                                            BC_STS_ERROR);
648
649         if (crystalhd_tx_list1_handler(hw, err_sts))
650                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
651                                            BC_STS_ERROR);
652
653         hw->stats.tx_errors++;
654 }
655
656 static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc,
657                                  uint32_t ul_desc_index, uint32_t cnt)
658 {
659         uint32_t ix, ll = 0;
660
661         if (!p_dma_desc || !cnt)
662                 return;
663
664         /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
665          * setting ll (log level, I presume) to non-zero? */
666         if (!ll)
667                 return;
668
669         for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
670                 BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
671                        ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
672                        ix,
673                        p_dma_desc[ix].buff_addr_high,
674                        p_dma_desc[ix].buff_addr_low,
675                        p_dma_desc[ix].next_desc_addr_high,
676                        p_dma_desc[ix].next_desc_addr_low,
677                        p_dma_desc[ix].xfer_size,
678                        p_dma_desc[ix].intr_enable,
679                        p_dma_desc[ix].last_rec_indicator);
680         }
681
682 }
683
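/*
 * Build a chain of hardware DMA descriptors from the request's
 * scatter/gather elements starting at sg_st_ix/sg_st_off.  Each SGLE
 * length must be a multiple of 4 bytes; xfer_size is programmed in
 * DWORDs.  If the request carries trailing fill bytes (ioreq->fb_size),
 * one extra descriptor is appended for them.  The final descriptor gets
 * the last-record and interrupt-enable bits set and a NULL next pointer.
 */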
684 static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
685                                       dma_descriptor *desc,
686                                       dma_addr_t desc_paddr_base,
687                                       uint32_t sg_cnt, uint32_t sg_st_ix,
688                                       uint32_t sg_st_off, uint32_t xfr_sz)
689 {
690         uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
691         dma_addr_t desc_phy_addr = desc_paddr_base;
692         addr_64 addr_temp;
693
694         if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
695             (!sg_cnt && !ioreq->uinfo.dir_tx)) {
696                 BCMLOG_ERR("Invalid Args\n");
697                 return BC_STS_INV_ARG;
698         }
699
700         for (ix = 0; ix < sg_cnt; ix++) {
701
702                 /* Setup SGLE index. */
703                 sg_ix = ix + sg_st_ix;
704
705                 /* Get SGLE length */
706                 len = crystalhd_get_sgle_len(ioreq, sg_ix);
707                 if (len % 4) {
708                         BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
709                         return BC_STS_NOT_IMPL;
710                 }
711                 /* Setup DMA desc with Phy addr & Length at current index. */
712                 addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
713                 if (sg_ix == sg_st_ix) {
714                         addr_temp.full_addr += sg_st_off;
715                         len -= sg_st_off;
716                 }
717                 memset(&desc[ix], 0, sizeof(desc[ix]));
718                 desc[ix].buff_addr_low  = addr_temp.low_part;
719                 desc[ix].buff_addr_high = addr_temp.high_part;
720                 desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
721
722                 /* Chain DMA descriptor.  */
723                 addr_temp.full_addr = desc_phy_addr + sizeof(dma_descriptor);
724                 desc[ix].next_desc_addr_low = addr_temp.low_part;
725                 desc[ix].next_desc_addr_high = addr_temp.high_part;
726
727                 if ((count + len) > xfr_sz)
728                         len = xfr_sz - count;
729
730                 /* Debug.. */
731                 if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
732                         BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
733                                    len, ix, count, xfr_sz, sg_cnt);
734                         return BC_STS_ERROR;
735                 }
736                 /* Length expects Multiple of 4 */
737                 desc[ix].xfer_size = (len / 4);
738
739                 crystalhd_hw_dump_desc(desc, ix, 1);
740
741                 count += len;
742                 desc_phy_addr += sizeof(dma_descriptor);
743         }
744
745         last_desc_ix = ix - 1;
746
747         if (ioreq->fb_size) {
748                 memset(&desc[ix], 0, sizeof(desc[ix]));
749                 addr_temp.full_addr     = ioreq->fb_pa;
750                 desc[ix].buff_addr_low  = addr_temp.low_part;
751                 desc[ix].buff_addr_high = addr_temp.high_part;
752                 desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
753                 desc[ix].xfer_size      = 1;
754                 desc[ix].fill_bytes     = 4 - ioreq->fb_size;
755                 count += ioreq->fb_size;
756                 last_desc_ix++;
757         }
758
759         /* setup last descriptor..*/
760         desc[last_desc_ix].last_rec_indicator  = 1;
761         desc[last_desc_ix].next_desc_addr_low  = 0;
762         desc[last_desc_ix].next_desc_addr_high = 0;
763         desc[last_desc_ix].intr_enable = 1;
764
765         crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
766
767         if (count != xfr_sz) {
768                 BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
769                 return BC_STS_ERROR;
770         }
771
772         return BC_STS_SUCCESS;
773 }
774
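/*
 * Translate a user scatter/gather list into DMA descriptor chains.
 * TX requests (and RX requests without a UV plane) get a single chain
 * covering uinfo.xfr_len.  RX requests with uinfo.uv_offset set are split
 * in two: the first chain covers the Y data up to uv_offset, the second
 * starts at uv_sg_ix/uv_sg_off and covers the remainder; *uv_desc_index
 * returns the SG index at which the UV chain starts.
 */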
775 static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq,
776                                               pdma_desc_mem pdesc_mem,
777                                               uint32_t *uv_desc_index)
778 {
779         dma_descriptor *desc = NULL;
780         dma_addr_t desc_paddr_base = 0;
781         uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
782         uint32_t xfr_sz = 0;
783         BC_STATUS sts = BC_STS_SUCCESS;
784
785         /* Check params.. */
786         if (!ioreq || !pdesc_mem || !uv_desc_index) {
787                 BCMLOG_ERR("Invalid Args\n");
788                 return BC_STS_INV_ARG;
789         }
790
791         if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
792             !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
793                 BCMLOG_ERR("Invalid Args\n");
794                 return BC_STS_INV_ARG;
795         }
796
797         if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
798                 BCMLOG_ERR("UV offset for TX??\n");
799                 return BC_STS_INV_ARG;
800
801         }
802
803         desc = pdesc_mem->pdma_desc_start;
804         desc_paddr_base = pdesc_mem->phy_addr;
805
806         if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
807                 sg_cnt = ioreq->sg_cnt;
808                 xfr_sz = ioreq->uinfo.xfr_len;
809         } else {
810                 sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
811                 xfr_sz = ioreq->uinfo.uv_offset;
812         }
813
814         sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
815                                    sg_st_ix, sg_st_off, xfr_sz);
816
817         if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
818                 return sts;
819
820         /* Prepare for UV mapping.. */
821         desc = &pdesc_mem->pdma_desc_start[sg_cnt];
822         desc_paddr_base = pdesc_mem->phy_addr +
823                           (sg_cnt * sizeof(dma_descriptor));
824
825         /* Done with desc addr.. now update sg stuff.*/
826         sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
827         xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
828         sg_st_ix  = ioreq->uinfo.uv_sg_ix;
829         sg_st_off = ioreq->uinfo.uv_sg_off;
830
831         sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
832                                    sg_st_ix, sg_st_off, xfr_sz);
833         if (sts != BC_STS_SUCCESS)
834                 return sts;
835
836         *uv_desc_index = sg_st_ix;
837
838         return sts;
839 }
840
841 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
842 {
843         uint32_t dma_cntrl;
844
845         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
846         if (!(dma_cntrl & DMA_START_BIT)) {
847                 dma_cntrl |= DMA_START_BIT;
848                 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
849                                dma_cntrl);
850         }
851
852         return;
853 }
854
855 /* _CHECK_THIS_
856  *
857  * Verify if the Stop generates a completion interrupt or not.
858  * If it does not generate an interrupt, then add polling here.
859  */
860 static BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
861 {
862         uint32_t dma_cntrl, cnt = 30;
863         uint32_t l1 = 1, l2 = 1;
864         unsigned long flags = 0;
865
866         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
867
868         BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
869
870         /* If the start bit is already clear, the engine is stopped. */
871         if (!(dma_cntrl & DMA_START_BIT)) {
872                 BCMLOG(BCMLOG_DBG, "Already Stopped\n");
873                 return BC_STS_SUCCESS;
874         }
875
876         crystalhd_disable_interrupts(hw->adp);
877
878         /* Issue stop to HW */
879         /* This bit when set gave problems. Please check*/
880         dma_cntrl &= ~DMA_START_BIT;
881         crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
882
883         BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
884
885         /* Poll for 3 seconds (30 * 100ms) on both the lists..*/
886         while ((l1 || l2) && cnt) {
887
888                 if (l1) {
889                         l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
890                         l1 &= DMA_START_BIT;
891                 }
892
893                 if (l2) {
894                         l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
895                         l2 &= DMA_START_BIT;
896                 }
897
898                 msleep_interruptible(100);
899
900                 cnt--;
901         }
902
903         if (!cnt) {
904                 BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
905                 crystalhd_enable_interrupts(hw->adp);
906                 return BC_STS_ERROR;
907         }
908
909         spin_lock_irqsave(&hw->lock, flags);
910         hw->tx_list_post_index = 0;
911         spin_unlock_irqrestore(&hw->lock, flags);
912         BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
913         crystalhd_enable_interrupts(hw->adp);
914
915         return BC_STS_SUCCESS;
916 }
917
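/*
 * The PIB delivery and release queues (pib_del_Q_addr / pib_rel_Q_addr)
 * are circular lists in device memory, accessed via crystalhd_mem_rd/wr:
 * word 0 holds the read offset, word 1 the write offset, and the entries
 * occupy offsets MIN_PIB_Q_DEPTH through MAX_PIB_Q_DEPTH - 1, wrapping
 * back to MIN_PIB_Q_DEPTH.  Equal read and write offsets mean the queue
 * is empty.
 */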
918 static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
919 {
920         /*
921          * The read and write offsets of the circular PIB list are
922          * kept in its 0th and 1st locations.
923          */
924         uint32_t Q_addr;
925         uint32_t pib_cnt, r_offset, w_offset;
926
927         Q_addr = hw->pib_del_Q_addr;
928
929         /* Get the Read Pointer */
930         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
931
932         /* Get the Write Pointer */
933         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
934
935         if (r_offset == w_offset)
936                 return 0;       /* Queue is empty */
937
938         if (w_offset > r_offset)
939                 pib_cnt = w_offset - r_offset;
940         else
941                 pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
942                           (r_offset + MIN_PIB_Q_DEPTH);
943
944         if (pib_cnt > MAX_PIB_Q_DEPTH) {
945                 BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
946                 return 0;
947         }
948
949         return pib_cnt;
950 }
951
952 static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
953 {
954         uint32_t Q_addr;
955         uint32_t addr_entry, r_offset, w_offset;
956
957         Q_addr = hw->pib_del_Q_addr;
958
959         /* Get the Read Pointer 0Th Location is Read Pointer */
960         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
961
962         /* Get the Write Pointer 1st Location is Write pointer */
963         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
964
965         /* Queue is empty */
966         if (r_offset == w_offset)
967                 return 0;
968
969         if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
970                 return 0;
971
972         /* Get the Actual Address of the PIB */
973         crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
974                        1, &addr_entry);
975
976         /* Increment the Read Pointer */
977         r_offset++;
978
979         if (MAX_PIB_Q_DEPTH == r_offset)
980                 r_offset = MIN_PIB_Q_DEPTH;
981
982         /* Write back the read pointer to It's Location */
983         crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
984
985         return addr_entry;
986 }
987
988 static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
989 {
990         uint32_t Q_addr;
991         uint32_t r_offset, w_offset, n_offset;
992
993         Q_addr = hw->pib_rel_Q_addr;
994
995         /* Get the Read Pointer */
996         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
997
998         /* Get the Write Pointer */
999         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
1000
1001         if ((r_offset < MIN_PIB_Q_DEPTH) ||
1002             (r_offset >= MAX_PIB_Q_DEPTH))
1003                 return false;
1004
1005         n_offset = w_offset + 1;
1006
1007         if (MAX_PIB_Q_DEPTH == n_offset)
1008                 n_offset = MIN_PIB_Q_DEPTH;
1009
1010         if (r_offset == n_offset)
1011                 return false; /* should never happen */
1012
1013         /* Write the DRAM ADDR to the Queue at Next Offset */
1014         crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
1015                        1, &addr_to_rel);
1016
1017         /* Put the New value of the write pointer in Queue */
1018         crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
1019
1020         return true;
1021 }
1022
1023 static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib)
1024 {
1025         if (!src_pib || !dst_pib) {
1026                 BCMLOG_ERR("Invalid Arguments\n");
1027                 return;
1028         }
1029
1030         dst_pib->timeStamp           = 0;
1031         dst_pib->picture_number      = src_pib->ppb.picture_number;
1032         dst_pib->width               = src_pib->ppb.width;
1033         dst_pib->height              = src_pib->ppb.height;
1034         dst_pib->chroma_format       = src_pib->ppb.chroma_format;
1035         dst_pib->pulldown            = src_pib->ppb.pulldown;
1036         dst_pib->flags               = src_pib->ppb.flags;
1037         dst_pib->sess_num            = src_pib->ptsStcOffset;
1038         dst_pib->aspect_ratio        = src_pib->ppb.aspect_ratio;
1039         dst_pib->colour_primaries     = src_pib->ppb.colour_primaries;
1040         dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1041         dst_pib->frame_rate          = src_pib->resolution;
1042         return;
1043 }
1044
1045 static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
1046 {
1047         unsigned int cnt;
1048         C011_PIB src_pib;
1049         uint32_t pib_addr, pib_cnt;
1050         BC_PIC_INFO_BLOCK *AppPib;
1051         crystalhd_rx_dma_pkt *rx_pkt = NULL;
1052
1053         pib_cnt = crystalhd_get_pib_avail_cnt(hw);
1054
1055         if (!pib_cnt)
1056                 return;
1057
1058         for (cnt = 0; cnt < pib_cnt; cnt++) {
1059
1060                 pib_addr = crystalhd_get_addr_from_pib_Q(hw);
1061                 crystalhd_mem_rd(hw->adp, pib_addr, sizeof(C011_PIB) / 4,
1062                                (uint32_t *)&src_pib);
1063
1064                 if (src_pib.bFormatChange) {
1065                         rx_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
1066                         if (!rx_pkt)
1067                                 return;
1068                         rx_pkt->flags = 0;
1069                         rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
1070                         AppPib = &rx_pkt->pib;
1071                         cpy_pib_to_app(&src_pib, AppPib);
1072
1073                         BCMLOG(BCMLOG_DBG,
1074                                "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1075                                rx_pkt->pib.picture_number,
1076                                rx_pkt->pib.aspect_ratio,
1077                                rx_pkt->pib.chroma_format,
1078                                rx_pkt->pib.colour_primaries,
1079                                rx_pkt->pib.frame_rate,
1080                                rx_pkt->pib.width,
1081                                rx_pkt->pib.height,
1082                                rx_pkt->pib.n_drop,
1083                                rx_pkt->pib.pulldown,
1084                                rx_pkt->pib.ycom);
1085
1086                         crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
1087
1088                 }
1089
1090                 crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
1091         }
1092 }
1093
1094 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1095 {
1096         uint32_t        dma_cntrl;
1097
1098         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1099         if (!(dma_cntrl & DMA_START_BIT)) {
1100                 dma_cntrl |= DMA_START_BIT;
1101                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1102         }
1103
1104         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1105         if (!(dma_cntrl & DMA_START_BIT)) {
1106                 dma_cntrl |= DMA_START_BIT;
1107                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1108         }
1109
1110         return;
1111 }
1112
1113 static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
1114 {
1115         uint32_t dma_cntrl = 0, count = 30;
1116         uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
1117
1118         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1119         if ((dma_cntrl & DMA_START_BIT)) {
1120                 dma_cntrl &= ~DMA_START_BIT;
1121                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1122         }
1123
1124         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1125         if ((dma_cntrl & DMA_START_BIT)) {
1126                 dma_cntrl &= ~DMA_START_BIT;
1127                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1128         }
1129
1130         /* Poll for 3 seconds (30 * 100ms) on both the lists..*/
1131         while ((l0y || l0uv || l1y || l1uv) && count) {
1132
1133                 if (l0y) {
1134                         l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
1135                         l0y &= DMA_START_BIT;
1136                         if (!l0y) {
1137                                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1138                         }
1139                 }
1140
1141                 if (l1y) {
1142                         l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
1143                         l1y &= DMA_START_BIT;
1144                         if (!l1y) {
1145                                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1146                         }
1147                 }
1148
1149                 if (l0uv) {
1150                         l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
1151                         l0uv &= DMA_START_BIT;
1152                         if (!l0uv) {
1153                                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1154                         }
1155                 }
1156
1157                 if (l1uv) {
1158                         l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
1159                         l1uv &= DMA_START_BIT;
1160                         if (!l1uv) {
1161                                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1162                         }
1163                 }
1164                 msleep_interruptible(100);
1165                 count--;
1166         }
1167
1168         hw->rx_list_post_index = 0;
1169
1170         BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1171                count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
1172 }
1173
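/*
 * Post a capture buffer on one of the two RX DMA lists.  The list must
 * currently be free (sts_free); the packet is tagged with
 * rx_pkt_tag_seed + list index so the ISR can match the completion, the
 * list status is marked as waiting for Y (and UV, if present) interrupts,
 * and the Y/UV descriptor base addresses are written with bit 0 of the
 * low address set.
 */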
1174 static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_dma_pkt *rx_pkt)
1175 {
1176         uint32_t y_low_addr_reg, y_high_addr_reg;
1177         uint32_t uv_low_addr_reg, uv_high_addr_reg;
1178         addr_64 desc_addr;
1179         unsigned long flags;
1180
1181         if (!hw || !rx_pkt) {
1182                 BCMLOG_ERR("Invalid Arguments\n");
1183                 return BC_STS_INV_ARG;
1184         }
1185
1186         if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
1187                 BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
1188                 return BC_STS_INV_ARG;
1189         }
1190
1191         spin_lock_irqsave(&hw->rx_lock, flags);
1192         /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
1193         if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
1194                 spin_unlock_irqrestore(&hw->rx_lock, flags);
1195                 return BC_STS_BUSY;
1196         }
1197
1198         if (!hw->rx_list_post_index) {
1199                 y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
1200                 y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
1201                 uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
1202                 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
1203         } else {
1204                 y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
1205                 y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
1206                 uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
1207                 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
1208         }
1209         rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
1210         hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
1211         if (rx_pkt->uv_phy_addr)
1212                 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
1213         hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
1214         spin_unlock_irqrestore(&hw->rx_lock, flags);
1215
1216         crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);
1217
1218         crystalhd_start_rx_dma_engine(hw);
1219         /* Program the Y descriptor */
1220         desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
1221         crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
1222         crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
1223
1224         if (rx_pkt->uv_phy_addr) {
1225                 /* Program the UV descriptor */
1226                 desc_addr.full_addr = rx_pkt->uv_phy_addr;
1227                 crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
1228                 crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
1229         }
1230
1231         return BC_STS_SUCCESS;
1232 }
1233
1234 static BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1235                                           crystalhd_rx_dma_pkt *rx_pkt)
1236 {
1237         BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1238
1239         if (sts == BC_STS_BUSY)
1240                 crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1241                                  false, rx_pkt->pkt_tag);
1242
1243         return sts;
1244 }
1245
1246 static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1247                              uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1248 {
1249         uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1250
1251         if (!list_index) {
1252                 y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1253                 uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1254         } else {
1255                 y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1256                 uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1257         }
1258
1259         *y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1260         *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
1261 }
1262
1263 /*
1264  * This function should be called only after making sure that the two DMA
1265  * lists are free. It does not check whether DMAs are active before
1266  * turning off the DMA.
1267  */
1268 static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
1269 {
1270         uint32_t dma_cntrl, aspm;
1271
1272         hw->stop_pending = 0;
1273
1274         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1275         if (dma_cntrl & DMA_START_BIT) {
1276                 dma_cntrl &= ~DMA_START_BIT;
1277                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1278         }
1279
1280         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1281         if (dma_cntrl & DMA_START_BIT) {
1282                 dma_cntrl &= ~DMA_START_BIT;
1283                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1284         }
1285         hw->rx_list_post_index = 0;
1286
1287         aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
1288         aspm |= ASPM_L1_ENABLE;
1289         /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1290         crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
1291 }
1292
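/*
 * Completion path for an RX list: fetch the packet posted on 'list_index'
 * from the active queue by its tag.  On success, record the Y/UV DMA done
 * sizes and move the packet to the ready queue; on error, try to re-post
 * the capture buffer via crystalhd_hw_post_cap_buff().
 */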
1293 static BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
1294                                      BC_STATUS comp_sts)
1295 {
1296         crystalhd_rx_dma_pkt *rx_pkt = NULL;
1297         uint32_t y_dw_dnsz, uv_dw_dnsz;
1298         BC_STATUS sts = BC_STS_SUCCESS;
1299
1300         if (!hw || list_index >= DMA_ENGINE_CNT) {
1301                 BCMLOG_ERR("Invalid Arguments\n");
1302                 return BC_STS_INV_ARG;
1303         }
1304
1305         rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
1306                                              hw->rx_pkt_tag_seed + list_index);
1307         if (!rx_pkt) {
1308                 BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1309                            hw->rx_list_post_index, hw->rx_list_sts[0],
1310                            hw->rx_list_sts[1], list_index,
1311                            hw->rx_pkt_tag_seed + list_index, comp_sts);
1312                 return BC_STS_INV_ARG;
1313         }
1314
1315         if (comp_sts == BC_STS_SUCCESS) {
1316                 crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
1317                 rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
1318                 rx_pkt->flags = COMP_FLAG_DATA_VALID;
1319                 if (rx_pkt->uv_phy_addr)
1320                         rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
1321                 crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
1322                                 hw->rx_pkt_tag_seed + list_index);
1323                 return sts;
1324         }
1325
1326         /* Check if we can post this DIO again. */
1327         return crystalhd_hw_post_cap_buff(hw, rx_pkt);
1328 }
1329
1330 static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1331                                      uint32_t y_err_sts, uint32_t uv_err_sts)
1332 {
1333         uint32_t tmp;
1334         list_sts tmp_lsts;
1335
1336         if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
1337                 return false;
1338
1339         tmp_lsts = hw->rx_list_sts[0];
1340
1341         /* Y0 - DMA */
1342         tmp = y_err_sts & GET_Y0_ERR_MSK;
1343         if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
1344                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1345
1346         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1347                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1348                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1349         }
1350
1351         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1352                 hw->rx_list_sts[0] &= ~rx_y_mask;
1353                 hw->rx_list_sts[0] |= rx_y_error;
1354                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1355         }
1356
1357         if (tmp) {
1358                 hw->rx_list_sts[0] &= ~rx_y_mask;
1359                 hw->rx_list_sts[0] |= rx_y_error;
1360                 hw->rx_list_post_index = 0;
1361         }
1362
1363         /* UV0 - DMA */
1364         tmp = uv_err_sts & GET_UV0_ERR_MSK;
1365         if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
1366                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1367
1368         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1369                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1370                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1371         }
1372
1373         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1374                 hw->rx_list_sts[0] &= ~rx_uv_mask;
1375                 hw->rx_list_sts[0] |= rx_uv_error;
1376                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1377         }
1378
1379         if (tmp) {
1380                 hw->rx_list_sts[0] &= ~rx_uv_mask;
1381                 hw->rx_list_sts[0] |= rx_uv_error;
1382                 hw->rx_list_post_index = 0;
1383         }
1384
1385         if (y_err_sts & GET_Y0_ERR_MSK) {
1386                 tmp = y_err_sts & GET_Y0_ERR_MSK;
1387                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1388         }
1389
1390         if (uv_err_sts & GET_UV0_ERR_MSK) {
1391                 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1392                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1393         }
1394
1395         return (tmp_lsts != hw->rx_list_sts[0]);
1396 }
1397
1398 static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1399                                      uint32_t y_err_sts, uint32_t uv_err_sts)
1400 {
1401         uint32_t tmp;
1402         list_sts tmp_lsts;
1403
1404         if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
1405                 return false;
1406
1407         tmp_lsts = hw->rx_list_sts[1];
1408
1409         /* Y1 - DMA */
1410         tmp = y_err_sts & GET_Y1_ERR_MSK;
1411         if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
1412                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1413
1414         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1415                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1416                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1417         }
1418
1419         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1420                 /* Add retry-support..*/
1421                 hw->rx_list_sts[1] &= ~rx_y_mask;
1422                 hw->rx_list_sts[1] |= rx_y_error;
1423                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1424         }
1425
1426         if (tmp) {
1427                 hw->rx_list_sts[1] &= ~rx_y_mask;
1428                 hw->rx_list_sts[1] |= rx_y_error;
1429                 hw->rx_list_post_index = 0;
1430         }
1431
1432         /* UV1 - DMA */
1433         tmp = uv_err_sts & GET_UV1_ERR_MSK;
1434         if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
1435                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1437
1438         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1439                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1440                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1441         }
1442
1443         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1444                 /* Add retry-support*/
1445                 hw->rx_list_sts[1] &= ~rx_uv_mask;
1446                 hw->rx_list_sts[1] |= rx_uv_error;
1447                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1448         }
1449
1450         if (tmp) {
1451                 hw->rx_list_sts[1] &= ~rx_uv_mask;
1452                 hw->rx_list_sts[1] |= rx_uv_error;
1453                 hw->rx_list_post_index = 0;
1454         }
1455
1456         if (y_err_sts & GET_Y1_ERR_MSK) {
1457                 tmp = y_err_sts & GET_Y1_ERR_MSK;
1458                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1459         }
1460
1461         if (uv_err_sts & GET_UV1_ERR_MSK) {
1462                 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1463                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1464         }
1465
1466         return (tmp_lsts != hw->rx_list_sts[1]);
1467 }
1468
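/*
 * RX interrupt service: snapshot the Y/UV error status registers, run the
 * per-list handlers under rx_lock, complete any finished packets, and
 * either finalize a pending pause or re-arm capture once a list frees up.
 */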
1470 static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
1471 {
1472         unsigned long flags;
1473         uint32_t i, list_avail = 0;
1474         BC_STATUS comp_sts = BC_STS_NO_DATA;
1475         uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
1476         bool ret = false;
1477
1478         if (!hw) {
1479                 BCMLOG_ERR("Invalid Arguments\n");
1480                 return;
1481         }
1482
1483         if (!(intr_sts & GET_RX_INTR_MASK))
1484                 return;
1485
1486         y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
1487         uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
1488
1489         for (i = 0; i < DMA_ENGINE_CNT; i++) {
1490                 /* Update States..*/
1491                 spin_lock_irqsave(&hw->rx_lock, flags);
1492                 if (i == 0)
1493                         ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1494                 else
1495                         ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1496                 if (ret) {
1497                         switch (hw->rx_list_sts[i]) {
1498                         case sts_free:
1499                                 comp_sts = BC_STS_SUCCESS;
1500                                 list_avail = 1;
1501                                 break;
1502                         case rx_y_error:
1503                         case rx_uv_error:
1504                         case rx_sts_error:
1505                                 /* We got an error on Y, UV, or both. */
1506                                 hw->stats.rx_errors++;
1507                                 crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
1508                                 /* FIXME: jarod: this is where my mini pci-e card is tripping up */
1509                                 BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
1510                                        "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1511                                        i, hw->stats.rx_errors, y_err_sts,
1512                                        uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
1513                                 hw->rx_list_sts[i] = sts_free;
1514                                 comp_sts = BC_STS_ERROR;
1515                                 break;
1516                         default:
1517                                 /* Wait for completion..*/
1518                                 comp_sts = BC_STS_NO_DATA;
1519                                 break;
1520                         }
1521                 }
1522                 spin_unlock_irqrestore(&hw->rx_lock, flags);
1523
1524                 /* handle completion...*/
1525                 if (comp_sts != BC_STS_NO_DATA) {
1526                         crystalhd_rx_pkt_done(hw, i, comp_sts);
1527                         comp_sts = BC_STS_NO_DATA;
1528                 }
1529         }
1530
1531         if (list_avail) {
1532                 if (hw->stop_pending) {
1533                         if ((hw->rx_list_sts[0] == sts_free) &&
1534                             (hw->rx_list_sts[1] == sts_free))
1535                                 crystalhd_hw_finalize_pause(hw);
1536                 } else {
1537                         crystalhd_hw_start_capture(hw);
1538                 }
1539         }
1540 }
1541
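/*
 * Post-process a completed firmware command: latch the picture-info
 * delivery/release queue addresses on channel-start, and load the
 * run-time firmware configuration after eCMD_C011_INIT.
 */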
1542 static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1543                                           BC_FW_CMD *fw_cmd)
1544 {
1545         BC_STATUS sts = BC_STS_SUCCESS;
1546         DecRspChannelStartVideo *st_rsp = NULL;
1547
1548         switch (fw_cmd->cmd[0]) {
1549         case eCMD_C011_DEC_CHAN_START_VIDEO:
1550                 st_rsp = (DecRspChannelStartVideo *)fw_cmd->rsp;
1551                 hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1552                 hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1553                 BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1554                        hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1555                 break;
1556         case eCMD_C011_INIT:
1557                 if (!crystalhd_load_firmware_config(hw->adp)) {
1558                         BCMLOG_ERR("Failed to load firmware config.\n");
1559                         sts = BC_STS_FW_AUTH_FAILED;
1560                 }
1561                 break;
1562         default:
1563                 break;
1564         }
1565         return sts;
1566 }
1567
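/*
 * Prepare the board for a low-power state: pulse the 7412 decoder reset,
 * close all DDR banks, drop the DDR CKE pin, reset the audio block and
 * power down the decoder, audio and video PLLs.
 */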
1568 static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
1569 {
1570         uint32_t reg;
1571         link_misc_perst_decoder_ctrl rst_cntrl_reg;
1572
1573         /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1574         rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
1575
1576         rst_cntrl_reg.bcm_7412_rst = 1;
1577         crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1578         msleep_interruptible(50);
1579
1580         rst_cntrl_reg.bcm_7412_rst = 0;
1581         crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1582
1583         /* Close all banks, put DDR in idle */
1584         bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
1585
1586         /* Set bit 25 (drop CKE pin of DDR) */
1587         reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
1588         reg |= 0x02000000;
1589         bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
1590
1591         /* Reset the audio block */
1592         bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
1593
1594         /* Power down Raptor PLL */
1595         reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
1596         reg |= 0x00008000;
1597         bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
1598
1599         /* Power down all audio PLLs */
1600         bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
1601
1602         /* Power down video clock (75MHz) */
1603         reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
1604         reg |= 0x00008000;
1605         bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
1606
1607         /* Power down video clock (75MHz) */
1608         reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
1609         reg |= 0x00008000;
1610         bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
1611
1612         /* Power down core clock (200MHz) */
1613         reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
1614         reg |= 0x00008000;
1615         bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
1616
1617         /* Power down core clock (200MHz) */
1618         reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
1619         reg |= 0x00008000;
1620         bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
1621
1622         return BC_STS_SUCCESS;
1623 }
1624
1625 /************************************************
1626 ** Exported HW layer interfaces
1627 *************************************************/
1628
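/*
 * Download the firmware image over the DCI interface: check that the OTP
 * is programmed, bring the DCI to its download-ready state, copy the
 * image (minus its 36-byte signature trailer) to DRAM at BC_FWIMG_ST_ADDR,
 * program the signature registers, then trigger verification and wait for
 * the DCI to report success.
 */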
1629 BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
1630 {
1631         uint32_t reg_data, cnt, *temp_buff;
1632         uint32_t fw_sig_len = 36;
1633         uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1634
1635         BCMLOG_ENTER;
1636
1637         if (!adp || !buffer || !sz) {
1638                 BCMLOG_ERR("Invalid Params.\n");
1639                 return BC_STS_INV_ARG;
1640         }
1641
1642         reg_data = crystalhd_reg_rd(adp, OTP_CMD);
1643         if (!(reg_data & 0x02)) {
1644                 BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1645                 return BC_STS_ERROR;
1646         }
1647
1648         reg_data = 0;
1649         crystalhd_reg_wr(adp, DCI_CMD, 0);
1650         reg_data |= BC_BIT(0);
1651         crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1652
1653         reg_data = 0;
1654         cnt = 1000;
1655         msleep_interruptible(10);
1656
1657         while (reg_data != BC_BIT(4)) {
1658                 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1659                 reg_data &= BC_BIT(4);
1660                 if (--cnt == 0) {
1661                         BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1662                         return BC_STS_TIMEOUT;
1663                 }
1664         }
1665
1666         msleep_interruptible(10);
1667         /* Load the firmware image start address into DCI_FIRMWARE_ADDR */
1668         crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
1669         temp_buff = (uint32_t *)buffer;
1670         for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
1671                 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
1672                 crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
1673                 dram_offset += 4;
1674                 temp_buff++;
1675         }
1676         msleep_interruptible(10);
1677
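        /*
         * The last 36 bytes of the image form the signature trailer: skip
         * its first dword, then write the remaining eight dwords
         * (byte-swapped) into DCI_SIGNATURE_DATA_7..0, working downwards.
         */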
1678         temp_buff++;
1679
1680         sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
1681         for (cnt = 0; cnt < 8; cnt++) {
1682                 uint32_t swapped_data = *temp_buff;
1683                 swapped_data = bswap_32_1(swapped_data);
1684                 crystalhd_reg_wr(adp, sig_reg, swapped_data);
1685                 sig_reg -= 4;
1686                 temp_buff++;
1687         }
1688         msleep_interruptible(10);
1689
1690         reg_data = 0;
1691         reg_data |= BC_BIT(1);
1692         crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1693         msleep_interruptible(10);
1694
1695         reg_data = 0;
1696         reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1697
1698         if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
1699                 cnt = 1000;
1700                 while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
1701                         reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1702                         reg_data &= BC_BIT(0);
1703                         if (!(--cnt))
1704                                 break;
1705                         msleep_interruptible(10);
1706                 }
1707                 reg_data = 0;
1708                 reg_data = crystalhd_reg_rd(adp, DCI_CMD);
1709                 reg_data |= BC_BIT(4);
1710                 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1711
1712         } else {
1713                 BCMLOG_ERR("F/w Signature mismatch\n");
1714                 return BC_STS_FW_AUTH_FAILED;
1715         }
1716
1717         BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
1718         return BC_STS_SUCCESS;
1719 }
1720
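/*
 * Issue a firmware command: write the command block to TS_Host2CpuSnd,
 * ring the Hst2CpuMbx1 mailbox, wait for the response interrupt, then
 * read the response block from the address returned in Cpu2HstMbx1.
 */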
1721 BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd)
1722 {
1723         uint32_t cnt = 0, cmd_res_addr;
1724         uint32_t *cmd_buff, *res_buff;
1725         wait_queue_head_t fw_cmd_event;
1726         int rc = 0;
1727         BC_STATUS sts;
1728
1729         crystalhd_create_event(&fw_cmd_event);
1730
1731         BCMLOG_ENTER;
1732
1733         if (!hw || !fw_cmd) {
1734                 BCMLOG_ERR("Invalid Arguments\n");
1735                 return BC_STS_INV_ARG;
1736         }
1737
1738         cmd_buff = fw_cmd->cmd;
1739         res_buff = fw_cmd->rsp;
1740
1741         if (!cmd_buff || !res_buff) {
1742                 BCMLOG_ERR("Invalid Parameters for F/W Command\n");
1743                 return BC_STS_INV_ARG;
1744         }
1745
1746         hw->pwr_lock++;
1747
1748         hw->fwcmd_evt_sts = 0;
1749         hw->pfw_cmd_event = &fw_cmd_event;
1750
1751         /* Write the command to memory */
1752         crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
1753
1754         /* Read back once to flush the memory arbitrator */
1755         crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
1756
1757         /* Write the command address to mailbox */
1758         bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
1759         msleep_interruptible(50);
1760
1761         crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
1762
1763         if (!rc) {
1764                 sts = BC_STS_SUCCESS;
1765         } else if (rc == -EBUSY) {
1766                 BCMLOG_ERR("Firmware command T/O\n");
1767                 sts = BC_STS_TIMEOUT;
1768         } else if (rc == -EINTR) {
1769                 BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
1770                 sts = BC_STS_IO_USER_ABORT;
1771         } else {
1772                 BCMLOG_ERR("FwCmd IO Error.\n");
1773                 sts = BC_STS_IO_ERROR;
1774         }
1775
1776         if (sts != BC_STS_SUCCESS) {
1777                 BCMLOG_ERR("FwCmd Failed.\n");
1778                 hw->pwr_lock--;
1779                 return sts;
1780         }
1781
1782         /* Get the Response Address */
1783         cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
1784
1785         /* Read the Response */
1786         crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
1787
1788         hw->pwr_lock--;
1789
1790         if (res_buff[2] != C011_RET_SUCCESS) {
1791                 BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1792                 return BC_STS_FW_CMD_ERR;
1793         }
1794
1795         sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
1796         if (sts != BC_STS_SUCCESS)
1797                 BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
1798
1799         return sts;
1800 }
1801
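/*
 * Top-level interrupt handler: service decoder mailbox interrupts
 * (firmware command completion, picture-info delivery), dispatch the RX
 * and TX DMA interrupts, then clear and EOI the link interrupt status.
 * Returns true if the interrupt was ours.
 */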
1802 bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
1803 {
1804         uint32_t intr_sts = 0;
1805         uint32_t deco_intr = 0;
1806         bool rc = false;
1807
1808         if (!adp || !hw || !hw->dev_started)
1809                 return rc;
1810
1811         hw->stats.num_interrupts++;
1812         hw->pwr_lock++;
1813
1814         deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
1815         intr_sts  = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
1816
1817         if (intr_sts) {
1818                 /* let system know we processed interrupt..*/
1819                 rc = true;
1820                 hw->stats.dev_interrupts++;
1821         }
1822
1823         if (deco_intr && (deco_intr != 0xdeaddead)) {
1824
1825                 if (deco_intr & 0x80000000) {
1826                         /*Set the Event and the status flag*/
1827                         if (hw->pfw_cmd_event) {
1828                                 hw->fwcmd_evt_sts = 1;
1829                                 crystalhd_set_event(hw->pfw_cmd_event);
1830                         }
1831                 }
1832
1833                 if (deco_intr & BC_BIT(1))
1834                         crystalhd_hw_proc_pib(hw);
1835
1836                 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
1837                 /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
1838                 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
1839                 rc = true;
1840         }
1841
1842         /* Rx interrupts */
1843         crystalhd_rx_isr(hw, intr_sts);
1844
1845         /* Tx interrupts*/
1846         crystalhd_tx_isr(hw, intr_sts);
1847
1848         /* Clear interrupts */
1849         if (rc) {
1850                 if (intr_sts)
1851                         crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
1852
1853                 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
1854         }
1855
1856         hw->pwr_lock--;
1857
1858         return rc;
1859 }
1860
1861 BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
1862 {
1863         if (!hw || !adp) {
1864                 BCMLOG_ERR("Invalid Arguments\n");
1865                 return BC_STS_INV_ARG;
1866         }
1867
1868         if (hw->dev_started)
1869                 return BC_STS_SUCCESS;
1870
1871         memset(hw, 0, sizeof(struct crystalhd_hw));
1872
1873         hw->adp = adp;
1874         spin_lock_init(&hw->lock);
1875         spin_lock_init(&hw->rx_lock);
1876         /* FIXME: jarod: what are these magic numbers?!? */
1877         hw->tx_ioq_tag_seed = 0x70023070;
1878         hw->rx_pkt_tag_seed = 0x70029070;
1879
1880         hw->stop_pending = 0;
1881         crystalhd_start_device(hw->adp);
1882         hw->dev_started = true;
1883
1884         /* set initial core clock  */
1885         hw->core_clock_mhz = CLOCK_PRESET;
1886         hw->prev_n = 0;
1887         hw->pwr_lock = 0;
1888         crystalhd_hw_set_core_clock(hw);
1889
1890         return BC_STS_SUCCESS;
1891 }
1892
1893 BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1894 {
1895         if (!hw) {
1896                 BCMLOG_ERR("Invalid Arguments\n");
1897                 return BC_STS_INV_ARG;
1898         }
1899
1900         if (!hw->dev_started)
1901                 return BC_STS_SUCCESS;
1902
1903         /* Stop and DDR sleep will happen in here */
1904         crystalhd_hw_suspend(hw);
1905         hw->dev_started = false;
1906
1907         return BC_STS_SUCCESS;
1908 }
1909
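/*
 * Allocate the descriptor rings: one DMA-coherent descriptor list per TX
 * packet (queued on tx_freeq) and BC_RX_LIST_CNT RX packets, each with
 * its own descriptor list, handed back through crystalhd_hw_free_rx_pkt().
 */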
1910 BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1911 {
1912         unsigned int i;
1913         void *mem;
1914         size_t mem_len;
1915         dma_addr_t phy_addr;
1916         BC_STATUS sts = BC_STS_SUCCESS;
1917         crystalhd_rx_dma_pkt *rpkt;
1918
1919         if (!hw || !hw->adp) {
1920                 BCMLOG_ERR("Invalid Arguments\n");
1921                 return BC_STS_INV_ARG;
1922         }
1923
1924         sts = crystalhd_hw_create_ioqs(hw);
1925         if (sts != BC_STS_SUCCESS) {
1926                 BCMLOG_ERR("Failed to create IOQs..\n");
1927                 return sts;
1928         }
1929
1930         mem_len = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
1931
1932         for (i = 0; i < BC_TX_LIST_CNT; i++) {
1933                 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1934                 if (mem) {
1935                         memset(mem, 0, mem_len);
1936                 } else {
1937                         BCMLOG_ERR("Insufficient Memory For TX\n");
1938                         crystalhd_hw_free_dma_rings(hw);
1939                         return BC_STS_INSUFF_RES;
1940                 }
1941                 /* tx_pkt_pool -- static memory allocation */
1942                 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1943                 hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1944                 hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1945                                                  sizeof(dma_descriptor);
1946                 hw->tx_pkt_pool[i].list_tag = 0;
1947
1948                 /* Add TX dma requests to Free Queue..*/
1949                 sts = crystalhd_dioq_add(hw->tx_freeq,
1950                                        &hw->tx_pkt_pool[i], false, 0);
1951                 if (sts != BC_STS_SUCCESS) {
1952                         crystalhd_hw_free_dma_rings(hw);
1953                         return sts;
1954                 }
1955         }
1956
1957         for (i = 0; i < BC_RX_LIST_CNT; i++) {
1958                 rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
1959                 if (!rpkt) {
1960                         BCMLOG_ERR("Insufficient Memory For RX\n");
1961                         crystalhd_hw_free_dma_rings(hw);
1962                         return BC_STS_INSUFF_RES;
1963                 }
1964
1965                 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1966                 if (mem) {
1967                         memset(mem, 0, mem_len);
1968                 } else {
1969                         BCMLOG_ERR("Insufficient Memory For RX\n");
1970                         crystalhd_hw_free_dma_rings(hw);
1971                         return BC_STS_INSUFF_RES;
1972                 }
1973                 rpkt->desc_mem.pdma_desc_start = mem;
1974                 rpkt->desc_mem.phy_addr = phy_addr;
1975                 rpkt->desc_mem.sz  = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
1976                 rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
1977                 crystalhd_hw_free_rx_pkt(hw, rpkt);
1978         }
1979
1980         return BC_STS_SUCCESS;
1981 }
1982
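/*
 * Tear down the IOQs and release every TX descriptor list and RX packet
 * allocated by crystalhd_hw_setup_dma_rings().
 */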
1983 BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
1984 {
1985         unsigned int i;
1986         crystalhd_rx_dma_pkt *rpkt = NULL;
1987
1988         if (!hw || !hw->adp) {
1989                 BCMLOG_ERR("Invalid Arguments\n");
1990                 return BC_STS_INV_ARG;
1991         }
1992
1993         /* Delete all IOQs.. */
1994         crystalhd_hw_delete_ioqs(hw);
1995
1996         for (i = 0; i < BC_TX_LIST_CNT; i++) {
1997                 if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
1998                         bc_kern_dma_free(hw->adp,
1999                                 hw->tx_pkt_pool[i].desc_mem.sz,
2000                                 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
2001                                 hw->tx_pkt_pool[i].desc_mem.phy_addr);
2002
2003                         hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2004                 }
2005         }
2006
2007         BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2008         do {
2009                 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2010                 if (!rpkt)
2011                         break;
2012                 bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2013                                  rpkt->desc_mem.pdma_desc_start,
2014                                  rpkt->desc_mem.phy_addr);
2015                 kfree(rpkt);
2016         } while (rpkt);
2017
2018         return BC_STS_SUCCESS;
2019 }
2020
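/*
 * Post a TX transfer: translate the request's SGL into DMA descriptors,
 * pick TX list 0/1 in round-robin order, queue the packet on tx_actq and
 * kick the TX DMA engine by writing the first descriptor address with
 * the valid bit set.
 */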
2021 BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
2022                              hw_comp_callback call_back,
2023                              wait_queue_head_t *cb_event, uint32_t *list_id,
2024                              uint8_t data_flags)
2025 {
2026         tx_dma_pkt *tx_dma_packet = NULL;
2027         uint32_t first_desc_u_addr, first_desc_l_addr;
2028         uint32_t low_addr, high_addr;
2029         addr_64 desc_addr;
2030         BC_STATUS sts, add_sts;
2031         uint32_t dummy_index = 0;
2032         unsigned long flags;
2033         bool rc;
2034
2035         if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
2036                 BCMLOG_ERR("Invalid Arguments\n");
2037                 return BC_STS_INV_ARG;
2038         }
2039
2040         /*
2041          * Since the code-in path is busy very frequently, check its
2042          * fill status first, before checking for a free queue
2043          * element.
2044          *
2045          * This avoids the Q fetch/add in the normal case.
2046          */
2047         rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
2048                                   false, data_flags);
2049         if (rc) {
2050                 hw->stats.cin_busy++;
2051                 return BC_STS_BUSY;
2052         }
2053
2054         /* Get a list from TxFreeQ */
2055         tx_dma_packet = (tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
2056         if (!tx_dma_packet) {
2057                 BCMLOG_ERR("No empty elements..\n");
2058                 return BC_STS_ERR_USAGE;
2059         }
2060
2061         sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
2062                                            &tx_dma_packet->desc_mem,
2063                                            &dummy_index);
2064         if (sts != BC_STS_SUCCESS) {
2065                 add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
2066                                            false, 0);
2067                 if (add_sts != BC_STS_SUCCESS)
2068                         BCMLOG_ERR("double fault..\n");
2069
2070                 return sts;
2071         }
2072
2073         hw->pwr_lock++;
2074
2075         desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
2076         low_addr = desc_addr.low_part;
2077         high_addr = desc_addr.high_part;
2078
2079         tx_dma_packet->call_back = call_back;
2080         tx_dma_packet->cb_event  = cb_event;
2081         tx_dma_packet->dio_req   = ioreq;
2082
2083         spin_lock_irqsave(&hw->lock, flags);
2084
2085         if (hw->tx_list_post_index == 0) {
2086                 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
2087                 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
2088         } else {
2089                 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
2090                 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
2091         }
2092
2093         *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
2094                                              hw->tx_list_post_index;
2095
2096         hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
2097
2098         spin_unlock_irqrestore(&hw->lock, flags);
2099
2101         /* Insert in Active Q..*/
2102         crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
2103                          tx_dma_packet->list_tag);
2104
2105         /*
2106          * The interrupt can fire as soon as the valid bit is
2107          * written, so all initialization must be complete before
2108          * that write.
2109          */
2110         crystalhd_start_tx_dma_engine(hw);
2111         crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
2112
2113         crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
2114                                         /* Be sure we set the valid bit ^^^^ */
2115
2116         return BC_STS_SUCCESS;
2117 }
2118
2119 /*
2120  * This is a force cancel, racing with the ISR.
2121  *
2122  * Try to remove the request from the active queue before the ISR gets
2123  * it. If the ISR gets it first, completion happens on the normal path
2124  * and we return _STS_NO_DATA from here.
2125  *
2126  * FIXME: the actual race condition has not been tested.
2127  */
2128 BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
2129 {
2130         if (!hw || !list_id) {
2131                 BCMLOG_ERR("Invalid Arguments\n");
2132                 return BC_STS_INV_ARG;
2133         }
2134
2135         crystalhd_stop_tx_dma_engine(hw);
2136         crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2137
2138         return BC_STS_SUCCESS;
2139 }
2140
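/*
 * Wrap a capture buffer in an RX packet: build its DMA descriptor list,
 * record where the UV descriptor chain starts, then either post it to
 * the hardware immediately (en_post) or park it on rx_freeq.
 */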
2141 BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2142                                     crystalhd_dio_req *ioreq, bool en_post)
2143 {
2144         crystalhd_rx_dma_pkt *rpkt;
2145         uint32_t tag, uv_desc_ix = 0;
2146         BC_STATUS sts;
2147
2148         if (!hw || !ioreq) {
2149                 BCMLOG_ERR("Invalid Arguments\n");
2150                 return BC_STS_INV_ARG;
2151         }
2152
2153         rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2154         if (!rpkt) {
2155                 BCMLOG_ERR("Insufficient resources\n");
2156                 return BC_STS_INSUFF_RES;
2157         }
2158
2159         rpkt->dio_req = ioreq;
2160         tag = rpkt->pkt_tag;
2161
2162         sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
2163         if (sts != BC_STS_SUCCESS)
2164                 return sts;
2165
2166         rpkt->uv_phy_addr = 0;
2167
2168         /* Store the address of UV in the rx packet for post*/
2169         if (uv_desc_ix)
2170                 rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2171                                     (sizeof(dma_descriptor) * (uv_desc_ix + 1));
2172
2173         if (en_post)
2174                 sts = crystalhd_hw_post_cap_buff(hw, rpkt);
2175         else
2176                 sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
2177
2178         return sts;
2179 }
2180
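/*
 * Wait (with timeout) for a completed capture packet on rx_rdyq, copy
 * out its picture information block when valid, hand the dio request
 * back to the caller and recycle the RX packet.
 */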
2181 BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2182                                     BC_PIC_INFO_BLOCK *pib,
2183                                     crystalhd_dio_req **ioreq)
2184 {
2185         crystalhd_rx_dma_pkt *rpkt;
2186         uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2187         uint32_t sig_pending = 0;
2188
2190         if (!hw || !ioreq || !pib) {
2191                 BCMLOG_ERR("Invalid Arguments\n");
2192                 return BC_STS_INV_ARG;
2193         }
2194
2195         rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2196         if (!rpkt) {
2197                 if (sig_pending) {
2198                         BCMLOG(BCMLOG_INFO, "wait on frame interrupted by signal %d\n", sig_pending);
2199                         return BC_STS_IO_USER_ABORT;
2200                 } else {
2201                         return BC_STS_TIMEOUT;
2202                 }
2203         }
2204
2205         rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2206
2207         if (rpkt->flags & COMP_FLAG_PIB_VALID)
2208                 memcpy(pib, &rpkt->pib, sizeof(*pib));
2209
2210         *ioreq = rpkt->dio_req;
2211
2212         crystalhd_hw_free_rx_pkt(hw, rpkt);
2213
2214         return BC_STS_SUCCESS;
2215 }
2216
2217 BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2218 {
2219         crystalhd_rx_dma_pkt *rx_pkt;
2220         BC_STATUS sts;
2221         uint32_t i;
2222
2223         if (!hw) {
2224                 BCMLOG_ERR("Invalid Arguments\n");
2225                 return BC_STS_INV_ARG;
2226         }
2227
2228         /* This is start of capture.. Post to both the lists.. */
2229         for (i = 0; i < DMA_ENGINE_CNT; i++) {
2230                 rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2231                 if (!rx_pkt)
2232                         return BC_STS_NO_DATA;
2233                 sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2234                 if (BC_STS_SUCCESS != sts)
2235                 if (sts != BC_STS_SUCCESS)
2236                         break;
2238
2239         return BC_STS_SUCCESS;
2240 }
2241
2242 BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2243 {
2244         void *temp = NULL;
2245
2246         if (!hw) {
2247                 BCMLOG_ERR("Invalid Arguments\n");
2248                 return BC_STS_INV_ARG;
2249         }
2250
2251         crystalhd_stop_rx_dma_engine(hw);
2252
2253         do {
2254                 temp = crystalhd_dioq_fetch(hw->rx_freeq);
2255                 if (temp)
2256                         crystalhd_rx_pkt_rel_call_back(hw, temp);
2257         } while (temp);
2258
2259         return BC_STS_SUCCESS;
2260 }
2261
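/*
 * Request a capture pause. The pause is finalized immediately if both RX
 * lists are already free, otherwise from the RX ISR once they drain.
 */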
2262 BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2263 {
2264         hw->stats.pause_cnt++;
2265         hw->stop_pending = 1;
2266
2267         if ((hw->rx_list_sts[0] == sts_free) &&
2268             (hw->rx_list_sts[1] == sts_free))
2269                 crystalhd_hw_finalize_pause(hw);
2270
2271         return BC_STS_SUCCESS;
2272 }
2273
2274 BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2275 {
2276         BC_STATUS sts;
2277         uint32_t aspm;
2278
2279         hw->stop_pending = 0;
2280
2281         aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2282         aspm &= ~ASPM_L1_ENABLE;
2283 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2284         crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2285
2286         sts = crystalhd_hw_start_capture(hw);
2287         return sts;
2288 }
2289
2290 BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2291 {
2292         BC_STATUS sts;
2293
2294         if (!hw) {
2295                 BCMLOG_ERR("Invalid Arguments\n");
2296                 return BC_STS_INV_ARG;
2297         }
2298
2299         sts = crystalhd_put_ddr2sleep(hw);
2300         if (sts != BC_STS_SUCCESS) {
2301                 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2302                 return BC_STS_ERROR;
2303         }
2304
2305         if (!crystalhd_stop_device(hw->adp)) {
2306                 BCMLOG_ERR("Failed to Stop Device!!\n");
2307                 return BC_STS_ERROR;
2308         }
2309
2310         return BC_STS_SUCCESS;
2311 }
2312
2313 void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
2314 {
2315         if (!hw) {
2316                 BCMLOG_ERR("Invalid Arguments\n");
2317                 return;
2318         }
2319
2320         /* A NULL stats pointer is a request to zero out the stats */
2321         if (!stats) {
2322                 memset(&hw->stats, 0, sizeof(hw->stats));
2323                 return;
2324         }
2325
2326         hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2327         hw->stats.rdyq_count  = crystalhd_dioq_count(hw->rx_rdyq);
2328         memcpy(stats, &hw->stats, sizeof(*stats));
2329 }
2330
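/*
 * Reprogram the core clock PLL (DecHt_PllACtl): derive the divider from
 * core_clock_mhz, pick the VCO range, update the DRAM refresh interval
 * for the new rate, then poll DecHt_PllACtl until the change takes effect.
 */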
2331 BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2332 {
2333         uint32_t reg, n, i;
2334         uint32_t vco_mg, refresh_reg;
2335
2336         if (!hw) {
2337                 BCMLOG_ERR("Invalid Arguments\n");
2338                 return BC_STS_INV_ARG;
2339         }
2340
2341         /* FIXME: jarod: wha? */
2342         /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2343         n = hw->core_clock_mhz / 5;
2344
2345         if (n == hw->prev_n)
2346                 return BC_STS_CLK_NOCHG;
2347
2348         if (hw->pwr_lock > 0) {
2349                 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2350                 return BC_STS_CLK_NOCHG;
2351         }
2352
2353         i = n * 27;
2354         if (i < 560)
2355                 vco_mg = 0;
2356         else if (i < 900)
2357                 vco_mg = 1;
2358         else if (i < 1030)
2359                 vco_mg = 2;
2360         else
2361                 vco_mg = 3;
2362
2363         reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2364
2365         reg &= 0xFFFFCFC0;
2366         reg |= n;
2367         reg |= vco_mg << 12;
2368
2369         BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2370                hw->core_clock_mhz, n, vco_mg);
2371
2372         /* Change the DRAM refresh rate to accommodate the new frequency */
2373         /* refresh_reg = ((refresh_rate * clock_rate) / 16) - 1; rounded up */
2374         refresh_reg = (7 * hw->core_clock_mhz / 16);
2375         bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2376
2377         bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
2378
2380
2381         for (i = 0; i < 10; i++) {
2382                 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2383
2384                 if (reg & 0x00020000) {
2385                         hw->prev_n = n;
2386                         BCMLOG(BCMLOG_INFO, "core clock set to %d MHz\n",
2387                                hw->core_clock_mhz);
2388                         return BC_STS_SUCCESS;
2389                 } else {
2390                         msleep_interruptible(10);
2391                 }
2392         }
2393         BCMLOG(BCMLOG_INFO, "clk change failed\n");
2394         return BC_STS_CLK_NOCHG;
2395 }