dmaengine: move last completed cookie into generic dma_chan structure
/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *      Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#define NR_DEFAULT_DESC 16

enum desc_status {
        /* In the DMAC pool */
        FREE,
        /*
         * Allocated to some channel during prep_xxx
         * Also may be sitting on the work_list.
         */
        PREP,
        /*
         * Sitting on the work_list and already submitted
         * to the PL330 core. Not more than two descriptors
         * of a channel can be BUSY at any time.
         */
        BUSY,
        /*
         * Sitting on the channel work_list but xfer done
         * by PL330 core
         */
        DONE,
};
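
/*
 * Descriptor lifecycle: FREE (in the DMAC pool) -> PREP (plucked and
 * prepared by prep_xxx) -> BUSY (submitted to the PL330 core) ->
 * DONE (transfer finished) -> back to FREE when recycled by the
 * channel tasklet. Cyclic descriptors go back to PREP instead of FREE.
 */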

struct dma_pl330_chan {
        /* Schedule desc completion */
        struct tasklet_struct task;

        /* DMA-Engine Channel */
        struct dma_chan chan;

        /* List of descriptors to be transferred */
        struct list_head work_list;

        /* Pointer to the DMAC that manages this channel,
         * NULL if the channel is available to be acquired.
         * As the parent, this DMAC also provides descriptors
         * to the channel.
         */
        struct dma_pl330_dmac *dmac;

        /* To protect channel manipulation */
        spinlock_t lock;

        /* Token of a hardware channel thread of PL330 DMAC
         * NULL if the channel is available to be acquired.
         */
        void *pl330_chid;

        /* For D-to-M and M-to-D channels */
        int burst_sz; /* the peripheral fifo width */
        int burst_len; /* number of bursts */
        dma_addr_t fifo_addr;

        /* for cyclic capability */
        bool cyclic;
};

struct dma_pl330_dmac {
        struct pl330_info pif;

        /* DMA-Engine Device */
        struct dma_device ddma;

        /* Pool of descriptors available for the DMAC's channels */
        struct list_head desc_pool;
        /* To protect desc_pool manipulation */
        spinlock_t pool_lock;

        /* Peripheral channels connected to this DMAC */
        struct dma_pl330_chan *peripherals; /* keep at end */

        struct clk *clk;
};

struct dma_pl330_desc {
        /* To attach to a queue as child */
        struct list_head node;

        /* Descriptor for the DMA Engine API */
        struct dma_async_tx_descriptor txd;

        /* Xfer for PL330 core */
        struct pl330_xfer px;

        struct pl330_reqcfg rqcfg;
        struct pl330_req req;

        enum desc_status status;

        /* The channel which currently holds this desc */
        struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
        if (!ch)
                return NULL;

        return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct dma_pl330_desc, txd);
}

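/*
 * Run the completion callback of each descriptor on @list and return
 * the descriptors to the parent DMAC's pool. All descriptors on the
 * list belong to the same channel.
 */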
static inline void free_desc_list(struct list_head *list)
{
        struct dma_pl330_dmac *pdmac;
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch;
        unsigned long flags;

        if (list_empty(list))
                return;

        /* Finish off the work list */
        list_for_each_entry(desc, list, node) {
                dma_async_tx_callback callback;
                void *param;

                /* All descriptors in the list belong to the same channel */
                pch = desc->pchan;
                callback = desc->txd.callback;
                param = desc->txd.callback_param;

                if (callback)
                        callback(param);

                desc->pchan = NULL;
        }

        pdmac = pch->dmac;

        spin_lock_irqsave(&pdmac->pool_lock, flags);
        list_splice_tail_init(list, &pdmac->desc_pool);
        spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

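/*
 * Cyclic variant of the above: instead of recycling the descriptors,
 * run their callbacks, mark them PREP again and splice them back onto
 * the channel's work_list so fill_queue() resubmits them.
 */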
static inline void handle_cyclic_desc_list(struct list_head *list)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch;
        unsigned long flags;

        if (list_empty(list))
                return;

        list_for_each_entry(desc, list, node) {
                dma_async_tx_callback callback;

                /* Change status to reload it */
                desc->status = PREP;
                pch = desc->pchan;
                callback = desc->txd.callback;
                if (callback)
                        callback(desc->txd.callback_param);
        }

        spin_lock_irqsave(&pch->lock, flags);
        list_splice_tail_init(list, &pch->work_list);
        spin_unlock_irqrestore(&pch->lock, flags);
}

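/*
 * Submit PREP descriptors from the head of the work_list to the PL330
 * core until its request queue fills up (no more than two descriptors
 * of a channel can be BUSY at a time) or a request is rejected outright.
 */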
static inline void fill_queue(struct dma_pl330_chan *pch)
{
        struct dma_pl330_desc *desc;
        int ret;

        list_for_each_entry(desc, &pch->work_list, node) {

                /* If already submitted */
                if (desc->status == BUSY)
                        break;

                ret = pl330_submit_req(pch->pl330_chid,
                                                &desc->req);
                if (!ret) {
                        desc->status = BUSY;
                        break;
                } else if (ret == -EAGAIN) {
                        /* QFull or DMAC Dying */
                        break;
                } else {
                        /* Unacceptable request */
                        desc->status = DONE;
                        dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
                                        __func__, __LINE__, desc->txd.cookie);
                        tasklet_schedule(&pch->task);
                }
        }
}

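/*
 * Per-channel tasklet: collect DONE descriptors (advancing the
 * channel's completed cookie), top up the PL330 request queue, make
 * sure the channel thread is running, then either recycle the
 * collected descriptors or, for cyclic transfers, requeue them.
 */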
static void pl330_tasklet(unsigned long data)
{
        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&pch->lock, flags);

        /* Pick up ripe tomatoes (descriptors marked DONE by the DMAC) */
        list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                if (desc->status == DONE) {
                        pch->chan.completed_cookie = desc->txd.cookie;
                        list_move_tail(&desc->node, &list);
                }

        /* Try to submit a request immediately after the last completed cookie */
        fill_queue(pch);

        /* Make sure the PL330 Channel thread is active */
        pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

        spin_unlock_irqrestore(&pch->lock, flags);

        if (pch->cyclic)
                handle_cyclic_desc_list(&list);
        else
                free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
        struct dma_pl330_desc *desc = token;
        struct dma_pl330_chan *pch = desc->pchan;
        unsigned long flags;

        /* If desc aborted */
        if (!pch)
                return;

        spin_lock_irqsave(&pch->lock, flags);

        desc->status = DONE;

        spin_unlock_irqrestore(&pch->lock, flags);

        tasklet_schedule(&pch->task);
}

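/*
 * Filter function for dma_request_channel(): matches a channel of this
 * driver against the requested peripheral id (or, with CONFIG_OF,
 * against a phandle + channel-id property). A minimal usage sketch,
 * assuming a hypothetical platform-defined id DMACH_UART0_TX:
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, pl330_filter,
 *                                 (void *)DMACH_UART0_TX);
 */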
bool pl330_filter(struct dma_chan *chan, void *param)
{
        u8 *peri_id;

        if (chan->device->dev->driver != &pl330_driver.drv)
                return false;

#ifdef CONFIG_OF
        if (chan->device->dev->of_node) {
                const __be32 *prop_value;
                phandle phandle;
                struct device_node *node;

                prop_value = ((struct property *)param)->value;
                phandle = be32_to_cpup(prop_value++);
                node = of_find_node_by_phandle(phandle);
                return ((chan->private == node) &&
                                (chan->chan_id == be32_to_cpup(prop_value)));
        }
#endif

        peri_id = chan->private;
        return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_dmac *pdmac = pch->dmac;
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        chan->completed_cookie = chan->cookie = 1;
        pch->cyclic = false;

        pch->pl330_chid = pl330_request_channel(&pdmac->pif);
        if (!pch->pl330_chid) {
                spin_unlock_irqrestore(&pch->lock, flags);
                return 0;
        }

        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

        spin_unlock_irqrestore(&pch->lock, flags);

        return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
        struct dma_pl330_dmac *pdmac = pch->dmac;
        struct dma_slave_config *slave_config;
        LIST_HEAD(list);

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                spin_lock_irqsave(&pch->lock, flags);

                /* FLUSH the PL330 Channel thread */
                pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

                /* Mark all desc done */
                list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
                        desc->status = DONE;
                        pch->chan.completed_cookie = desc->txd.cookie;
                        list_move_tail(&desc->node, &list);
                }

                list_splice_tail_init(&list, &pdmac->desc_pool);
                spin_unlock_irqrestore(&pch->lock, flags);
                break;
        case DMA_SLAVE_CONFIG:
                slave_config = (struct dma_slave_config *)arg;

                if (slave_config->direction == DMA_MEM_TO_DEV) {
                        if (slave_config->dst_addr)
                                pch->fifo_addr = slave_config->dst_addr;
                        if (slave_config->dst_addr_width)
                                pch->burst_sz = __ffs(slave_config->dst_addr_width);
                        if (slave_config->dst_maxburst)
                                pch->burst_len = slave_config->dst_maxburst;
                } else if (slave_config->direction == DMA_DEV_TO_MEM) {
                        if (slave_config->src_addr)
                                pch->fifo_addr = slave_config->src_addr;
                        if (slave_config->src_addr_width)
                                pch->burst_sz = __ffs(slave_config->src_addr_width);
                        if (slave_config->src_maxburst)
                                pch->burst_len = slave_config->src_maxburst;
                }
                break;
        default:
                dev_err(pch->dmac->pif.dev, "Unsupported command\n");
                return -ENXIO;
        }

        return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        unsigned long flags;

        /*
         * tasklet_kill() waits for a running tasklet to finish, so it
         * must not be called with the channel lock held and irqs off.
         */
        tasklet_kill(&pch->task);

        spin_lock_irqsave(&pch->lock, flags);

        pl330_release_channel(pch->pl330_chid);
        pch->pl330_chid = NULL;

        if (pch->cyclic)
                list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

        spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                 struct dma_tx_state *txstate)
{
        dma_cookie_t last_done, last_used;
        int ret;

        last_done = chan->completed_cookie;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_done, last_used);

        dma_set_tx_state(txstate, last_done, last_used, 0);

        return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
        pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last descriptor of the circular list from prep_xxx,
 * so the argument to submit corresponds to the last descriptor of
 * the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_pl330_desc *desc, *last = to_desc(tx);
        struct dma_pl330_chan *pch = to_pchan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        /* Assign cookies to all nodes */
        cookie = tx->chan->cookie;

        while (!list_empty(&last->node)) {
                desc = list_entry(last->node.next, struct dma_pl330_desc, node);

                if (++cookie < 0)
                        cookie = 1;
                desc->txd.cookie = cookie;

                list_move_tail(&desc->node, &pch->work_list);
        }

        if (++cookie < 0)
                cookie = 1;
        last->txd.cookie = cookie;

        list_add_tail(&last->node, &pch->work_list);

        tx->chan->cookie = cookie;

        spin_unlock_irqrestore(&pch->lock, flags);

        return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
        desc->pchan = NULL;
        desc->req.x = &desc->px;
        desc->req.token = desc;
        desc->rqcfg.swap = SWAP_NO;
        desc->rqcfg.privileged = 0;
        desc->rqcfg.insnaccess = 0;
        desc->rqcfg.scctl = SCCTRL0;
        desc->rqcfg.dcctl = DCCTRL0;
        desc->req.cfg = &desc->rqcfg;
        desc->req.xfer_cb = dma_pl330_rqcb;
        desc->txd.tx_submit = pl330_tx_submit;

        INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
        struct dma_pl330_desc *desc;
        unsigned long flags;
        int i;

        if (!pdmac)
                return 0;

        desc = kmalloc(count * sizeof(*desc), flg);
        if (!desc)
                return 0;

        spin_lock_irqsave(&pdmac->pool_lock, flags);

        for (i = 0; i < count; i++) {
                _init_desc(&desc[i]);
                list_add_tail(&desc[i].node, &pdmac->desc_pool);
        }

        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

        return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
        struct dma_pl330_desc *desc = NULL;
        unsigned long flags;

        if (!pdmac)
                return NULL;

        spin_lock_irqsave(&pdmac->pool_lock, flags);

        if (!list_empty(&pdmac->desc_pool)) {
                desc = list_entry(pdmac->desc_pool.next,
                                struct dma_pl330_desc, node);

                list_del_init(&desc->node);

                desc->status = PREP;
                desc->txd.callback = NULL;
        }

        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

        return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
        struct dma_pl330_dmac *pdmac = pch->dmac;
        u8 *peri_id = pch->chan.private;
        struct dma_pl330_desc *desc;

        /* Pluck one desc from the pool of DMAC */
        desc = pluck_desc(pdmac);

        /* If the DMAC pool is empty, alloc new */
        if (!desc) {
                if (!add_desc(pdmac, GFP_ATOMIC, 1))
                        return NULL;

                /* Try again */
                desc = pluck_desc(pdmac);
                if (!desc) {
                        dev_err(pch->dmac->pif.dev,
                                "%s:%d ALERT!\n", __func__, __LINE__);
                        return NULL;
                }
        }

        /* Initialize the descriptor */
        desc->pchan = pch;
        desc->txd.cookie = 0;
        async_tx_ack(&desc->txd);

        desc->req.peri = peri_id ? pch->chan.chan_id : 0;

        dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

        return desc;
}

static inline void fill_px(struct pl330_xfer *px,
                dma_addr_t dst, dma_addr_t src, size_t len)
{
        px->next = NULL;
        px->bytes = len;
        px->dst_addr = dst;
        px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
                dma_addr_t src, size_t len)
{
        struct dma_pl330_desc *desc = pl330_get_desc(pch);

        if (!desc) {
                dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
                        __func__, __LINE__);
                return NULL;
        }

        /*
         * Ideally we should look out for requests bigger than what
         * can be programmed with 256 bytes of MC buffer, but since
         * a request is seldom going to be word-unaligned and larger
         * than 200MB, we take it easy.
         * Also, should the limit be reached, we'd rather have the
         * platform increase the MC buffer size than complicate this
         * driver.
         */
        fill_px(&desc->px, dst, src, len);

        return desc;
}

/* Call after fixing burst size */
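/*
 * Worked example (illustrative numbers, not from any particular SoC):
 * with a 64-bit data bus (8 bytes) and a 16-entry data buffer,
 * burst_len starts at 8 * 16 = 128 beats; for brst_size = 2 (4-byte
 * beats) that is 128 >> 2 = 32, capped at the hardware maximum of 16,
 * then reduced until len is a multiple of burst_len << brst_size.
 */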
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
        struct dma_pl330_chan *pch = desc->pchan;
        struct pl330_info *pi = &pch->dmac->pif;
        int burst_len;

        burst_len = pi->pcfg.data_bus_width / 8;
        burst_len *= pi->pcfg.data_buf_dep;
        burst_len >>= desc->rqcfg.brst_size;

        /* src/dst_burst_len can't be more than 16 */
        if (burst_len > 16)
                burst_len = 16;

        while (burst_len > 1) {
                if (!(len % (burst_len << desc->rqcfg.brst_size)))
                        break;
                burst_len--;
        }

        return burst_len;
}

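/*
 * Prepare a cyclic transfer between memory and the peripheral FIFO
 * configured via DMA_SLAVE_CONFIG. A single descriptor covering one
 * period is built; on completion it is requeued by
 * handle_cyclic_desc_list(), so the transfer repeats until the
 * channel is terminated.
 */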
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
                size_t period_len, enum dma_transfer_direction direction)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
        dma_addr_t dst;
        dma_addr_t src;

        desc = pl330_get_desc(pch);
        if (!desc) {
                dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
                        __func__, __LINE__);
                return NULL;
        }

        switch (direction) {
        case DMA_MEM_TO_DEV:
                desc->rqcfg.src_inc = 1;
                desc->rqcfg.dst_inc = 0;
                desc->req.rqtype = MEMTODEV;
                src = dma_addr;
                dst = pch->fifo_addr;
                break;
        case DMA_DEV_TO_MEM:
                desc->rqcfg.src_inc = 0;
                desc->rqcfg.dst_inc = 1;
                desc->req.rqtype = DEVTOMEM;
                src = pch->fifo_addr;
                dst = dma_addr;
                break;
        default:
                dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
                        __func__, __LINE__);
                return NULL;
        }

        desc->rqcfg.brst_size = pch->burst_sz;
        desc->rqcfg.brst_len = 1;

        pch->cyclic = true;

        fill_px(&desc->px, dst, src, period_len);

        return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                dma_addr_t src, size_t len, unsigned long flags)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct pl330_info *pi;
        int burst;

        if (unlikely(!pch || !len))
                return NULL;

        pi = &pch->dmac->pif;

        desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
        if (!desc)
                return NULL;

        desc->rqcfg.src_inc = 1;
        desc->rqcfg.dst_inc = 1;
        desc->req.rqtype = MEMTOMEM;

        /* Select max possible burst size */
        burst = pi->pcfg.data_bus_width / 8;

        while (burst > 1) {
                if (!(len % burst))
                        break;
                burst /= 2;
        }

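        /* Derive brst_size = log2(burst), the power-of-two beat size in bytes */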
        desc->rqcfg.brst_size = 0;
        while (burst != (1 << desc->rqcfg.brst_size))
                desc->rqcfg.brst_size++;

        desc->rqcfg.brst_len = get_burst_len(desc, len);

        desc->txd.flags = flags;

        return &desc->txd;
}

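/*
 * Build one descriptor per scatterlist entry, chained off the first.
 * The peripheral FIFO address and burst size must already have been
 * set through a DMA_SLAVE_CONFIG call to pl330_control(). On a
 * mid-list allocation failure, every descriptor acquired so far is
 * returned to the pool.
 */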
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flg)
{
        struct dma_pl330_desc *first, *desc = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct scatterlist *sg;
        unsigned long flags;
        int i;
        dma_addr_t addr;

        if (unlikely(!pch || !sgl || !sg_len))
                return NULL;

        addr = pch->fifo_addr;

        first = NULL;

        for_each_sg(sgl, sg, sg_len, i) {

                desc = pl330_get_desc(pch);
                if (!desc) {
                        struct dma_pl330_dmac *pdmac = pch->dmac;

                        dev_err(pch->dmac->pif.dev,
                                "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
                        if (!first)
                                return NULL;

                        spin_lock_irqsave(&pdmac->pool_lock, flags);

                        while (!list_empty(&first->node)) {
                                desc = list_entry(first->node.next,
                                                struct dma_pl330_desc, node);
                                list_move_tail(&desc->node, &pdmac->desc_pool);
                        }

                        list_move_tail(&first->node, &pdmac->desc_pool);

                        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

                        return NULL;
                }

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->node);

                if (direction == DMA_MEM_TO_DEV) {
                        desc->rqcfg.src_inc = 1;
                        desc->rqcfg.dst_inc = 0;
                        desc->req.rqtype = MEMTODEV;
                        fill_px(&desc->px,
                                addr, sg_dma_address(sg), sg_dma_len(sg));
                } else {
                        desc->rqcfg.src_inc = 0;
                        desc->rqcfg.dst_inc = 1;
                        desc->req.rqtype = DEVTOMEM;
                        fill_px(&desc->px,
                                sg_dma_address(sg), addr, sg_dma_len(sg));
                }

                desc->rqcfg.brst_size = pch->burst_sz;
                desc->rqcfg.brst_len = 1;
        }

        /* Return the last desc in the chain */
        desc->txd.flags = flg;
        return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
        if (pl330_update(data))
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
        struct dma_pl330_platdata *pdat;
        struct dma_pl330_dmac *pdmac;
        struct dma_pl330_chan *pch;
        struct pl330_info *pi;
        struct dma_device *pd;
        struct resource *res;
        int i, ret, irq;
        int num_chan;

        pdat = adev->dev.platform_data;

        /* Allocate a new DMAC and its Channels */
        pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
        if (!pdmac) {
                dev_err(&adev->dev, "unable to allocate mem\n");
                return -ENOMEM;
        }

        pi = &pdmac->pif;
        pi->dev = &adev->dev;
        pi->pl330_data = NULL;
        pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

        res = &adev->res;
        if (!request_mem_region(res->start, resource_size(res),
                                "dma-pl330")) {
                kfree(pdmac);
                return -ENXIO;
        }

        pi->base = ioremap(res->start, resource_size(res));
        if (!pi->base) {
                ret = -ENXIO;
                goto probe_err1;
        }

        pdmac->clk = clk_get(&adev->dev, "dma");
        if (IS_ERR(pdmac->clk)) {
                dev_err(&adev->dev, "Cannot get operation clock.\n");
                ret = -EINVAL;
                goto probe_err2;
        }

        amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
        /* enable dma clk */
        clk_enable(pdmac->clk);
#endif

        irq = adev->irq[0];
        ret = request_irq(irq, pl330_irq_handler, 0,
                        dev_name(&adev->dev), pi);
        if (ret)
                goto probe_err3;

        ret = pl330_add(pi);
        if (ret)
                goto probe_err4;

        INIT_LIST_HEAD(&pdmac->desc_pool);
        spin_lock_init(&pdmac->pool_lock);

        /* Create a descriptor pool of default size */
        if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
                dev_warn(&adev->dev, "unable to allocate desc\n");

        pd = &pdmac->ddma;
        INIT_LIST_HEAD(&pd->channels);

        /* Initialize channel parameters */
        num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
                        (u8)pi->pcfg.num_chan);
        pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
        if (!pdmac->peripherals) {
                ret = -ENOMEM;
                dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
                goto probe_err5;
        }

        for (i = 0; i < num_chan; i++) {
                pch = &pdmac->peripherals[i];
                if (!adev->dev.of_node)
                        pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
                else
                        pch->chan.private = adev->dev.of_node;

                INIT_LIST_HEAD(&pch->work_list);
                spin_lock_init(&pch->lock);
                pch->pl330_chid = NULL;
                pch->chan.device = pd;
                pch->dmac = pdmac;

                /* Add the channel to the DMAC list */
                list_add_tail(&pch->chan.device_node, &pd->channels);
        }

        pd->dev = &adev->dev;
        if (pdat) {
                pd->cap_mask = pdat->cap_mask;
        } else {
                dma_cap_set(DMA_MEMCPY, pd->cap_mask);
                if (pi->pcfg.num_peri) {
                        dma_cap_set(DMA_SLAVE, pd->cap_mask);
                        dma_cap_set(DMA_CYCLIC, pd->cap_mask);
                }
        }

        pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
        pd->device_free_chan_resources = pl330_free_chan_resources;
        pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
        pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
        pd->device_tx_status = pl330_tx_status;
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
        pd->device_control = pl330_control;
        pd->device_issue_pending = pl330_issue_pending;

        ret = dma_async_device_register(pd);
        if (ret) {
                dev_err(&adev->dev, "unable to register DMAC\n");
                goto probe_err5;
        }

        dev_info(&adev->dev,
                "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
        dev_info(&adev->dev,
                "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
                pi->pcfg.data_buf_dep,
                pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
                pi->pcfg.num_peri, pi->pcfg.num_events);

        return 0;

probe_err5:
        pl330_del(pi);
probe_err4:
        free_irq(irq, pi);
probe_err3:
#ifndef CONFIG_PM_RUNTIME
        clk_disable(pdmac->clk);
#endif
        clk_put(pdmac->clk);
probe_err2:
        iounmap(pi->base);
probe_err1:
        release_mem_region(res->start, resource_size(res));
        kfree(pdmac);

        return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
        struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
        struct dma_pl330_chan *pch, *_p;
        struct pl330_info *pi;
        struct resource *res;
        int irq;

        if (!pdmac)
                return 0;

        amba_set_drvdata(adev, NULL);

        /* Idle the DMAC */
        list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
                        chan.device_node) {

                /* Remove the channel */
                list_del(&pch->chan.device_node);

                /* Flush the channel */
                pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
                pl330_free_chan_resources(&pch->chan);
        }

        pi = &pdmac->pif;

        pl330_del(pi);

        irq = adev->irq[0];
        free_irq(irq, pi);

        iounmap(pi->base);

        res = &adev->res;
        release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
        clk_disable(pdmac->clk);
#endif
        clk_put(pdmac->clk);

        kfree(pdmac);

        return 0;
}

static struct amba_id pl330_ids[] = {
        {
                .id     = 0x00041330,
                .mask   = 0x000fffff,
        },
        { 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
        struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

        if (!pdmac) {
                dev_err(dev, "failed to get dmac\n");
                return -ENODEV;
        }

        clk_disable(pdmac->clk);

        return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
        struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

        if (!pdmac) {
                dev_err(dev, "failed to get dmac\n");
                return -ENODEV;
        }

        clk_enable(pdmac->clk);

        return 0;
}
#else
#define pl330_runtime_suspend   NULL
#define pl330_runtime_resume    NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
        .runtime_suspend = pl330_runtime_suspend,
        .runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
        .drv = {
                .owner = THIS_MODULE,
                .name = "dma-pl330",
                .pm = &pl330_pm_ops,
        },
        .id_table = pl330_ids,
        .probe = pl330_probe,
        .remove = pl330_remove,
};

static int __init pl330_init(void)
{
        return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
        amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");