1 /* linux/drivers/dma/pl330.c
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/dmaengine.h>
17 #include <linux/interrupt.h>
18 #include <linux/amba/bus.h>
19 #include <linux/amba/pl330.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/scatterlist.h>
23 #define NR_DEFAULT_DESC 16
26 /* In the DMAC pool */
29 * Allocted to some channel during prep_xxx
30 * Also may be sitting on the work_list.
34 * Sitting on the work_list and already submitted
35 * to the PL330 core. Not more than two descriptors
36 * of a channel can be BUSY at any time.
40 * Sitting on the channel work_list but xfer done
/*
 * Per-channel driver state for one DMA-Engine channel.
 * NOTE(review): this excerpt elides several members referenced elsewhere
 * in the file (the embedded struct dma_chan, the channel spinlock named
 * in the "To protect channel manipulation" comment, the pl330_chid
 * hardware-thread token and fifo_addr) -- confirm against the complete
 * source before relying on the layout.
 */
46 struct dma_pl330_chan {
47 /* Schedule desc completion */
48 struct tasklet_struct task;
50 /* DMA-Engine Channel */
53 /* Last completed cookie */
54 dma_cookie_t completed;
56 /* List of to be xfered descriptors */
57 struct list_head work_list;
59 /* Pointer to the DMAC that manages this channel,
60 * NULL if the channel is available to be acquired.
61 * As the parent, this DMAC also provides descriptors
64 struct dma_pl330_dmac *dmac;
66 /* To protect channel manipulation */
69 /* Token of a hardware channel thread of PL330 DMAC
70 * NULL if the channel is available to be acquired.
74 /* For D-to-M and M-to-D channels */
75 int burst_sz; /* the peripheral fifo width */
76 int burst_len; /* the number of burst */
/*
 * Driver state for one PL330 DMAC instance: the PL330-core handle (pif),
 * the dmaengine device registered for it (ddma), a pool of free transfer
 * descriptors shared by all its channels, and the per-peripheral channel
 * array.  NOTE(review): the pool_lock spinlock named in the comment below
 * and the clk handle used in probe/remove are elided from this excerpt.
 */
80 struct dma_pl330_dmac {
81 struct pl330_info pif;
83 /* DMA-Engine Device */
84 struct dma_device ddma;
86 /* Pool of descriptors available for the DMAC's channels */
87 struct list_head desc_pool;
88 /* To protect desc_pool manipulation */
91 /* Peripheral channels connected to this DMAC */
92 struct dma_pl330_chan *peripherals; /* keep at end */
/*
 * One transfer descriptor: the list node used to queue it on either the
 * DMAC's free pool or a channel's work_list, the dmaengine-facing
 * descriptor (txd), and the request handed to the PL330 core
 * (px / rqcfg / req).  status tracks FREE/PREP/BUSY/DONE progress
 * (enum declaration elided from this excerpt).
 */
97 struct dma_pl330_desc {
98 /* To attach to a queue as child */
99 struct list_head node;
101 /* Descriptor for the DMA Engine API */
102 struct dma_async_tx_descriptor txd;
104 /* Xfer for PL330 core */
105 struct pl330_xfer px;
107 struct pl330_reqcfg rqcfg;
108 struct pl330_req req;
110 enum desc_status status;
112 /* The channel which currently holds this desc */
113 struct dma_pl330_chan *pchan;
/* Map a generic dmaengine channel to its enclosing dma_pl330_chan. */
116 static inline struct dma_pl330_chan *
117 to_pchan(struct dma_chan *ch)
122 return container_of(ch, struct dma_pl330_chan, chan);
/* Map a dmaengine tx descriptor to its enclosing dma_pl330_desc. */
125 static inline struct dma_pl330_desc *
126 to_desc(struct dma_async_tx_descriptor *tx)
128 return container_of(tx, struct dma_pl330_desc, txd);
/*
 * Run the completion callback of every descriptor on @list, then splice
 * the whole list back onto the owning DMAC's free pool under pool_lock.
 * All descriptors on the list belong to the same channel, so pdmac/pch
 * are taken from the first entry (those assignments are elided here).
 * Called from pl330_tasklet() after the channel lock has been dropped,
 * so client callbacks never run with the channel lock held.
 */
131 static inline void free_desc_list(struct list_head *list)
133 struct dma_pl330_dmac *pdmac;
134 struct dma_pl330_desc *desc;
135 struct dma_pl330_chan *pch;
138 if (list_empty(list))
141 /* Finish off the work list */
142 list_for_each_entry(desc, list, node) {
143 dma_async_tx_callback callback;
146 /* All desc in a list belong to same channel */
148 callback = desc->txd.callback;
149 param = desc->txd.callback_param;
159 spin_lock_irqsave(&pdmac->pool_lock, flags);
160 list_splice_tail_init(list, &pdmac->desc_pool);
161 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
/*
 * Push every not-yet-submitted descriptor on the channel's work_list to
 * the PL330 core.  Descriptors already marked BUSY are skipped; -EAGAIN
 * (queue full or DMAC dying) leaves the descriptor to be retried later;
 * any other failure is a malformed request, which is logged and handed
 * to the tasklet for completion/recycling.  Caller is expected to hold
 * the channel lock (acquired by the callers visible in this file).
 */
164 static inline void fill_queue(struct dma_pl330_chan *pch)
166 struct dma_pl330_desc *desc;
169 list_for_each_entry(desc, &pch->work_list, node) {
171 /* If already submitted */
172 if (desc->status == BUSY)
175 ret = pl330_submit_req(pch->pl330_chid,
180 } else if (ret == -EAGAIN) {
181 /* QFull or DMAC Dying */
184 /* Unacceptable request */
186 dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
187 __func__, __LINE__, desc->txd.cookie);
188 tasklet_schedule(&pch->task);
/*
 * Channel completion tasklet.  Under the channel lock: harvest DONE
 * descriptors from the work_list onto a local list (advancing the
 * channel's last-completed cookie), resubmit pending work via
 * fill_queue() (call elided in this excerpt), and make sure the PL330
 * channel thread is running.  Callbacks and pool recycling happen in
 * free_desc_list() after the lock is dropped.
 */
193 static void pl330_tasklet(unsigned long data)
195 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
196 struct dma_pl330_desc *desc, *_dt;
200 spin_lock_irqsave(&pch->lock, flags);
202 /* Pick up ripe tomatoes */
203 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
204 if (desc->status == DONE) {
205 pch->completed = desc->txd.cookie;
206 list_move_tail(&desc->node, &list);
209 /* Try to submit a req imm. next to the last completed cookie */
212 /* Make sure the PL330 Channel thread is active */
213 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
215 spin_unlock_irqrestore(&pch->lock, flags);
217 free_desc_list(&list);
/*
 * xfer_cb invoked by the PL330 core when a request finishes (or is
 * aborted -- the abort early-return is elided here).  Updates the
 * descriptor under the channel lock (the status assignment, presumably
 * status = DONE, is elided -- confirm) and defers the rest of the
 * completion work to the channel tasklet.
 */
220 static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
222 struct dma_pl330_desc *desc = token;
223 struct dma_pl330_chan *pch = desc->pchan;
226 /* If desc aborted */
230 spin_lock_irqsave(&pch->lock, flags);
234 spin_unlock_irqrestore(&pch->lock, flags);
236 tasklet_schedule(&pch->task);
/*
 * dmaengine .device_alloc_chan_resources hook.  Resets the cookie
 * counters, claims a hardware channel thread from the PL330 core
 * (failing the allocation if none is free -- the error return value is
 * elided from this excerpt) and arms the completion tasklet.
 */
239 static int pl330_alloc_chan_resources(struct dma_chan *chan)
241 struct dma_pl330_chan *pch = to_pchan(chan);
242 struct dma_pl330_dmac *pdmac = pch->dmac;
245 spin_lock_irqsave(&pch->lock, flags);
247 pch->completed = chan->cookie = 1;
249 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
250 if (!pch->pl330_chid) {
251 spin_unlock_irqrestore(&pch->lock, flags);
255 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
257 spin_unlock_irqrestore(&pch->lock, flags);
/*
 * dmaengine .device_control hook.
 *
 * DMA_TERMINATE_ALL: flush the hardware channel thread, mark every
 * queued descriptor complete (advancing the completed cookie) and
 * return them all to the DMAC pool.
 *
 * DMA_SLAVE_CONFIG: record the peripheral FIFO address, burst size
 * (log2 of the address width, via __ffs) and max burst length for the
 * configured direction; only fields the caller actually set are taken.
 *
 * Any other command is rejected (error return elided in this excerpt).
 * NOTE(review): the break statements between cases are elided here --
 * confirm there is no unintended fallthrough in the full source.
 */
262 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
264 struct dma_pl330_chan *pch = to_pchan(chan);
265 struct dma_pl330_desc *desc, *_dt;
267 struct dma_pl330_dmac *pdmac = pch->dmac;
268 struct dma_slave_config *slave_config;
272 case DMA_TERMINATE_ALL:
273 spin_lock_irqsave(&pch->lock, flags);
275 /* FLUSH the PL330 Channel thread */
276 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
278 /* Mark all desc done */
279 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
281 pch->completed = desc->txd.cookie;
282 list_move_tail(&desc->node, &list);
285 list_splice_tail_init(&list, &pdmac->desc_pool);
286 spin_unlock_irqrestore(&pch->lock, flags);
288 case DMA_SLAVE_CONFIG:
289 slave_config = (struct dma_slave_config *)arg;
291 if (slave_config->direction == DMA_TO_DEVICE) {
292 if (slave_config->dst_addr)
293 pch->fifo_addr = slave_config->dst_addr;
294 if (slave_config->dst_addr_width)
295 pch->burst_sz = __ffs(slave_config->dst_addr_width);
296 if (slave_config->dst_maxburst)
297 pch->burst_len = slave_config->dst_maxburst;
298 } else if (slave_config->direction == DMA_FROM_DEVICE) {
299 if (slave_config->src_addr)
300 pch->fifo_addr = slave_config->src_addr;
301 if (slave_config->src_addr_width)
302 pch->burst_sz = __ffs(slave_config->src_addr_width);
303 if (slave_config->src_maxburst)
304 pch->burst_len = slave_config->src_maxburst;
308 dev_err(pch->dmac->pif.dev, "Not supported command.\n");
/*
 * dmaengine .device_free_chan_resources hook: stop the completion
 * tasklet and hand the hardware channel thread back to the PL330 core,
 * under the channel lock.
 */
315 static void pl330_free_chan_resources(struct dma_chan *chan)
317 struct dma_pl330_chan *pch = to_pchan(chan);
320 spin_lock_irqsave(&pch->lock, flags);
322 tasklet_kill(&pch->task);
324 pl330_release_channel(pch->pl330_chid);
325 pch->pl330_chid = NULL;
327 spin_unlock_irqrestore(&pch->lock, flags);
/*
 * dmaengine .device_tx_status hook: compare @cookie against the last
 * completed and last issued cookies.  Residue is always reported as 0
 * (this driver does not track partial-transfer progress).
 */
330 static enum dma_status
331 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
332 struct dma_tx_state *txstate)
334 struct dma_pl330_chan *pch = to_pchan(chan);
335 dma_cookie_t last_done, last_used;
338 last_done = pch->completed;
339 last_used = chan->cookie;
341 ret = dma_async_is_complete(cookie, last_done, last_used);
343 dma_set_tx_state(txstate, last_done, last_used, 0);
/*
 * dmaengine .device_issue_pending hook: kick the channel by running the
 * tasklet body synchronously, which submits queued work and starts the
 * PL330 channel thread.
 */
348 static void pl330_issue_pending(struct dma_chan *chan)
350 pl330_tasklet((unsigned long) to_pchan(chan));
/*
 * dmaengine .tx_submit hook.  prep_xxx hands back the LAST descriptor of
 * a circular chain, so walk the nodes linked off @last, assign each a
 * fresh cookie (the cookie-increment/wrap step is elided here) and move
 * it to the channel work_list; finally queue @last itself and publish
 * the newest cookie on the channel.  All under the channel lock.
 */
354 * We returned the last one of the circular list of descriptor(s)
355 * from prep_xxx, so the argument to submit corresponds to the last
356 * descriptor of the list.
358 static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
360 struct dma_pl330_desc *desc, *last = to_desc(tx);
361 struct dma_pl330_chan *pch = to_pchan(tx->chan);
365 spin_lock_irqsave(&pch->lock, flags);
367 /* Assign cookies to all nodes */
368 cookie = tx->chan->cookie;
370 while (!list_empty(&last->node)) {
371 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
375 desc->txd.cookie = cookie;
377 list_move_tail(&desc->node, &pch->work_list);
382 last->txd.cookie = cookie;
384 list_add_tail(&last->node, &pch->work_list);
386 tx->chan->cookie = cookie;
388 spin_unlock_irqrestore(&pch->lock, flags);
/*
 * One-time wiring of a freshly allocated descriptor: point the PL330
 * request at the embedded xfer and config structures, set conservative
 * config defaults (no swap, unprivileged, SCCTRL0/DCCTRL0 cache
 * control), and install the core completion callback and the dmaengine
 * tx_submit hook.
 */
393 static inline void _init_desc(struct dma_pl330_desc *desc)
396 desc->req.x = &desc->px;
397 desc->req.token = desc;
398 desc->rqcfg.swap = SWAP_NO;
399 desc->rqcfg.privileged = 0;
400 desc->rqcfg.insnaccess = 0;
401 desc->rqcfg.scctl = SCCTRL0;
402 desc->rqcfg.dcctl = DCCTRL0;
403 desc->req.cfg = &desc->rqcfg;
404 desc->req.xfer_cb = dma_pl330_rqcb;
405 desc->txd.tx_submit = pl330_tx_submit;
407 INIT_LIST_HEAD(&desc->node);
/*
 * Grow the DMAC's descriptor pool by @count entries with a single
 * kmalloc, initializing each and linking it into desc_pool under
 * pool_lock.  Returns the number of descriptors added (0 on allocation
 * failure; the failure check is elided from this excerpt).
 * NOTE(review): the function is not static despite being file-internal,
 * and the kmalloc'ed array pointer is not stored anywhere other than
 * the list -- confirm how/where this memory is freed.
 */
410 /* Returns the number of descriptors added to the DMAC pool */
411 int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
413 struct dma_pl330_desc *desc;
420 desc = kmalloc(count * sizeof(*desc), flg);
424 spin_lock_irqsave(&pdmac->pool_lock, flags);
426 for (i = 0; i < count; i++) {
427 _init_desc(&desc[i]);
428 list_add_tail(&desc[i].node, &pdmac->desc_pool);
431 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
/*
 * Detach and return the first descriptor from the DMAC's free pool, or
 * NULL if the pool is empty.  Clears any stale completion callback so
 * the descriptor starts neutral; runs under pool_lock.
 */
436 static struct dma_pl330_desc *
437 pluck_desc(struct dma_pl330_dmac *pdmac)
439 struct dma_pl330_desc *desc = NULL;
445 spin_lock_irqsave(&pdmac->pool_lock, flags);
447 if (!list_empty(&pdmac->desc_pool)) {
448 desc = list_entry(pdmac->desc_pool.next,
449 struct dma_pl330_desc, node);
451 list_del_init(&desc->node);
454 desc->txd.callback = NULL;
457 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
/*
 * Obtain a descriptor for @pch: take one from the DMAC pool, and if the
 * pool is empty grow it by one with GFP_ATOMIC (this path can run in
 * atomic context) before retrying; a second failure is logged and
 * returns NULL (elided here).  The descriptor is then initialized for
 * this channel: cookie cleared, pre-acked, request type taken from the
 * channel's dma_pl330_peri private data (or MEMTOMEM when there is
 * none), peripheral index set, and the dmaengine txd (re)initialized.
 */
462 static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
464 struct dma_pl330_dmac *pdmac = pch->dmac;
465 struct dma_pl330_peri *peri = pch->chan.private;
466 struct dma_pl330_desc *desc;
468 /* Pluck one desc from the pool of DMAC */
469 desc = pluck_desc(pdmac);
471 /* If the DMAC pool is empty, alloc new */
473 if (!add_desc(pdmac, GFP_ATOMIC, 1))
477 desc = pluck_desc(pdmac);
479 dev_err(pch->dmac->pif.dev,
480 "%s:%d ALERT!\n", __func__, __LINE__);
485 /* Initialize the descriptor */
487 desc->txd.cookie = 0;
488 async_tx_ack(&desc->txd);
491 desc->req.rqtype = peri->rqtype;
492 desc->req.peri = pch->chan.chan_id;
494 desc->req.rqtype = MEMTOMEM;
498 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
/*
 * Populate a pl330_xfer with destination, source and byte length
 * (assignment body elided from this excerpt).
 */
503 static inline void fill_px(struct pl330_xfer *px,
504 dma_addr_t dst, dma_addr_t src, size_t len)
/*
 * Common memcpy prep helper: grab a descriptor for @pch and fill its
 * xfer with dst/src/len.  Deliberately does not split requests that
 * might exceed what 256 bytes of microcode buffer can program -- see
 * the in-line rationale.  Returns NULL on descriptor exhaustion
 * (error-return line elided here).
 */
512 static struct dma_pl330_desc *
513 __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
514 dma_addr_t src, size_t len)
516 struct dma_pl330_desc *desc = pl330_get_desc(pch);
519 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
525 * Ideally we should lookout for reqs bigger than
526 * those that can be programmed with 256 bytes of
527 * MC buffer, but considering a req size is seldom
528 * going to be word-unaligned and more than 200MB,
530 * Also, should the limit is reached we'd rather
531 * have the platform increase MC buffer size than
532 * complicating this API driver.
534 fill_px(&desc->px, dst, src, len);
/*
 * Pick the largest burst length for a memcpy: start from how many
 * beats of the chosen burst size fit in the DMAC's data buffer
 * (bus_width/8 * buf_depth >> brst_size), clamp to the hardware
 * maximum of 16 (clamp line elided here), then shrink until the burst
 * evenly divides @len.  Must run after rqcfg.brst_size is fixed.
 */
539 /* Call after fixing burst size */
540 static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
542 struct dma_pl330_chan *pch = desc->pchan;
543 struct pl330_info *pi = &pch->dmac->pif;
546 burst_len = pi->pcfg.data_bus_width / 8;
547 burst_len *= pi->pcfg.data_buf_dep;
548 burst_len >>= desc->rqcfg.brst_size;
550 /* src/dst_burst_len can't be more than 16 */
554 while (burst_len > 1) {
555 if (!(len % (burst_len << desc->rqcfg.brst_size)))
/*
 * dmaengine .device_prep_dma_memcpy hook.  Rejects zero-length requests
 * and channels whose peripheral binding is not MEMTOMEM, builds a single
 * descriptor via __pl330_prep_dma_memcpy, and configures it for
 * incrementing src and dst.  Burst size starts at the full data-bus
 * width; the elided lines presumably reduce it while src/dst/len are
 * not aligned to it -- confirm against the full source.  brst_size is
 * then derived as log2 of the chosen burst.
 */
563 static struct dma_async_tx_descriptor *
564 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
565 dma_addr_t src, size_t len, unsigned long flags)
567 struct dma_pl330_desc *desc;
568 struct dma_pl330_chan *pch = to_pchan(chan);
569 struct dma_pl330_peri *peri = chan->private;
570 struct pl330_info *pi;
573 if (unlikely(!pch || !len))
576 if (peri && peri->rqtype != MEMTOMEM)
579 pi = &pch->dmac->pif;
581 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
585 desc->rqcfg.src_inc = 1;
586 desc->rqcfg.dst_inc = 1;
588 /* Select max possible burst size */
589 burst = pi->pcfg.data_bus_width / 8;
597 desc->rqcfg.brst_size = 0;
598 while (burst != (1 << desc->rqcfg.brst_size))
599 desc->rqcfg.brst_size++;
601 desc->rqcfg.brst_len = get_burst_len(desc, len);
603 desc->txd.flags = flags;
/*
 * dmaengine .device_prep_slave_sg hook.  Validates that the transfer
 * direction matches the channel's peripheral binding (MEMTODEV for
 * DMA_TO_DEVICE, DEVTOMEM for DMA_FROM_DEVICE), then builds one
 * descriptor per scatterlist entry against the FIFO address recorded by
 * DMA_SLAVE_CONFIG.  The descriptors are chained into a ring rooted at
 * the first one; if a descriptor allocation fails mid-way, every
 * descriptor built so far is returned to the DMAC pool under pool_lock
 * and NULL is returned (elided here).  Each descriptor uses the
 * configured burst size with burst length 1, and the caller's flags are
 * stored on the last descriptor, which is what gets returned.
 */
608 static struct dma_async_tx_descriptor *
609 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
610 unsigned int sg_len, enum dma_data_direction direction,
613 struct dma_pl330_desc *first, *desc = NULL;
614 struct dma_pl330_chan *pch = to_pchan(chan);
615 struct dma_pl330_peri *peri = chan->private;
616 struct scatterlist *sg;
621 if (unlikely(!pch || !sgl || !sg_len || !peri))
624 /* Make sure the direction is consistent */
625 if ((direction == DMA_TO_DEVICE &&
626 peri->rqtype != MEMTODEV) ||
627 (direction == DMA_FROM_DEVICE &&
628 peri->rqtype != DEVTOMEM)) {
629 dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
634 addr = pch->fifo_addr;
638 for_each_sg(sgl, sg, sg_len, i) {
640 desc = pl330_get_desc(pch);
642 struct dma_pl330_dmac *pdmac = pch->dmac;
644 dev_err(pch->dmac->pif.dev,
645 "%s:%d Unable to fetch desc\n",
650 spin_lock_irqsave(&pdmac->pool_lock, flags);
652 while (!list_empty(&first->node)) {
653 desc = list_entry(first->node.next,
654 struct dma_pl330_desc, node);
655 list_move_tail(&desc->node, &pdmac->desc_pool);
658 list_move_tail(&first->node, &pdmac->desc_pool);
660 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
668 list_add_tail(&desc->node, &first->node);
670 if (direction == DMA_TO_DEVICE) {
671 desc->rqcfg.src_inc = 1;
672 desc->rqcfg.dst_inc = 0;
674 addr, sg_dma_address(sg), sg_dma_len(sg));
676 desc->rqcfg.src_inc = 0;
677 desc->rqcfg.dst_inc = 1;
679 sg_dma_address(sg), addr, sg_dma_len(sg));
682 desc->rqcfg.brst_size = pch->burst_sz;
683 desc->rqcfg.brst_len = 1;
686 /* Return the last desc in the chain */
687 desc->txd.flags = flg;
/*
 * Shared interrupt handler: hand the event to the PL330 core via
 * pl330_update(); its return value decides IRQ_HANDLED vs IRQ_NONE
 * (return statements elided from this excerpt).
 */
691 static irqreturn_t pl330_irq_handler(int irq, void *data)
693 if (pl330_update(data))
/*
 * AMBA bus probe.  Sequence visible in this excerpt:
 *  1. allocate the dma_pl330_dmac and fill pl330_info (dev, mcbufsz
 *     from platform data when present);
 *  2. claim and ioremap the register window;
 *  3. acquire the "dma" clock, then either enable runtime PM and take a
 *     pm_runtime_get_sync() reference (CONFIG_PM_RUNTIME) or enable the
 *     clock directly;
 *  4. request the IRQ and (elided) call pl330_add() to bring up the
 *     core;
 *  5. seed the descriptor pool with NR_DEFAULT_DESC entries --
 *     allocation failure here is only a warning, the pool can grow
 *     on demand in pl330_get_desc();
 *  6. set up one dma_pl330_chan per peripheral (or per hw channel when
 *     there is no platform data), setting capabilities from each
 *     peripheral's rqtype and rejecting DEVTODEV;
 *  7. fill in the dma_device ops and register with the dmaengine core.
 * Error-unwind labels and most failure returns are elided from this
 * excerpt; the visible release_mem_region() at the end is part of that
 * unwind path.
 */
700 pl330_probe(struct amba_device *adev, const struct amba_id *id)
702 struct dma_pl330_platdata *pdat;
703 struct dma_pl330_dmac *pdmac;
704 struct dma_pl330_chan *pch;
705 struct pl330_info *pi;
706 struct dma_device *pd;
707 struct resource *res;
711 pdat = adev->dev.platform_data;
713 /* Allocate a new DMAC and its Channels */
714 pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
716 dev_err(&adev->dev, "unable to allocate mem\n");
721 pi->dev = &adev->dev;
722 pi->pl330_data = NULL;
723 pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
726 request_mem_region(res->start, resource_size(res), "dma-pl330");
728 pi->base = ioremap(res->start, resource_size(res));
734 pdmac->clk = clk_get(&adev->dev, "dma");
735 if (IS_ERR(pdmac->clk)) {
736 dev_err(&adev->dev, "Cannot get operation clock.\n");
741 amba_set_drvdata(adev, pdmac);
743 #ifdef CONFIG_PM_RUNTIME
744 /* to use the runtime PM helper functions */
745 pm_runtime_enable(&adev->dev);
747 /* enable the power domain */
748 if (pm_runtime_get_sync(&adev->dev)) {
749 dev_err(&adev->dev, "failed to get runtime pm\n");
755 clk_enable(pdmac->clk);
759 ret = request_irq(irq, pl330_irq_handler, 0,
760 dev_name(&adev->dev), pi);
768 INIT_LIST_HEAD(&pdmac->desc_pool);
769 spin_lock_init(&pdmac->pool_lock);
771 /* Create a descriptor pool of default size */
772 if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
773 dev_warn(&adev->dev, "unable to allocate desc\n");
776 INIT_LIST_HEAD(&pd->channels);
778 /* Initialize channel parameters */
779 num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
780 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
782 for (i = 0; i < num_chan; i++) {
783 pch = &pdmac->peripherals[i];
785 struct dma_pl330_peri *peri = &pdat->peri[i];
787 switch (peri->rqtype) {
789 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
793 dma_cap_set(DMA_SLAVE, pd->cap_mask);
796 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
799 pch->chan.private = peri;
801 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
802 pch->chan.private = NULL;
805 INIT_LIST_HEAD(&pch->work_list);
806 spin_lock_init(&pch->lock);
807 pch->pl330_chid = NULL;
808 pch->chan.device = pd;
809 pch->chan.chan_id = i;
812 /* Add the channel to the DMAC list */
814 list_add_tail(&pch->chan.device_node, &pd->channels);
817 pd->dev = &adev->dev;
819 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
820 pd->device_free_chan_resources = pl330_free_chan_resources;
821 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
822 pd->device_tx_status = pl330_tx_status;
823 pd->device_prep_slave_sg = pl330_prep_slave_sg;
824 pd->device_control = pl330_control;
825 pd->device_issue_pending = pl330_issue_pending;
827 ret = dma_async_device_register(pd);
829 dev_err(&adev->dev, "unable to register DMAC\n");
834 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
836 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
837 pi->pcfg.data_buf_dep,
838 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
839 pi->pcfg.num_peri, pi->pcfg.num_events);
850 release_mem_region(res->start, resource_size(res));
/*
 * AMBA bus remove: unregister the dmaengine device (elided), detach
 * every channel from the device list after terminating and freeing its
 * resources, tear down the PL330 core and IRQ (elided), release the
 * register window, drop the runtime-PM reference (or disable the clock
 * when runtime PM is off) and free the DMAC (elided).
 */
856 static int __devexit pl330_remove(struct amba_device *adev)
858 struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
859 struct dma_pl330_chan *pch, *_p;
860 struct pl330_info *pi;
861 struct resource *res;
867 amba_set_drvdata(adev, NULL);
870 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
873 /* Remove the channel */
874 list_del(&pch->chan.device_node);
876 /* Flush the channel */
877 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
878 pl330_free_chan_resources(&pch->chan);
891 release_mem_region(res->start, resource_size(res));
893 #ifdef CONFIG_PM_RUNTIME
894 pm_runtime_put(&adev->dev);
895 pm_runtime_disable(&adev->dev);
897 clk_disable(pdmac->clk);
/* AMBA ID match table for PL330 parts (entries elided from this excerpt). */
905 static struct amba_id pl330_ids[] = {
/*
 * Runtime PM callbacks: gate the DMAC's operation clock off on suspend
 * and back on on resume.  Both bail with an error when drvdata is
 * missing.  When CONFIG_PM_RUNTIME is off, the callbacks are stubbed to
 * NULL so pl330_pm_ops stays valid.
 */
913 #ifdef CONFIG_PM_RUNTIME
914 static int pl330_runtime_suspend(struct device *dev)
916 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
919 dev_err(dev, "failed to get dmac\n");
923 clk_disable(pdmac->clk);
928 static int pl330_runtime_resume(struct device *dev)
930 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
933 dev_err(dev, "failed to get dmac\n");
937 clk_enable(pdmac->clk);
942 #define pl330_runtime_suspend NULL
943 #define pl330_runtime_resume NULL
944 #endif /* CONFIG_PM_RUNTIME */
/* Power-management ops table wired into the driver below. */
946 static const struct dev_pm_ops pl330_pm_ops = {
947 .runtime_suspend = pl330_runtime_suspend,
948 .runtime_resume = pl330_runtime_resume,
/* AMBA driver glue: ID table plus probe/remove entry points. */
951 static struct amba_driver pl330_driver = {
953 .owner = THIS_MODULE,
957 .id_table = pl330_ids,
958 .probe = pl330_probe,
959 .remove = pl330_remove,
/* Module entry point: register the AMBA driver. */
962 static int __init pl330_init(void)
964 return amba_driver_register(&pl330_driver);
966 module_init(pl330_init);
/* Module exit point: unregister the AMBA driver. */
968 static void __exit pl330_exit(void)
970 amba_driver_unregister(&pl330_driver);
973 module_exit(pl330_exit);
975 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
976 MODULE_DESCRIPTION("API Driver for PL330 DMAC");
977 MODULE_LICENSE("GPL");