author	Vinod Koul	2016-10-03 09:18:55 +0530
committer	Vinod Koul	2016-10-03 09:18:55 +0530
commit	765630daa83c79fb804da4f268ec631a262fb7ba (patch)
tree	435ba1fe951b0ce9006282655c273fb07810bfa6 /drivers
parent	f35d7633f5adfd7f93c0391b48ef78a14682e6b2 (diff)
parent	e7b2acfc79f052d023bf15bb6f82c90166ba74b5 (diff)
Merge branch 'topic/omap' into for-linus
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/omap-dma.c	243
1 file changed, 213 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index d99ca2b511c4..7ca27d4b1c54 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -8,6 +8,7 @@
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -32,10 +33,12 @@ struct omap_dmadev {
 	const struct omap_dma_reg *reg_map;
 	struct omap_system_dma_plat_info *plat;
 	bool legacy;
+	bool ll123_supported;
+	struct dma_pool *desc_pool;
 	unsigned dma_requests;
 	spinlock_t irq_lock;
 	uint32_t irq_enable_mask;
-	struct omap_chan *lch_map[OMAP_SDMA_CHANNELS];
+	struct omap_chan **lch_map;
 };
 
 struct omap_chan {
@@ -55,16 +58,40 @@ struct omap_chan {
 	unsigned sgidx;
 };
 
+#define DESC_NXT_SV_REFRESH	(0x1 << 24)
+#define DESC_NXT_SV_REUSE	(0x2 << 24)
+#define DESC_NXT_DV_REFRESH	(0x1 << 26)
+#define DESC_NXT_DV_REUSE	(0x2 << 26)
+#define DESC_NTYPE_TYPE2	(0x2 << 29)
+
+/* Type 2 descriptor with Source or Destination address update */
+struct omap_type2_desc {
+	uint32_t next_desc;
+	uint32_t en;
+	uint32_t addr; /* src or dst */
+	uint16_t fn;
+	uint16_t cicr;
+	int16_t cdei;
+	int16_t csei;
+	int32_t cdfi;
+	int32_t csfi;
+} __packed;
+
 struct omap_sg {
 	dma_addr_t addr;
 	uint32_t en;		/* number of elements (24-bit) */
 	uint32_t fn;		/* number of frames (16-bit) */
 	int32_t fi;		/* for double indexing */
 	int16_t ei;		/* for double indexing */
+
+	/* Linked list */
+	struct omap_type2_desc *t2_desc;
+	dma_addr_t t2_desc_paddr;
 };
 
 struct omap_desc {
 	struct virt_dma_desc vd;
+	bool using_ll;
 	enum dma_transfer_direction dir;
 	dma_addr_t dev_addr;
 
@@ -81,6 +108,9 @@ struct omap_desc {
 };
 
 enum {
+	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
+	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */
+
 	CCR_FS			= BIT(5),
 	CCR_READ_PRIORITY	= BIT(6),
 	CCR_ENABLE		= BIT(7),
@@ -151,6 +181,19 @@ enum {
 	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */
 
 	CLNK_CTRL_ENABLE_LNK	= BIT(15),
+
+	CDP_DST_VALID_INC	= 0 << 0,
+	CDP_DST_VALID_RELOAD	= 1 << 0,
+	CDP_DST_VALID_REUSE	= 2 << 0,
+	CDP_SRC_VALID_INC	= 0 << 2,
+	CDP_SRC_VALID_RELOAD	= 1 << 2,
+	CDP_SRC_VALID_REUSE	= 2 << 2,
+	CDP_NTYPE_TYPE1		= 1 << 4,
+	CDP_NTYPE_TYPE2		= 2 << 4,
+	CDP_NTYPE_TYPE3		= 3 << 4,
+	CDP_TMODE_NORMAL	= 0 << 8,
+	CDP_TMODE_LLIST		= 1 << 8,
+	CDP_FAST		= BIT(10),
 };
 
 static const unsigned es_bytes[] = {
@@ -180,7 +223,64 @@ static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor
 
 static void omap_dma_desc_free(struct virt_dma_desc *vd)
 {
-	kfree(container_of(vd, struct omap_desc, vd));
+	struct omap_desc *d = to_omap_dma_desc(&vd->tx);
+
+	if (d->using_ll) {
+		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
+		int i;
+
+		for (i = 0; i < d->sglen; i++) {
+			if (d->sg[i].t2_desc)
+				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
+					      d->sg[i].t2_desc_paddr);
+		}
+	}
+
+	kfree(d);
+}
+
+static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
+				     enum dma_transfer_direction dir, bool last)
+{
+	struct omap_sg *sg = &d->sg[idx];
+	struct omap_type2_desc *t2_desc = sg->t2_desc;
+
+	if (idx)
+		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
+	if (last)
+		t2_desc->next_desc = 0xfffffffc;
+
+	t2_desc->en = sg->en;
+	t2_desc->addr = sg->addr;
+	t2_desc->fn = sg->fn & 0xffff;
+	t2_desc->cicr = d->cicr;
+	if (!last)
+		t2_desc->cicr &= ~CICR_BLOCK_IE;
+
+	switch (dir) {
+	case DMA_DEV_TO_MEM:
+		t2_desc->cdei = sg->ei;
+		t2_desc->csei = d->ei;
+		t2_desc->cdfi = sg->fi;
+		t2_desc->csfi = d->fi;
+
+		t2_desc->en |= DESC_NXT_DV_REFRESH;
+		t2_desc->en |= DESC_NXT_SV_REUSE;
+		break;
+	case DMA_MEM_TO_DEV:
+		t2_desc->cdei = d->ei;
+		t2_desc->csei = sg->ei;
+		t2_desc->cdfi = d->fi;
+		t2_desc->csfi = sg->fi;
+
+		t2_desc->en |= DESC_NXT_SV_REFRESH;
+		t2_desc->en |= DESC_NXT_DV_REUSE;
+		break;
+	default:
+		return;
+	}
+
+	t2_desc->en |= DESC_NTYPE_TYPE2;
 }
 
 static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
@@ -285,6 +385,7 @@ static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
 static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+	uint16_t cicr = d->cicr;
 
 	if (__dma_omap15xx(od->plat->dma_attr))
 		omap_dma_chan_write(c, CPC, 0);
@@ -293,8 +394,27 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 
 	omap_dma_clear_csr(c);
 
+	if (d->using_ll) {
+		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
+
+		if (d->dir == DMA_DEV_TO_MEM)
+			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
+		else
+			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
+		omap_dma_chan_write(c, CDP, cdp);
+
+		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
+		omap_dma_chan_write(c, CCDN, 0);
+		omap_dma_chan_write(c, CCFN, 0xffff);
+		omap_dma_chan_write(c, CCEN, 0xffffff);
+
+		cicr &= ~CICR_BLOCK_IE;
+	} else if (od->ll123_supported) {
+		omap_dma_chan_write(c, CDP, 0);
+	}
+
 	/* Enable interrupts */
-	omap_dma_chan_write(c, CICR, d->cicr);
+	omap_dma_chan_write(c, CICR, cicr);
 
 	/* Enable channel */
 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
@@ -365,10 +485,9 @@ static void omap_dma_stop(struct omap_chan *c)
 	c->running = false;
 }
 
-static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
-	unsigned idx)
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
 {
-	struct omap_sg *sg = d->sg + idx;
+	struct omap_sg *sg = d->sg + c->sgidx;
 	unsigned cxsa, cxei, cxfi;
 
 	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
@@ -388,6 +507,7 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
 	omap_dma_chan_write(c, CFN, sg->fn);
 
 	omap_dma_start(c, d);
+	c->sgidx++;
 }
 
 static void omap_dma_start_desc(struct omap_chan *c)
@@ -433,7 +553,7 @@ static void omap_dma_start_desc(struct omap_chan *c)
 	omap_dma_chan_write(c, CSDP, d->csdp);
 	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
 
-	omap_dma_start_sg(c, d, 0);
+	omap_dma_start_sg(c, d);
 }
 
 static void omap_dma_callback(int ch, u16 status, void *data)
@@ -445,15 +565,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
 	spin_lock_irqsave(&c->vc.lock, flags);
 	d = c->desc;
 	if (d) {
-		if (!c->cyclic) {
-			if (++c->sgidx < d->sglen) {
-				omap_dma_start_sg(c, d, c->sgidx);
-			} else {
-				omap_dma_start_desc(c);
-				vchan_cookie_complete(&d->vd);
-			}
-		} else {
+		if (c->cyclic) {
 			vchan_cyclic_callback(&d->vd);
+		} else if (d->using_ll || c->sgidx == d->sglen) {
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		} else {
+			omap_dma_start_sg(c, d);
 		}
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
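[Note, not part of the patch] With a linked list the controller advances through the chain on its own, so omap_dma_start() clears CICR_BLOCK_IE in the channel interrupt enable and only the last descriptor, whose cicr field keeps the bit (see omap_dma_fill_type2_desc() above), raises a block interrupt. The completion test in omap_dma_callback() therefore collapses to the condition below; a sketch with an illustrative helper name:

static bool sketch_desc_complete(struct omap_chan *c, struct omap_desc *d)
{
	/*
	 * A linked-list transfer finishes in a single interrupt since
	 * the hardware has already walked every element; otherwise the
	 * descriptor is done only once software has issued all sg
	 * entries via omap_dma_start_sg().
	 */
	return d->using_ll || c->sgidx == d->sglen;
}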
%u\n", c->dma_ch, c->dma_sig); if (ret >= 0) { omap_dma_assign(od, c, c->dma_ch); @@ -570,7 +688,8 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(&c->vc); omap_free_dma(c->dma_ch); - dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig); + dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch, + c->dma_sig); c->dma_sig = 0; } @@ -744,6 +863,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( struct omap_desc *d; dma_addr_t dev_addr; unsigned i, es, en, frame_bytes; + bool ll_failed = false; u32 burst; if (dir == DMA_DEV_TO_MEM) { @@ -784,13 +904,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( d->es = es; d->ccr = c->ccr | CCR_SYNC_FRAME; - if (dir == DMA_DEV_TO_MEM) + if (dir == DMA_DEV_TO_MEM) { d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; - else + d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; + } else { d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; + d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; + } d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; - d->csdp = es; + d->csdp |= es; if (dma_omap1()) { d->cicr |= CICR_TOUT_IE; @@ -819,14 +942,47 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( */ en = burst; frame_bytes = es_bytes[es] * en; + + if (sglen >= 2) + d->using_ll = od->ll123_supported; + for_each_sg(sgl, sgent, sglen, i) { - d->sg[i].addr = sg_dma_address(sgent); - d->sg[i].en = en; - d->sg[i].fn = sg_dma_len(sgent) / frame_bytes; + struct omap_sg *osg = &d->sg[i]; + + osg->addr = sg_dma_address(sgent); + osg->en = en; + osg->fn = sg_dma_len(sgent) / frame_bytes; + + if (d->using_ll) { + osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, + &osg->t2_desc_paddr); + if (!osg->t2_desc) { + dev_err(chan->device->dev, + "t2_desc[%d] allocation failed\n", i); + ll_failed = true; + d->using_ll = false; + continue; + } + + omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1)); + } } d->sglen = sglen; + /* Release the dma_pool entries if one allocation failed */ + if (ll_failed) { + for (i = 0; i < d->sglen; i++) { + struct omap_sg *osg = &d->sg[i]; + + if (osg->t2_desc) { + dma_pool_free(od->desc_pool, osg->t2_desc, + osg->t2_desc_paddr); + osg->t2_desc = NULL; + } + } + } + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } @@ -1225,16 +1381,24 @@ static int omap_dma_probe(struct platform_device *pdev) spin_lock_init(&od->lock); spin_lock_init(&od->irq_lock); - od->dma_requests = OMAP_SDMA_REQUESTS; - if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, - "dma-requests", - &od->dma_requests)) { + if (!pdev->dev.of_node) { + od->dma_requests = od->plat->dma_attr->lch_count; + if (unlikely(!od->dma_requests)) + od->dma_requests = OMAP_SDMA_REQUESTS; + } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests", + &od->dma_requests)) { dev_info(&pdev->dev, "Missing dma-requests property, using %u.\n", OMAP_SDMA_REQUESTS); + od->dma_requests = OMAP_SDMA_REQUESTS; } - for (i = 0; i < OMAP_SDMA_CHANNELS; i++) { + od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests, + sizeof(*od->lch_map), GFP_KERNEL); + if (!od->lch_map) + return -ENOMEM; + + for (i = 0; i < od->dma_requests; i++) { rc = omap_dma_chan_init(od); if (rc) { omap_dma_free(od); @@ -1257,10 +1421,25 @@ static int omap_dma_probe(struct platform_device *pdev) return rc; } + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) + od->ll123_supported = true; + od->ddev.filter.map = od->plat->slave_map; od->ddev.filter.mapcnt = od->plat->slavecnt; od->ddev.filter.fn = 
@@ -1225,16 +1381,24 @@ static int omap_dma_probe(struct platform_device *pdev)
 	spin_lock_init(&od->lock);
 	spin_lock_init(&od->irq_lock);
 
-	od->dma_requests = OMAP_SDMA_REQUESTS;
-	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
-						      "dma-requests",
-						      &od->dma_requests)) {
+	if (!pdev->dev.of_node) {
+		od->dma_requests = od->plat->dma_attr->lch_count;
+		if (unlikely(!od->dma_requests))
+			od->dma_requests = OMAP_SDMA_REQUESTS;
+	} else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
+					&od->dma_requests)) {
 		dev_info(&pdev->dev,
 			 "Missing dma-requests property, using %u.\n",
 			 OMAP_SDMA_REQUESTS);
+		od->dma_requests = OMAP_SDMA_REQUESTS;
 	}
 
-	for (i = 0; i < OMAP_SDMA_CHANNELS; i++) {
+	od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
+				   sizeof(*od->lch_map), GFP_KERNEL);
+	if (!od->lch_map)
+		return -ENOMEM;
+
+	for (i = 0; i < od->dma_requests; i++) {
 		rc = omap_dma_chan_init(od);
 		if (rc) {
 			omap_dma_free(od);
@@ -1257,10 +1421,25 @@ static int omap_dma_probe(struct platform_device *pdev)
 			return rc;
 	}
 
+	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
+		od->ll123_supported = true;
+
 	od->ddev.filter.map = od->plat->slave_map;
 	od->ddev.filter.mapcnt = od->plat->slavecnt;
 	od->ddev.filter.fn = omap_dma_filter_fn;
 
+	if (od->ll123_supported) {
+		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
+						&pdev->dev,
+						sizeof(struct omap_type2_desc),
+						4, 0);
+		if (!od->desc_pool) {
+			dev_err(&pdev->dev,
+				"unable to allocate descriptor pool\n");
+			od->ll123_supported = false;
+		}
+	}
+
 	rc = dma_async_device_register(&od->ddev);
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -1284,7 +1463,8 @@ static int omap_dma_probe(struct platform_device *pdev)
 		}
 	}
 
-	dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
+		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
 
 	return rc;
 }
@@ -1307,6 +1487,9 @@ static int omap_dma_remove(struct platform_device *pdev)
 		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
 	}
 
+	if (od->ll123_supported)
+		dma_pool_destroy(od->desc_pool);
+
 	omap_dma_free(od);
 
 	return 0;
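[Note, not part of the patch] The descriptor pool's lifecycle is spread over four functions in the patch: created in probe (one struct omap_type2_desc per element, 4-byte aligned as the next-descriptor pointer requires), populated per sg element in omap_dma_prep_slave_sg(), returned in omap_dma_desc_free(), and destroyed in remove. Condensed into one illustrative sequence; the function is a sketch, not driver code:

static int sketch_pool_lifecycle(struct omap_dmadev *od, struct device *dev)
{
	struct omap_type2_desc *t2;
	dma_addr_t paddr;

	/* probe: backing storage for all Type 2 descriptors */
	od->desc_pool = dma_pool_create(dev_name(dev), dev,
					sizeof(struct omap_type2_desc), 4, 0);
	if (!od->desc_pool)
		return -ENOMEM;

	/* prep_slave_sg: one descriptor per sg element */
	t2 = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, &paddr);
	if (t2) {
		/* desc_free: each element goes back to the pool */
		dma_pool_free(od->desc_pool, t2, paddr);
	}

	/* remove: tear the pool down once nothing is outstanding */
	dma_pool_destroy(od->desc_pool);
	return 0;
}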