Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                 |  4
-rw-r--r--  drivers/dma/at_hdmac_regs.h         |  2
-rw-r--r--  drivers/dma/at_xdmac.c              |  2
-rw-r--r--  drivers/dma/dmaengine.c             | 98
-rw-r--r--  drivers/dma/dmatest.c               | 24
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c  | 65
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.h  |  4
-rw-r--r--  drivers/dma/dw-edma/dw-edma-pcie.c  | 10
-rw-r--r--  drivers/dma/idxd/sysfs.c            | 11
-rw-r--r--  drivers/dma/imx-sdma.c              |  2
-rw-r--r--  drivers/dma/ioat/dma.c              | 85
-rw-r--r--  drivers/dma/ioat/dma.h              | 10
-rw-r--r--  drivers/dma/ioat/init.c             |  2
-rw-r--r--  drivers/dma/mmp_tdma.c              | 26
-rw-r--r--  drivers/dma/moxart-dma.c            |  2
-rw-r--r--  drivers/dma/qcom/bam_dma.c          |  2
-rw-r--r--  drivers/dma/qcom/hidma.c            |  3
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c       | 25
-rw-r--r--  drivers/dma/stm32-dma.c             | 41
-rw-r--r--  drivers/dma/ti/Kconfig              |  4
-rw-r--r--  drivers/dma/ti/k3-udma.c            | 34

21 files changed, 272 insertions(+), 184 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 023db6883d05..e9ed9165de40 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -106,7 +106,7 @@ config AXI_DMAC
 	select REGMAP_MMIO
 	help
 	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
-	  controller is often used in Analog Device's reference designs for FPGA
+	  controller is often used in Analog Devices' reference designs for FPGA
 	  platforms.
 
 config BCM_SBA_RAID
@@ -395,12 +395,10 @@ config MMP_TDMA
 	bool "MMP Two-Channel DMA support"
 	depends on ARCH_MMP || COMPILE_TEST
 	select DMA_ENGINE
-	select MMP_SRAM if ARCH_MMP
 	select GENERIC_ALLOCATOR
 	help
 	  Support the MMP Two-Channel DMA engine.
 	  This engine used for MMP Audio DMA and pxa910 SQU.
-	  It needs sram driver under mach-mmp.
 
 config MOXART_DMA
 	tristate "MOXART DMA support"
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 397692e937b3..80fc2fe8c77e 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -331,7 +331,7 @@ struct at_dma {
 	struct dma_pool		*dma_desc_pool;
 	struct dma_pool		*memset_pool;
 	/* AT THE END channels table */
-	struct at_dma_chan	chan[0];
+	struct at_dma_chan	chan[];
 };
 
 #define	dma_readl(atdma, name) \
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index bb0eaf38b594..fd92f048c491 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -212,7 +212,7 @@ struct at_xdmac {
 	struct clk		*clk;
 	u32			save_gim;
 	struct dma_pool		*at_xdmac_desc_pool;
-	struct at_xdmac_chan	chan[0];
+	struct at_xdmac_chan	chan[];
 };
 
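The chan[0] -> chan[] conversions above (and the bam_dma one further down) replace the old GNU zero-length-array idiom with a C99 flexible array member. What makes this safe is that sizeof() on the containing struct excludes the trailing array, so the allocation sizes it explicitly. A minimal sketch of the idiom, with invented demo_* names (not from this series):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_chan {
            int id;
    };

    struct demo_dma {
            int nr_chan;
            struct demo_chan chan[];  /* must be last; excluded from sizeof() */
    };

    /* Allocate the parent struct plus nr_chan trailing elements in one go;
     * struct_size() guards the size computation against overflow.
     */
    static struct demo_dma *demo_alloc(int nr_chan, gfp_t gfp)
    {
            struct demo_dma *d = kzalloc(struct_size(d, chan, nr_chan), gfp);

            if (d)
                    d->nr_chan = nr_chan;
            return d;
    }

The [] form also lets the compiler reject misuse, such as placing the member anywhere but at the end of the structure.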
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d31076d9ef25..2b06a7a8629d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -53,6 +53,8 @@
 #include <linux/mempool.h>
 #include <linux/numa.h>
 
+#include "dmaengine.h"
+
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDA(dma_ida);
 static LIST_HEAD(dma_device_list);
@@ -145,9 +147,9 @@ static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
 
 /**
  * dev_to_dma_chan - convert a device pointer to its sysfs container object
- * @dev - device node
+ * @dev: device node
  *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static struct dma_chan *dev_to_dma_chan(struct device *dev)
 {
@@ -243,22 +245,18 @@ static struct class dma_devclass = {
 
 /* --- client and device registration --- */
 
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
+/* enable iteration over all operation types */
 static dma_cap_mask_t dma_cap_mask_all;
 
 /**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan: associated channel for this entry
  */
 struct dma_chan_tbl_ent {
 	struct dma_chan *chan;
 };
 
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
+/* percpu lookup table for memory-to-memory offload providers */
 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 
 static int __init dma_channel_table_init(void)
@@ -295,8 +293,11 @@ static int __init dma_channel_table_init(void)
 arch_initcall(dma_channel_table_init);
 
 /**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as
- *	the cpu
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
+ * @chan: DMA channel to test
+ * @cpu: CPU index which the channel should be close to
+ *
+ * Returns true if the channel is in the same NUMA-node as the CPU.
  */
 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 {
@@ -306,14 +307,14 @@ static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 }
 
 /**
- * min_chan - returns the channel with min count and in the same numa-node as
- *	the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
+ * @cap: capability to match
+ * @cpu: CPU index which the channel should be close to
 *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
+ * If some channels are close to the given CPU, the one with the lowest
+ * reference count is returned. Otherwise, CPU is ignored and only the
  * reference count is taken into account.
+ *
  * Must be called under dma_list_mutex.
  */
 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
@@ -351,10 +352,11 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 /**
  * dma_channel_rebalance - redistribute the available channels
  *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.
+ *
+ * Must be called under dma_list_mutex.
  */
 static void dma_channel_rebalance(void)
 {
@@ -404,9 +406,9 @@ static struct module *dma_chan_to_owner(struct dma_chan *chan)
 
 /**
  * balance_ref_count - catch up the channel reference count
- * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ * @chan: channel to balance ->client_count versus dmaengine_ref_count
  *
- * balance_ref_count must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static void balance_ref_count(struct dma_chan *chan)
 {
@@ -436,10 +438,10 @@ static void dma_device_put(struct dma_device *device)
 }
 
 /**
- * dma_chan_get - try to grab a dma channel's parent driver module
- * @chan - channel to grab
+ * dma_chan_get - try to grab a DMA channel's parent driver module
+ * @chan: channel to grab
 *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static int dma_chan_get(struct dma_chan *chan)
 {
@@ -483,10 +485,10 @@ module_put_out:
 }
 
 /**
- * dma_chan_put - drop a reference to a dma channel's parent driver module
- * @chan - channel to release
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
+ * @chan: channel to release
 *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static void dma_chan_put(struct dma_chan *chan)
 {
@@ -537,7 +539,7 @@ EXPORT_SYMBOL(dma_sync_wait);
 
 /**
  * dma_find_channel - find a channel to carry out the operation
- * @tx_type: transaction type
+ * @tx_type: transaction type
  */
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
@@ -677,7 +679,7 @@ static struct dma_chan *find_candidate(struct dma_device *device,
 
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
- * @chan: target channel
+ * @chan: target channel
  */
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 {
@@ -731,10 +733,10 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 
 /**
  * __dma_request_channel - try to allocate an exclusive channel
- * @mask: capabilities that the channel must satisfy
- * @fn: optional callback to disposition available channels
- * @fn_param: opaque parameter to pass to dma_filter_fn
- * @np: device node to look for DMA channels
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn()
+ * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
@@ -877,7 +879,7 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
 /**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
- * @mask: capabilities that the channel must satisfy
+ * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
@@ -968,7 +970,7 @@ void dmaengine_get(void)
 EXPORT_SYMBOL(dmaengine_get);
 
 /**
- * dmaengine_put - let dma drivers be removed when ref_count == 0
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
 void dmaengine_put(void)
 {
@@ -1132,7 +1134,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
 
 /**
  * dma_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
@@ -1304,7 +1306,7 @@ EXPORT_SYMBOL(dma_async_device_register);
 
 /**
 * dma_async_device_unregister - unregister a DMA device
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
@@ -1341,7 +1343,7 @@ static void dmam_device_release(struct device *dev, void *res)
 
 /**
 * dmaenginem_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
@@ -1578,8 +1580,9 @@ int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
 }
 EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
 
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
- * @tx: in-flight transaction to wait on
+/**
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
 */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -1602,9 +1605,12 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 
-/* dma_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
+/**
+ * dma_run_dependencies - process dependent operations on the target channel
+ * @tx: transaction with dependencies
+ *
+ * Helper routine for DMA drivers to process (start) dependent operations
+ * on their target channel.
 */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
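Most of the dmaengine.c churn above is kernel-doc hygiene: parameters use `@name:` rather than `@name -`, struct documentation is titled `struct foo - ...`, and comments that describe plain variables rather than APIs drop the `/**` marker. A compact illustration of the expected shape (illustrative struct, not from the patch):

    /**
     * struct demo_chan_stats - per-channel bookkeeping
     * @lock: protects @issued
     * @issued: descriptors handed to hardware
     *
     * Must only be touched while @lock is held.
     */
    struct demo_chan_stats {
            spinlock_t lock;
            unsigned int issued;
    };

Comments opening with `/**` are parsed by scripts/kernel-doc, which is why the mislabeled ones above were either promoted to real kernel-doc or demoted to ordinary `/* ... */` comments.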
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 0425984db118..b175229a4b01 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -60,9 +60,9 @@ MODULE_PARM_DESC(pq_sources,
 		"Number of p+q source buffers (default: 3)");
 
 static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO | S_IWUSR);
+module_param(timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
-		"Pass 0xFFFFFFFF (4294967295) for maximum timeout");
+		"Pass -1 for infinite timeout");
 
 static bool noverify;
 module_param(noverify, bool, S_IRUGO | S_IWUSR);
@@ -72,10 +72,6 @@ static bool norandom;
 module_param(norandom, bool, 0644);
 MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
 
-static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
-
 static bool verbose;
 module_param(verbose, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
@@ -88,6 +84,10 @@ static unsigned int transfer_size;
 module_param(transfer_size, uint, 0644);
 MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
 
+static bool polled;
+module_param(polled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
+
 /**
  * struct dmatest_params - test parameters.
  * @buf_size: size of the memcpy test buffer
@@ -98,7 +98,12 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default
  * @iterations: iterations before stopping test
  * @xor_sources: number of xor source buffers
  * @pq_sources: number of p+q source buffers
- * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
+ * @timeout: transfer timeout in msec, -1 for infinite timeout
+ * @noverify: disable data verification
+ * @norandom: disable random offset setup
+ * @alignment: custom data address alignment taken as 2^alignment
+ * @transfer_size: custom transfer size in bytes
+ * @polled: use polling for completion instead of interrupts
  */
 struct dmatest_params {
 	unsigned int	buf_size;
@@ -109,7 +114,7 @@ struct dmatest_params {
 	unsigned int	iterations;
 	unsigned int	xor_sources;
 	unsigned int	pq_sources;
-	unsigned int	timeout;
+	int		timeout;
 	bool		noverify;
 	bool		norandom;
 	int		alignment;
@@ -120,7 +125,10 @@ struct dmatest_params {
 /**
  * struct dmatest_info - test information.
  * @params: test parameters
+ * @channels: channels under test
+ * @nr_channels: number of channels under test
  * @lock: access protection to the fields of this structure
+ * @did_init: module has been initialized completely
 */
 static struct dmatest_info {
 	/* Test parameters */
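Making `timeout` a signed module parameter lets -1 (or any negative value) mean "wait forever" instead of overloading 0xFFFFFFFF. A sketch of how a signed millisecond timeout typically folds into a jiffies-based wait; this assumes a wait_event_timeout()-style consumer and is not lifted verbatim from dmatest:

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static unsigned long demo_timeout_jiffies(int timeout_ms)
    {
            /* Negative request: effectively infinite wait */
            if (timeout_ms < 0)
                    return MAX_SCHEDULE_TIMEOUT;
            return msecs_to_jiffies(timeout_ms);
    }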
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index ff392c01bad1..ed430ad9b3dd 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -13,8 +13,9 @@
 #include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/dma/edma.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
 
 #include "dw-edma-core.h"
 #include "dw-edma-v0-core.h"
@@ -322,7 +323,7 @@ static struct dma_async_tx_descriptor *
 dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 {
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
-	enum dma_transfer_direction direction = xfer->direction;
+	enum dma_transfer_direction dir = xfer->direction;
 	phys_addr_t src_addr, dst_addr;
 	struct scatterlist *sg = NULL;
 	struct dw_edma_chunk *chunk;
@@ -331,10 +332,26 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 	u32 cnt;
 	int i;
 
-	if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
-	    (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
+	if (!chan->configured)
 		return NULL;
 
+	switch (chan->config.direction) {
+	case DMA_DEV_TO_MEM: /* local dma */
+		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
+			break;
+		return NULL;
+	case DMA_MEM_TO_DEV: /* local dma */
+		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
+			break;
+		return NULL;
+	default: /* remote dma */
+		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
+			break;
+		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
+			break;
+		return NULL;
+	}
+
 	if (xfer->cyclic) {
 		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
 			return NULL;
@@ -343,9 +360,6 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 			return NULL;
 	}
 
-	if (!chan->configured)
-		return NULL;
-
 	desc = dw_edma_alloc_desc(chan);
 	if (unlikely(!desc))
 		goto err_alloc;
@@ -386,7 +400,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 		chunk->ll_region.sz += burst->sz;
 		desc->alloc_sz += burst->sz;
 
-		if (direction == DMA_DEV_TO_MEM) {
+		if (chan->dir == EDMA_DIR_WRITE) {
 			burst->sar = src_addr;
 			if (xfer->cyclic) {
 				burst->dar = xfer->xfer.cyclic.paddr;
@@ -773,6 +787,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 	u32 rd_mask = 1;
 	int i, err = 0;
 	u32 ch_cnt;
+	int irq;
 
 	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
 
@@ -781,16 +796,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 
 	if (dw->nr_irqs == 1) {
 		/* Common IRQ shared among all channels */
-		err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
-				  dw_edma_interrupt_common,
+		irq = dw->ops->irq_vector(dev, 0);
+		err = request_irq(irq, dw_edma_interrupt_common,
 				  IRQF_SHARED, dw->name, &dw->irq[0]);
 		if (err) {
 			dw->nr_irqs = 0;
 			return err;
 		}
 
-		get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
-				   &dw->irq[0].msi);
+		if (irq_get_msi_desc(irq))
+			get_cached_msi_msg(irq, &dw->irq[0].msi);
 	} else {
 		/* Distribute IRQs equally among all channels */
 		int tmp = dw->nr_irqs;
@@ -804,7 +819,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
 
 		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
-			err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
+			irq = dw->ops->irq_vector(dev, i);
+			err = request_irq(irq,
 					  i < *wr_alloc ?
 						dw_edma_interrupt_write :
 						dw_edma_interrupt_read,
@@ -815,8 +831,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 				return err;
 			}
 
-			get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
-					   &dw->irq[i].msi);
+			if (irq_get_msi_desc(irq))
+				get_cached_msi_msg(irq, &dw->irq[i].msi);
 		}
 
 		dw->nr_irqs = i;
@@ -827,12 +843,23 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 
 int dw_edma_probe(struct dw_edma_chip *chip)
 {
-	struct device *dev = chip->dev;
-	struct dw_edma *dw = chip->dw;
+	struct device *dev;
+	struct dw_edma *dw;
 	u32 wr_alloc = 0;
 	u32 rd_alloc = 0;
 	int i, err;
 
+	if (!chip)
+		return -EINVAL;
+
+	dev = chip->dev;
+	if (!dev)
+		return -EINVAL;
+
+	dw = chip->dw;
+	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
+		return -EINVAL;
+
 	raw_spin_lock_init(&dw->lock);
 
 	/* Find out how many write channels are supported by hardware */
@@ -884,7 +911,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
 
 err_irq_free:
 	for (i = (dw->nr_irqs - 1); i >= 0; i--)
-		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
 
 	dw->nr_irqs = 0;
 
@@ -904,7 +931,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
 
 	/* Free irqs */
 	for (i = (dw->nr_irqs - 1); i >= 0; i--)
-		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
 
 	/* Power management */
 	pm_runtime_disable(dev);
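Two behavioural points in the dw-edma hunks: the !chan->configured test now runs before any direction reasoning, and the direction check depends on how the channel was configured. The switch is easier to audit as a table; everything below is derived from the hunk above, with the local/remote labels taken from its comments:

    /*
     * chan->config.direction    requested dir     chan->dir       verdict
     * ------------------------  ----------------  --------------  -----------
     * DMA_DEV_TO_MEM            DMA_DEV_TO_MEM    EDMA_DIR_READ   OK (local)
     * DMA_MEM_TO_DEV            DMA_MEM_TO_DEV    EDMA_DIR_WRITE  OK (local)
     * anything else             DMA_MEM_TO_DEV    EDMA_DIR_READ   OK (remote)
     * anything else             DMA_DEV_TO_MEM    EDMA_DIR_WRITE  OK (remote)
     * (all other combinations)                                    return NULL
     */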
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 4e5f9f6e901b..31fc50d31792 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -103,6 +103,10 @@ struct dw_edma_irq {
 	struct dw_edma *dw;
 };
 
+struct dw_edma_core_ops {
+	int	(*irq_vector)(struct device *dev, unsigned int nr);
+};
+
 struct dw_edma {
 	char			name[20];
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index dc85f55e1bb8..1eafc602e17e 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -54,6 +54,15 @@ static const struct dw_edma_pcie_data snps_edda_data = {
 	.irqs				= 1,
 };
 
+static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
+{
+	return pci_irq_vector(to_pci_dev(dev), nr);
+}
+
+static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
+	.irq_vector = dw_edma_pcie_irq_vector,
+};
+
 static int dw_edma_pcie_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *pid)
 {
@@ -151,6 +160,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	dw->version = pdata->version;
 	dw->mode = pdata->mode;
 	dw->nr_irqs = nr_irqs;
+	dw->ops = &dw_edma_pcie_core_ops;
 
 	/* Debug info */
 	pci_dbg(pdev, "Version:\t%u\n", dw->version);
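With dw_edma_core_ops in place, the core no longer calls pci_irq_vector() directly; whoever instantiates the IP supplies the IRQ lookup. The PCIe glue above wires it to pci_irq_vector(). A hypothetical embedded integration (invented my_* names; no such glue exists in this series) could look like:

    #include <linux/platform_device.h>

    /* Resolve the nth interrupt of a memory-mapped eDMA instance from the
     * platform bus instead of a PCI MSI vector.
     */
    static int my_edma_plat_irq_vector(struct device *dev, unsigned int nr)
    {
            return platform_get_irq(to_platform_device(dev), nr);
    }

    static const struct dw_edma_core_ops my_edma_plat_core_ops = {
            .irq_vector = my_edma_plat_irq_vector,
    };

The irq_get_msi_desc() checks added above serve the same goal: only fetch a cached MSI message when the IRQ really is MSI-backed, which need not be true off PCI.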
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3999827970ab..052dae5d6ddd 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1092,6 +1092,16 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
 };
 
 /* IDXD device attribs */
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct idxd_device *idxd =
+		container_of(dev, struct idxd_device, conf_dev);
+
+	return sprintf(buf, "%#x\n", idxd->hw.version);
+}
+static DEVICE_ATTR_RO(version);
+
 static ssize_t max_work_queues_size_show(struct device *dev,
 					 struct device_attribute *attr,
 					 char *buf)
@@ -1313,6 +1323,7 @@ static ssize_t cdev_major_show(struct device *dev,
 static DEVICE_ATTR_RO(cdev_major);
 
 static struct attribute *idxd_device_attributes[] = {
+	&dev_attr_version.attr,
 	&dev_attr_max_groups.attr,
 	&dev_attr_max_work_queues.attr,
 	&dev_attr_max_work_queues_size.attr,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4d4477df4ede..91774039ae5d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2063,7 +2063,7 @@ static int sdma_probe(struct platform_device *pdev)
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
-	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
 		saddr_arr[i] = -EINVAL;
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
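The imx-sdma loop bound change sizes the poison loop from the structure itself, so adding script entries can no longer desynchronize a version-specific constant from the layout. The idiom generalizes; a self-contained sketch with invented demo_* names:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_script_addrs {
            s32 ap_2_ap_addr;
            s32 ap_2_bp_addr;
            s32 bp_2_ap_addr;
            /* ...more consecutive s32 script offsets... */
    };

    static void demo_poison_scripts(struct demo_script_addrs *addrs)
    {
            s32 *slot = (s32 *)addrs;
            unsigned int i;

            /* sizeof(*addrs) / sizeof(s32) tracks the struct automatically */
            for (i = 0; i < sizeof(*addrs) / sizeof(s32); i++)
                    slot[i] = -EINVAL;
    }

This only stays valid while the structure really is laid out as homogeneous s32 members, which is the implicit contract in sdma_probe() as well.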
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e57592..8ad0ad861c86 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 	u8 *pos;
 	off_t offs;
 
-	chunk = idx / IOAT_DESCS_PER_2M;
-	idx &= (IOAT_DESCS_PER_2M - 1);
+	chunk = idx / IOAT_DESCS_PER_CHUNK;
+	idx &= (IOAT_DESCS_PER_CHUNK - 1);
 	offs = idx * IOAT_DESC_SZ;
 	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
 	phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 	if (!ring)
 		return NULL;
 
-	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+	ioat_chan->desc_chunks = chunks;
 
 	for (i = 0; i < chunks; i++) {
 		struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 
 			for (idx = 0; idx < i; idx++) {
 				descs = &ioat_chan->descs[idx];
-				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
-						  descs->virt, descs->hw);
+				dma_free_coherent(to_dev(ioat_chan),
+						  IOAT_CHUNK_SIZE,
+						  descs->virt, descs->hw);
 				descs->virt = NULL;
 				descs->hw = 0;
 			}
@@ -404,7 +406,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 
 		for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
 			dma_free_coherent(to_dev(ioat_chan),
-					  SZ_2M,
+					  IOAT_CHUNK_SIZE,
 					  ioat_chan->descs[idx].virt,
 					  ioat_chan->descs[idx].hw);
 			ioat_chan->descs[idx].virt = NULL;
@@ -867,6 +869,23 @@ static void check_active(struct ioatdma_chan *ioat_chan)
 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
 }
 
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+	spin_lock_bh(&ioat_chan->prep_lock);
+	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	ioat_abort_descs(ioat_chan);
+	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+	ioat_reset_hw(ioat_chan);
+	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+	ioat_restart_channel(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->prep_lock);
+	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
 void ioat_timer_event(struct timer_list *t)
 {
 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
@@ -889,19 +908,7 @@ void ioat_timer_event(struct timer_list *t)
 
 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
 			spin_lock_bh(&ioat_chan->cleanup_lock);
-			spin_lock_bh(&ioat_chan->prep_lock);
-			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-			spin_unlock_bh(&ioat_chan->prep_lock);
-
-			ioat_abort_descs(ioat_chan);
-			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
-			ioat_reset_hw(ioat_chan);
-			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
-			ioat_restart_channel(ioat_chan);
-
-			spin_lock_bh(&ioat_chan->prep_lock);
-			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-			spin_unlock_bh(&ioat_chan->prep_lock);
+			ioat_reboot_chan(ioat_chan);
 			spin_unlock_bh(&ioat_chan->cleanup_lock);
 		}
 
@@ -915,17 +922,23 @@ void ioat_timer_event(struct timer_list *t)
 		spin_lock_bh(&ioat_chan->prep_lock);
 		check_active(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
+		goto unlock_out;
+	}
+
+	/* handle the missed cleanup case */
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
+		/* timer restarted in ioat_cleanup_preamble
+		 * and IOAT_COMPLETION_ACK cleared
+		 */
+		__cleanup(ioat_chan, phys_complete);
+		goto unlock_out;
 	}
 
 	/* if we haven't made progress and we have already
 	 * acknowledged a pending completion once, then be more
 	 * forceful with a restart
 	 */
-	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-		__cleanup(ioat_chan, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
 		u32 chanerr;
 
 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -937,25 +950,23 @@ void ioat_timer_event(struct timer_list *t)
 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
 			ioat_ring_active(ioat_chan));
 
-		spin_lock_bh(&ioat_chan->prep_lock);
-		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		ioat_reboot_chan(ioat_chan);
 
-		ioat_abort_descs(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
-		ioat_reset_hw(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
-		ioat_restart_channel(ioat_chan);
+		goto unlock_out;
+	}
 
+	/* handle missed issue pending case */
+	if (ioat_ring_pending(ioat_chan)) {
+		dev_warn(to_dev(ioat_chan),
+			 "Completion timeout with pending descriptors\n");
 		spin_lock_bh(&ioat_chan->prep_lock);
-		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		__ioat_issue_pending(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
-	} else
-		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+	}
 
+	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+unlock_out:
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b9693c..e6b622e1ba92 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
 	u32 msixpba;
 };
 
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_512K)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
+
 struct ioat_descs {
 	void *virt;
 	dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
 	u16 produce;
 	struct ioat_ring_ent **ring;
 	spinlock_t prep_lock;
-	struct ioat_descs descs[2];
+	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
 	int desc_chunks;
 	int intr_coalesce;
 	int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
 	return !!err;
 }
 
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
-
 static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
 {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afbb896c..58d13564f88b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
 	}
 
 	for (i = 0; i < ioat_chan->desc_chunks; i++) {
-		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+		dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
 				  ioat_chan->descs[i].virt,
 				  ioat_chan->descs[i].hw);
 		ioat_chan->descs[i].virt = NULL;
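The new constants keep the ioat descriptor ring math self-describing. IOAT_DESC_SZ is 64 bytes in this driver (cross-checked by the old constant: IOAT_DESCS_PER_2M == 32768 == SZ_2M / 64), so the derived sizes work out as below:

    /* Derived sizes, assuming IOAT_DESC_SZ == 64:
     *
     *   IOAT_DESCS_PER_CHUNK = SZ_512K / 64          = 8192 descriptors
     *   descs[] entries      = IOAT_MAX_DESCS / 8192 = 8 chunks
     *
     * versus the old fixed descs[2] of 2 MiB chunks.  A compile-time
     * assertion (linux/build_bug.h) can pin the relationship:
     */
    static_assert(IOAT_MAX_DESCS % IOAT_DESCS_PER_CHUNK == 0);

Splitting the ring across 512 KiB coherent allocations instead of 2 MiB ones makes the allocations much more likely to succeed on fragmented systems while keeping the total descriptor count unchanged.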
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d683232d7fea..dbc6a48424fa 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -235,7 +235,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
 			tdcr |= TDCR_BURSTSZ_128B;
 			break;
 		default:
-			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+			dev_err(tdmac->dev, "unknown burst size.\n");
 			return -EINVAL;
 		}
 
@@ -250,7 +250,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
 			tdcr |= TDCR_SSZ_32_BITS;
 			break;
 		default:
-			dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
+			dev_err(tdmac->dev, "unknown bus size.\n");
 			return -EINVAL;
 		}
 	} else if (tdmac->type == PXA910_SQU) {
@@ -276,7 +276,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
 			tdcr |= TDCR_BURSTSZ_SQU_32B;
 			break;
 		default:
-			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+			dev_err(tdmac->dev, "unknown burst size.\n");
 			return -EINVAL;
 		}
 	}
@@ -429,8 +429,15 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
 	int num_periods = buf_len / period_len;
 	int i = 0, buf = 0;
 
-	if (tdmac->status != DMA_COMPLETE)
+	if (!is_slave_direction(direction)) {
+		dev_err(tdmac->dev, "unsupported transfer direction\n");
 		return NULL;
+	}
+
+	if (tdmac->status != DMA_COMPLETE) {
+		dev_err(tdmac->dev, "controller busy");
+		return NULL;
+	}
 
 	if (period_len > TDMA_MAX_XFER_BYTES) {
 		dev_err(tdmac->dev,
@@ -704,6 +711,17 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
 	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
+	tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	if (type == MMP_AUD_TDMA) {
+		tdev->device.max_burst = SZ_128;
+		tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+		tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	} else if (type == PXA910_SQU) {
+		tdev->device.max_burst = SZ_32;
+	}
+	tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	tdev->device.descriptor_reuse = true;
+
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index e04499c1f27f..4ab493d46375 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -568,7 +568,7 @@ static int moxart_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
 	struct resource *res;
-	static void __iomem *dma_base_addr;
+	void __iomem *dma_base_addr;
 	int ret, i;
 	unsigned int irq;
 	struct moxart_chan *ch;
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index ef73f65224b1..5a08dd0d3388 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -74,7 +74,7 @@ struct bam_async_desc {
 	struct list_head desc_node;
 	enum dma_transfer_direction dir;
 	size_t length;
-	struct bam_desc_hw desc[0];
+	struct bam_desc_hw desc[];
 };
 
 enum bam_reg {
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 411f91fde734..0a6d3ea08c78 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -550,7 +550,7 @@ static void hidma_free_chan_resources(struct dma_chan *dmach)
 		kfree(mdesc);
 	}
 
-	mchan->allocated = 0;
+	mchan->allocated = false;
 	spin_unlock_irqrestore(&mchan->lock, irqflags);
 }
 
@@ -897,7 +897,6 @@ uninit:
 	if (msi)
 		hidma_free_msis(dmadev);
 
-	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 dmafree:
 	if (dmadev)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 6d0bec947636..5c118c7e02bd 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -506,11 +506,11 @@ static int sf_pdma_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pdma->membase = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(pdma->membase))
-		goto ERR_MEMBASE;
+		return PTR_ERR(pdma->membase);
 
 	ret = sf_pdma_irq_init(pdev, pdma);
 	if (ret)
-		goto ERR_INITIRQ;
+		return ret;
 
 	sf_pdma_setup_chans(pdma);
 
@@ -544,24 +544,13 @@ static int sf_pdma_probe(struct platform_device *pdev)
 			"Failed to set DMA mask. Fall back to default.\n");
 
 	ret = dma_async_device_register(&pdma->dma_dev);
-	if (ret)
-		goto ERR_REG_DMADEVICE;
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Can't register SiFive Platform DMA. (%d)\n", ret);
+		return ret;
+	}
 
 	return 0;
-
-ERR_MEMBASE:
-	devm_kfree(&pdev->dev, pdma);
-	return PTR_ERR(pdma->membase);
-
-ERR_INITIRQ:
-	devm_kfree(&pdev->dev, pdma);
-	return ret;
-
-ERR_REG_DMADEVICE:
-	devm_kfree(&pdev->dev, pdma);
-	dev_err(&pdev->dev,
-		"Can't register SiFive Platform DMA. (%d)\n", ret);
-	return ret;
 }
 
 static int sf_pdma_remove(struct platform_device *pdev)
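The sf-pdma rework deletes three error labels whose only job was devm_kfree() plus a late dev_err(). Memory obtained through devm_* is tied to the device's lifetime: when probe fails, the driver core releases it automatically, so those explicit frees were dead weight. (Similarly, the moxart hunk drops a stray `static` from a probe-local pointer; a function-local static is shared by every probe call, which breaks multi-instance use.) Shape of the resulting devres pattern, with invented demo_* names:

    #include <linux/err.h>
    #include <linux/platform_device.h>

    struct demo_priv {
            void __iomem *base;
    };

    static int demo_hw_init(struct demo_priv *priv)
    {
            return priv->base ? 0 : -ENODEV;  /* stand-in for real setup */
    }

    static int demo_probe(struct platform_device *pdev)
    {
            struct demo_priv *priv;

            /* devres: released automatically on probe failure or unbind,
             * so the error paths below just return -- no devm_kfree().
             */
            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(priv->base))
                    return PTR_ERR(priv->base);

            return demo_hw_init(priv);
    }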
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 0ddbaa4b4f0b..96ad1b3d24c6 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -117,6 +117,7 @@
 #define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
 #define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
 #define STM32_DMA_FIFO_THRESHOLD_FULL		0x03
+#define STM32_DMA_FIFO_THRESHOLD_NONE		0x04
 
 #define STM32_DMA_MAX_DATA_ITEMS	0xffff
 /*
@@ -136,6 +137,9 @@
 /* DMA Features */
 #define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
 #define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
+#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) \
+					 >> 2)
 
 enum stm32_dma_width {
 	STM32_DMA_BYTE,
@@ -281,6 +285,9 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
 {
 	u32 remaining;
 
+	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+		return false;
+
 	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
 		if (burst != 0) {
 			/*
@@ -302,6 +309,10 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
 
 static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
 {
+	/* If FIFO direct mode, burst is not possible */
+	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+		return false;
+
 	/*
 	 * Buffer or period length has to be aligned on FIFO depth.
 	 * Otherwise bytes may be stuck within FIFO at buffer or period
@@ -657,6 +668,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
 			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
 		}
 	}
+	if (status & STM32_DMA_DMEI) {
+		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+		status &= ~STM32_DMA_DMEI;
+		if (sfcr & STM32_DMA_SCR_DMEIE)
+			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
+	}
 	if (status) {
 		stm32_dma_irq_clear(chan, status);
 		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
@@ -692,13 +709,13 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 	int src_bus_width, dst_bus_width;
 	int src_burst_size, dst_burst_size;
 	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
-	u32 dma_scr, threshold;
+	u32 dma_scr, fifoth;
 
 	src_addr_width = chan->dma_sconfig.src_addr_width;
 	dst_addr_width = chan->dma_sconfig.dst_addr_width;
 	src_maxburst = chan->dma_sconfig.src_maxburst;
 	dst_maxburst = chan->dma_sconfig.dst_maxburst;
-	threshold = chan->threshold;
+	fifoth = chan->threshold;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
@@ -710,7 +727,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 
 		/* Set device burst size */
 		dst_best_burst = stm32_dma_get_best_burst(buf_len,
 							  dst_maxburst,
-							  threshold,
+							  fifoth,
 							  dst_addr_width);
 
 		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -718,7 +735,7 @@
 			return dst_burst_size;
 
 		/* Set memory data size */
-		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
 		chan->mem_width = src_addr_width;
 		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
 		if (src_bus_width < 0)
@@ -728,7 +745,7 @@
 			return src_bus_width;
 
 		/* Set memory burst size */
 		src_maxburst = STM32_DMA_MAX_BURST;
 		src_best_burst = stm32_dma_get_best_burst(buf_len,
 							  src_maxburst,
-							  threshold,
+							  fifoth,
 							  src_addr_width);
 		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
 		if (src_burst_size < 0)
@@ -742,7 +759,8 @@
 			return src_burst_size;
 
 		/* Set FIFO threshold */
 		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
 
 		/* Set peripheral address */
 		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
@@ -758,7 +776,7 @@
 
 		/* Set device burst size */
 		src_best_burst = stm32_dma_get_best_burst(buf_len,
 							  src_maxburst,
-							  threshold,
+							  fifoth,
 							  src_addr_width);
 		chan->mem_burst = src_best_burst;
 		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
@@ -766,7 +784,7 @@
 			return src_burst_size;
 
 		/* Set memory data size */
-		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
 		chan->mem_width = dst_addr_width;
 		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
 		if (dst_bus_width < 0)
@@ -776,7 +794,7 @@
 			return dst_bus_width;
 
 		/* Set memory burst size */
 		dst_maxburst = STM32_DMA_MAX_BURST;
 		dst_best_burst = stm32_dma_get_best_burst(buf_len,
 							  dst_maxburst,
-							  threshold,
+							  fifoth,
 							  dst_addr_width);
 		chan->mem_burst = dst_best_burst;
 		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -791,7 +809,8 @@
 
 		/* Set FIFO threshold */
 		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
 
 		/* Set peripheral address */
 		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
@@ -1216,6 +1235,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
 	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
 
 	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
+	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
 }
 
 static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
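Direct mode (the new STM32_DMA_FIFO_THRESHOLD_NONE value) bypasses the stream FIFO, which is why it disqualifies both the FIFO-threshold check and bursts above, leaves the SFCR FTH field untouched, and gains its own DMEI (direct mode error) handling in the IRQ path. Clients request it through bit 2 of the features cell; the resulting threshold selection, restated as a helper (demo_ name invented, macros from the hunks above):

    static u32 demo_stm32_threshold(u32 features)
    {
            u32 threshold = STM32_DMA_THRESHOLD_FTR_GET(features);  /* bits 1:0 */

            if (STM32_DMA_DIRECT_MODE_GET(features))                /* bit 2 */
                    threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
            return threshold;
    }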
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index f76e06651f80..79618fac119a 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -36,7 +36,7 @@ config DMA_OMAP
 
 config TI_K3_UDMA
 	bool "Texas Instruments UDMA support"
-	depends on ARCH_K3 || COMPILE_TEST
+	depends on ARCH_K3
 	depends on TI_SCI_PROTOCOL
 	depends on TI_SCI_INTA_IRQCHIP
 	select DMA_ENGINE
@@ -49,7 +49,7 @@ config TI_K3_UDMA
 
 config TI_K3_UDMA_GLUE_LAYER
 	bool "Texas Instruments UDMA Glue layer for non DMAengine users"
-	depends on ARCH_K3 || COMPILE_TEST
+	depends on ARCH_K3
 	depends on TI_K3_UDMA
 	help
 	  Say y here to support the K3 NAVSS DMA glue interface
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index a90e154b0ae0..945b7c604f91 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -231,7 +231,6 @@ struct udma_chan {
 	struct udma_tx_drain tx_drain;
 
 	u32 bcnt; /* number of bytes completed since the start of the channel */
-	u32 in_ring_cnt; /* number of descriptors in flight */
 
 	/* Channel configuration parameters */
 	struct udma_chan_config config;
@@ -574,7 +573,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
 	struct udma_desc *d = uc->desc;
 	struct k3_ring *ring = NULL;
 	dma_addr_t paddr;
-	int ret;
 
 	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
@@ -598,11 +596,7 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
 		udma_sync_for_device(uc, idx);
 	}
 
-	ret = k3_ringacc_ring_push(ring, &paddr);
-	if (!ret)
-		uc->in_ring_cnt++;
-
-	return ret;
+	return k3_ringacc_ring_push(ring, &paddr);
 }
 
 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
@@ -655,9 +649,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
 					d->hwdesc[0].cppi5_desc_size,
 					DMA_FROM_DEVICE);
 		rmb(); /* Ensure that reads are not moved before this point */
-
-		if (!ret)
-			uc->in_ring_cnt--;
 	}
 
 	return ret;
@@ -697,8 +688,6 @@ static void udma_reset_rings(struct udma_chan *uc)
 		udma_desc_free(&uc->terminated_desc->vd);
 		uc->terminated_desc = NULL;
 	}
-
-	uc->in_ring_cnt = 0;
 }
 
 static void udma_reset_counters(struct udma_chan *uc)
@@ -1073,9 +1062,6 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
 
 	/* Teardown completion message */
 	if (cppi5_desc_is_tdcm(paddr)) {
-		/* Compensate our internal pop/push counter */
-		uc->in_ring_cnt++;
-
 		complete_all(&uc->teardown_completed);
 
 		if (uc->terminated_desc) {
@@ -1291,10 +1277,8 @@ static int udma_get_tchan(struct udma_chan *uc)
 	}
 
 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
-	if (IS_ERR(uc->tchan))
-		return PTR_ERR(uc->tchan);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(uc->tchan);
 }
 
 static int udma_get_rchan(struct udma_chan *uc)
@@ -1308,10 +1292,8 @@ static int udma_get_rchan(struct udma_chan *uc)
 	}
 
 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
-	if (IS_ERR(uc->rchan))
-		return PTR_ERR(uc->rchan);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(uc->rchan);
 }
 
 static int udma_get_chan_pair(struct udma_chan *uc)
@@ -1373,10 +1355,8 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
 	}
 
 	uc->rflow = __udma_get_rflow(ud, flow_id);
-	if (IS_ERR(uc->rflow))
-		return PTR_ERR(uc->rflow);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(uc->rflow);
 }
 
 static void udma_put_rchan(struct udma_chan *uc)
@@ -1870,6 +1850,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 		udma_stop(uc);
 		if (udma_is_chan_running(uc)) {
 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+			ret = -EBUSY;
 			goto err_res_free;
 		}
 	}
@@ -3189,7 +3170,7 @@ static struct udma_match_data am654_main_data = {
 
 static struct udma_match_data am654_mcu_data = {
 	.psil_base = 0x6000,
-	.enable_memcpy_support = true, /* TEST: DMA domains */
+	.enable_memcpy_support = false,
 	.statictr_z_mask = GENMASK(11, 0),
 	.rchan_oes_offset = 0x2000,
 	.tpl_levels = 2,
@@ -3471,6 +3452,9 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
 	tr_req->icnt0 = rx_flush->buffer_size;
 	tr_req->icnt1 = 1;
 
+	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
+				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
+
 	/* Set up descriptor to be used for packet mode */
 	hwdesc = &rx_flush->hwdescs[1];
 	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
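The udma_get_tchan()/udma_get_rchan()/udma_get_rflow() conversions above use PTR_ERR_OR_ZERO() from <linux/err.h>, which is exactly the deleted three-line pattern collapsed into one expression:

    /* Equivalent open-coded form of PTR_ERR_OR_ZERO(ptr): */
    static inline int demo_ptr_err_or_zero(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }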