author     Linus Torvalds  2022-07-10 11:23:01 -0700
committer  Linus Torvalds  2022-07-10 11:23:01 -0700
commit     952c53cd357c71338a59d444933ed48a879229e1 (patch)
tree       8e109fc88a84509ceeabc373fc2b362784e27537
parent     5867f3b88bb54016c42cdde510c184255488a12b (diff)
parent     607a48c78e6b427b0b684d24e61c19e846ad65d6 (diff)
Merge tag 'dmaengine-fix-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine fixes from Vinod Koul:
"One core fix for DMA_INTERRUPT and rest driver fixes.
Core:
- Revert verification of DMA_INTERRUPT capability as that was
incorrect
Bunch of driver fixes for:
- ti: refcount and put_device leak
- qcom_bam: runtime PM underflow (see the sketch after this message)
- idxd: force wq context cleanup and call idxd_enable_system_pasid()
on success
- dw-axi-dmac: RMW on channel suspend register
- imx-sdma: restart cyclic channel when enabled
- at_xdma: error handling for at_xdmac_alloc_desc
- pl330: lockdep warning
- lgm: error handling path in probe
- allwinner: Fix min/max typo in binding"
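The bam_dma patch in this pull removes a bam_pm_runtime_get_sync() wrapper that skipped pm_runtime_get_sync() when runtime PM was disabled while the matching put calls still ran, letting the usage count drop below zero. Here is a minimal, illustrative sketch of the balanced get/put pattern the driver relies on after the fix; the function name and the work between the calls are placeholders, not code from bam_dma.c:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Illustrative only: each get (successful or failed) is matched by exactly
 * one put, so the runtime-PM usage count stays balanced even when runtime
 * PM is disabled for the device.
 */
static int example_do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* usage count +1, resumes device */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... program the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* usage count -1 */
	return 0;
}
```

With the wrapper gone, every path takes and drops exactly one reference whether or not runtime PM is enabled, which is also why the probe, suspend and resume paths in the patch no longer special-case a missing bamclk.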
* tag 'dmaengine-fix-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
dt-bindings: dma: allwinner,sun50i-a64-dma: Fix min/max typo
dmaengine: lgm: Fix an error handling path in intel_ldma_probe()
dmaengine: pl330: Fix lockdep warning about non-static key
dmaengine: idxd: Only call idxd_enable_system_pasid() if succeeded in enabling SVA feature
dmaengine: at_xdma: handle errors of at_xdmac_alloc_desc() correctly
dmaengine: imx-sdma: only restart cyclic channel when enabled
dmaengine: dw-axi-dmac: Fix RMW on channel suspend register
dmaengine: idxd: force wq context cleanup on device disable path
dmaengine: qcom: bam_dma: fix runtime PM underflow
dmaengine: imx-sdma: Allow imx8m for imx7 FW revs
dmaengine: Revert "dmaengine: add verification of DMA_INTERRUPT capability for dmatest"
dmaengine: ti: Add missing put_device in ti_dra7_xbar_route_allocate
dmaengine: ti: Fix refcount leak in ti_dra7_xbar_route_allocate
-rw-r--r-- | Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml | 2
-rw-r--r-- | drivers/dma/at_xdmac.c | 5
-rw-r--r-- | drivers/dma/dmatest.c | 13
-rw-r--r-- | drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 8
-rw-r--r-- | drivers/dma/idxd/device.c | 5
-rw-r--r-- | drivers/dma/idxd/init.c | 13
-rw-r--r-- | drivers/dma/imx-sdma.c | 4
-rw-r--r-- | drivers/dma/lgm/lgm-dma.c | 3
-rw-r--r-- | drivers/dma/pl330.c | 2
-rw-r--r-- | drivers/dma/qcom/bam_dma.c | 39
-rw-r--r-- | drivers/dma/ti/dma-crossbar.c | 5
11 files changed, 43 insertions(+), 56 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
index ff0a5c58d78c..e712444abff1 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
@@ -67,7 +67,7 @@ if:
 then:
   properties:
     clocks:
-      maxItems: 2
+      minItems: 2
 
   required:
     - clock-names
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 3e9d726504e2..7b3e6030f7b4 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1900,6 +1900,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	for (i = 0; i < init_nr_desc_per_channel; i++) {
 		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
 		if (!desc) {
+			if (i == 0) {
+				dev_warn(chan2dev(chan),
+					 "can't allocate any descriptors\n");
+				return -EIO;
+			}
 			dev_warn(chan2dev(chan),
 				"only %d descriptors have been allocated\n", i);
 			break;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 0a2168a4ccb0..f696246f57fd 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -675,16 +675,10 @@ static int dmatest_func(void *data)
 	/*
 	 * src and dst buffers are freed by ourselves below
 	 */
-	if (params->polled) {
+	if (params->polled)
 		flags = DMA_CTRL_ACK;
-	} else {
-		if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
-			flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-		} else {
-			pr_err("Channel does not support interrupt!\n");
-			goto err_pq_array;
-		}
-	}
+	else
+		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
 	ktime = ktime_get();
 	while (!(kthread_should_stop() ||
@@ -912,7 +906,6 @@ error_unmap_continue:
 	runtime = ktime_to_us(ktime);
 
 	ret = 0;
-err_pq_array:
 	kfree(dma_pq);
 err_srcs_array:
 	kfree(srcs);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index e9c9bcb1f5c2..c741da02b67e 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1164,8 +1164,9 @@ static int dma_chan_pause(struct dma_chan *dchan)
 			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
 		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 	} else {
-		val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
-		      BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
 	}
 
@@ -1190,12 +1191,13 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
 {
 	u32 val;
 
-	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
 		val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
 		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 	} else {
+		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
 		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
 		val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ff0ea60051f0..5a8cc52c1abf 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 		struct idxd_wq *wq = idxd->wqs[i];
 
 		mutex_lock(&wq->wq_lock);
-		if (wq->state == IDXD_WQ_ENABLED) {
-			idxd_wq_disable_cleanup(wq);
-			wq->state = IDXD_WQ_DISABLED;
-		}
+		idxd_wq_disable_cleanup(wq);
 		idxd_wq_device_reset_cleanup(wq);
 		mutex_unlock(&wq->wq_lock);
 	}
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 355fb3ef4cbf..aa3478257ddb 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -512,15 +512,16 @@ static int idxd_probe(struct idxd_device *idxd)
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 			dev_warn(dev, "Unable to turn on user SVA feature.\n");
-		else
+		} else {
 			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
 
-		if (idxd_enable_system_pasid(idxd))
-			dev_warn(dev, "No in-kernel DMA with PASID.\n");
-		else
-			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+			if (idxd_enable_system_pasid(idxd))
+				dev_warn(dev, "No in-kernel DMA with PASID.\n");
+			else
+				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+		}
 	} else if (!sva) {
 		dev_warn(dev, "User forced SVA off via module param.\n");
 	}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 8535018ee7a2..f37a276f519e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -891,7 +891,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
 	 * owned buffer is available (i.e. BD_DONE was set too late).
 	 */
-	if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
 		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
 		sdma_enable_channel(sdmac->sdma, sdmac->channel);
 	}
@@ -2346,7 +2346,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver");
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index efe8bd3a0e2a..9b9184f964be 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	d->core_clk = devm_clk_get_optional(dev, NULL);
 	if (IS_ERR(d->core_clk))
 		return PTR_ERR(d->core_clk);
-	clk_prepare_enable(d->core_clk);
 
 	d->rst = devm_reset_control_get_optional(dev, NULL);
 	if (IS_ERR(d->rst))
 		return PTR_ERR(d->rst);
+
+	clk_prepare_enable(d->core_clk);
 	reset_control_deassert(d->rst);
 
 	ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 858400e42ec0..09915a5cba3e 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2589,7 +2589,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 
 	/* If the DMAC pool is empty, alloc new */
 	if (!desc) {
-		DEFINE_SPINLOCK(lock);
+		static DEFINE_SPINLOCK(lock);
 		LIST_HEAD(pool);
 
 		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 87f6ca1541cf..2ff787df513e 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -558,14 +558,6 @@ static int bam_alloc_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int bam_pm_runtime_get_sync(struct device *dev)
-{
-	if (pm_runtime_enabled(dev))
-		return pm_runtime_get_sync(dev);
-
-	return 0;
-}
-
 /**
  * bam_free_chan - Frees dma resources associated with specific channel
  * @chan: specified channel
@@ -581,7 +573,7 @@ static void bam_free_chan(struct dma_chan *chan)
 	unsigned long flags;
 	int ret;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return;
 
@@ -784,7 +776,7 @@ static int bam_pause(struct dma_chan *chan)
 	unsigned long flag;
 	int ret;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return ret;
 
@@ -810,7 +802,7 @@ static int bam_resume(struct dma_chan *chan)
 	unsigned long flag;
 	int ret;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return ret;
 
@@ -919,7 +911,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return IRQ_NONE;
 
@@ -1037,7 +1029,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 	if (!vd)
 		return;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return;
 
@@ -1374,11 +1366,6 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unregister_dma;
 
-	if (!bdev->bamclk) {
-		pm_runtime_disable(&pdev->dev);
-		return 0;
-	}
-
 	pm_runtime_irq_safe(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -1462,10 +1449,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
 {
 	struct bam_device *bdev = dev_get_drvdata(dev);
 
-	if (bdev->bamclk) {
-		pm_runtime_force_suspend(dev);
-		clk_unprepare(bdev->bamclk);
-	}
+	pm_runtime_force_suspend(dev);
+	clk_unprepare(bdev->bamclk);
 
 	return 0;
 }
@@ -1475,13 +1460,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
 	struct bam_device *bdev = dev_get_drvdata(dev);
 	int ret;
 
-	if (bdev->bamclk) {
-		ret = clk_prepare(bdev->bamclk);
-		if (ret)
-			return ret;
+	ret = clk_prepare(bdev->bamclk);
+	if (ret)
+		return ret;
 
-		pm_runtime_force_resume(dev);
-	}
+	pm_runtime_force_resume(dev);
 
 	return 0;
 }
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 71d24fc07c00..f744ddbbbad7 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args[0] >= xbar->xbar_requests) {
 		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
 			dma_spec->args[0]);
+		put_device(&pdev->dev);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "Can't get DMA master\n");
+		put_device(&pdev->dev);
 		return ERR_PTR(-EINVAL);
 	}
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map) {
 		of_node_put(dma_spec->np);
+		put_device(&pdev->dev);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 		mutex_unlock(&xbar->mutex);
 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
 		kfree(map);
+		of_node_put(dma_spec->np);
+		put_device(&pdev->dev);
 		return ERR_PTR(-ENOMEM);
 	}
 	set_bit(map->xbar_out, xbar->dma_inuse);
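The dma-crossbar hunks above add the put_device() and of_node_put() calls that each early return had been leaking. Below is a minimal sketch of the same reference balancing written as a goto unwind, the more common kernel idiom once several references stack up; all names here are hypothetical and not taken from dma-crossbar.c:

```c
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: every reference taken before a failure is dropped
 * exactly once by unwinding in reverse order, which is the invariant the
 * added put_device()/of_node_put() calls restore with early returns.
 */
static void *example_route_allocate(struct device_node *np)
{
	struct platform_device *pdev;
	struct device_node *master;
	void *map;

	pdev = of_find_device_by_node(np);		/* takes a device reference */
	if (!pdev)
		return ERR_PTR(-ENODEV);

	master = of_parse_phandle(np, "dma-masters", 0);/* takes a node reference */
	if (!master) {
		map = ERR_PTR(-EINVAL);
		goto err_put_dev;
	}

	map = kzalloc(sizeof(u32), GFP_KERNEL);		/* placeholder allocation */
	if (!map) {
		map = ERR_PTR(-ENOMEM);
		goto err_put_node;
	}

	/* This sketch is done with both references on success as well. */
	of_node_put(master);
	put_device(&pdev->dev);
	return map;

err_put_node:
	of_node_put(master);
err_put_dev:
	put_device(&pdev->dev);
	return map;
}
```

Whether a driver uses early returns with explicit puts, as the patch does, or a goto ladder as above, the point is the same: every reference taken on the way into the function must be released on every exit path.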