Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/amba-pl08x.c | 14
-rw-r--r-- | drivers/dma/bcm2835-dma.c | 13
-rw-r--r-- | drivers/dma/coh901318.c | 5
-rw-r--r-- | drivers/dma/dmatest.c | 299
-rw-r--r-- | drivers/dma/dw/core.c | 2
-rw-r--r-- | drivers/dma/dw/platform.c | 6
-rw-r--r-- | drivers/dma/dw/regs.h | 4
-rw-r--r-- | drivers/dma/ep93xx_dma.c | 6
-rw-r--r-- | drivers/dma/imx-sdma.c | 60
-rw-r--r-- | drivers/dma/mediatek/Kconfig | 13
-rw-r--r-- | drivers/dma/mediatek/Makefile | 1
-rw-r--r-- | drivers/dma/mediatek/mtk-cqdma.c | 951
-rw-r--r-- | drivers/dma/mic_x100_dma.c | 22
-rw-r--r-- | drivers/dma/mmp_pdma.c | 28
-rw-r--r-- | drivers/dma/pl330.c | 28
-rw-r--r-- | drivers/dma/pxa_dma.c | 36
-rw-r--r-- | drivers/dma/qcom/hidma_dbg.c | 33
-rw-r--r-- | drivers/dma/ste_dma40.c | 31
-rw-r--r-- | drivers/dma/xilinx/zynqmp_dma.c | 37
19 files changed, 1385 insertions, 204 deletions
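
Several of the changes below (amba-pl08x, mic_x100_dma, pxa_dma and qcom/hidma_dbg) replace hand-rolled single_open()/file_operations boilerplate with the DEFINE_SHOW_ATTRIBUTE() helper from <linux/seq_file.h>. A minimal sketch of the resulting pattern follows; the "foo" names are placeholders and are not taken from any of the drivers in this diff.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* one show() callback is all the driver has to provide */
static int foo_debugfs_show(struct seq_file *s, void *data)
{
	seq_puts(s, "driver-specific state dump goes here\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_debugfs);	/* generates foo_debugfs_open and foo_debugfs_fops */

static void foo_init_debugfs(void *priv)
{
	/* priv is handed back to foo_debugfs_show() via s->private */
	debugfs_create_file("foo", 0444, NULL, priv, &foo_debugfs_fops);
}

The generated foo_debugfs_fops plugs straight into debugfs_create_file(), which is why the conversions below mostly amount to renaming the fops users and deleting the open/release helpers.
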
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 97483df1f82e..fc8c2bab563c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2505,24 +2505,14 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) return 0; } -static int pl08x_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, pl08x_debugfs_show, inode->i_private); -} - -static const struct file_operations pl08x_debugfs_operations = { - .open = pl08x_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs); static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { /* Expose a simple debugfs interface to view all clocks */ (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, NULL, pl08x, - &pl08x_debugfs_operations); + &pl08x_debugfs_fops); } #else diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index cad55ab80d41..1a44c8086d77 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * BCM2835 DMA engine support * @@ -18,16 +19,6 @@ * * MARVELL MMP Peripheral DMA Driver * Copyright 2012 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> @@ -1056,4 +1047,4 @@ module_platform_driver(bcm2835_dma_driver); MODULE_ALIAS("platform:bcm2835-dma"); MODULE_DESCRIPTION("BCM2835 DMA engine driver"); MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index eebaba3d9e78..b69d66e44052 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1802,13 +1802,10 @@ static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec, static int coh901318_config(struct coh901318_chan *cohc, struct coh901318_params *param) { - unsigned long flags; const struct coh901318_params *p; int channel = cohc->id; void __iomem *virtbase = cohc->base->virtbase; - spin_lock_irqsave(&cohc->lock, flags); - if (param) p = param; else @@ -1828,8 +1825,6 @@ static int coh901318_config(struct coh901318_chan *cohc, coh901318_set_conf(cohc, p->config); coh901318_set_ctrl(cohc, p->ctrl_lli_last); - spin_unlock_irqrestore(&cohc->lock, flags); - return 0; } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index aa1712beb0cc..2eea4ef72915 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -27,11 +27,6 @@ static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); -static char test_channel[20]; -module_param_string(channel, test_channel, sizeof(test_channel), - S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); - static char test_device[32]; module_param_string(device, test_device, sizeof(test_device), S_IRUGO | S_IWUSR); @@ -84,6 +79,14 @@ static bool verbose; 
module_param(verbose, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); +static int alignment = -1; +module_param(alignment, int, 0644); +MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))"); + +static unsigned int transfer_size; +module_param(transfer_size, uint, 0644); +MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))"); + /** * struct dmatest_params - test parameters. * @buf_size: size of the memcpy test buffer @@ -108,6 +111,8 @@ struct dmatest_params { int timeout; bool noverify; bool norandom; + int alignment; + unsigned int transfer_size; }; /** @@ -139,6 +144,28 @@ static bool dmatest_run; module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(run, "Run the test (default: false)"); +static int dmatest_chan_set(const char *val, const struct kernel_param *kp); +static int dmatest_chan_get(char *val, const struct kernel_param *kp); +static const struct kernel_param_ops multi_chan_ops = { + .set = dmatest_chan_set, + .get = dmatest_chan_get, +}; + +static char test_channel[20]; +static struct kparam_string newchan_kps = { + .string = test_channel, + .maxlen = 20, +}; +module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644); +MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); + +static int dmatest_test_list_get(char *val, const struct kernel_param *kp); +static const struct kernel_param_ops test_list_ops = { + .get = dmatest_test_list_get, +}; +module_param_cb(test_list, &test_list_ops, NULL, 0444); +MODULE_PARM_DESC(test_list, "Print current test list"); + /* Maximum amount of mismatched bytes in buffer to print */ #define MAX_ERROR_COUNT 32 @@ -160,6 +187,13 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); #define PATTERN_COUNT_MASK 0x1f #define PATTERN_MEMSET_IDX 0x01 +/* Fixed point arithmetic ops */ +#define FIXPT_SHIFT 8 +#define FIXPNT_MASK 0xFF +#define FIXPT_TO_INT(a) ((a) >> FIXPT_SHIFT) +#define INT_TO_FIXPT(a) ((a) << FIXPT_SHIFT) +#define FIXPT_GET_FRAC(a) ((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT) + /* poor man's completion - we want to use wait_event_freezable() on it */ struct dmatest_done { bool done; @@ -179,6 +213,7 @@ struct dmatest_thread { wait_queue_head_t done_wait; struct dmatest_done test_done; bool done; + bool pending; }; struct dmatest_chan { @@ -206,6 +241,22 @@ static bool is_threaded_test_run(struct dmatest_info *info) return false; } +static bool is_threaded_test_pending(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (thread->pending) + return true; + } + } + + return false; +} + static int dmatest_wait_get(char *val, const struct kernel_param *kp) { struct dmatest_info *info = &test_info; @@ -419,13 +470,15 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val) } per_sec *= val; + per_sec = INT_TO_FIXPT(per_sec); do_div(per_sec, runtime); + return per_sec; } static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) { - return dmatest_persec(runtime, len >> 10); + return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10)); } /* @@ -466,6 +519,7 @@ static int dmatest_func(void *data) ktime_t comparetime = 0; s64 runtime = 0; unsigned long long total_len = 0; + unsigned long long iops = 0; u8 align = 0; bool is_memset = false; dma_addr_t *srcs; 
@@ -476,27 +530,32 @@ static int dmatest_func(void *data) ret = -ENOMEM; smp_rmb(); + thread->pending = false; info = thread->info; params = &info->params; chan = thread->chan; dev = chan->device; if (thread->type == DMA_MEMCPY) { - align = dev->copy_align; + align = params->alignment < 0 ? dev->copy_align : + params->alignment; src_cnt = dst_cnt = 1; } else if (thread->type == DMA_MEMSET) { - align = dev->fill_align; + align = params->alignment < 0 ? dev->fill_align : + params->alignment; src_cnt = dst_cnt = 1; is_memset = true; } else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); dst_cnt = 1; - align = dev->xor_align; + align = params->alignment < 0 ? dev->xor_align : + params->alignment; } else if (thread->type == DMA_PQ) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); dst_cnt = 2; - align = dev->pq_align; + align = params->alignment < 0 ? dev->pq_align : + params->alignment; pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL); if (!pq_coefs) @@ -507,9 +566,22 @@ static int dmatest_func(void *data) } else goto err_thread_type; + /* Check if buffer count fits into map count variable (u8) */ + if ((src_cnt + dst_cnt) >= 255) { + pr_err("too many buffers (%d of 255 supported)\n", + src_cnt + dst_cnt); + goto err_free_coefs; + } + + if (1 << align > params->buf_size) { + pr_err("%u-byte buffer too small for %d-byte alignment\n", + params->buf_size, 1 << align); + goto err_free_coefs; + } + thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) - goto err_srcs; + goto err_free_coefs; thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); if (!thread->usrcs) @@ -576,28 +648,25 @@ static int dmatest_func(void *data) total_tests++; - /* Check if buffer count fits into map count variable (u8) */ - if ((src_cnt + dst_cnt) >= 255) { - pr_err("too many buffers (%d of 255 supported)\n", - src_cnt + dst_cnt); - break; - } - - if (1 << align > params->buf_size) { - pr_err("%u-byte buffer too small for %d-byte alignment\n", - params->buf_size, 1 << align); - break; - } - - if (params->norandom) + if (params->transfer_size) { + if (params->transfer_size >= params->buf_size) { + pr_err("%u-byte transfer size must be lower than %u-buffer size\n", + params->transfer_size, params->buf_size); + break; + } + len = params->transfer_size; + } else if (params->norandom) { len = params->buf_size; - else + } else { len = dmatest_random() % params->buf_size + 1; + } - len = (len >> align) << align; - if (!len) - len = 1 << align; - + /* Do not alter transfer size explicitly defined by user */ + if (!params->transfer_size) { + len = (len >> align) << align; + if (!len) + len = 1 << align; + } total_len += len; if (params->norandom) { @@ -721,14 +790,14 @@ static int dmatest_func(void *data) status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); + dmaengine_unmap_put(um); + if (!done->done) { - dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); failed_tests++; continue; } else if (status != DMA_COMPLETE) { - dmaengine_unmap_put(um); result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src_off, @@ -737,8 +806,6 @@ static int dmatest_func(void *data) continue; } - dmaengine_unmap_put(um); - if (params->noverify) { verbose_result("test passed", total_tests, src_off, dst_off, len, 0); @@ -802,17 +869,18 @@ err_srcbuf: kfree(thread->usrcs); err_usrcs: kfree(thread->srcs); -err_srcs: +err_free_coefs: kfree(pq_coefs); err_thread_type: - pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", + iops = dmatest_persec(runtime, total_tests); + pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n", current->comm, total_tests, failed_tests, - dmatest_persec(runtime, total_tests), + FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops), dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ if (ret || failed_tests) - dmaengine_terminate_all(chan); + dmaengine_terminate_sync(chan); thread->done = true; wake_up(&thread_wait); @@ -836,7 +904,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) } /* terminate all transfers on specified channels */ - dmaengine_terminate_all(dtc->chan); + dmaengine_terminate_sync(dtc->chan); kfree(dtc); } @@ -886,7 +954,7 @@ static int dmatest_add_threads(struct dmatest_info *info, /* srcbuf and dstbuf are allocated by the thread itself */ get_task_struct(thread->task); list_add_tail(&thread->node, &dtc->threads); - wake_up_process(thread->task); + thread->pending = true; } return i; @@ -932,7 +1000,7 @@ static int dmatest_add_channel(struct dmatest_info *info, thread_count += cnt > 0 ? cnt : 0; } - pr_info("Started %u threads using %s\n", + pr_info("Added %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); @@ -977,7 +1045,7 @@ static void request_channels(struct dmatest_info *info, } } -static void run_threaded_test(struct dmatest_info *info) +static void add_threaded_test(struct dmatest_info *info) { struct dmatest_params *params = &info->params; @@ -993,6 +1061,8 @@ static void run_threaded_test(struct dmatest_info *info) params->timeout = timeout; params->noverify = noverify; params->norandom = norandom; + params->alignment = alignment; + params->transfer_size = transfer_size; request_channels(info, DMA_MEMCPY); request_channels(info, DMA_MEMSET); @@ -1000,6 +1070,24 @@ static void run_threaded_test(struct dmatest_info *info) request_channels(info, DMA_PQ); } +static void run_pending_tests(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + unsigned int thread_count = 0; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + thread_count = 0; + list_for_each_entry(thread, &dtc->threads, node) { + wake_up_process(thread->task); + thread_count++; + } + pr_info("Started %u threads using %s\n", + thread_count, dma_chan_name(dtc->chan)); + } +} + static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; @@ -1016,7 +1104,7 @@ static void stop_threaded_test(struct dmatest_info *info) info->nr_channels = 0; } -static void restart_threaded_test(struct dmatest_info *info, bool run) +static void start_threaded_tests(struct dmatest_info *info) { /* we might be called early to set run=, defer running until all * parameters have been evaluated @@ -1024,11 +1112,7 @@ static void restart_threaded_test(struct dmatest_info *info, bool run) if (!info->did_init) return; - /* Stop any running test first */ - stop_threaded_test(info); - - /* Run test with new parameters */ - run_threaded_test(info); + 
run_pending_tests(info); } static int dmatest_run_get(char *val, const struct kernel_param *kp) @@ -1039,7 +1123,8 @@ static int dmatest_run_get(char *val, const struct kernel_param *kp) if (is_threaded_test_run(info)) { dmatest_run = true; } else { - stop_threaded_test(info); + if (!is_threaded_test_pending(info)) + stop_threaded_test(info); dmatest_run = false; } mutex_unlock(&info->lock); @@ -1057,18 +1142,125 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) if (ret) { mutex_unlock(&info->lock); return ret; + } else if (dmatest_run) { + if (is_threaded_test_pending(info)) + start_threaded_tests(info); + else + pr_info("Could not start test, no channels configured\n"); + } else { + stop_threaded_test(info); } - if (is_threaded_test_run(info)) + mutex_unlock(&info->lock); + + return ret; +} + +static int dmatest_chan_set(const char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_chan *dtc; + char chan_reset_val[20]; + int ret = 0; + + mutex_lock(&info->lock); + ret = param_set_copystring(val, kp); + if (ret) { + mutex_unlock(&info->lock); + return ret; + } + /*Clear any previously run threads */ + if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) + stop_threaded_test(info); + /* Reject channels that are already registered */ + if (is_threaded_test_pending(info)) { + list_for_each_entry(dtc, &info->channels, node) { + if (strcmp(dma_chan_name(dtc->chan), + strim(test_channel)) == 0) { + dtc = list_last_entry(&info->channels, + struct dmatest_chan, + node); + strlcpy(chan_reset_val, + dma_chan_name(dtc->chan), + sizeof(chan_reset_val)); + ret = -EBUSY; + goto add_chan_err; + } + } + } + + add_threaded_test(info); + + /* Check if channel was added successfully */ + dtc = list_last_entry(&info->channels, struct dmatest_chan, node); + + if (dtc->chan) { + /* + * if new channel was not successfully added, revert the + * "test_channel" string to the name of the last successfully + * added channel. exception for when users issues empty string + * to channel parameter. 
+ */ + if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0) + && (strcmp("", strim(test_channel)) != 0)) { + ret = -EINVAL; + strlcpy(chan_reset_val, dma_chan_name(dtc->chan), + sizeof(chan_reset_val)); + goto add_chan_err; + } + + } else { + /* Clear test_channel if no channels were added successfully */ + strlcpy(chan_reset_val, "", sizeof(chan_reset_val)); ret = -EBUSY; - else if (dmatest_run) - restart_threaded_test(info, dmatest_run); + goto add_chan_err; + } + + mutex_unlock(&info->lock); + + return ret; +add_chan_err: + param_set_copystring(chan_reset_val, kp); mutex_unlock(&info->lock); return ret; } +static int dmatest_chan_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + + mutex_lock(&info->lock); + if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) { + stop_threaded_test(info); + strlcpy(test_channel, "", sizeof(test_channel)); + } + mutex_unlock(&info->lock); + + return param_get_string(val, kp); +} + +static int dmatest_test_list_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_chan *dtc; + unsigned int thread_count = 0; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + thread_count = 0; + list_for_each_entry(thread, &dtc->threads, node) { + thread_count++; + } + pr_info("%u threads using %s\n", + thread_count, dma_chan_name(dtc->chan)); + } + + return 0; +} + static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; @@ -1076,7 +1268,8 @@ static int __init dmatest_init(void) if (dmatest_run) { mutex_lock(&info->lock); - run_threaded_test(info); + add_threaded_test(info); + run_pending_tests(info); mutex_unlock(&info->lock); } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index d0c3e50b39fb..2c5ca1961256 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -160,12 +160,14 @@ static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc) static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc) { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 cfghi = DWC_CFGH_FIFO_MODE; u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); bool hs_polarity = dwc->dws.hs_polarity; cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); + cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); /* Set polarity of handshake interface */ cfglo |= hs_polarity ? 
DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index f01b2c173fa6..31ff8113c3de 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -162,6 +162,12 @@ dw_dma_parse_dt(struct platform_device *pdev) pdata->multi_block[tmp] = 1; } + if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) { + if (tmp > CHAN_PROTCTL_MASK) + return NULL; + pdata->protctl = tmp; + } + return pdata; } #else diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 09e7dfdbb790..646c9c960c07 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -200,6 +200,10 @@ enum dw_dma_msize { #define DWC_CFGH_FCMODE (1 << 0) #define DWC_CFGH_FIFO_MODE (1 << 1) #define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_PROTCTL_DATA (0 << 2) /* data access - always set */ +#define DWC_CFGH_PROTCTL_PRIV (1 << 2) /* privileged -> AHB HPROT[1] */ +#define DWC_CFGH_PROTCTL_BUFFER (2 << 2) /* bufferable -> AHB HPROT[2] */ +#define DWC_CFGH_PROTCTL_CACHE (4 << 2) /* cacheable -> AHB HPROT[3] */ #define DWC_CFGH_DS_UPD_EN (1 << 5) #define DWC_CFGH_SS_UPD_EN (1 << 6) #define DWC_CFGH_SRC_PER(x) ((x) << 7) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index f674eb5fbbef..594a88f4f99c 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -997,7 +997,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, for (offset = 0; offset < len; offset += bytes) { desc = ep93xx_dma_desc_get(edmac); if (!desc) { - dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); goto fail; } @@ -1069,7 +1069,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc = ep93xx_dma_desc_get(edmac); if (!desc) { - dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); goto fail; } @@ -1149,7 +1149,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, for (offset = 0; offset < buf_len; offset += period_len) { desc = ep93xx_dma_desc_get(edmac); if (!desc) { - dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); goto fail; } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e661..6f13816cce8e 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -335,6 +335,7 @@ struct sdma_desc { * @sdma: pointer to the SDMA engine for this channel * @channel: the channel number, matches dmaengine chan_id + 1 * @direction: transfer type. Needed for setting SDMA script + * @slave_config Slave configuration * @peripheral_type: Peripheral type. 
Needed for setting SDMA script * @event_id0: aka dma request line * @event_id1: for channels that use 2 events @@ -362,6 +363,7 @@ struct sdma_channel { struct sdma_engine *sdma; unsigned int channel; enum dma_transfer_direction direction; + struct dma_slave_config slave_config; enum sdma_peripheral_type peripheral_type; unsigned int event_id0; unsigned int event_id1; @@ -440,6 +442,10 @@ struct sdma_engine { struct sdma_buffer_descriptor *bd0; }; +static int sdma_config_write(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg, + enum dma_transfer_direction direction); + static struct sdma_driver_data sdma_imx31 = { .chnenbl0 = SDMA_CHNENBL0_IMX31, .num_events = 32, @@ -671,9 +677,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, int ret; unsigned long flags; - buf_virt = dma_alloc_coherent(NULL, - size, - &buf_phys, GFP_KERNEL); + buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL); if (!buf_virt) { return -ENOMEM; } @@ -1104,18 +1108,6 @@ static int sdma_config_channel(struct dma_chan *chan) sdmac->shp_addr = 0; sdmac->per_addr = 0; - if (sdmac->event_id0) { - if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) - return -EINVAL; - sdma_event_enable(sdmac, sdmac->event_id0); - } - - if (sdmac->event_id1) { - if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) - return -EINVAL; - sdma_event_enable(sdmac, sdmac->event_id1); - } - switch (sdmac->peripheral_type) { case IMX_DMATYPE_DSP: sdma_config_ownership(sdmac, false, true, true); @@ -1415,6 +1407,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( struct scatterlist *sg; struct sdma_desc *desc; + sdma_config_write(chan, &sdmac->slave_config, direction); + desc = sdma_transfer_init(sdmac, direction, sg_len); if (!desc) goto err_out; @@ -1499,6 +1493,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); + sdma_config_write(chan, &sdmac->slave_config, direction); + desc = sdma_transfer_init(sdmac, direction, num_periods); if (!desc) goto err_out; @@ -1554,17 +1550,18 @@ err_out: return NULL; } -static int sdma_config(struct dma_chan *chan, - struct dma_slave_config *dmaengine_cfg) +static int sdma_config_write(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg, + enum dma_transfer_direction direction) { struct sdma_channel *sdmac = to_sdma_chan(chan); - if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { + if (direction == DMA_DEV_TO_MEM) { sdmac->per_address = dmaengine_cfg->src_addr; sdmac->watermark_level = dmaengine_cfg->src_maxburst * dmaengine_cfg->src_addr_width; sdmac->word_size = dmaengine_cfg->src_addr_width; - } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) { + } else if (direction == DMA_DEV_TO_DEV) { sdmac->per_address2 = dmaengine_cfg->src_addr; sdmac->per_address = dmaengine_cfg->dst_addr; sdmac->watermark_level = dmaengine_cfg->src_maxburst & @@ -1578,10 +1575,33 @@ static int sdma_config(struct dma_chan *chan, dmaengine_cfg->dst_addr_width; sdmac->word_size = dmaengine_cfg->dst_addr_width; } - sdmac->direction = dmaengine_cfg->direction; + sdmac->direction = direction; return sdma_config_channel(chan); } +static int sdma_config(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + + memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); + + /* Set ENBLn earlier to make sure dma request triggered after that */ + if (sdmac->event_id0) { + if (sdmac->event_id0 >= 
sdmac->sdma->drvdata->num_events) + return -EINVAL; + sdma_event_enable(sdmac, sdmac->event_id0); + } + + if (sdmac->event_id1) { + if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) + return -EINVAL; + sdma_event_enable(sdmac, sdmac->event_id1); + } + + return 0; +} + static enum dma_status sdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig index 27bac0bba09e..680fc0572d87 100644 --- a/drivers/dma/mediatek/Kconfig +++ b/drivers/dma/mediatek/Kconfig @@ -11,3 +11,16 @@ config MTK_HSDMA This controller provides the channels which is dedicated to memory-to-memory transfer to offload from CPU through ring- based descriptor management. + +config MTK_CQDMA + tristate "MediaTek Command-Queue DMA controller support" + depends on ARCH_MEDIATEK || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + help + Enable support for Command-Queue DMA controller on MediaTek + SoCs. + + This controller provides the channels which is dedicated to + memory-to-memory transfer to offload from CPU. diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile index 6e778f842f01..41bb3815f636 100644 --- a/drivers/dma/mediatek/Makefile +++ b/drivers/dma/mediatek/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o +obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c new file mode 100644 index 000000000000..131f3974740d --- /dev/null +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -0,0 +1,951 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-2019 MediaTek Inc. + +/* + * Driver for MediaTek Command-Queue DMA Controller + * + * Author: Shun-Chih Yu <shun-chih.yu@mediatek.com> + * + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/refcount.h> +#include <linux/slab.h> + +#include "../virt-dma.h" + +#define MTK_CQDMA_USEC_POLL 10 +#define MTK_CQDMA_TIMEOUT_POLL 1000 +#define MTK_CQDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) +#define MTK_CQDMA_ALIGN_SIZE 1 + +/* The default number of virtual channel */ +#define MTK_CQDMA_NR_VCHANS 32 + +/* The default number of physical channel */ +#define MTK_CQDMA_NR_PCHANS 3 + +/* Registers for underlying dma manipulation */ +#define MTK_CQDMA_INT_FLAG 0x0 +#define MTK_CQDMA_INT_EN 0x4 +#define MTK_CQDMA_EN 0x8 +#define MTK_CQDMA_RESET 0xc +#define MTK_CQDMA_FLUSH 0x14 +#define MTK_CQDMA_SRC 0x1c +#define MTK_CQDMA_DST 0x20 +#define MTK_CQDMA_LEN1 0x24 +#define MTK_CQDMA_LEN2 0x28 +#define MTK_CQDMA_SRC2 0x60 +#define MTK_CQDMA_DST2 0x64 + +/* Registers setting */ +#define MTK_CQDMA_EN_BIT BIT(0) +#define MTK_CQDMA_INT_FLAG_BIT BIT(0) +#define MTK_CQDMA_INT_EN_BIT BIT(0) +#define MTK_CQDMA_FLUSH_BIT BIT(0) + +#define MTK_CQDMA_WARM_RST_BIT BIT(0) +#define MTK_CQDMA_HARD_RST_BIT BIT(1) + +#define MTK_CQDMA_MAX_LEN GENMASK(27, 0) +#define MTK_CQDMA_ADDR_LIMIT GENMASK(31, 0) +#define MTK_CQDMA_ADDR2_SHFIT (32) + +/** + * struct mtk_cqdma_vdesc - The struct holding info describing virtual + * descriptor (CVD) + * @vd: An instance for struct virt_dma_desc + * @len: 
The total data size device wants to move + * @residue: The remaining data size device will move + * @dest: The destination address device wants to move to + * @src: The source address device wants to move from + * @ch: The pointer to the corresponding dma channel + * @node: The lise_head struct to build link-list for VDs + * @parent: The pointer to the parent CVD + */ +struct mtk_cqdma_vdesc { + struct virt_dma_desc vd; + size_t len; + size_t residue; + dma_addr_t dest; + dma_addr_t src; + struct dma_chan *ch; + + struct list_head node; + struct mtk_cqdma_vdesc *parent; +}; + +/** + * struct mtk_cqdma_pchan - The struct holding info describing physical + * channel (PC) + * @queue: Queue for the PDs issued to this PC + * @base: The mapped register I/O base of this PC + * @irq: The IRQ that this PC are using + * @refcnt: Track how many VCs are using this PC + * @tasklet: Tasklet for this PC + * @lock: Lock protect agaisting multiple VCs access PC + */ +struct mtk_cqdma_pchan { + struct list_head queue; + void __iomem *base; + u32 irq; + + refcount_t refcnt; + + struct tasklet_struct tasklet; + + /* lock to protect PC */ + spinlock_t lock; +}; + +/** + * struct mtk_cqdma_vchan - The struct holding info describing virtual + * channel (VC) + * @vc: An instance for struct virt_dma_chan + * @pc: The pointer to the underlying PC + * @issue_completion: The wait for all issued descriptors completited + * @issue_synchronize: Bool indicating channel synchronization starts + */ +struct mtk_cqdma_vchan { + struct virt_dma_chan vc; + struct mtk_cqdma_pchan *pc; + struct completion issue_completion; + bool issue_synchronize; +}; + +/** + * struct mtk_cqdma_device - The struct holding info describing CQDMA + * device + * @ddev: An instance for struct dma_device + * @clk: The clock that device internal is using + * @dma_requests: The number of VCs the device supports to + * @dma_channels: The number of PCs the device supports to + * @vc: The pointer to all available VCs + * @pc: The pointer to all the underlying PCs + */ +struct mtk_cqdma_device { + struct dma_device ddev; + struct clk *clk; + + u32 dma_requests; + u32 dma_channels; + struct mtk_cqdma_vchan *vc; + struct mtk_cqdma_pchan **pc; +}; + +static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan) +{ + return container_of(chan->device, struct mtk_cqdma_device, ddev); +} + +static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan) +{ + return container_of(chan, struct mtk_cqdma_vchan, vc.chan); +} + +static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct mtk_cqdma_vdesc, vd); +} + +static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma) +{ + return cqdma->ddev.dev; +} + +static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg) +{ + return readl(pc->base + reg); +} + +static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) +{ + writel_relaxed(val, pc->base + reg); +} + +static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg, + u32 mask, u32 set) +{ + u32 val; + + val = mtk_dma_read(pc, reg); + val &= ~mask; + val |= set; + mtk_dma_write(pc, reg, val); +} + +static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) +{ + mtk_dma_rmw(pc, reg, 0, val); +} + +static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) +{ + mtk_dma_rmw(pc, reg, val, 0); +} + +static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd) +{ + kfree(to_cqdma_vdesc(vd)); +} + +static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool 
atomic) +{ + u32 status = 0; + + if (!atomic) + return readl_poll_timeout(pc->base + MTK_CQDMA_EN, + status, + !(status & MTK_CQDMA_EN_BIT), + MTK_CQDMA_USEC_POLL, + MTK_CQDMA_TIMEOUT_POLL); + + return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, + status, + !(status & MTK_CQDMA_EN_BIT), + MTK_CQDMA_USEC_POLL, + MTK_CQDMA_TIMEOUT_POLL); +} + +static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) +{ + mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); + mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); + + return mtk_cqdma_poll_engine_done(pc, false); +} + +static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, + struct mtk_cqdma_vdesc *cvd) +{ + /* wait for the previous transaction done */ + if (mtk_cqdma_poll_engine_done(pc, true) < 0) + dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n"); + + /* warm reset the dma engine for the new transaction */ + mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT); + if (mtk_cqdma_poll_engine_done(pc, true) < 0) + dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n"); + + /* setup the source */ + mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT); +#else + mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); +#endif + + /* setup the destination */ + mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT); +#else + mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); +#endif + + /* setup the length */ + mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len); + + /* start dma engine */ + mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT); +} + +static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc) +{ + struct virt_dma_desc *vd, *vd2; + struct mtk_cqdma_pchan *pc = cvc->pc; + struct mtk_cqdma_vdesc *cvd; + bool trigger_engine = false; + + lockdep_assert_held(&cvc->vc.lock); + lockdep_assert_held(&pc->lock); + + list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) { + /* need to trigger dma engine if PC's queue is empty */ + if (list_empty(&pc->queue)) + trigger_engine = true; + + cvd = to_cqdma_vdesc(vd); + + /* add VD into PC's queue */ + list_add_tail(&cvd->node, &pc->queue); + + /* start the dma engine */ + if (trigger_engine) + mtk_cqdma_start(pc, cvd); + + /* remove VD from list desc_issued */ + list_del(&vd->node); + } +} + +/* + * return true if this VC is active, + * meaning that there are VDs under processing by the PC + */ +static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc) +{ + struct mtk_cqdma_vdesc *cvd; + + list_for_each_entry(cvd, &cvc->pc->queue, node) + if (cvc == to_cqdma_vchan(cvd->ch)) + return true; + + return false; +} + +/* + * return the pointer of the CVD that is just consumed by the PC + */ +static struct mtk_cqdma_vdesc +*mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc) +{ + struct mtk_cqdma_vchan *cvc; + struct mtk_cqdma_vdesc *cvd, *ret = NULL; + + /* consume a CVD from PC's queue */ + cvd = list_first_entry_or_null(&pc->queue, + struct mtk_cqdma_vdesc, node); + if (unlikely(!cvd || !cvd->parent)) + return NULL; + + cvc = to_cqdma_vchan(cvd->ch); + ret = cvd; + + /* update residue of the parent CVD */ + cvd->parent->residue -= cvd->len; + + /* delete CVD from PC's queue */ + list_del(&cvd->node); + + spin_lock(&cvc->vc.lock); + + /* check whether all the child CVDs completed */ + if (!cvd->parent->residue) { + /* add 
the parent VD into list desc_completed */ + vchan_cookie_complete(&cvd->parent->vd); + + /* setup completion if this VC is under synchronization */ + if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) { + complete(&cvc->issue_completion); + cvc->issue_synchronize = false; + } + } + + spin_unlock(&cvc->vc.lock); + + /* start transaction for next CVD in the queue */ + cvd = list_first_entry_or_null(&pc->queue, + struct mtk_cqdma_vdesc, node); + if (cvd) + mtk_cqdma_start(pc, cvd); + + return ret; +} + +static void mtk_cqdma_tasklet_cb(unsigned long data) +{ + struct mtk_cqdma_pchan *pc = (struct mtk_cqdma_pchan *)data; + struct mtk_cqdma_vdesc *cvd = NULL; + unsigned long flags; + + spin_lock_irqsave(&pc->lock, flags); + /* consume the queue */ + cvd = mtk_cqdma_consume_work_queue(pc); + spin_unlock_irqrestore(&pc->lock, flags); + + /* submit the next CVD */ + if (cvd) { + dma_run_dependencies(&cvd->vd.tx); + + /* + * free child CVD after completion. + * the parent CVD would be freeed with desc_free by user. + */ + if (cvd->parent != cvd) + kfree(cvd); + } + + /* re-enable interrupt before leaving tasklet */ + enable_irq(pc->irq); +} + +static irqreturn_t mtk_cqdma_irq(int irq, void *devid) +{ + struct mtk_cqdma_device *cqdma = devid; + irqreturn_t ret = IRQ_NONE; + bool schedule_tasklet = false; + u32 i; + + /* clear interrupt flags for each PC */ + for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) { + spin_lock(&cqdma->pc[i]->lock); + if (mtk_dma_read(cqdma->pc[i], + MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) { + /* clear interrupt */ + mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG, + MTK_CQDMA_INT_FLAG_BIT); + + schedule_tasklet = true; + ret = IRQ_HANDLED; + } + spin_unlock(&cqdma->pc[i]->lock); + + if (schedule_tasklet) { + /* disable interrupt */ + disable_irq_nosync(cqdma->pc[i]->irq); + + /* schedule the tasklet to handle the transactions */ + tasklet_schedule(&cqdma->pc[i]->tasklet); + } + } + + return ret; +} + +static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, + dma_cookie_t cookie) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + struct virt_dma_desc *vd; + unsigned long flags; + + spin_lock_irqsave(&cvc->pc->lock, flags); + list_for_each_entry(vd, &cvc->pc->queue, node) + if (vd->tx.cookie == cookie) { + spin_unlock_irqrestore(&cvc->pc->lock, flags); + return vd; + } + spin_unlock_irqrestore(&cvc->pc->lock, flags); + + list_for_each_entry(vd, &cvc->vc.desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} + +static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + struct mtk_cqdma_vdesc *cvd; + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + size_t bytes = 0; + + ret = dma_cookie_status(c, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&cvc->vc.lock, flags); + vd = mtk_cqdma_find_active_desc(c, cookie); + spin_unlock_irqrestore(&cvc->vc.lock, flags); + + if (vd) { + cvd = to_cqdma_vdesc(vd); + bytes = cvd->residue; + } + + dma_set_residue(txstate, bytes); + + return ret; +} + +static void mtk_cqdma_issue_pending(struct dma_chan *c) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + unsigned long pc_flags; + unsigned long vc_flags; + + /* acquire PC's lock before VS's lock for lock dependency in tasklet */ + spin_lock_irqsave(&cvc->pc->lock, pc_flags); + spin_lock_irqsave(&cvc->vc.lock, vc_flags); 
+ + if (vchan_issue_pending(&cvc->vc)) + mtk_cqdma_issue_vchan_pending(cvc); + + spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); + spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); +} + +static struct dma_async_tx_descriptor * +mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct mtk_cqdma_vdesc **cvd; + struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL; + size_t i, tlen, nr_vd; + + /* + * In the case that trsanction length is larger than the + * DMA engine supports, a single memcpy transaction needs + * to be separated into several DMA transactions. + * Each DMA transaction would be described by a CVD, + * and the first one is referred as the parent CVD, + * while the others are child CVDs. + * The parent CVD's tx descriptor is the only tx descriptor + * returned to the DMA user, and it should not be completed + * until all the child CVDs completed. + */ + nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN); + cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT); + if (!cvd) + return NULL; + + for (i = 0; i < nr_vd; ++i) { + cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT); + if (!cvd[i]) { + for (; i > 0; --i) + kfree(cvd[i - 1]); + return NULL; + } + + /* setup dma channel */ + cvd[i]->ch = c; + + /* setup sourece, destination, and length */ + tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len; + cvd[i]->len = tlen; + cvd[i]->src = src; + cvd[i]->dest = dest; + + /* setup tx descriptor */ + tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags); + tx->next = NULL; + + if (!i) { + cvd[0]->residue = len; + } else { + prev_tx->next = tx; + cvd[i]->residue = tlen; + } + + cvd[i]->parent = cvd[0]; + + /* update the src, dest, len, prev_tx for the next CVD */ + src += tlen; + dest += tlen; + len -= tlen; + prev_tx = tx; + } + + return &cvd[0]->vd.tx; +} + +static void mtk_cqdma_free_inactive_desc(struct dma_chan *c) +{ + struct virt_dma_chan *vc = to_virt_chan(c); + unsigned long flags; + LIST_HEAD(head); + + /* + * set desc_allocated, desc_submitted, + * and desc_issued as the candicates to be freed + */ + spin_lock_irqsave(&vc->lock, flags); + list_splice_tail_init(&vc->desc_allocated, &head); + list_splice_tail_init(&vc->desc_submitted, &head); + list_splice_tail_init(&vc->desc_issued, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + /* free descriptor lists */ + vchan_dma_desc_free_list(vc, &head); +} + +static void mtk_cqdma_free_active_desc(struct dma_chan *c) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + bool sync_needed = false; + unsigned long pc_flags; + unsigned long vc_flags; + + /* acquire PC's lock first due to lock dependency in dma ISR */ + spin_lock_irqsave(&cvc->pc->lock, pc_flags); + spin_lock_irqsave(&cvc->vc.lock, vc_flags); + + /* synchronization is required if this VC is active */ + if (mtk_cqdma_is_vchan_active(cvc)) { + cvc->issue_synchronize = true; + sync_needed = true; + } + + spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); + spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); + + /* waiting for the completion of this VC */ + if (sync_needed) + wait_for_completion(&cvc->issue_completion); + + /* free all descriptors in list desc_completed */ + vchan_synchronize(&cvc->vc); + + WARN_ONCE(!list_empty(&cvc->vc.desc_completed), + "Desc pending still in list desc_completed\n"); +} + +static int mtk_cqdma_terminate_all(struct dma_chan *c) +{ + /* free descriptors not processed yet by hardware */ + mtk_cqdma_free_inactive_desc(c); + + /* free descriptors being processed by hardware 
*/ + mtk_cqdma_free_active_desc(c); + + return 0; +} + +static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c) +{ + struct mtk_cqdma_device *cqdma = to_cqdma_dev(c); + struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c); + struct mtk_cqdma_pchan *pc = NULL; + u32 i, min_refcnt = U32_MAX, refcnt; + unsigned long flags; + + /* allocate PC with the minimun refcount */ + for (i = 0; i < cqdma->dma_channels; ++i) { + refcnt = refcount_read(&cqdma->pc[i]->refcnt); + if (refcnt < min_refcnt) { + pc = cqdma->pc[i]; + min_refcnt = refcnt; + } + } + + if (!pc) + return -ENOSPC; + + spin_lock_irqsave(&pc->lock, flags); + + if (!refcount_read(&pc->refcnt)) { + /* allocate PC when the refcount is zero */ + mtk_cqdma_hard_reset(pc); + + /* enable interrupt for this PC */ + mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); + + /* + * refcount_inc would complain increment on 0; use-after-free. + * Thus, we need to explicitly set it as 1 initially. + */ + refcount_set(&pc->refcnt, 1); + } else { + refcount_inc(&pc->refcnt); + } + + spin_unlock_irqrestore(&pc->lock, flags); + + vc->pc = pc; + + return 0; +} + +static void mtk_cqdma_free_chan_resources(struct dma_chan *c) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + unsigned long flags; + + /* free all descriptors in all lists on the VC */ + mtk_cqdma_terminate_all(c); + + spin_lock_irqsave(&cvc->pc->lock, flags); + + /* PC is not freed until there is no VC mapped to it */ + if (refcount_dec_and_test(&cvc->pc->refcnt)) { + /* start the flush operation and stop the engine */ + mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); + + /* wait for the completion of flush operation */ + if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0) + dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); + + /* clear the flush bit and interrupt flag */ + mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); + mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG, + MTK_CQDMA_INT_FLAG_BIT); + + /* disable interrupt for this PC */ + mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); + } + + spin_unlock_irqrestore(&cvc->pc->lock, flags); +} + +static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma) +{ + unsigned long flags; + int err; + u32 i; + + pm_runtime_enable(cqdma2dev(cqdma)); + pm_runtime_get_sync(cqdma2dev(cqdma)); + + err = clk_prepare_enable(cqdma->clk); + + if (err) { + pm_runtime_put_sync(cqdma2dev(cqdma)); + pm_runtime_disable(cqdma2dev(cqdma)); + return err; + } + + /* reset all PCs */ + for (i = 0; i < cqdma->dma_channels; ++i) { + spin_lock_irqsave(&cqdma->pc[i]->lock, flags); + if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) { + dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + + clk_disable_unprepare(cqdma->clk); + pm_runtime_put_sync(cqdma2dev(cqdma)); + pm_runtime_disable(cqdma2dev(cqdma)); + return -EINVAL; + } + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + } + + return 0; +} + +static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma) +{ + unsigned long flags; + u32 i; + + /* reset all PCs */ + for (i = 0; i < cqdma->dma_channels; ++i) { + spin_lock_irqsave(&cqdma->pc[i]->lock, flags); + if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) + dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + } + + clk_disable_unprepare(cqdma->clk); + + pm_runtime_put_sync(cqdma2dev(cqdma)); + pm_runtime_disable(cqdma2dev(cqdma)); +} + +static const struct of_device_id mtk_cqdma_match[] = { + { .compatible 
= "mediatek,mt6765-cqdma" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_cqdma_match); + +static int mtk_cqdma_probe(struct platform_device *pdev) +{ + struct mtk_cqdma_device *cqdma; + struct mtk_cqdma_vchan *vc; + struct dma_device *dd; + struct resource *res; + int err; + u32 i; + + cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL); + if (!cqdma) + return -ENOMEM; + + dd = &cqdma->ddev; + + cqdma->clk = devm_clk_get(&pdev->dev, "cqdma"); + if (IS_ERR(cqdma->clk)) { + dev_err(&pdev->dev, "No clock for %s\n", + dev_name(&pdev->dev)); + return PTR_ERR(cqdma->clk); + } + + dma_cap_set(DMA_MEMCPY, dd->cap_mask); + + dd->copy_align = MTK_CQDMA_ALIGN_SIZE; + dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources; + dd->device_free_chan_resources = mtk_cqdma_free_chan_resources; + dd->device_tx_status = mtk_cqdma_tx_status; + dd->device_issue_pending = mtk_cqdma_issue_pending; + dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy; + dd->device_terminate_all = mtk_cqdma_terminate_all; + dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; + dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; + dd->directions = BIT(DMA_MEM_TO_MEM); + dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + dd->dev = &pdev->dev; + INIT_LIST_HEAD(&dd->channels); + + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, + "dma-requests", + &cqdma->dma_requests)) { + dev_info(&pdev->dev, + "Using %u as missing dma-requests property\n", + MTK_CQDMA_NR_VCHANS); + + cqdma->dma_requests = MTK_CQDMA_NR_VCHANS; + } + + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, + "dma-channels", + &cqdma->dma_channels)) { + dev_info(&pdev->dev, + "Using %u as missing dma-channels property\n", + MTK_CQDMA_NR_PCHANS); + + cqdma->dma_channels = MTK_CQDMA_NR_PCHANS; + } + + cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels, + sizeof(*cqdma->pc), GFP_KERNEL); + if (!cqdma->pc) + return -ENOMEM; + + /* initialization for PCs */ + for (i = 0; i < cqdma->dma_channels; ++i) { + cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1, + sizeof(**cqdma->pc), GFP_KERNEL); + if (!cqdma->pc[i]) + return -ENOMEM; + + INIT_LIST_HEAD(&cqdma->pc[i]->queue); + spin_lock_init(&cqdma->pc[i]->lock); + refcount_set(&cqdma->pc[i]->refcnt, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) { + dev_err(&pdev->dev, "No mem resource for %s\n", + dev_name(&pdev->dev)); + return -EINVAL; + } + + cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cqdma->pc[i]->base)) + return PTR_ERR(cqdma->pc[i]->base); + + /* allocate IRQ resource */ + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!res) { + dev_err(&pdev->dev, "No irq resource for %s\n", + dev_name(&pdev->dev)); + return -EINVAL; + } + cqdma->pc[i]->irq = res->start; + + err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, + mtk_cqdma_irq, 0, dev_name(&pdev->dev), + cqdma); + if (err) { + dev_err(&pdev->dev, + "request_irq failed with err %d\n", err); + return -EINVAL; + } + } + + /* allocate resource for VCs */ + cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests, + sizeof(*cqdma->vc), GFP_KERNEL); + if (!cqdma->vc) + return -ENOMEM; + + for (i = 0; i < cqdma->dma_requests; i++) { + vc = &cqdma->vc[i]; + vc->vc.desc_free = mtk_cqdma_vdesc_free; + vchan_init(&vc->vc, dd); + init_completion(&vc->issue_completion); + } + + err = dma_async_device_register(dd); + if (err) + return err; + + err = of_dma_controller_register(pdev->dev.of_node, + of_dma_xlate_by_chan_id, cqdma); + if (err) { + 
dev_err(&pdev->dev, + "MediaTek CQDMA OF registration failed %d\n", err); + goto err_unregister; + } + + err = mtk_cqdma_hw_init(cqdma); + if (err) { + dev_err(&pdev->dev, + "MediaTek CQDMA HW initialization failed %d\n", err); + goto err_unregister; + } + + platform_set_drvdata(pdev, cqdma); + + /* initialize tasklet for each PC */ + for (i = 0; i < cqdma->dma_channels; ++i) + tasklet_init(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb, + (unsigned long)cqdma->pc[i]); + + dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n"); + + return 0; + +err_unregister: + dma_async_device_unregister(dd); + + return err; +} + +static int mtk_cqdma_remove(struct platform_device *pdev) +{ + struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev); + struct mtk_cqdma_vchan *vc; + unsigned long flags; + int i; + + /* kill VC task */ + for (i = 0; i < cqdma->dma_requests; i++) { + vc = &cqdma->vc[i]; + + list_del(&vc->vc.chan.device_node); + tasklet_kill(&vc->vc.task); + } + + /* disable interrupt */ + for (i = 0; i < cqdma->dma_channels; i++) { + spin_lock_irqsave(&cqdma->pc[i]->lock, flags); + mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN, + MTK_CQDMA_INT_EN_BIT); + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + + /* Waits for any pending IRQ handlers to complete */ + synchronize_irq(cqdma->pc[i]->irq); + + tasklet_kill(&cqdma->pc[i]->tasklet); + } + + /* disable hardware */ + mtk_cqdma_hw_deinit(cqdma); + + dma_async_device_unregister(&cqdma->ddev); + of_dma_controller_free(pdev->dev.of_node); + + return 0; +} + +static struct platform_driver mtk_cqdma_driver = { + .probe = mtk_cqdma_probe, + .remove = mtk_cqdma_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = mtk_cqdma_match, + }, +}; +module_platform_driver(mtk_cqdma_driver); + +MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver"); +MODULE_AUTHOR("Shun-Chih Yu <shun-chih.yu@mediatek.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index adfd316db1a8..6a91e28d537d 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -676,7 +676,7 @@ static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) } /* DEBUGFS CODE */ -static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) +static int mic_dma_reg_show(struct seq_file *s, void *pos) { struct mic_dma_device *mic_dma_dev = s->private; int i, chan_num, first_chan = mic_dma_dev->start_ch; @@ -707,23 +707,7 @@ static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) return 0; } -static int mic_dma_reg_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, mic_dma_reg_seq_show, inode->i_private); -} - -static int mic_dma_reg_debug_release(struct inode *inode, struct file *file) -{ - return single_release(inode, file); -} - -static const struct file_operations mic_dma_reg_ops = { - .owner = THIS_MODULE, - .open = mic_dma_reg_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = mic_dma_reg_debug_release -}; +DEFINE_SHOW_ATTRIBUTE(mic_dma_reg); /* Debugfs parent dir */ static struct dentry *mic_dma_dbg; @@ -747,7 +731,7 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev) if (mic_dma_dev->dbg_dir) debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, mic_dma_dev, - &mic_dma_reg_ops); + &mic_dma_reg_fops); } return 0; } diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index eb3a1f42ab06..334bab92d26d 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -96,6 +96,7 @@ struct mmp_pdma_chan { struct 
dma_async_tx_descriptor desc; struct mmp_pdma_phy *phy; enum dma_transfer_direction dir; + struct dma_slave_config slave_config; struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel * is in cyclic mode */ @@ -140,6 +141,10 @@ struct mmp_pdma_device { #define to_mmp_pdma_dev(dmadev) \ container_of(dmadev, struct mmp_pdma_device, device) +static int mmp_pdma_config_write(struct dma_chan *dchan, + struct dma_slave_config *cfg, + enum dma_transfer_direction direction); + static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) { u32 reg = (phy->idx << 4) + DDADR; @@ -537,6 +542,8 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, chan->byte_align = false; + mmp_pdma_config_write(dchan, &chan->slave_config, dir); + for_each_sg(sgl, sg, sg_len, i) { addr = sg_dma_address(sg); avail = sg_dma_len(sgl); @@ -619,6 +626,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, return NULL; chan = to_mmp_pdma_chan(dchan); + mmp_pdma_config_write(dchan, &chan->slave_config, direction); switch (direction) { case DMA_MEM_TO_DEV: @@ -684,8 +692,9 @@ fail: return NULL; } -static int mmp_pdma_config(struct dma_chan *dchan, - struct dma_slave_config *cfg) +static int mmp_pdma_config_write(struct dma_chan *dchan, + struct dma_slave_config *cfg, + enum dma_transfer_direction direction) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); u32 maxburst = 0, addr = 0; @@ -694,12 +703,12 @@ static int mmp_pdma_config(struct dma_chan *dchan, if (!dchan) return -EINVAL; - if (cfg->direction == DMA_DEV_TO_MEM) { + if (direction == DMA_DEV_TO_MEM) { chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; maxburst = cfg->src_maxburst; width = cfg->src_addr_width; addr = cfg->src_addr; - } else if (cfg->direction == DMA_MEM_TO_DEV) { + } else if (direction == DMA_MEM_TO_DEV) { chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; maxburst = cfg->dst_maxburst; width = cfg->dst_addr_width; @@ -720,7 +729,7 @@ static int mmp_pdma_config(struct dma_chan *dchan, else if (maxburst == 32) chan->dcmd |= DCMD_BURST32; - chan->dir = cfg->direction; + chan->dir = direction; chan->dev_addr = addr; /* FIXME: drivers should be ported over to use the filter * function. 
Once that's done, the following two lines can @@ -732,6 +741,15 @@ static int mmp_pdma_config(struct dma_chan *dchan, return 0; } +static int mmp_pdma_config(struct dma_chan *dchan, + struct dma_slave_config *cfg) +{ + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); + + memcpy(&chan->slave_config, cfg, sizeof(*cfg)); + return 0; +} + static int mmp_pdma_terminate_all(struct dma_chan *dchan) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 88750a34e859..cff1b143fff5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -448,6 +448,7 @@ struct dma_pl330_chan { /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */ dma_addr_t fifo_dma; enum dma_data_direction dir; + struct dma_slave_config slave_config; /* for cyclic capability */ bool cyclic; @@ -542,6 +543,10 @@ struct _xfer_spec { struct dma_pl330_desc *desc; }; +static int pl330_config_write(struct dma_chan *chan, + struct dma_slave_config *slave_config, + enum dma_transfer_direction direction); + static inline bool _queue_full(struct pl330_thread *thrd) { return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; @@ -2220,20 +2225,21 @@ static int fixup_burst_len(int max_burst_len, int quirks) return max_burst_len; } -static int pl330_config(struct dma_chan *chan, - struct dma_slave_config *slave_config) +static int pl330_config_write(struct dma_chan *chan, + struct dma_slave_config *slave_config, + enum dma_transfer_direction direction) { struct dma_pl330_chan *pch = to_pchan(chan); pl330_unprep_slave_fifo(pch); - if (slave_config->direction == DMA_MEM_TO_DEV) { + if (direction == DMA_MEM_TO_DEV) { if (slave_config->dst_addr) pch->fifo_addr = slave_config->dst_addr; if (slave_config->dst_addr_width) pch->burst_sz = __ffs(slave_config->dst_addr_width); pch->burst_len = fixup_burst_len(slave_config->dst_maxburst, pch->dmac->quirks); - } else if (slave_config->direction == DMA_DEV_TO_MEM) { + } else if (direction == DMA_DEV_TO_MEM) { if (slave_config->src_addr) pch->fifo_addr = slave_config->src_addr; if (slave_config->src_addr_width) @@ -2245,6 +2251,16 @@ static int pl330_config(struct dma_chan *chan, return 0; } +static int pl330_config(struct dma_chan *chan, + struct dma_slave_config *slave_config) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + + memcpy(&pch->slave_config, slave_config, sizeof(*slave_config)); + + return 0; +} + static int pl330_terminate_all(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); @@ -2661,6 +2677,8 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( return NULL; } + pl330_config_write(chan, &pch->slave_config, direction); + if (!pl330_prep_slave_fifo(pch, direction)) return NULL; @@ -2815,6 +2833,8 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(!pch || !sgl || !sg_len)) return NULL; + pl330_config_write(chan, &pch->slave_config, direction); + if (!pl330_prep_slave_fifo(pch, direction)) return NULL; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 825725057e00..e78fe98b5cf1 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -189,7 +189,7 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param); #include <linux/uaccess.h> #include <linux/seq_file.h> -static int dbg_show_requester_chan(struct seq_file *s, void *p) +static int requester_chan_show(struct seq_file *s, void *p) { struct pxad_phy *phy = s->private; int i; @@ -220,7 +220,7 @@ static int is_phys_valid(unsigned long addr) #define 
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 825725057e00..e78fe98b5cf1 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -189,7 +189,7 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param);
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
 
-static int dbg_show_requester_chan(struct seq_file *s, void *p)
+static int requester_chan_show(struct seq_file *s, void *p)
 {
 	struct pxad_phy *phy = s->private;
 	int i;
@@ -220,7 +220,7 @@ static int is_phys_valid(unsigned long addr)
 #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
 #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")
 
-static int dbg_show_descriptors(struct seq_file *s, void *p)
+static int descriptors_show(struct seq_file *s, void *p)
 {
 	struct pxad_phy *phy = s->private;
 	int i, max_show = 20, burst, width;
@@ -263,7 +263,7 @@ static int dbg_show_descriptors(struct seq_file *s, void *p)
 	return 0;
 }
 
-static int dbg_show_chan_state(struct seq_file *s, void *p)
+static int chan_state_show(struct seq_file *s, void *p)
 {
 	struct pxad_phy *phy = s->private;
 	u32 dcsr, dcmd;
@@ -306,7 +306,7 @@ static int dbg_show_chan_state(struct seq_file *s, void *p)
 	return 0;
 }
 
-static int dbg_show_state(struct seq_file *s, void *p)
+static int state_show(struct seq_file *s, void *p)
 {
 	struct pxad_device *pdev = s->private;
 
@@ -317,22 +317,10 @@ static int dbg_show_state(struct seq_file *s, void *p)
 	return 0;
 }
 
-#define DBGFS_FUNC_DECL(name) \
-static int dbg_open_##name(struct inode *inode, struct file *file) \
-{ \
-	return single_open(file, dbg_show_##name, inode->i_private); \
-} \
-static const struct file_operations dbg_fops_##name = { \
-	.open		= dbg_open_##name, \
-	.llseek		= seq_lseek, \
-	.read		= seq_read, \
-	.release	= single_release, \
-}
-
-DBGFS_FUNC_DECL(state);
-DBGFS_FUNC_DECL(chan_state);
-DBGFS_FUNC_DECL(descriptors);
-DBGFS_FUNC_DECL(requester_chan);
+DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(chan_state);
+DEFINE_SHOW_ATTRIBUTE(descriptors);
+DEFINE_SHOW_ATTRIBUTE(requester_chan);
 
 static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
 					     int ch, struct dentry *chandir)
@@ -348,13 +336,13 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
 	if (chan)
 		chan_state = debugfs_create_file("state", 0400, chan, dt,
-						 &dbg_fops_chan_state);
+						 &chan_state_fops);
 	if (chan_state)
 		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
-						 &dbg_fops_descriptors);
+						 &descriptors_fops);
 	if (chan_descr)
 		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
-						 &dbg_fops_requester_chan);
+						 &requester_chan_fops);
 	if (!chan_reqs)
 		goto err_state;
 
@@ -375,7 +363,7 @@ static void pxad_init_debugfs(struct pxad_device *pdev)
 		goto err_root;
 
 	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
-						pdev, &dbg_fops_state);
+						pdev, &state_fops);
 	if (!pdev->dbgfs_state)
 		goto err_state;
 
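
With the generated <name>_fops, registration stays a single debugfs_create_file() call, as in the hunks above; the data pointer passed there comes back to the <name>_show() handler as s->private via single_open(). A hypothetical example, with "parent" and "pdata" as stand-in names:

	#include <linux/debugfs.h>

	static void foo_debugfs_init(struct dentry *parent, void *pdata)
	{
		/* state_show() will later see pdata as s->private */
		debugfs_create_file("state", 0400, parent, pdata, &state_fops);
	}
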
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index 3bdcb8056a36..9523faf7acdc 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -85,11 +85,11 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
 }
 
 /*
- * hidma_chan_stats: display HIDMA channel statistics
+ * hidma_chan_show: display HIDMA channel statistics
  *
  * Display the statistics for the current HIDMA virtual channel device.
  */
-static int hidma_chan_stats(struct seq_file *s, void *unused)
+static int hidma_chan_show(struct seq_file *s, void *unused)
 {
 	struct hidma_chan *mchan = s->private;
 	struct hidma_desc *mdesc;
@@ -117,11 +117,11 @@ static int hidma_chan_stats(struct seq_file *s, void *unused)
 }
 
 /*
- * hidma_dma_info: display HIDMA device info
+ * hidma_dma_show: display HIDMA device info
  *
  * Display the info for the current HIDMA device.
  */
-static int hidma_dma_info(struct seq_file *s, void *unused)
+static int hidma_dma_show(struct seq_file *s, void *unused)
 {
 	struct hidma_dev *dmadev = s->private;
 	resource_size_t sz;
@@ -138,29 +138,8 @@ static int hidma_dma_info(struct seq_file *s, void *unused)
 	return 0;
 }
 
-static int hidma_chan_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, hidma_chan_stats, inode->i_private);
-}
-
-static int hidma_dma_info_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, hidma_dma_info, inode->i_private);
-}
-
-static const struct file_operations hidma_chan_fops = {
-	.open = hidma_chan_stats_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static const struct file_operations hidma_dma_fops = {
-	.open = hidma_dma_info_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(hidma_chan);
+DEFINE_SHOW_ATTRIBUTE(hidma_dma);
 
 void hidma_debug_uninit(struct hidma_dev *dmadev)
 {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5e328bd10c27..907ae97a3ef4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -442,6 +442,7 @@ struct d40_base;
  * @queue: Queued jobs.
  * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
+ * @slave_config: DMA slave configuration.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
  * @src_def_cfg: Default cfg register setting for src.
@@ -468,6 +469,7 @@ struct d40_chan {
 	struct list_head queue;
 	struct list_head prepare_queue;
 	struct stedma40_chan_cfg dma_cfg;
+	struct dma_slave_config slave_config;
 	bool configured;
 	struct d40_base *base;
 	/* Default register configurations */
@@ -625,6 +627,10 @@ static void __iomem *chan_base(struct d40_chan *chan)
 #define chan_err(d40c, format, arg...)		\
 	d40_err(chan2dev(d40c), format, ## arg)
 
+static int d40_set_runtime_config_write(struct dma_chan *chan,
+				  struct dma_slave_config *config,
+				  enum dma_transfer_direction direction);
+
 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
 			      int lli_len)
 {
@@ -2216,6 +2222,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 		return NULL;
 	}
 
+	d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
+
 	spin_lock_irqsave(&chan->lock, flags);
 
 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
@@ -2634,11 +2642,22 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
 	return 0;
 }
 
-/* Runtime reconfiguration extension */
 static int d40_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+
+	memcpy(&d40c->slave_config, config, sizeof(*config));
+
+	return 0;
+}
+
+/* Runtime reconfiguration extension */
+static int d40_set_runtime_config_write(struct dma_chan *chan,
+				  struct dma_slave_config *config,
+				  enum dma_transfer_direction direction)
+{
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
 	enum dma_slave_buswidth src_addr_width, dst_addr_width;
 	dma_addr_t config_addr;
@@ -2655,7 +2674,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	dst_addr_width = config->dst_addr_width;
 	dst_maxburst = config->dst_maxburst;
 
-	if (config->direction == DMA_DEV_TO_MEM) {
+	if (direction == DMA_DEV_TO_MEM) {
 		config_addr = config->src_addr;
 
 		if (cfg->dir != DMA_DEV_TO_MEM)
@@ -2671,7 +2690,7 @@
 		if (dst_maxburst == 0)
 			dst_maxburst = src_maxburst;
 
-	} else if (config->direction == DMA_MEM_TO_DEV) {
+	} else if (direction == DMA_MEM_TO_DEV) {
 		config_addr = config->dst_addr;
 
 		if (cfg->dir != DMA_MEM_TO_DEV)
@@ -2689,7 +2708,7 @@
 	} else {
 		dev_err(d40c->base->dev,
 			"unrecognized channel direction %d\n",
-			config->direction);
+			direction);
 		return -EINVAL;
 	}
 
@@ -2746,12 +2765,12 @@
 
 	/* These settings will take precedence later */
 	d40c->runtime_addr = config_addr;
-	d40c->runtime_direction = config->direction;
+	d40c->runtime_direction = direction;
 	dev_dbg(d40c->base->dev,
 		"configured channel %s for %s, data width %d/%d, "
 		"maxburst %d/%d elements, LE, no flow control\n",
 		dma_chan_name(chan),
-		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
+		(direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
"RX" : "TX", src_addr_width, dst_addr_width, src_maxburst, dst_maxburst); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index c74a88b65039..6f26b59a7216 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct zynqmp_dma_chan *chan = to_chan(tx->chan); struct zynqmp_dma_desc_sw *desc, *new; dma_cookie_t cookie; + unsigned long irqflags; new = tx_to_desc(tx); - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); cookie = dma_cookie_assign(tx); if (!list_empty(&chan->pending_list)) { @@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) } list_add_tail(&new->node, &chan->pending_list); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); return cookie; } @@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw * zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc; + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); desc = list_first_entry(&chan->free_list, struct zynqmp_dma_desc_sw, node); list_del(&desc->node); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); INIT_LIST_HEAD(&desc->tx_list); /* Clear the src and dst descriptor memory */ @@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) static void zynqmp_dma_issue_pending(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_start_transfer(chan); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); } /** @@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_free_descriptors(chan); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); dma_free_coherent(chan->dev, (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); @@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data) { struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; u32 count; + unsigned long irqflags; - spin_lock(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); if (chan->err) { zynqmp_dma_reset(chan); @@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data) zynqmp_dma_start_transfer(chan); unlock: - spin_unlock(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); } /** @@ -776,11 +781,12 @@ unlock: static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); zynqmp_dma_free_descriptors(chan); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); return 0; } @@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( void *desc = NULL, *prev = NULL; size_t copy; u32 desc_cnt; + unsigned long irqflags; chan = to_chan(dchan); desc_cnt = DIV_ROUND_UP(len, 
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index c74a88b65039..6f26b59a7216 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
 	struct zynqmp_dma_desc_sw *desc, *new;
 	dma_cookie_t cookie;
+	unsigned long irqflags;
 
 	new = tx_to_desc(tx);
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	if (!list_empty(&chan->pending_list)) {
@@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	list_add_tail(&new->node, &chan->pending_list);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	return cookie;
 }
@@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw *
 zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
 {
 	struct zynqmp_dma_desc_sw *desc;
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	desc = list_first_entry(&chan->free_list,
 				struct zynqmp_dma_desc_sw, node);
 	list_del(&desc->node);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	INIT_LIST_HEAD(&desc->tx_list);
 	/* Clear the src and dst descriptor memory */
@@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_start_transfer(chan);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 	dma_free_coherent(chan->dev,
 		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
 		chan->desc_pool_v, chan->desc_pool_p);
@@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 {
 	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
 	u32 count;
+	unsigned long irqflags;
 
-	spin_lock(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 
 	if (chan->err) {
 		zynqmp_dma_reset(chan);
@@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 		zynqmp_dma_start_transfer(chan);
 
 unlock:
-	spin_unlock(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -776,11 +781,12 @@ unlock:
 static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	return 0;
 }
@@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 	void *desc = NULL, *prev = NULL;
 	size_t copy;
 	u32 desc_cnt;
+	unsigned long irqflags;
 
 	chan = to_chan(dchan);
 
 	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	if (desc_cnt > chan->desc_free_cnt) {
-		spin_unlock_bh(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, irqflags);
 		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
 		return NULL;
 	}
 
 	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	do {
 		/* Allocate and populate the descriptor */