author	Christoph Hellwig	2018-12-25 13:29:54 +0100
committer	Christoph Hellwig	2019-02-20 07:27:00 -0700
commit	82c5de0ab8dbd6035223ad69e76bd8a88a0a9399 (patch)
tree	8401f73e927f3f7f9e93450f88c895243b41d911 /kernel/dma
parent	91a6fda95cb67c94b887355690d1923a7eb6f630 (diff)
dma-mapping: remove the DMA_MEMORY_EXCLUSIVE flag
All users of dma_declare_coherent want their allocations to be exclusive, so default to exclusive allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
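For callers, the visible change is the dropped flags argument on dma_declare_coherent_memory(). The sketch below is illustrative only and not part of this patch; foo_declare_coherent() and its parameters are made up to show how a driver-side call would look before and after the change.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver helper, for illustration only.  Before this
 * commit the declaration carried an explicit flags argument:
 *
 *	dma_declare_coherent_memory(dev, phys, device_addr, size,
 *				    DMA_MEMORY_EXCLUSIVE);
 *
 * With the flag removed, exclusive behaviour is the only behaviour,
 * so the call simply omits it.
 */
static int foo_declare_coherent(struct device *dev, phys_addr_t phys,
				dma_addr_t device_addr, size_t size)
{
	/* Returns 0 on success, a negative errno on failure. */
	return dma_declare_coherent_memory(dev, phys, device_addr, size);
}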
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/coherent.c	25
1 file changed, 7 insertions, 18 deletions
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 1d12a31af6d7..29fd6590dc1e 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -14,7 +14,6 @@ struct dma_coherent_mem {
dma_addr_t device_base;
unsigned long pfn_base;
int size;
- int flags;
unsigned long *bitmap;
spinlock_t spinlock;
bool use_dev_dma_pfn_offset;
@@ -38,9 +37,9 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
return mem->device_base;
}
-static int dma_init_coherent_memory(
- phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
- struct dma_coherent_mem **mem)
+static int dma_init_coherent_memory(phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size,
+ struct dma_coherent_mem **mem)
{
struct dma_coherent_mem *dma_mem = NULL;
void *mem_base = NULL;
@@ -73,7 +72,6 @@ static int dma_init_coherent_memory(
dma_mem->device_base = device_addr;
dma_mem->pfn_base = PFN_DOWN(phys_addr);
dma_mem->size = pages;
- dma_mem->flags = flags;
spin_lock_init(&dma_mem->spinlock);
*mem = dma_mem;
@@ -110,12 +108,12 @@ static int dma_assign_coherent_memory(struct device *dev,
}
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
+ dma_addr_t device_addr, size_t size)
{
struct dma_coherent_mem *mem;
int ret;
- ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+ ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
if (ret)
return ret;
@@ -190,15 +188,7 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
return 0;
*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
- if (*ret)
- return 1;
-
- /*
- * In the case where the allocation can not be satisfied from the
- * per-device area, try to fall back to generic memory if the
- * constraints allow it.
- */
- return mem->flags & DMA_MEMORY_EXCLUSIVE;
+ return 1;
}
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
@@ -327,8 +317,7 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
if (!mem) {
ret = dma_init_coherent_memory(rmem->base, rmem->base,
- rmem->size,
- DMA_MEMORY_EXCLUSIVE, &mem);
+ rmem->size, &mem);
if (ret) {
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);