path: root/arch/arm
author     Linus Torvalds  2020-04-04 10:12:47 -0700
committer  Linus Torvalds  2020-04-04 10:12:47 -0700
commit     6f43bae38269a55534e1f86a9917318167de6639 (patch)
tree       e3275f7ec8fbe39e679be847fe5dbd6486d12431 /arch/arm
parent     1e396a5d171d61aa00d49389d92f8afb21568635 (diff)
parent     fd27a526bb381f43dded6db30b3b016468ab0e6c (diff)
Merge tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - fix an integer overflow in the coherent pool (Kevin Grandemange)

 - provide support for in-place uncached remapping and use that for
   openrisc

 - fix the arm coherent allocator to take the bus limit into account

* tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: merge __dma_supported into arm_dma_supported
  ARM/dma-mapping: take the bus limit into account in __dma_alloc
  ARM/dma-mapping: remove get_coherent_dma_mask
  openrisc: use the generic in-place uncached DMA allocator
  dma-direct: provide a arch_dma_clear_uncached hook
  dma-direct: make uncached_kernel_address more general
  dma-direct: consolidate the error handling in dma_direct_alloc_pages
  dma-direct: remove the cached_kernel_address hook
  dma-coherent: fix integer overflow in the reserved-memory dma allocation
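The ARM coherent-allocator part of this pull drops get_coherent_dma_mask() and instead combines the device's coherent DMA mask with its bus DMA limit directly in __dma_alloc() (see the dma-mapping.c hunk below). A minimal sketch of that combination, wrapped in a hypothetical helper purely for illustration:

	#include <linux/kernel.h>	/* min_not_zero() */
	#include <linux/device.h>

	/*
	 * Hypothetical helper mirroring the mask computation the diff
	 * below open-codes in __dma_alloc(): when the bus imposes its
	 * own DMA limit, honour it; when the limit is zero (unset),
	 * fall back to the device's coherent DMA mask alone.
	 */
	static u64 effective_coherent_mask(struct device *dev)
	{
		return min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	}

With this, the zero-mask warning and range check of the removed get_coherent_dma_mask() no longer sit in the allocation path; the idea is that an unusable mask is caught when it is set, via dma_supported().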
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/dma-iommu.h |  2
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 76
2 files changed, 18 insertions(+), 60 deletions(-)
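The dma-mapping.c changes also fold __dma_supported() into arm_dma_supported(); at its core the check converts the DMA mask into a page-frame number and compares it against the platform's DMA PFN limit. A standalone sketch of that arithmetic, using the 24-bit device mentioned in the comment in the diff, assuming 4 KiB pages and no bus-to-physical offset (the real dma_to_pfn() also applies any per-device PFN offset):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages, as on ARM */

	/*
	 * Simplified model of the check in arm_dma_supported(): a DMA
	 * mask is supported when the highest page frame it can address
	 * reaches the platform's DMA PFN limit.  Assumes bus address ==
	 * physical address, unlike the real dma_to_pfn().
	 */
	static int mask_covers_dma_zone(uint64_t mask, unsigned long max_dma_pfn)
	{
		return (unsigned long)(mask >> PAGE_SHIFT) >= max_dma_pfn;
	}

	int main(void)
	{
		/* 24-bit bus-mastering device from the comment below. */
		uint64_t mask = 0x00ffffff;

		/* 0x00ffffff >> 12 == 0xfff: PFNs 0..0xfff are reachable. */
		printf("highest reachable pfn: %#llx\n",
		       (unsigned long long)(mask >> PAGE_SHIFT));
		printf("supported with max_dma_pfn 0xfff:  %d\n",
		       mask_covers_dma_zone(mask, 0xfff));
		printf("supported with max_dma_pfn 0x1000: %d\n",
		       mask_covers_dma_zone(mask, 0x1000));
		return 0;
	}

With a 24-bit mask the device can reach PFNs 0 through 0xfff, so the mask is only reported as supported when the kernel's DMA zone ends at or below that frame.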
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 772f48ef84b7..86405cc81385 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
-int arm_dma_supported(struct device *dev, u64 mask);
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9414d72f664b..8a8949174b1c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static int arm_dma_supported(struct device *dev, u64 mask)
+{
+ unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ return dma_to_pfn(dev, mask) >= max_dma_pfn;
+}
+
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -219,49 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
-{
- unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
- /*
- * Translate the device's DMA mask to a PFN limit. This
- * PFN number includes the page which we can DMA to.
- */
- if (dma_to_pfn(dev, mask) < max_dma_pfn) {
- if (warn)
- dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
- mask,
- dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
- max_dma_pfn + 1);
- return 0;
- }
-
- return 1;
-}
-
-static u64 get_coherent_dma_mask(struct device *dev)
-{
- u64 mask = (u64)DMA_BIT_MASK(32);
-
- if (dev) {
- mask = dev->coherent_dma_mask;
-
- /*
- * Sanity check the DMA mask - it must be non-zero, and
- * must be able to be satisfied by a DMA allocation.
- */
- if (mask == 0) {
- dev_warn(dev, "coherent DMA mask is unset\n");
- return 0;
- }
-
- if (!__dma_supported(dev, mask, true))
- return 0;
- }
-
- return mask;
-}
-
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
@@ -688,7 +662,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
unsigned long attrs, const void *caller)
{
- u64 mask = get_coherent_dma_mask(dev);
+ u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
struct page *page = NULL;
void *addr;
bool allowblock, cma;
@@ -712,9 +686,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
}
#endif
- if (!mask)
- return NULL;
-
buf = kzalloc(sizeof(*buf),
gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
if (!buf)
@@ -1087,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
dir);
}
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-int arm_dma_supported(struct device *dev, u64 mask)
-{
- return __dma_supported(dev, mask, false);
-}
-
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
/*