-rw-r--r--   MAINTAINERS                      17
-rw-r--r--   arch/arm/include/asm/device.h     1
-rw-r--r--   arch/arm/mm/dma-mapping.c        10
-rw-r--r--   kernel/dma/swiotlb.c             40
-rw-r--r--   lib/sg_pool.c                    16
5 files changed, 38 insertions(+), 46 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 4157710d6f8d..7547ffce5032 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6171,6 +6171,7 @@ F: include/asm-generic/dma-mapping.h
F: include/linux/dma-direct.h
F: include/linux/dma-mapping.h
F: include/linux/dma-map-ops.h
+F: include/linux/swiotlb.h
F: kernel/dma/
DMA MAPPING BENCHMARK
@@ -19749,16 +19750,6 @@ S: Maintained
F: Documentation/admin-guide/svga.rst
F: arch/x86/boot/video*
-SWIOTLB SUBSYSTEM
-M: Christoph Hellwig <hch@infradead.org>
-L: iommu@lists.linux.dev
-S: Supported
-W: http://git.infradead.org/users/hch/dma-mapping.git
-T: git git://git.infradead.org/users/hch/dma-mapping.git
-F: arch/*/kernel/pci-swiotlb.c
-F: include/linux/swiotlb.h
-F: kernel/dma/swiotlb.c
-
SWITCHDEV
M: Jiri Pirko <jiri@resnulli.us>
M: Ivan Vecera <ivecera@redhat.com>
@@ -22475,8 +22466,10 @@ M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: iommu@lists.linux.dev
S: Supported
-F: arch/x86/xen/*swiotlb*
-F: drivers/xen/*swiotlb*
+F: arch/*/include/asm/xen/swiotlb-xen.h
+F: drivers/xen/swiotlb-xen.c
+F: include/xen/arm/swiotlb-xen.h
+F: include/xen/swiotlb-xen.h
XFS FILESYSTEM
C: irc://irc.oftc.net/xfs
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 8754c0f5fc90..c6beb1708c64 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -9,7 +9,6 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
- unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
};
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ef691a5720d2..d7909091cf97 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1769,8 +1769,14 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- dev->archdata.dma_coherent = coherent;
- dev->dma_coherent = coherent;
+ /*
+ * Due to legacy code that sets the ->dma_coherent flag from a bus
+ * notifier we can't just assign coherent to the ->dma_coherent flag
+ * here, but instead have to make sure we only set but never clear it
+ * for now.
+ */
+ if (coherent)
+ dev->dma_coherent = true;
/*
* Don't override the dma_ops if they have already been set. Ideally
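For context, the new comment refers to platforms that mark DMA coherency from a bus notifier before arch_setup_dma_ops() runs, which is why the flag has to be treated as sticky here. A minimal, hypothetical sketch of that legacy pattern (the notifier name is illustrative, not taken from the tree):

/*
 * Hypothetical sketch, not actual kernel code: a platform marks each
 * device added to a hardware-coherent bus as DMA coherent before
 * arch_setup_dma_ops() ever gets called for it.
 */
static int example_coherent_bus_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;

	if (event == BUS_NOTIFY_ADD_DEVICE)
		dev->dma_coherent = true;
	return NOTIFY_OK;
}
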
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 0ef6b12f961d..339a990554e7 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -346,22 +346,27 @@ retry:
memblock_free(tlb, PAGE_ALIGN(bytes));
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
- if (nslabs < IO_TLB_MIN_SLABS)
- panic("%s: Failed to remap %zu bytes\n",
- __func__, bytes);
- goto retry;
+ if (nslabs >= IO_TLB_MIN_SLABS)
+ goto retry;
+
+ pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+ return;
}
alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
- if (!mem->slots)
- panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
- __func__, alloc_size, PAGE_SIZE);
+ if (!mem->slots) {
+ pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
+ __func__, alloc_size, PAGE_SIZE);
+ return;
+ }
mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
default_nareas), SMP_CACHE_BYTES);
- if (!mem->areas)
- panic("%s: Failed to allocate mem->areas.\n", __func__);
+ if (!mem->areas) {
+ pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
+ return;
+ }
swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
default_nareas);
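
The reworked error handling above turns what used to be boot-time panics into warnings: on a remap failure the bounce buffer is halved and retried until it would drop below IO_TLB_MIN_SLABS, and allocation failures simply leave the system without SWIOTLB. A rough sketch of that shrink-and-retry flow, where alloc_and_remap() is a hypothetical stand-in for the memblock allocation plus the optional remap step:

/*
 * Illustrative only; alloc_and_remap() stands in for the
 * memblock_alloc() and remap() calls in swiotlb_init_remap().
 */
while (!(tlb = alloc_and_remap(bytes))) {
	nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
	if (nslabs < IO_TLB_MIN_SLABS) {
		pr_warn("swiotlb: failed to remap %zu bytes\n", bytes);
		return;		/* boot continues, just without a bounce buffer */
	}
	bytes = nslabs << IO_TLB_SHIFT;
}
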
@@ -545,9 +550,8 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
}
if (PageHighMem(pfn_to_page(pfn))) {
- /* The buffer does not have a mapping. Map it in and copy */
unsigned int offset = orig_addr & ~PAGE_MASK;
- char *buffer;
+ struct page *page;
unsigned int sz = 0;
unsigned long flags;
@@ -555,12 +559,11 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
- buffer = kmap_atomic(pfn_to_page(pfn));
+ page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE)
- memcpy(vaddr, buffer + offset, sz);
+ memcpy_from_page(vaddr, page, offset, sz);
else
- memcpy(buffer + offset, vaddr, sz);
- kunmap_atomic(buffer);
+ memcpy_to_page(page, offset, vaddr, sz);
local_irq_restore(flags);
size -= sz;
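
memcpy_from_page() and memcpy_to_page() from <linux/highmem.h> bundle the temporary kernel mapping together with the copy, which is why the explicit kmap_atomic()/kunmap_atomic() pair can be dropped above. A simplified sketch of what the two helpers do (the real definitions add debug checks):

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}
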
@@ -731,8 +734,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
int index;
phys_addr_t tlb_addr;
- if (!mem || !mem->nslabs)
- panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+ if (!mem || !mem->nslabs) {
+ dev_warn_ratelimited(dev,
+ "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
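
With the panic gone, swiotlb_tbl_map_single() now reports failure through its return value, so callers must check for it. A sketch of the expected caller-side handling, modelled on swiotlb_map() in the same file (argument values shown are the ones that function passes):

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0,
					      dir, attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;	/* no bounce buffer available */
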
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index a0b1a52cd6f7..9bfe60ca3f37 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/slab.h>
@@ -177,16 +177,4 @@ cleanup_sdb:
return -ENOMEM;
}
-static __exit void sg_pool_exit(void)
-{
- int i;
-
- for (i = 0; i < SG_MEMPOOL_NR; i++) {
- struct sg_pool *sgp = sg_pools + i;
- mempool_destroy(sgp->pool);
- kmem_cache_destroy(sgp->slab);
- }
-}
-
-module_init(sg_pool_init);
-module_exit(sg_pool_exit);
+subsys_initcall(sg_pool_init);
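
sg_pool can only be built into the kernel, so the module exit path was dead code; switching to subsys_initcall() also sets the pools up earlier in boot, before any driver initcalls run, and they are never torn down. A minimal, hypothetical sketch of that pattern (example names only):

#include <linux/init.h>

/* Hypothetical example: long-lived pools created early via
 * subsys_initcall(), with no matching teardown path. */
static int __init example_pool_init(void)
{
	/* allocate pools that live for the lifetime of the kernel */
	return 0;
}
subsys_initcall(example_pool_init);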