 drivers/xen/swiotlb-xen.c |  8
 include/linux/swiotlb.h   |  8
 kernel/dma/direct.c       |  2
 kernel/dma/swiotlb.c      | 34
 4 files changed, 32 insertions(+), 20 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ae1df496bf38..adcabd9473eb 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
-				     attrs);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+				     size, size, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir,
+		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
 	}
@@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
 }
 
 static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 361f62bb4a8e..cde3dc18e21a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -46,13 +46,17 @@ enum dma_sync_target {
 
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys, size_t size,
+					  phys_addr_t phys,
+					  size_t mapping_size,
+					  size_t alloc_size,
 					  enum dma_data_direction dir,
 					  unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
-				     size_t size, enum dma_data_direction dir,
+				     size_t mapping_size,
+				     size_t alloc_size,
+				     enum dma_data_direction dir,
 				     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 795c9b095d75..a7f2a0163426 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -297,7 +297,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
 	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
 }
 EXPORT_SYMBOL(dma_direct_unmap_page);
 
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9de232229063..796a44f8ef5a 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr, size_t size,
+				   phys_addr_t orig_addr,
+				   size_t mapping_size,
+				   size_t alloc_size,
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
@@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
 			     sme_active() ? "SME" : "SEV");
 
+	if (mapping_size > alloc_size) {
+		dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+			      mapping_size, alloc_size);
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
+
 	mask = dma_get_seg_boundary(hwdev);
 
 	tbl_dma_addr &= mask;
@@ -471,8 +479,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
-	 * Carefully handle integer overflow which can occur when mask == ~0UL.
-	 */
+	 * Carefully handle integer overflow which can occur when mask == ~0UL.
+	 */
 	max_slots = mask + 1
 		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
 		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	 * For mappings greater than or equal to a page, we limit the stride
 	 * (and hence alignment) to a page size.
 	 */
-	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size >= PAGE_SIZE)
+	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	if (alloc_size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
@@ -547,7 +555,7 @@ not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
 		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
-			 size, io_tlb_nslabs, tmp_io_tlb_used);
+			 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
 	return (phys_addr_t)DMA_MAPPING_ERROR;
 found:
 	io_tlb_used += nslots;
@@ -562,7 +570,7 @@ found:
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
 
 	return tlb_addr;
 }
@@ -571,11 +579,11 @@ found:
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir,
-			      unsigned long attrs)
+			      size_t mapping_size, size_t alloc_size,
+			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
@@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	if (orig_addr != INVALID_PHYS_ADDR &&
 	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-			*phys, size, dir, attrs);
+			*phys, size, size, dir, attrs);
 	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
 		return false;
 
 	/* Ensure that the address returned is DMA'ble */
 	*dma_addr = __phys_to_dma(dev, *phys);
 	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return false;
 	}