author    David Stevens <stevensd@chromium.org>    2021-09-29 11:33:00 +0900
committer Joerg Roedel <jroedel@suse.de>           2021-09-29 12:50:42 +0200
commit    2cbc61a1b1665c84282dbf2b1747ffa0b6248639
tree      ed387add6bbcd2a61edef807e0e7e2daeb63c476
parent    e81e99bacc9f9347bda7808a949c1ce9fcc2bbf4
iommu/dma: Account for min_align_mask w/swiotlb
Pass the non-aligned size to __iommu_dma_map when using swiotlb bounce
buffers in iommu_dma_map_page, to account for min_align_mask.

To deal with granule alignment, __iommu_dma_map maps iova_align(size +
iova_off) bytes starting at phys - iova_off. If iommu_dma_map_page passes
an aligned size when using swiotlb, then this becomes
iova_align(iova_align(orig_size) + iova_off). Normally iova_off will be
zero when using swiotlb. However, this is not the case for devices that
set min_align_mask. When iova_off is non-zero, __iommu_dma_map ends up
mapping an extra page at the end of the buffer. Beyond just being a
security issue, the extra page is not cleaned up by __iommu_dma_unmap.
This causes problems when the IOVA is reused, due to collisions in the
iommu driver. Just passing the original size is sufficient, since
__iommu_dma_map will take care of granule alignment.

Fixes: 1f221a0d0dbf ("swiotlb: respect min_align_mask")
Signed-off-by: David Stevens <stevensd@chromium.org>
Link: https://lore.kernel.org/r/20210929023300.335969-8-stevensd@google.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
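The arithmetic is easiest to see with concrete numbers. Below is a minimal
userspace sketch (not kernel code) of the two rounding orders, assuming a
4 KiB IOVA granule; iova_align_sz() is a stand-in for the kernel's
iova_align(), and the offset and size values are made up for illustration:

#include <stdio.h>
#include <stddef.h>

#define GRANULE 4096UL

/* Stand-in for iova_align(): round size up to a granule multiple. */
static size_t iova_align_sz(size_t size)
{
	return (size + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
	size_t iova_off = 0x800;   /* sub-granule offset kept by min_align_mask */
	size_t orig_size = 0x1800; /* a 6 KiB transfer */

	/* What __iommu_dma_map ends up mapping for a given size argument: */
	size_t good = iova_align_sz(orig_size + iova_off);
	size_t bad  = iova_align_sz(iova_align_sz(orig_size) + iova_off);

	printf("original size passed:    0x%zx bytes mapped\n", good);  /* 0x2000 */
	printf("pre-aligned size passed: 0x%zx bytes mapped\n", bad);   /* 0x3000 */
	return 0;
}

With these values the pre-aligned path maps 0x3000 bytes instead of 0x2000,
i.e. exactly one extra granule at the end of the buffer.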
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/dma-iommu.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 289c49ead01a..342359727a59 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -806,7 +806,6 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
-	size_t aligned_size = size;
 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
 
 	/*
@@ -815,7 +814,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	 */
 	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
 		void *padding_start;
-		size_t padding_size;
+		size_t padding_size, aligned_size;
 
 		aligned_size = iova_align(iovad, size);
 		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
@@ -840,7 +839,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		arch_sync_dma_for_device(phys, size, dir);
 
-	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
 	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 	return iova;
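
For completeness, the reason iova_off can be non-zero here in the first
place: since commit 1f221a0d0dbf, swiotlb places the bounce copy so that the
low bits of the original physical address covered by the device's
min_align_mask are preserved. A userspace sketch of that idea, with made-up
addresses and mask (this is not how swiotlb actually picks slots):

#include <stdio.h>
#include <stdint.h>

#define GRANULE 4096ULL

int main(void)
{
	uint64_t min_align_mask = 0xfff;     /* device wants low 12 bits kept */
	uint64_t orig_phys = 0x80001a600ULL; /* unaligned source buffer */
	uint64_t slot_base = 0x40000000ULL;  /* granule-aligned bounce slot */

	/* Bounce address preserving orig_phys & min_align_mask: */
	uint64_t bounced = slot_base | (orig_phys & min_align_mask);

	/* The sub-granule offset that __iommu_dma_map then sees: */
	uint64_t iova_off = bounced & (GRANULE - 1);

	printf("bounced phys 0x%llx -> iova_off 0x%llx\n",
	       (unsigned long long)bounced, (unsigned long long)iova_off);
	return 0;
}

With a non-zero iova_off like this, pre-aligning the size in the caller
double counts the granule rounding, which is exactly what the last hunk
above removes.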