author     Lucas Stach                2019-07-08 09:07:42 -0400
committer  Mauro Carvalho Chehab      2019-07-23 11:46:27 -0400
commit     596a5a58e89a4bfe0d8a275ab552de990b18f0ac (patch)
tree       3446af715a7c8fb4e68c6ea1676772f03ddc2f25 /drivers/media/common
parent     81f96ebc8a663869948c197c60f0356f247ac91c (diff)
media: vb2-dc: skip CPU sync in map/unmap dma_buf
This is roughly equivalent to commit ca0e68e21aae ("drm/prime: skip CPU sync in map/unmap dma_buf"). The contig memory allocated is already device-coherent memory, so there is no point in doing a CPU sync when mapping it to another device. Also, most importers currently cache the mapping, so the CPU sync would only happen on the first import anyway; we are better off not pretending to do a cache synchronization at all.

This gets rid of a lot of CPU overhead in use cases where those dma-bufs are regularly imported and detached again, as Weston is currently doing in the DRM compositor.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
[hverkuil-cisco@xs4all.nl: fix checkpatch warnings]
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
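For context, a minimal sketch of the mapping pattern the patch switches to: a scatterlist describing a device-coherent buffer is mapped and unmapped for an importing device with DMA_ATTR_SKIP_CPU_SYNC, so no per-map CPU cache maintenance is performed. The helper names and the "importer"/"sgt" parameters are illustrative placeholders, not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative only: map a device-coherent buffer for an importing device. */
static int map_coherent_buf(struct device *importer, struct sg_table *sgt,
                            enum dma_data_direction dir)
{
        /* The memory is already device coherent, so skip the CPU cache sync. */
        sgt->nents = dma_map_sg_attrs(importer, sgt->sgl, sgt->orig_nents,
                                      dir, DMA_ATTR_SKIP_CPU_SYNC);
        return sgt->nents ? 0 : -EIO;
}

/* Illustrative only: tear the mapping down, again without a CPU cache sync. */
static void unmap_coherent_buf(struct device *importer, struct sg_table *sgt,
                               enum dma_data_direction dir)
{
        dma_unmap_sg_attrs(importer, sgt->sgl, sgt->orig_nents,
                           dir, DMA_ATTR_SKIP_CPU_SYNC);
}

The attribute only matters on non-coherent systems, where dma_map_sg() would otherwise flush or invalidate CPU caches for the buffer; because the allocation already came from the coherent allocator, that maintenance is redundant, which is exactly the reasoning in the commit message.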
Diffstat (limited to 'drivers/media/common')
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-dma-contig.c | 23
1 file changed, 16 insertions, 7 deletions
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 7d77e4d30c8a..44cd0e530bbd 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -267,8 +267,14 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
-               dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                            attach->dma_dir);
+               /*
+                * Cache sync can be skipped here, as the vb2_dc memory is
+                * allocated from device coherent memory, which means the
+                * memory locations do not require any explicit cache
+                * maintenance prior or after being used by the device.
+                */
+               dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                                  attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
@@ -293,14 +299,17 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
-               dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                            attach->dma_dir);
+               dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                                  attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                attach->dma_dir = DMA_NONE;
        }

-       /* mapping to the client with new direction */
-       sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                               dma_dir);
+       /*
+        * mapping to the client with new direction, no cache sync
+        * required see comment in vb2_dc_dmabuf_ops_detach()
+        */
+       sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                                     dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);