author    Paul Cercueil  2021-05-23 18:04:13 +0100
committer Paul Cercueil  2021-05-25 11:41:07 +0100
commit    cf8ccbc72d6109eddf6ae04196addf62cf716639 (patch)
tree      e1caa0c6dfe844c338b6f057a7ff6c967a7b2a12 /include/drm
parent    5e4322a8b266bc9f5ee7ea4895f661c01dbd7cb3 (diff)
drm: Add support for GEM buffers backed by non-coherent memory
Having GEM buffers backed by non-coherent memory is interesting in the
particular case where rendering to a non-coherent buffer and then
syncing the data cache is faster than rendering to a write-combine
buffer, and (by extension) much faster than using a shadow buffer.
This is true for instance on some Ingenic SoCs, where even simple
blits (e.g. memcpy) are about three times faster using this method.

Add a 'map_noncoherent' flag to the drm_gem_cma_object structure, which
can be set by the drivers when they create the dumb buffer.

Since this really only applies to software rendering, disable this flag
as soon as the CMA objects are exported via PRIME.

v3: New patch. Now uses a simple 'map_noncoherent' flag to control how
    the objects are mapped, and uses the new dma_mmap_pages function.

v4: Make sure map_noncoherent is always disabled when creating GEM
    objects meant to be used with dma-buf.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20210523170415.90410-2-paul@crapouillou.net
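As a rough driver-side illustration of how the flag described above can be
set when dumb buffers are created (the function and driver names below are
placeholders, not code from this series), a driver can allocate its own
drm_gem_cma_object in the drm_driver .gem_create_object hook and opt in to
non-coherent backing memory before the CMA helpers take over:

#include <linux/slab.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

/*
 * Placeholder hook: allocate the CMA object ourselves so that the new
 * 'map_noncoherent' flag can be set before the GEM CMA helpers allocate
 * and map the backing memory.
 */
static struct drm_gem_object *
example_gem_create_object(struct drm_device *drm, size_t size)
{
        struct drm_gem_cma_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        /* Request cached, non-coherent backing memory for this object. */
        obj->map_noncoherent = true;

        return &obj->base;
}

static const struct drm_driver example_driver = {
        /* ... other drm_driver fields elided ... */
        .gem_create_object = example_gem_create_object,
};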
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/drm_gem_cma_helper.h  |  3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 0a9711caa3e8..cd13508acbc1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -16,6 +16,7 @@ struct drm_mode_create_dumb;
  *       more than one entry but they are guaranteed to have contiguous
  *       DMA addresses.
  * @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
  */
 struct drm_gem_cma_object {
         struct drm_gem_object base;
@@ -24,6 +25,8 @@ struct drm_gem_cma_object {
 
         /* For objects with DMA memory allocated by GEM CMA */
         void *vaddr;
+
+        bool map_noncoherent;
 };
 
 #define to_drm_gem_cma_obj(gem_obj) \
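
The header change above only adds the flag; per the commit message, the
mapping code then uses the new dma_mmap_pages() function when the flag is
set. A minimal sketch of how such an mmap path could branch on it
(simplified; the function name and error handling are assumptions, not the
actual drm_gem_cma_helper.c code):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

#include <drm/drm_gem_cma_helper.h>

static int example_cma_mmap(struct drm_gem_cma_object *cma_obj,
                            struct vm_area_struct *vma)
{
        struct device *dev = cma_obj->base.dev->dev;
        size_t size = vma->vm_end - vma->vm_start;

        if (cma_obj->map_noncoherent) {
                /*
                 * Cached, non-coherent mapping: the data cache must be
                 * synced after CPU rendering, as described in the commit
                 * message.
                 */
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                return dma_mmap_pages(dev, vma, size,
                                      virt_to_page(cma_obj->vaddr));
        }

        /* Default: write-combine mapping of the coherent allocation. */
        return dma_mmap_wc(dev, vma, cma_obj->vaddr, cma_obj->paddr, size);
}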