| author | Jacek Lawrynowicz | 2024-01-15 14:44:32 +0100 |
|---|---|---|
| committer | Jacek Lawrynowicz | 2024-01-22 10:28:52 +0100 |
| commit | b7a0e75632eb6568c82de9108bd29e130dd082d9 (patch) | |
| tree | 6fb429290e3d552458b56118c69fdab7e4027922 /drivers/accel | |
| parent | a8c099d5d0e4b0400cfddfd95b881c8bd9349a88 (diff) | |
accel/ivpu: Disable buffer sharing among VPU contexts
Buffer sharing among VPU contexts was not supported properly: a buffer
imported into another VPU context became a separate buffer object with a
duplicated sgt. Both the exported and the imported buffer could then be
DMA mapped, causing a double mapping on the same device.

Importing a buffer from another VPU context now just increases the
reference count of the original buffer object, leaving a single sgt and
fixing the problem above.
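For context, the reference-count behavior comes from the DRM PRIME core
rather than from this driver: when a dma-buf that was exported through the
core's default path is imported back into the exporting device, the core
returns the original GEM object instead of wrapping the dma-buf in a new
one. Below is a simplified, paraphrased sketch of that fast path, modeled
on drm_gem_prime_import_dev() in drivers/gpu/drm/drm_prime.c; it is not
the verbatim kernel code.

```c
/*
 * Paraphrased sketch of the same-device import fast path in
 * drm_gem_prime_import_dev() (drivers/gpu/drm/drm_prime.c).
 * drm_gem_prime_dmabuf_ops is static to that file, so this only
 * illustrates the logic; it is not standalone, buildable code.
 */
struct drm_gem_object *prime_import_sketch(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		/* Exported by the DRM core: priv is the GEM object. */
		struct drm_gem_object *obj = dma_buf->priv;

		if (obj->dev == dev) {
			/*
			 * Same device: take a reference on the existing
			 * object instead of creating a second one, so a
			 * single sgt and a single DMA mapping remain.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/* Different device: do a real import (attach + map the sgt). */
	return ERR_PTR(-EOPNOTSUPP); /* elided in this sketch */
}
```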
Buffers still can't be shared among VPU contexts, because each context
has its own MMU mapping and ivpu_bo supports only a single MMU mapping
per buffer. The solution would be to keep a list of mappings per buffer,
as the panfrost and etnaviv drivers do (see the sketch below); this will
be implemented in the future if required.
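For illustration only, here is a hypothetical shape such a mapping list
could take, loosely modeled on the panfrost driver's panfrost_gem_mapping.
None of these names exist in the ivpu driver today; the point is moving
the MMU mapping out of ivpu_bo into a per-context node.

```c
/*
 * Hypothetical per-context mapping, loosely modeled on panfrost's
 * panfrost_gem_mapping. All names here are illustrative.
 */
struct ivpu_bo_mapping {
	struct kref refcount;
	struct ivpu_bo *bo;            /* backing buffer object */
	struct ivpu_mmu_context *ctx;  /* owning VPU context */
	struct drm_mm_node mm_node;    /* VPU VA range in this ctx */
	struct list_head node;         /* entry in bo->mappings */
};

struct ivpu_bo {
	struct drm_gem_shmem_object base;
	struct list_head mappings;     /* one entry per VPU context */
	struct mutex mappings_lock;
	/* ... */
};

/* Look up the mapping of @bo in @ctx, taking a reference if found. */
static struct ivpu_bo_mapping *
ivpu_bo_mapping_get(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo_mapping *mapping;

	mutex_lock(&bo->mappings_lock);
	list_for_each_entry(mapping, &bo->mappings, node) {
		if (mapping->ctx == ctx) {
			kref_get(&mapping->refcount);
			mutex_unlock(&bo->mappings_lock);
			return mapping;
		}
	}
	mutex_unlock(&bo->mappings_lock);
	return NULL; /* caller would allocate and map a new entry */
}
```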
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240115134434.493839-8-jacek.lawrynowicz@linux.intel.com
Diffstat (limited to 'drivers/accel')
-rw-r--r-- | drivers/accel/ivpu/ivpu_gem.c | 44

1 file changed, 6 insertions(+), 38 deletions(-)
```diff
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 4de454bfbf91..95e731e13941 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -222,6 +222,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
 	struct ivpu_bo *bo = to_ivpu_bo(obj);
 	struct ivpu_addr_range *range;
 
+	if (bo->ctx) {
+		ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+			  file_priv->ctx.id, bo->ctx->id);
+		return -EALREADY;
+	}
+
 	if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
 		range = &vdev->hw->ranges.shave;
 	else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -252,47 +258,9 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
 	drm_gem_shmem_free(&bo->base);
 }
 
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
-	.cache_sgt_mapping = true,
-	.attach = drm_gem_map_attach,
-	.detach = drm_gem_map_detach,
-	.map_dma_buf = drm_gem_map_dma_buf,
-	.unmap_dma_buf = drm_gem_unmap_dma_buf,
-	.release = drm_gem_dmabuf_release,
-	.mmap = drm_gem_dmabuf_mmap,
-	.vmap = drm_gem_dmabuf_vmap,
-	.vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
-	struct drm_device *dev = obj->dev;
-	struct dma_buf_export_info exp_info = {
-		.exp_name = KBUILD_MODNAME,
-		.owner = dev->driver->fops->owner,
-		.ops = &ivpu_bo_dmabuf_ops,
-		.size = obj->size,
-		.flags = flags,
-		.priv = obj,
-		.resv = obj->resv,
-	};
-	void *sgt;
-
-	/*
-	 * Make sure that pages are allocated and dma-mapped before exporting the bo.
-	 * DMA-mapping is required if the bo will be imported to the same device.
-	 */
-	sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
-	if (IS_ERR(sgt))
-		return sgt;
-
-	return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
 static const struct drm_gem_object_funcs ivpu_gem_funcs = {
 	.free = ivpu_bo_free,
 	.open = ivpu_bo_open,
-	.export = ivpu_bo_export,
 	.print_info = drm_gem_shmem_object_print_info,
 	.pin = drm_gem_shmem_object_pin,
 	.unpin = drm_gem_shmem_object_unpin,
```
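Note on the removed code: with the driver-specific .export callback gone,
the DRM core falls back to the default drm_gem_prime_export(), so exported
buffers carry the core's own dma-buf ops and a same-device import can take
the reference-count fast path sketched above. This is exactly what the old
ivpu_bo_dmabuf_ops prevented: a dma-buf with driver-private ops was not
recognized as self-exported and got re-imported as a second object with a
duplicated sgt. The eager drm_gem_shmem_get_pages_sgt() call in the deleted
ivpu_bo_export() is likewise unnecessary for that case, because a
same-device import never creates a dma-buf attachment at all.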