From 89c815ef07a1db0ac2cc09d06cb2d5c3d86d6322 Mon Sep 17 00:00:00 2001
From: Thomas Zimmermann
Date: Thu, 21 Jun 2018 15:21:35 +0200
Subject: drm/ttm: Introduce ttm_bo_get() and ttm_bo_put() for ref counting

The TTM buffer-object interface provides ttm_bo_reference() and
ttm_bo_unref() for managing reference counts. Replacing them with
ttm_bo_get() and ttm_bo_put() aligns the API with conventions used
throughout the Linux kernel.

The implementation of ttm_bo_unref() clears the supplied pointer to
NULL. This leads to workarounds where the caller saves the pointer's
value before unreferencing the BO. ttm_bo_put() does not clear the
supplied pointer.

Signed-off-by: Thomas Zimmermann
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 include/drm/ttm/ttm_bo_api.h | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c67977aa1a0e..a01ba2032f0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -283,18 +283,30 @@ struct ttm_operation_ctx {
 /* when serving page fault or suspend, allow alloc anyway */
 #define TTM_OPT_FLAG_FORCE_ALLOC 0x2
 
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+	kref_get(&bo->kref);
+}
+
 /**
  * ttm_bo_reference - reference a struct ttm_buffer_object
  *
  * @bo: The buffer object.
  *
  * Returns a refcounted pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_get instead.
  */
 static inline struct ttm_buffer_object *
 ttm_bo_reference(struct ttm_buffer_object *bo)
 {
-	kref_get(&bo->kref);
+	ttm_bo_get(bo);
 	return bo;
 }
 
@@ -345,12 +357,23 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    struct ttm_placement *placement,
 		    struct ttm_operation_ctx *ctx);
 
+/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
 /**
  * ttm_bo_unref
  *
  * @bo: The buffer object.
  *
  * Unreference and clear a pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_put instead.
  */
 void ttm_bo_unref(struct ttm_buffer_object **bo);
 
-- 
cgit v1.2.3
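The practical difference shows up in callers that still need the pointer
value after dropping their reference. A minimal before/after sketch; obj
and trace_bo_released() are hypothetical stand-ins, not code from this
patch:

	/* Before: ttm_bo_unref() sets the supplied pointer to NULL,
	 * so its value has to be saved up front. */
	struct ttm_buffer_object *saved = obj->bo;

	ttm_bo_unref(&obj->bo);		/* obj->bo is now NULL */
	trace_bo_released(saved);	/* hypothetical follow-up use */

	/* After: ttm_bo_put() leaves the supplied pointer untouched. */
	ttm_bo_put(obj->bo);
	trace_bo_released(obj->bo);	/* pointer value still usable */
	obj->bo = NULL;			/* clear explicitly if desired */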
From 8dc9fbbf274b7b2a647e06141aee70ffabf6dbc0 Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh
Date: Fri, 13 Jul 2018 15:21:13 +0530
Subject: drm/scheduler: add a pointer to scheduler in the rq

This patch is in preparation for better load balancing in the
scheduler. It allows us to associate entities with run queues
instead of binding them to a scheduler.

Signed-off-by: Nayan Deshmukh
Reviewed-by: Christian König
Acked-by: Eric Anholt
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 6 ++++--
 include/drm/gpu_scheduler.h               | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 7d2560699b84..429b1328653a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -69,11 +69,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
  *
  * Initializes a scheduler runqueue.
  */
-static void drm_sched_rq_init(struct drm_sched_rq *rq)
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+			      struct drm_sched_rq *rq)
 {
 	spin_lock_init(&rq->lock);
 	INIT_LIST_HEAD(&rq->entities);
 	rq->current_entity = NULL;
+	rq->sched = sched;
 }
 
 /**
@@ -926,7 +928,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->timeout = timeout;
 	sched->hang_limit = hang_limit;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
-		drm_sched_rq_init(&sched->sched_rq[i]);
+		drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 4214ceb71c05..43e93d6077cf 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -93,6 +93,7 @@ struct drm_sched_entity {
  * struct drm_sched_rq - queue of entities to be scheduled.
  *
  * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs.
  * @entities: list of the entities to be scheduled.
  * @current_entity: the entity which is to be scheduled.
  *
@@ -102,6 +103,7 @@ struct drm_sched_entity {
  */
 struct drm_sched_rq {
 	spinlock_t lock;
+	struct drm_gpu_scheduler *sched;
 	struct list_head entities;
 	struct drm_sched_entity *current_entity;
 };
 
-- 
cgit v1.2.3
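The back-pointer is what lets later patches drop the explicit scheduler
argument from entity-level code: anything that holds a run queue can
recover the scheduler that owns it. A minimal sketch, assuming a
hypothetical helper; the wait queue is the one initialized in
drm_sched_init() above:

	static void example_wake_owner(struct drm_sched_rq *rq)
	{
		/* Recover the owning scheduler from the run queue alone,
		 * without threading a sched pointer through the callers. */
		struct drm_gpu_scheduler *sched = rq->sched;

		wake_up_interruptible(&sched->wake_up_worker);
	}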
From aa16b6c6b4d979234f830a48add47d02c12bb569 Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh
Date: Fri, 13 Jul 2018 15:21:14 +0530
Subject: drm/scheduler: modify args of drm_sched_entity_init

Replace the run queue argument with a list of run queues, and drop the
sched argument, as the scheduler is now reachable through the run queue
itself.

Signed-off-by: Nayan Deshmukh
Reviewed-by: Christian König
Acked-by: Eric Anholt
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  3 +--
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     |  4 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  8 ++++----
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 ++++++++++++--------
 drivers/gpu/drm/v3d/v3d_drv.c             |  7 +++----
 include/drm/gpu_scheduler.h               |  6 +++---
 11 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0120b24fae1b..83e3b320a793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
 
-		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, &ctx->guilty);
+		r = drm_sched_entity_init(&ctx->rings[i].entity,
+					  &rq, 1, &ctx->guilty);
 		if (r)
 			goto failed;
 	}

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6a3fead5c1f0..11a12483c995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1918,8 +1918,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		ring = adev->mman.buffer_funcs_ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3e70eb61a960..a6c2cace4b9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 		ring = &adev->uvd.inst[j].ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
+					  1, NULL);
 		if (r != 0) {
 			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
 			return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 6ae1ad7e83b3..ffb0fcc9707e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0fd0a718763b..484e2c19c027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2564,8 +2564,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
 	if (r)
 		return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2623f249cb7a..1c118c02e8cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle)
 		struct drm_sched_rq *rq;
 		ring = &adev->uvd.inst->ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index ce360ad16856..d48bc3393545 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle)
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		ring = &adev->uvd.inst[j].ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
 			return r;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 45bfdf4cc107..36414ba56b22 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
+		struct drm_sched_rq *rq;
 
 		if (gpu) {
-			drm_sched_entity_init(&gpu->sched,
-				&ctx->sched_entity[i],
-				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				NULL);
+			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+			drm_sched_entity_init(&ctx->sched_entity[i],
+					      &rq, 1, NULL);
 		}
 	}

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 429b1328653a..16bf446aa6b3 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * drm_sched_entity_init - Init a context entity used by scheduler when
  * submit to HW ring.
  *
- * @sched: scheduler instance
  * @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queues on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
+ * Note: rq_list should have at least one element to schedule
+ * the entity
+ *
 * Returns 0 on success or a negative error code on failure.
 */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty)
 {
-	if (!(sched && entity && rq))
+	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
-	entity->rq = rq;
-	entity->sched = sched;
+	entity->rq = rq_list[0];
+	entity->sched = rq_list[0]->sched;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;

diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 567f7d46d912..1dceba2b42fd 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
+	struct drm_sched_rq *rq;
 	int i;
 
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 	v3d_priv->v3d = v3d;
 
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		drm_sched_entity_init(&v3d->queue[i].sched,
-				      &v3d_priv->sched_entity[i],
-				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				      NULL);
+		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
 	}
 
 	file->driver_priv = v3d_priv;

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 43e93d6077cf..2205e89722f6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity, long timeout);
-- 
cgit v1.2.3
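A sketch of what the new signature permits; ring_a, ring_b and entity
are hypothetical driver objects. Note that as of this patch the entity
is simply bound to rq_list[0]; accepting a whole list is groundwork for
the load balancing mentioned above:

	struct drm_sched_rq *rq_list[2];
	int r;

	/* Jobs from this entity may eventually run on either queue. */
	rq_list[0] = &ring_a->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	rq_list[1] = &ring_b->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* No sched argument: it is recovered via rq_list[0]->sched. */
	r = drm_sched_entity_init(&entity, rq_list, 2, NULL);
	if (r)
		return r;	/* -EINVAL for an empty or NULL list */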
From 964d0fbf6301d3dc8dfad19ffab5a06d002d27f1 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky
Date: Fri, 6 Jul 2018 14:16:54 -0400
Subject: drm/amdgpu: Allow to create BO lists in CS ioctl v3

This change supports a MESA performance optimization. Modify the CS
IOCTL to accept, alongside the command buffer, an array of buffer
handles from which a temporary BO list is created and then destroyed
when the IOCTL completes. This saves the separate BO_LIST create and
destroy IOCTL calls in MESA and thereby improves performance.

v2: Avoid inserting the temp list into the idr struct.
v3: Remove idr allocation from amdgpu_bo_list_create.
    Remove useless argument from amdgpu_cs_parser_fini.
    Minor cosmetic stuff.
v4: Revert amdgpu_bo_list_destroy back to static.

Signed-off-by: Andrey Grodzovsky
Reviewed-by: Christian König
Reviewed-by: Chunming Zhou
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h         |  8 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 88 +++++++++++++++++------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c      | 48 +++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c     |  3 +-
 include/uapi/drm/amdgpu_drm.h               |  1 +
 5 files changed, 108 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 53435da158c2..c6c1e8dc919f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -732,6 +732,14 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 			     struct list_head *validated);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+				      struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+			  struct drm_file *filp,
+			  struct drm_amdgpu_bo_list_entry *info,
+			  unsigned num_entries,
+			  struct amdgpu_bo_list **list);
 
 /*
  * GFX stuff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 92be7f6de197..7679c068c89a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -55,15 +55,15 @@ static void amdgpu_bo_list_release_rcu(struct kref *ref)
 	kfree_rcu(list, rhead);
 }
 
-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
 				 struct drm_file *filp,
 				 struct drm_amdgpu_bo_list_entry *info,
 				 unsigned num_entries,
-				 int *id)
+				 struct amdgpu_bo_list **list_out)
 {
-	int r;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo_list *list;
+	int r;
+
 	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
 	if (!list)
@@ -78,16 +78,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
 		return r;
 	}
 
-	/* idr alloc should be called only after initialization of bo list. */
-	mutex_lock(&fpriv->bo_list_lock);
-	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
-	mutex_unlock(&fpriv->bo_list_lock);
-	if (r < 0) {
-		amdgpu_bo_list_free(list);
-		return r;
-	}
-	*id = r;
-
+	*list_out = list;
 	return 0;
 }
 
@@ -263,55 +254,79 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
 	kfree(list);
 }
 
-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+				      struct drm_amdgpu_bo_list_entry **info_param)
 {
+	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
 	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
-	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	union drm_amdgpu_bo_list *args = data;
-	uint32_t handle = args->in.list_handle;
-	const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
 	struct drm_amdgpu_bo_list_entry *info;
-	struct amdgpu_bo_list *list;
-
 	int r;
 
-	info = kvmalloc_array(args->in.bo_number,
-			     sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	/* copy the handle array from userspace to a kernel buffer */
 	r = -EFAULT;
-	if (likely(info_size == args->in.bo_info_size)) {
-		unsigned long bytes = args->in.bo_number *
-			args->in.bo_info_size;
+	if (likely(info_size == in->bo_info_size)) {
+		unsigned long bytes = in->bo_number *
+			in->bo_info_size;
 
 		if (copy_from_user(info, uptr, bytes))
 			goto error_free;
 	} else {
-		unsigned long bytes = min(args->in.bo_info_size, info_size);
+		unsigned long bytes = min(in->bo_info_size, info_size);
 		unsigned i;
 
-		memset(info, 0, args->in.bo_number * info_size);
-		for (i = 0; i < args->in.bo_number; ++i) {
+		memset(info, 0, in->bo_number * info_size);
+		for (i = 0; i < in->bo_number; ++i) {
 			if (copy_from_user(&info[i], uptr, bytes))
 				goto error_free;
 
-			uptr += args->in.bo_info_size;
+			uptr += in->bo_info_size;
 		}
 	}
 
+	*info_param = info;
+	return 0;
+
+error_free:
+	kvfree(info);
+	return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	union drm_amdgpu_bo_list *args = data;
+	uint32_t handle = args->in.list_handle;
+	struct drm_amdgpu_bo_list_entry *info = NULL;
+	struct amdgpu_bo_list *list;
+	int r;
+
+	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+	if (r)
+		goto error_free;
+
 	switch (args->in.operation) {
 	case AMDGPU_BO_LIST_OP_CREATE:
 		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
-					  &handle);
+					  &list);
 		if (r)
 			goto error_free;
+
+		mutex_lock(&fpriv->bo_list_lock);
+		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+		mutex_unlock(&fpriv->bo_list_lock);
+		if (r < 0) {
+			amdgpu_bo_list_free(list);
+			return r;
+		}
+
+		handle = r;
 		break;
 
 	case AMDGPU_BO_LIST_OP_DESTROY:
@@ -345,6 +360,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 error_free:
-	kvfree(info);
+	if (info)
+		kvfree(info);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5a2a5ba29f9a..6d8df76b5a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -66,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	return 0;
 }
 
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_bo_list_in *data)
+{
+	int r;
+	struct drm_amdgpu_bo_list_entry *info = NULL;
+
+	r = amdgpu_bo_create_list_entry_array(data, &info);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+				  &p->bo_list);
+	if (r)
+		goto error_free;
+
+	kvfree(info);
+	return 0;
+
+error_free:
+	if (info)
+		kvfree(info);
+
+	return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
 	unsigned size, num_ibs = 0;
@@ -164,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 			break;
 
+		case AMDGPU_CHUNK_ID_BO_HANDLES:
+			size = sizeof(struct drm_amdgpu_bo_list_in);
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+				ret = -EINVAL;
+				goto free_partial_kdata;
+			}
+
+			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
+			break;
+
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
@@ -534,7 +571,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	INIT_LIST_HEAD(&p->validated);
 
-	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+	if (!p->bo_list)
+		p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	else
+		mutex_lock(&p->bo_list->lock);
+
 	if (p->bo_list) {
 		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 		if (p->bo_list->first_userptr != p->bo_list->num_entries)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 06aede194bf8..529500c94675 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,9 +69,10 @@
 * - 3.24.0 - Add high priority compute support for gfx9
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
 */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	26
+#define KMS_DRIVER_MINOR	27
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;

diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 784b0fe470ee..1ceec56de015 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -516,6 +516,7 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
 #define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
 
 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
-- 
cgit v1.2.3
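From userspace, the new chunk lets a submission carry its BO handles
inline instead of referencing a pre-created list. A rough sketch of
building the chunk; only the fields that the kernel code above reads
are filled in, and the handle variables are assumed to come from
earlier GEM allocations:

	struct drm_amdgpu_bo_list_entry entries[2] = {
		{ .bo_handle = handle0, .bo_priority = 0 },
		{ .bo_handle = handle1, .bo_priority = 0 },
	};
	struct drm_amdgpu_bo_list_in list_in = {
		.bo_number    = 2,
		.bo_info_size = sizeof(entries[0]),
		.bo_info_ptr  = (__u64)(uintptr_t)entries,
	};
	struct drm_amdgpu_cs_chunk chunk = {
		.chunk_id   = AMDGPU_CHUNK_ID_BO_HANDLES,
		.length_dw  = sizeof(list_in) / 4,
		.chunk_data = (__u64)(uintptr_t)&list_in,
	};

	/* The chunk is then passed in the chunk array of the CS ioctl;
	 * the kernel builds a temporary BO list from it and releases
	 * the list when the submission completes. */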