author     Christian König    2018-08-08 13:07:11 +0200
committer  Alex Deucher       2018-08-27 11:10:07 -0500
commit     35e160e781a048a9170a9deb3c1f13f06df4add9 (patch)
tree       c58d71a1de518055e2aab516459cb9b62e259d61 /drivers/gpu/drm
parent     573edb241b44162a1478cc74429f94df86e6e71d (diff)
drm/scheduler: change entities rq even earlier
Looks like for correct debugging we need to know the scheduler even earlier. So move picking a rq for an entity into job creation.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
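For context on what this reordering means for drivers, here is a minimal sketch of a submit path, assuming the in-tree API of this era (drm_sched_job_init() followed by drm_sched_entity_push_job()). The my_job/my_submit names are hypothetical and only illustrate that, after this patch, the entity's rq, and therefore the scheduler, is already chosen when drm_sched_job_init() returns:

#include <drm/gpu_scheduler.h>

/* Hypothetical driver job wrapping the scheduler job. */
struct my_job {
	struct drm_sched_job base;
	/* ... driver-specific payload ... */
};

static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
		     void *owner)
{
	int ret;

	/*
	 * With this patch, drm_sched_job_init() first calls
	 * drm_sched_entity_select_rq(), so entity->rq is (re)picked here
	 * and job->base.sched as well as the scheduler recorded in the
	 * job's fence are valid from this point on.
	 */
	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret)
		return ret;

	/* ... fill in command buffers, dependencies, etc. ... */

	/* Push time no longer decides which rq/scheduler is used. */
	drm_sched_entity_push_job(&job->base, entity);
	return 0;
}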
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c  50
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c      2
2 files changed, 33 insertions, 19 deletions
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index f40a504e3d68..f566405f49e3 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -550,6 +550,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
}
/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Check all prerequisites and select a new rq for the entity for load
+ * balancing.
+ */
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+ struct dma_fence *fence;
+ struct drm_sched_rq *rq;
+
+ if (!spsc_queue_count(&entity->job_queue) == 0 ||
+ entity->num_rq_list <= 1)
+ return;
+
+ fence = READ_ONCE(entity->last_scheduled);
+ if (fence && !dma_fence_is_signaled(fence))
+ return;
+
+ rq = drm_sched_entity_get_free_sched(entity);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ entity->rq = rq;
+ spin_unlock(&entity->rq_lock);
+}
+
+/**
* drm_sched_entity_push_job - Submit a job to the entity's job queue
*
* @sched_job: job to submit
@@ -564,25 +592,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity)
{
- struct drm_sched_rq *rq = entity->rq;
bool first;
- first = spsc_queue_count(&entity->job_queue) == 0;
- if (first && (entity->num_rq_list > 1)) {
- struct dma_fence *fence;
-
- fence = READ_ONCE(entity->last_scheduled);
- if (fence == NULL || dma_fence_is_signaled(fence)) {
- rq = drm_sched_entity_get_free_sched(entity);
- spin_lock(&entity->rq_lock);
- drm_sched_rq_remove_entity(entity->rq, entity);
- entity->rq = rq;
- spin_unlock(&entity->rq_lock);
- }
- }
-
- sched_job->sched = entity->rq->sched;
- sched_job->s_fence->sched = entity->rq->sched;
trace_drm_sched_job(sched_job, entity);
atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
@@ -786,7 +797,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct drm_gpu_scheduler *sched;
+
+ drm_sched_entity_select_rq(entity);
+ sched = entity->rq->sched;
job->sched = sched;
job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 20e4da377890..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
- fence->sched = NULL;
+ fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
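A side note on the new early-return check in drm_sched_entity_select_rq(): since logical NOT binds tighter than ==, "!spsc_queue_count(&entity->job_queue) == 0" parses as "(!count) == 0", i.e. "the queue is not empty", which together with the num_rq_list test preserves the old push-time condition (only rebalance when the job queue is empty and more than one rq is available). A small standalone sketch of that equivalence, using hypothetical stand-in names for the kernel helpers:

#include <assert.h>
#include <stdbool.h>

/* queue_count stands in for spsc_queue_count(&entity->job_queue). */
static bool skip_rq_rebalance(unsigned int queue_count, unsigned int num_rq_list)
{
	/* Same shape as the patch: (!queue_count) == 0 means queue_count != 0. */
	return !queue_count == 0 || num_rq_list <= 1;
}

int main(void)
{
	assert(!skip_rq_rebalance(0, 4));	/* empty queue, several rqs: rebalance */
	assert(skip_rq_rebalance(3, 4));	/* jobs still queued: keep current rq */
	assert(skip_rq_rebalance(0, 1));	/* only one rq: nothing to balance */
	return 0;
}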