author     Christian König    2019-04-18 11:00:21 -0400
committer  Alex Deucher       2019-05-02 15:45:48 -0500
commit     5918045c4ed492fb5813f980dcf89a90fefd0a4e (patch)
tree       a59c6f9bb006645b8fc46164d7fb53cb38b24294 /drivers/gpu/drm/scheduler
parent     b3198c38f02d54a5e964258a2180d502abe6eaf0 (diff)
drm/scheduler: rework job destruction
We now destroy finished jobs from the worker thread to make sure that
we never destroy a job that is currently in timeout processing. By this
we avoid having to hold the lock around the ring mirror list in
drm_sched_stop, which should solve a deadlock reported by a user.

v2: Remove unused variable.
v4: Move guilty job free into sched code.
v5: Move sched->hw_rq_count to drm_sched_start to account for the counter
    decrement in drm_sched_stop even when we don't call resubmit jobs if
    the guilty job did signal.
v6: Remove unused variable.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109692
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1555599624-12285-3-git-send-email-andrey.grodzovsky@amd.com
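For illustration only: with this rework, a driver's timeout/reset path is
expected to park the scheduler via drm_sched_stop() (which now takes the bad
job), optionally raise its karma, resubmit the remaining jobs, and restart the
scheduler. The sketch below shows that sequence under those assumptions; the
foo_gpu_recover() name and the reset placeholder are hypothetical and not part
of this patch:

    #include <drm/gpu_scheduler.h>

    /* Hypothetical driver-side recovery flow built on the reworked helpers. */
    static void foo_gpu_recover(struct drm_gpu_scheduler *sched,
                                struct drm_sched_job *bad)
    {
            /*
             * Park the scheduler thread, detach the HW fence callbacks and
             * let the scheduler free already signaled jobs; 'bad' is kept
             * alive and, if it already left the mirror list, must be freed
             * by the caller afterwards.
             */
            drm_sched_stop(sched, bad);

            if (bad)
                    drm_sched_increase_karma(bad);

            /* ... driver-specific HW reset would go here ... */

            /* Requeue the jobs that were still pending on the hardware. */
            drm_sched_resubmit_jobs(sched);

            /* Unpark the scheduler thread and re-arm timeout handling. */
            drm_sched_start(sched, true);
    }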
Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  159
1 file changed, 94 insertions(+), 65 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 19fc601c9eeb..7816de7e8c82 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -265,32 +265,6 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
-/* job_finish is called after hw fence signaled
- */
-static void drm_sched_job_finish(struct work_struct *work)
-{
- struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
- finish_work);
- struct drm_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
-
- /*
- * Canceling the timeout without removing our job from the ring mirror
- * list is safe, as we will only end up in this worker if our jobs
- * finished fence has been signaled. So even if some another worker
- * manages to find this job as the next job in the list, the fence
- * signaled check below will prevent the timeout to be restarted.
- */
- cancel_delayed_work_sync(&sched->work_tdr);
-
- spin_lock_irqsave(&sched->job_list_lock, flags);
- /* queue TDR for next job */
- drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(s_job);
-}
-
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
@@ -315,6 +289,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job)
job->sched->ops->timedout_job(job);
+ /*
+ * Guilty job did complete and hence needs to be manually removed
+ * See drm_sched_stop doc.
+ */
+ if (list_empty(&job->node))
+ job->sched->ops->free_job(job);
+
spin_lock_irqsave(&sched->job_list_lock, flags);
drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
@@ -371,23 +352,26 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
* @sched: scheduler instance
* @bad: bad scheduler job
*
+ * Stop the scheduler, and also remove and free all completed jobs.
+ * Note: the bad job will not be freed as it might be used later, so it
+ * is the caller's responsibility to release it manually if it is no
+ * longer part of the mirror list.
+ *
*/
-void drm_sched_stop(struct drm_gpu_scheduler *sched)
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
- struct drm_sched_job *s_job;
+ struct drm_sched_job *s_job, *tmp;
unsigned long flags;
- struct dma_fence *last_fence = NULL;
kthread_park(sched->thread);
/*
- * Verify all the signaled jobs in mirror list are removed from the ring
- * by waiting for the latest job to enter the list. This should insure that
- * also all the previous jobs that were in flight also already singaled
- * and removed from the list.
+ * Iterate the job list from later to earlier one and either deactivate
+ * their HW callbacks or remove them from the mirror list if they have
+ * already signaled.
+ * This iteration is thread safe as sched thread is stopped.
*/
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->cb)) {
@@ -395,16 +379,30 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched)
s_job->s_fence->parent = NULL;
atomic_dec(&sched->hw_rq_count);
} else {
- last_fence = dma_fence_get(&s_job->s_fence->finished);
- break;
+ /*
+ * remove job from ring_mirror_list.
+ * Locking here is for concurrent resume timeout
+ */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_del_init(&s_job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ /*
+ * Wait for job's HW fence callback to finish using s_job
+ * before releasing it.
+ *
+ * The job is still alive, so the fence refcount is at least 1.
+ */
+ dma_fence_wait(&s_job->s_fence->finished, false);
+
+ /*
+ * We must keep bad job alive for later use during
+ * recovery by some of the drivers
+ */
+ if (bad != s_job)
+ sched->ops->free_job(s_job);
}
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- if (last_fence) {
- dma_fence_wait(last_fence, false);
- dma_fence_put(last_fence);
- }
}
EXPORT_SYMBOL(drm_sched_stop);
@@ -418,21 +416,22 @@ EXPORT_SYMBOL(drm_sched_stop);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
struct drm_sched_job *s_job, *tmp;
+ unsigned long flags;
int r;
- if (!full_recovery)
- goto unpark;
-
/*
* Locking the list is not required here as the sched thread is parked
- * so no new jobs are being pushed in to HW and in drm_sched_stop we
- * flushed all the jobs who were still in mirror list but who already
- * signaled and removed them self from the list. Also concurrent
+ * so no new jobs are being inserted or removed. Also concurrent
* GPU recovers can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct dma_fence *fence = s_job->s_fence->parent;
+ atomic_inc(&sched->hw_rq_count);
+
+ if (!full_recovery)
+ continue;
+
if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
drm_sched_process_job);
@@ -445,9 +444,12 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
drm_sched_process_job(NULL, &s_job->cb);
}
- drm_sched_start_timeout(sched);
+ if (full_recovery) {
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ }
-unpark:
kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
@@ -464,7 +466,6 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
uint64_t guilty_context;
bool found_guilty = false;
- /*TODO DO we need spinlock here ? */
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -477,7 +478,6 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
s_job->s_fence->parent = sched->ops->run_job(s_job);
- atomic_inc(&sched->hw_rq_count);
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -514,7 +514,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_WORK(&job->finish_work, drm_sched_job_finish);
INIT_LIST_HEAD(&job->node);
return 0;
@@ -597,24 +596,53 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
struct drm_sched_fence *s_fence = s_job->s_fence;
struct drm_gpu_scheduler *sched = s_fence->sched;
- unsigned long flags;
-
- cancel_delayed_work(&sched->work_tdr);
atomic_dec(&sched->hw_rq_count);
atomic_dec(&sched->num_jobs);
- spin_lock_irqsave(&sched->job_list_lock, flags);
- /* remove job from ring_mirror_list */
- list_del_init(&s_job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ trace_drm_sched_process_job(s_fence);
drm_sched_fence_finished(s_fence);
-
- trace_drm_sched_process_job(s_fence);
wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_cleanup_jobs - destroy finished jobs
+ *
+ * @sched: scheduler instance
+ *
+ * Remove all finished jobs from the mirror list and destroy them.
+ */
+static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+{
+ unsigned long flags;
+
+ /* Don't destroy jobs while the timeout worker is running */
+ if (!cancel_delayed_work(&sched->work_tdr))
+ return;
+
+
+ while (!list_empty(&sched->ring_mirror_list)) {
+ struct drm_sched_job *job;
+
+ job = list_first_entry(&sched->ring_mirror_list,
+ struct drm_sched_job, node);
+ if (!dma_fence_is_signaled(&job->s_fence->finished))
+ break;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ /* remove job from ring_mirror_list */
+ list_del_init(&job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ sched->ops->free_job(job);
+ }
+
+ /* queue timeout for next job */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
- schedule_work(&s_job->finish_work);
}
/**
@@ -656,9 +684,10 @@ static int drm_sched_main(void *param)
struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
+ (drm_sched_cleanup_jobs(sched),
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop());
+ kthread_should_stop()));
if (!entity)
continue;
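
A note on the last hunk: wait_event_interruptible() re-evaluates its condition
on every wakeup, and the comma operator makes drm_sched_cleanup_jobs() run as a
side effect each time before the actual wake-up test, so finished jobs are now
reaped from the scheduler thread itself. A small self-contained sketch of that
comma-operator pattern, with hypothetical names unrelated to the kernel:

    #include <stdbool.h>
    #include <stdio.h>

    static int passes;

    /* Stand-ins for the cleanup side effect and the real wake-up tests. */
    static void cleanup_finished(void) { printf("cleanup pass %d\n", ++passes); }
    static bool work_available(void)   { return passes >= 3; }
    static bool should_stop(void)      { return false; }

    int main(void)
    {
            /*
             * The left operand of the comma runs only for its side effect;
             * the right operand is what the loop actually tests, mirroring
             * the condition handed to wait_event_interruptible().
             */
            while (!(cleanup_finished(), work_available() || should_stop()))
                    ;
            return 0;
    }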