From 78efe21b6f8e6f4d39fceaf0cc5c534c11f9dd60 Mon Sep 17 00:00:00 2001
From: Boris Brezillon
Date: Wed, 30 Jun 2021 08:27:37 +0200
Subject: drm/sched: Allow using a dedicated workqueue for the timeout/fault tdr

Mali Midgard/Bifrost GPUs have 3 hardware queues but only a global GPU
reset. This leads to extra complexity when we need to synchronize timeout
works with the reset work. One solution to address that is to have an
ordered workqueue at the driver level that will be used by the different
schedulers to queue their timeout work. Thanks to the serialization
provided by the ordered workqueue we are guaranteed that timeout handlers
are executed sequentially, and can thus easily reset the GPU from the
timeout handler without extra synchronization.

v5:
* Add a new paragraph to the timedout_job() method

v3:
* New patch

v4:
* Actually use the timeout_wq to queue the timeout work

Suggested-by: Daniel Vetter
Signed-off-by: Boris Brezillon
Reviewed-by: Steven Price
Reviewed-by: Lucas Stach
Acked-by: Daniel Vetter
Acked-by: Christian König
Cc: Qiang Yu
Cc: Emma Anholt
Cc: Alex Deucher
Cc: "Christian König"
Link: https://patchwork.freedesktop.org/patch/msgid/20210630062751.2832545-3-boris.brezillon@collabora.com
---
 drivers/gpu/drm/scheduler/sched_main.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 60125fbe7bb5..67382621b429 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -232,7 +232,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !list_empty(&sched->pending_list))
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 }
 
 /**
@@ -244,7 +244,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
  */
 void drm_sched_fault(struct drm_gpu_scheduler *sched)
 {
-	mod_delayed_work(system_wq, &sched->work_tdr, 0);
+	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
 }
 EXPORT_SYMBOL(drm_sched_fault);
 
@@ -270,7 +270,7 @@ unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
 	 * Modify the timeout to an arbitrarily large value. This also prevents
 	 * the timeout to be restarted when new submissions arrive
 	 */
-	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
 			&& time_after(sched_timeout, now))
 		return sched_timeout - now;
 	else
@@ -294,7 +294,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 	if (list_empty(&sched->pending_list))
 		cancel_delayed_work(&sched->work_tdr);
 	else
-		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
 
 	spin_unlock(&sched->job_list_lock);
 }
@@ -847,6 +847,8 @@ static int drm_sched_main(void *param)
  * @hw_submission: number of hw submissions that can be in flight
  * @hang_limit: number of times to allow a job to hang before dropping it
  * @timeout: timeout value in jiffies for the scheduler
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
+ *		used
  * @score: optional score atomic shared with other schedulers
  * @name: name used for debugging
  *
@@ -854,7 +856,8 @@ static int drm_sched_main(void *param)
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
-		   unsigned hw_submission, unsigned hang_limit, long timeout,
+		   unsigned hw_submission, unsigned hang_limit,
+		   long timeout, struct workqueue_struct *timeout_wq,
 		   atomic_t *score, const char *name)
 {
 	int i, ret;
@@ -862,6 +865,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
 	sched->timeout = timeout;
+	sched->timeout_wq = timeout_wq ? : system_wq;
 	sched->hang_limit = hang_limit;
 	sched->score = score ? score : &sched->_score;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
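For illustration only (not part of the patch): a minimal driver-side sketch of how the new timeout_wq parameter could be used, assuming a hypothetical driver with three schedulers. The structure, function, queue names, and the submission/timeout values below (my_device, reset_wq, my_device_init_schedulers, "my-gpu-reset", 64, 500 ms) are invented for the example; only drm_sched_init(), drm_sched_fini(), and the ordered-workqueue pattern come from the patch and existing kernel APIs.

/*
 * Hypothetical usage sketch: share one ordered workqueue between all
 * schedulers so their timeout handlers never run concurrently, allowing a
 * full GPU reset from the handler without extra synchronization.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <drm/gpu_scheduler.h>

struct my_device {
	struct workqueue_struct *reset_wq;	/* ordered, shared by all queues */
	struct drm_gpu_scheduler sched[3];	/* one scheduler per HW queue */
};

static int my_device_init_schedulers(struct my_device *mdev,
				     const struct drm_sched_backend_ops *ops)
{
	int i, ret;

	/* Ordered workqueue: at most one timeout work executes at a time. */
	mdev->reset_wq = alloc_ordered_workqueue("my-gpu-reset", 0);
	if (!mdev->reset_wq)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(mdev->sched); i++) {
		ret = drm_sched_init(&mdev->sched[i], ops,
				     64,			/* hw_submission */
				     0,				/* hang_limit */
				     msecs_to_jiffies(500),	/* timeout */
				     mdev->reset_wq,		/* timeout_wq */
				     NULL,			/* score */
				     "my-queue");
		if (ret)
			goto err_fini;
	}

	return 0;

err_fini:
	while (i--)
		drm_sched_fini(&mdev->sched[i]);
	destroy_workqueue(mdev->reset_wq);
	return ret;
}

Passing NULL as timeout_wq keeps the previous behaviour, since drm_sched_init() falls back to system_wq.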