author    Christian König <christian.koenig@amd.com>  2018-10-08 13:30:12 +0200
committer Alex Deucher <alexander.deucher@amd.com>    2018-10-12 12:52:32 -0500
commit    0efd2d2f68cd5dbddf4ecd974c33133257d16a8e (patch)
tree      c62d03c24fb81048755ca69a92ba4f0d912dfa81 /drivers/gpu/drm/scheduler/sched_main.c
parent    b981c86f03068ca63391bb54ac4686e7ac419fa3 (diff)
drm/sched: fix timeout handling v2
We need to make sure that we don't race between job completion and timeout.

v2: put revert label after calling the handling manually

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
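The idea of the change can be modeled with a minimal standalone userspace sketch. All names here are hypothetical: struct job, detach_cb() and attach_cb() stand in for the scheduler's mirror-list entries and for dma_fence_remove_callback()/dma_fence_add_callback(), and a pthread mutex stands in for job_list_lock. Completion callbacks are detached under the lock before the driver's timeout handler runs, so a completing job cannot race with it, and they are re-attached afterwards. A failed detach means some job already finished, so the timeout is treated as spurious and only the callbacks removed so far are restored; a failed re-attach means the fence signaled while the handler ran, so that job is completed manually.

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

#define NJOBS 3

struct job {
	bool signaled;     /* parent fence has signaled */
	bool cb_attached;  /* completion callback registered */
};

static pthread_mutex_t job_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for dma_fence_remove_callback(): fails once the fence signaled. */
static bool detach_cb(struct job *j)
{
	if (j->signaled)
		return false;
	j->cb_attached = false;
	return true;
}

/* Stand-in for dma_fence_add_callback(): nonzero once the fence signaled. */
static int attach_cb(struct job *j)
{
	if (j->signaled)
		return -1;
	j->cb_attached = true;
	return 0;
}

static void job_timedout(struct job *jobs)
{
	int i;
	int restart = 0;

	pthread_mutex_lock(&job_list_lock);
	/* Walk newest to oldest, detaching every completion callback so the
	 * timeout handler cannot race with jobs finishing underneath it. */
	for (i = NJOBS - 1; i >= 0; i--) {
		if (!detach_cb(&jobs[i])) {
			/* jobs[i] completed concurrently: not a real hang.
			 * Skip the handler and restore only the callbacks
			 * detached so far (the newer jobs). */
			restart = i + 1;
			goto already_signaled;
		}
	}
	pthread_mutex_unlock(&job_list_lock);

	printf("running driver timeout handler for jobs[0]\n");

	pthread_mutex_lock(&job_list_lock);
already_signaled:
	/* Re-attach callbacks; a failure here means the fence signaled
	 * while the handler ran, so complete that job manually. */
	for (i = restart; i < NJOBS; i++) {
		if (jobs[i].cb_attached)
			continue;
		if (attach_cb(&jobs[i]))
			printf("jobs[%d] signaled meanwhile, completing manually\n", i);
	}
	pthread_mutex_unlock(&job_list_lock);
}

int main(void)
{
	struct job jobs[NJOBS] = {
		{ .cb_attached = true },
		{ .cb_attached = true },
		{ .cb_attached = true },
	};

	job_timedout(jobs);
	return 0;
}

Build with, e.g., "cc -pthread sketch.c". Note how the goto keeps the lock held across the abort path; this mirrors the patch, where the revert label inside the second loop is reached with job_list_lock still held.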
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bd7d11c47202..44fe587aaef9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -249,13 +249,41 @@ static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
+	int r;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *fence = job->s_fence;
+
+		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
+			goto already_signaled;
+	}
+
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
+	spin_unlock(&sched->job_list_lock);
 
 	if (job)
-		job->sched->ops->timedout_job(job);
+		sched->ops->timedout_job(job);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry(job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *fence = job->s_fence;
+
+		if (!fence->parent || !list_empty(&fence->cb.node))
+			continue;
+
+		r = dma_fence_add_callback(fence->parent, &fence->cb,
+					   drm_sched_process_job);
+		if (r)
+			drm_sched_process_job(fence->parent, &fence->cb);
+
+already_signaled:
+		;
+	}
+	spin_unlock(&sched->job_list_lock);
 }
 
 /**