Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 33
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3c57e84222ca..71ce6215956f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -287,10 +289,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+ /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
if (job) {
+ /*
+ * Remove the bad job so it cannot be freed by concurrent
+ * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
+ * is parked at which point it's safe.
+ */
+ list_del_init(&job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
job->sched->ops->timedout_job(job);
/*
@@ -301,6 +314,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+ } else {
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
spin_lock_irqsave(&sched->job_list_lock, flags);
@@ -373,6 +388,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
kthread_park(sched->thread);
/*
+ * Reinsert back the bad job here - now it's safe as
+ * drm_sched_get_cleanup_job cannot race against us and release the
+ * bad job at this point - we parked (waited for) any in progress
+ * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+ * now until the scheduler thread is unparked.
+ */
+ if (bad && bad->sched == sched)
+ /*
+ * Add at the head of the queue to reflect it was the earliest
+ * job extracted.
+ */
+ list_add(&bad->node, &sched->ring_mirror_list);
+
+ /*
* Iterate the job list from later to earlier one and either deactivate
* their HW callbacks or remove them from mirror list if they already
* signaled.
@@ -628,7 +657,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->num_jobs);
+ atomic_dec(&sched->score);
trace_drm_sched_process_job(s_fence);
@@ -803,7 +832,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->num_jobs, 0);
+ atomic_set(&sched->score, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
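
Taken together, the hunks above make drm_sched_job_timedout() pull the bad job off the ring_mirror_list under job_list_lock before handing it to the driver's timedout_job() hook, and make drm_sched_stop() reinsert it only after the scheduler thread has been parked, so drm_sched_get_cleanup_job() can no longer free it concurrently. Below is a minimal, condensed sketch of that pattern; the function names timedout_sketch() and stop_sketch() are illustrative only (they are not the kernel functions), it assumes the usual definitions from include/drm/gpu_scheduler.h, and it omits the signaled-job and free_guilty handling shown in the diff.

#include <drm/gpu_scheduler.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Timeout path: detach the bad job before handing it to the driver. */
static void timedout_sketch(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;
	unsigned long flags;

	/* job_list_lock protects against concurrent deletion in drm_sched_get_cleanup_job() */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);
	if (job)
		/* Hide the job from concurrent cleanup so it cannot be freed under us. */
		list_del_init(&job->node);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	if (job)
		job->sched->ops->timedout_job(job);
}

/* Stop path: reinsert the bad job only once the scheduler thread is parked. */
static void stop_sketch(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	kthread_park(sched->thread);

	/*
	 * Safe now: the parked thread cannot run drm_sched_get_cleanup_job(),
	 * so the bad job can go back at the head of the mirror list, where the
	 * earliest extracted job belongs.
	 */
	if (bad && bad->sched == sched)
		list_add(&bad->node, &sched->ring_mirror_list);
}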