author     Christian König <christian.koenig@amd.com>   2015-08-10 14:04:12 +0200
committer  Alex Deucher <alexander.deucher@amd.com>      2015-08-17 16:51:15 -0400
commit     05caae8515e12073f4a3beb048e0d289cbe687b7 (patch)
tree       896e795c1e0d267ed7fef6d2f7cbf206d7d2c87c /drivers/gpu/drm
parent     2983e5cef33b48dede6c023230cc0b54d6f67362 (diff)
drm/amdgpu: remove amd_sched_wait_emit v2
Not used any more.

v2: remove amd_sched_emit as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
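With the emitted-sequence wait removed, job completion is tracked through the fence that amdgpu_sched_run_job() already returns (note the unchanged "return &fence->base;" in the first hunk below). As a rough, hypothetical sketch only — example_wait_for_job() and its parameters are not part of this patch — a caller of that era's scheduler would wait on the returned fence via the kernel fence API instead of polling a per-entity sequence counter:

/*
 * Hypothetical sketch (not from this patch): wait on the fence handed
 * back by the scheduler instead of calling the removed
 * amd_sched_wait_emit().  Mirrors that function's intr/timeout
 * semantics using the linux/fence.h helpers available in 2015.
 */
#include <linux/fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int example_wait_for_job(struct fence *job_fence, bool intr,
				long timeout_ms)
{
	long r;

	if (timeout_ms < 0) {
		/* Unbounded wait: returns 0 on signal, -ERESTARTSYS if interrupted. */
		return fence_wait(job_fence, intr);
	}

	/* Bounded wait: 0 means timeout, <0 an error, >0 jiffies remaining. */
	r = fence_wait_timeout(job_fence, intr, msecs_to_jiffies(timeout_ms));
	if (r == 0)
		return -ETIMEDOUT;
	return r < 0 ? r : 0;
}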
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c       2
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  61
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  10
3 files changed, 0 insertions, 73 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 6a7e83edcaa7..d2e5f3b90a3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -77,8 +77,6 @@ static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
goto err;
}
- amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
-
mutex_unlock(&sched_job->job_lock);
return &fence->base;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 402086d96889..90abefed86cc 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -202,7 +202,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return -EINVAL;
spin_lock_init(&entity->queue_lock);
- atomic64_set(&entity->last_emitted_v_seq, seq_ring);
atomic64_set(&entity->last_queued_v_seq, seq_ring);
atomic64_set(&entity->last_signaled_v_seq, seq_ring);
@@ -329,53 +328,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
return 0;
}
-/**
- * Wait for a virtual sequence number to be emitted.
- *
- * @c_entity The pointer to a valid context entity
- * @seq The virtual sequence number to wait
- * @intr Interruptible or not
- * @timeout Timeout in ms, wait infinitely if <0
- * @emit wait for emit or signal
- *
- * return =0 signaled , <0 failed
-*/
-int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
- uint64_t seq,
- bool intr,
- long timeout)
-{
- atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
- wait_queue_head_t *wait_queue = &c_entity->wait_emit;
-
- if (intr && (timeout < 0)) {
- wait_event_interruptible(
- *wait_queue,
- seq <= atomic64_read(v_seq));
- return 0;
- } else if (intr && (timeout >= 0)) {
- wait_event_interruptible_timeout(
- *wait_queue,
- seq <= atomic64_read(v_seq),
- msecs_to_jiffies(timeout));
- return (seq <= atomic64_read(v_seq)) ?
- 0 : -1;
- } else if (!intr && (timeout < 0)) {
- wait_event(
- *wait_queue,
- seq <= atomic64_read(v_seq));
- return 0;
- } else if (!intr && (timeout >= 0)) {
- wait_event_timeout(
- *wait_queue,
- seq <= atomic64_read(v_seq),
- msecs_to_jiffies(timeout));
- return (seq <= atomic64_read(v_seq)) ?
- 0 : -1;
- }
- return 0;
-}
-
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
struct amd_sched_job *sched_job =
@@ -511,19 +463,6 @@ int amd_sched_destroy(struct amd_gpu_scheduler *sched)
}
/**
- * Update emitted sequence and wake up the waiters, called by run_job
- * in driver side
- *
- * @entity The context entity
- * @seq The sequence number for the latest emitted job
-*/
-void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq)
-{
- atomic64_set(&c_entity->last_emitted_v_seq, seq);
- wake_up_all(&c_entity->wait_emit);
-}
-
-/**
* Get next queued sequence number
*
* @entity The context entity
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 300132f14d74..aa942033d4b3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -44,7 +44,6 @@ struct amd_sched_entity {
spinlock_t lock;
/* the virtual_seq is unique per context per ring */
atomic64_t last_queued_v_seq;
- atomic64_t last_emitted_v_seq;
atomic64_t last_signaled_v_seq;
/* the job_queue maintains the jobs submitted by clients */
struct kfifo job_queue;
@@ -154,13 +153,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
void *data,
struct amd_sched_fence **fence);
-int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
- uint64_t seq,
- bool intr,
- long timeout);
-
-uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);
-
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
struct amd_run_queue *rq,
@@ -168,8 +160,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
-void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq);
-
uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
struct amd_sched_fence *amd_sched_fence_create(