author     Christian König <christian.koenig@amd.com>   2016-01-18 17:01:42 +0100
committer  Alex Deucher <alexander.deucher@amd.com>     2016-02-10 14:16:57 -0500
commit     94dd0a4ae0b1af997b1f45793e5fd5b47f4ffc18 (patch)
tree       4752f0bba41887463fb60b08b5cd7912626a9dc3 /drivers
parent     8d0a7cea824a2784150ef7f25a1e88f18a2a8f69 (diff)
drm/amdgpu: merge vm_grab_id and vm_fence v2
No need for an extra function any more.
v2: comment cleanups
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  5
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 13
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    | 57
 3 files changed, 30 insertions, 45 deletions
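
For orientation before the full diff, the sketch below condenses the caller-side change from the amdgpu_sched.c hunk. It is an illustrative excerpt, not literal kernel source: local declarations, the surrounding amdgpu_sched_dependency() context and the error print path are trimmed.

/* Before: callers locked the VM manager, grabbed an ID, then registered
 * the protecting fence with a separate amdgpu_vm_fence() call. */
mutex_lock(&adev->vm_manager.lock);
r = amdgpu_vm_grab_id(vm, ring, sync);
if (!r) {
	fence = &job->base.s_fence->base;
	amdgpu_vm_fence(ring->adev, vm, fence);
	job->ibs->grabbed_vmid = true;
}
mutex_unlock(&adev->vm_manager.lock);

/* After: the scheduler fence is handed straight to amdgpu_vm_grab_id(),
 * which now does the locking and the fence bookkeeping itself. */
r = amdgpu_vm_grab_id(vm, ring, sync, &job->base.s_fence->base);
if (!r)
	job->ibs->grabbed_vmid = true;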
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index edfaae439b76..43b48eb6cf6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -956,13 +956,10 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
+		      struct amdgpu_sync *sync, struct fence *fence);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
 		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence);
 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b22a95f0571c..76a1f823d983 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -38,19 +38,14 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 
 	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
 		struct amdgpu_ring *ring = job->ibs->ring;
-		struct amdgpu_device *adev = ring->adev;
 		int r;
 
-		mutex_lock(&adev->vm_manager.lock);
-		r = amdgpu_vm_grab_id(vm, ring, sync);
-		if (r) {
+		r = amdgpu_vm_grab_id(vm, ring, sync,
+				      &job->base.s_fence->base);
+		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		} else {
-			fence = &job->base.s_fence->base;
-			amdgpu_vm_fence(ring->adev, vm, fence);
+		else
 			job->ibs->grabbed_vmid = true;
-		}
-		mutex_unlock(&adev->vm_manager.lock);
 
 		fence = amdgpu_sync_get_fence(sync);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index edbb3ff4e731..d4718e1cd050 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -152,13 +152,14 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
  * Global mutex must be locked!
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync)
+		      struct amdgpu_sync *sync, struct fence *fence)
 {
 	struct fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -167,6 +168,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned choices[2] = {};
 	unsigned i;
 
+	mutex_lock(&adev->vm_manager.lock);
+
 	/* check if the id is still valid */
 	if (vm_id->id) {
 		unsigned id = vm_id->id;
@@ -175,6 +178,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
 		if (owner == (long)vm) {
 			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+			fence_put(adev->vm_manager.ids[id].active);
+			adev->vm_manager.ids[id].active = fence_get(fence);
+			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
@@ -191,6 +197,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			/* found a free one */
 			vm_id->id = i;
 			trace_amdgpu_vm_grab_id(vm, i, ring->idx);
+			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 
@@ -203,19 +210,29 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	for (i = 0; i < 2; ++i) {
-		if (choices[i]) {
-			struct fence *fence;
+		struct fence *active;
+		int r;
 
-			fence = adev->vm_manager.ids[choices[i]].active;
-			vm_id->id = choices[i];
+		if (!choices[i])
+			continue;
 
-			trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
-			return amdgpu_sync_fence(ring->adev, sync, fence);
-		}
+		vm_id->id = choices[i];
+		active = adev->vm_manager.ids[vm_id->id].active;
+		r = amdgpu_sync_fence(ring->adev, sync, active);
+
+		trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
+		atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+
+		fence_put(adev->vm_manager.ids[vm_id->id].active);
+		adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+
+		mutex_unlock(&adev->vm_manager.lock);
+		return r;
 	}
 
 	/* should never happen */
 	BUG();
+	mutex_unlock(&adev->vm_manager.lock);
 	return -EINVAL;
 }
 
@@ -258,30 +275,6 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_vm_fence - remember fence for vm
- *
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence)
-{
-	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
-	unsigned vm_id = vm->ids[ring->idx].id;
-
-	fence_put(adev->vm_manager.ids[vm_id].active);
-	adev->vm_manager.ids[vm_id].active = fence_get(fence);
-	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
-}
-
-/**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
  * @vm: requested vm
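
As a design note, after this patch amdgpu_vm_grab_id() brackets every exit path with the vm_manager lock and remembers the protecting fence itself. The condensed sketch below summarizes the resulting shape of the function; it is an assumed simplification of the hunks above, not literal kernel source, and the ID-selection logic plus the id/r locals it would declare are elided.

int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->vm_manager.lock);

	/* ... reuse the VM's current ID, or pick a free/least recently
	 * used one, adding the previous owner's fence to @sync ... */

	/* formerly amdgpu_vm_fence(): remember @fence as the fence that
	 * protects the chosen ID from being handed out again too early */
	fence_put(adev->vm_manager.ids[id].active);
	adev->vm_manager.ids[id].active = fence_get(fence);
	atomic_long_set(&adev->vm_manager.ids[id].owner, (long)vm);

	mutex_unlock(&adev->vm_manager.lock);
	return r;
}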