Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  138
1 file changed, 88 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 545302d0955f..52a5e4fdc95b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -52,6 +52,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.bo = &bo->tbo;
/* One for TTM and one for the CS job */
p->uf_entry.tv.num_shared = 2;
+ p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
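
The added p->uf_entry.user_pages = NULL gives the user-fence entry a well-defined value: the cleanup paths introduced later in this patch free user_pages only when it is non-NULL, and that test is only safe if the field never holds stale garbage. A minimal standalone sketch of the pattern, with illustrative names rather than the kernel helpers:

#include <errno.h>
#include <stdlib.h>

struct entry {
        void *user_pages;               /* NULL until pages are pinned */
};

static int use_entry(struct entry *e, int fail_early)
{
        e->user_pages = NULL;           /* mirrors p->uf_entry.user_pages = NULL */

        if (fail_early)
                goto out;               /* cleanup can run before any allocation */

        e->user_pages = malloc(64);
        if (!e->user_pages)
                return -ENOMEM;
out:
        free(e->user_pages);            /* free(NULL) is a defined no-op */
        e->user_pages = NULL;
        return 0;
}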
@@ -539,14 +540,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (usermm && usermm != current->mm)
return -EPERM;
- if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
- lobj->user_invalidated && lobj->user_pages) {
+ /* Check if we have user pages and nobody bound the BO already */
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
amdgpu_bo_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
-
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
binding_userptr = true;
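
The reworked check above re-binds a userptr BO in two steps: validating into the CPU domain drops the stale GPU binding, amdgpu_ttm_tt_set_user_pages() installs the freshly pinned array, and the later validation into the BO's real domain binds it again. A toy model of that unbind/install/re-bind ordering, in plain C with illustrative types rather than the TTM API:

#include <stdbool.h>

struct userptr_bo {
        bool bound;              /* does the GPU mapping match pages[]? */
        void **pages;            /* page array currently backing the BO */
};

static void rebind_userptr(struct userptr_bo *bo, void **fresh_pages)
{
        bo->bound = false;       /* ~ ttm_bo_validate() into AMDGPU_GEM_DOMAIN_CPU */
        bo->pages = fresh_pages; /* ~ amdgpu_ttm_tt_set_user_pages()               */
        bo->bound = true;        /* ~ the later validation re-binds the BO         */
}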
@@ -577,6 +578,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
+ unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
@@ -612,45 +614,79 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
list_add(&p->uf_entry.tv.head, &p->validated);
- /* Get userptr backing pages. If pages are updated after registered
- * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
- * amdgpu_ttm_backend_bind() to flush and invalidate new pages
- */
- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- bool userpage_invalidated = false;
- int i;
-
- e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!e->user_pages) {
- DRM_ERROR("calloc failure\n");
- return -ENOMEM;
+ while (1) {
+ struct list_head need_pages;
+
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+ &duplicates);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+ goto error_free_pages;
}
- r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
- return r;
+ INIT_LIST_HEAD(&need_pages);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
+ &e->user_invalidated) && e->user_pages) {
+
+ /* We acquired a page array, but somebody
+ * invalidated it. Free it and try again
+ */
+ release_pages(e->user_pages,
+ bo->tbo.ttm->num_pages);
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
+ }
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ !e->user_pages) {
+ list_del(&e->tv.head);
+ list_add(&e->tv.head, &need_pages);
+
+ amdgpu_bo_unreserve(bo);
+ }
+ }
+
+ if (list_empty(&need_pages))
+ break;
+
+ /* Unreserve everything again. */
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+ /* We tried too many times, just abort */
+ if (!--tries) {
+ r = -EDEADLK;
+ DRM_ERROR("deadlock in %s\n", __func__);
+ goto error_free_pages;
}
- for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
- userpage_invalidated = true;
- break;
+ /* Fill the page arrays for all userptrs. */
+ list_for_each_entry(e, &need_pages, tv.head) {
+ struct ttm_tt *ttm = e->tv.bo->ttm;
+
+ e->user_pages = kvmalloc_array(ttm->num_pages,
+ sizeof(struct page*),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!e->user_pages) {
+ r = -ENOMEM;
+ DRM_ERROR("calloc failure in %s\n", __func__);
+ goto error_free_pages;
+ }
+
+ r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
+ if (r) {
+ DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
+ goto error_free_pages;
}
}
- e->user_invalidated = userpage_invalidated;
- }
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
- goto out;
+ /* And try again. */
+ list_splice(&need_pages, &p->validated);
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
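
The while (1) loop above replaces the single up-front page-array snapshot with a bounded retry scheme: reserve every buffer, find userptr entries whose pages were invalidated or never pinned, back all reservations off again, pin the missing pages with no locks held, and retry, giving up with -EDEADLK after ten attempts. A condensed standalone model of that control flow, using illustrative data structures rather than the TTM/amdgpu API:

#include <errno.h>
#include <stdbool.h>

#define MAX_TRIES 10

struct bo_entry {
        bool is_userptr;
        bool pages_valid;       /* do we hold a current, pinned page array? */
};

static int reserve_and_pin(struct bo_entry *e, unsigned int count)
{
        unsigned int tries = MAX_TRIES;
        unsigned int i, need;

        while (1) {
                /* ~ ttm_eu_reserve_buffers(): take all reservations first */
                need = 0;
                for (i = 0; i < count; i++)
                        if (e[i].is_userptr && !e[i].pages_valid)
                                need++;         /* ~ moving the entry onto need_pages */

                if (!need)
                        return 0;               /* everything reserved with valid pages */

                /* ~ ttm_eu_backoff_reservation(): drop every lock before
                 * pinning pages, which can sleep and fault in memory.
                 */
                if (!--tries)
                        return -EDEADLK;        /* invalidated too often, give up */

                for (i = 0; i < count; i++)
                        if (e[i].is_userptr && !e[i].pages_valid)
                                e[i].pages_valid = true; /* ~ amdgpu_ttm_tt_get_user_pages() */
        }
}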
@@ -719,7 +755,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
error_validate:
if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-out:
+
+error_free_pages:
+
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ if (!e->user_pages)
+ continue;
+
+ release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
+ kvfree(e->user_pages);
+ }
+
return r;
}
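
The common error_free_pages path above releases two separate resources for every entry that reached the pinning step: release_pages() drops the reference held on each page, and kvfree() frees the array that was only bookkeeping for those pointers. A small sketch of that pairing, with an illustrative refcount type standing in for struct page:

#include <stdlib.h>

struct page_ref {
        int refcount;
};

static void put_page_ref(struct page_ref *p)
{
        p->refcount--;          /* ~ dropping the reference taken when pinning */
}

/* ~ the release_pages() + kvfree() pair in the error path above */
static void release_user_pages(struct page_ref **pages, unsigned long npages)
{
        unsigned long i;

        if (!pages)
                return;         /* entry never got a page array */

        for (i = 0; i < npages; i++)
                put_page_ref(pages[i]);
        free(pages);
}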
@@ -1178,6 +1224,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
+
int r;
job = p->job;
@@ -1187,23 +1234,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
- /* No memory allocation is allowed while holding the mn lock.
- * p->mn is hold until amdgpu_cs_submit is finished and fence is added
- * to BOs.
- */
+ /* No memory allocation is allowed while holding the mn lock */
amdgpu_mn_lock(p->mn);
-
- /* If userptr are invalidated after amdgpu_cs_parser_bos(), return
- * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
- */
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
- }
- if (r) {
- r = -EAGAIN;
- goto error_abort;
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ r = -ERESTARTSYS;
+ goto error_abort;
+ }
}
job->owner = p->filp;
@@ -1299,7 +1338,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-
return r;
}
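
One behavioural note on the submit-path hunk: the removed comment relied on returning -EAGAIN so that libdrm's drmIoctl() would silently reissue the CS ioctl, whereas the restored check returns -ERESTARTSYS, which the kernel's signal handling normally turns into a restarted ioctl or an EINTR visible to userspace. From the caller's side the retry loop looks roughly like the sketch below (userspace C; drm_ioctl_restart is an illustrative name, not a libdrm symbol):

#include <errno.h>
#include <sys/ioctl.h>

static int drm_ioctl_restart(int fd, unsigned long request, void *arg)
{
        int ret;

        do {
                ret = ioctl(fd, request, arg);
                /* EINTR: the kernel asked for the call to be restarted
                 * (e.g. after -ERESTARTSYS); EAGAIN: the driver wants
                 * the whole submission retried. */
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

        return ret;
}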