-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c    |  9
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c    |  5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c |  4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c    |  5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c    |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c    |  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c       | 91
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h       | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c     |  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c     |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c    |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c      | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c    | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c       |  5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c       |  5
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c       | 13

16 files changed, 114 insertions(+), 97 deletions(-)
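
In short, the series below converts the nouveau buffer-object helpers (nouveau_bo_new(), nouveau_bo_init(), nouveau_bo_alloc(), nouveau_bo_pin() and nouveau_bo_placement_set()) from taking TTM placement flags to taking the driver's own NOUVEAU_GEM_DOMAIN_* bits; only set_placement_list() in nouveau_bo.c still translates those bits into TTM_PL_FLAG_* values. A minimal sketch of what the change means for a caller, lifted from the pin calls in the hunks below (illustration only, not an additional hunk of the patch):

	/* Before: callers passed TTM placement flags straight through. */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);

	/* After: callers speak in GEM domains; the TTM placement flags are
	 * now an internal detail of set_placement_list().
	 */
	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
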
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6416b6907aeb..f9e962fd94d0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -615,7 +615,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int ret;
 
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 	if (ret == 0) {
 		if (disp->image[nv_crtc->index])
 			nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1172,7 +1172,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		return -ENOMEM;
 
 	if (new_bo != old_bo) {
-		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
+		ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true);
 		if (ret)
 			goto fail_free;
 	}
@@ -1336,10 +1336,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
-			     TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
 			     &nv_crtc->cursor.nvbo);
 	if (!ret) {
-		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+				     NOUVEAU_GEM_DOMAIN_VRAM, false);
 		if (!ret) {
 			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 900ab69df7e8..41d990cca685 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -112,7 +112,7 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
 		if (!fb || !fb->obj[0])
 			continue;
 		nvbo = nouveau_gem_object(fb->obj[0]);
-		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+		ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
 		if (ret)
 			NV_ERROR(drm, "Could not pin framebuffer\n");
 	}
@@ -122,7 +122,8 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
 		if (!nv_crtc->cursor.nvbo)
 			continue;
 
-		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+				     NOUVEAU_GEM_DOMAIN_VRAM, true);
 		if (!ret && nv_crtc->cursor.set_offset)
 			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
 		if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 193ba9498f3d..37e63e98cd08 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -142,7 +142,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		return ret;
 
 	nvbo = nouveau_gem_object(fb->obj[0]);
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 	if (ret)
 		return ret;
 
@@ -387,7 +387,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		return ret;
 
 	nvbo = nouveau_gem_object(fb->obj[0]);
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e7874877da85..9dfb577f148a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2581,10 +2581,11 @@ nv50_display_create(struct drm_device *dev)
 	dev->mode_config.normalize_zpos = true;
 
 	/* small shared memory area we use for notifiers and semaphores */
-	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+			     NOUVEAU_GEM_DOMAIN_VRAM,
 			     0, 0x0000, NULL, NULL, &disp->sync);
 	if (!ret) {
-		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
+		ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
 		if (!ret) {
 			ret = nouveau_bo_map(disp->sync);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 447ecc9fec42..0356474ad6f6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -542,7 +542,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 		return 0;
 
 	nvbo = nouveau_gem_object(fb->obj[0]);
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 21537ca1dd39..9a5be6f32424 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -328,7 +328,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
 			      0, 0, &chan->ntfy);
 	if (ret == 0)
-		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
+		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
+				     false);
 	if (ret)
 		goto done;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a9ad27d86a3c..ddabefaab7cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -159,8 +159,7 @@ roundup_64(u64 x, u32 y)
 }
 
 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct nvif_device *device = &drm->client.device;
@@ -193,7 +192,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 }
 
 struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		 u32 tile_mode, u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
@@ -219,7 +218,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
 	 * into in nouveau_gem_new().
 	 */
-	if (flags & TTM_PL_FLAG_UNCACHED) {
+	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
 		/* Determine if we can get a cache-coherent map, forcing
 		 * uncached mapping if we can't.
 		 */
@@ -259,9 +258,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
 		 * Skip page sizes that can't support needed domains.
 		 */
 		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
 			continue;
-		if ((flags & TTM_PL_FLAG_TT) &&
+		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
 		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
 			continue;
 
@@ -288,13 +287,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
 	}
 	nvbo->page = vmm->page[pi].shift;
 
-	nouveau_bo_fixup_align(nvbo, flags, align, size);
+	nouveau_bo_fixup_align(nvbo, align, size);
 
 	return nvbo;
 }
 
 int
-nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
 		struct sg_table *sg, struct dma_resv *robj)
 {
 	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
@@ -304,7 +303,7 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
 
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
-	nouveau_bo_placement_set(nvbo, flags, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
 	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
 	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
@@ -320,19 +319,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 
 int
 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
-	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
 	       struct sg_table *sg, struct dma_resv *robj,
 	       struct nouveau_bo **pnvbo)
 {
 	struct nouveau_bo *nvbo;
 	int ret;
 
-	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
 
-	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
 	if (ret)
 		return ret;
 
@@ -341,27 +340,28 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 }
 
 static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain,
+		   uint32_t flags)
 {
 	*n = 0;
 
-	if (type & TTM_PL_FLAG_VRAM)
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
-	if (type & TTM_PL_FLAG_TT)
+	if (domain & NOUVEAU_GEM_DOMAIN_GART)
 		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
-	if (type & TTM_PL_FLAG_SYSTEM)
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU)
 		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
 }
 
 static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -388,7 +388,8 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 }
 
 void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+			 uint32_t busy)
 {
 	struct ttm_placement *pl = &nvbo->placement;
 	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
@@ -397,17 +398,17 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 
 	pl->placement = nvbo->placements;
 	set_placement_list(nvbo->placements, &pl->num_placement,
-			   type, flags);
+			   domain, flags);
 
 	pl->busy_placement = nvbo->busy_placements;
 	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   type | busy, flags);
+			   domain | busy, flags);
 
-	set_placement_range(nvbo, type);
+	set_placement_range(nvbo, domain);
 }
 
 int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
@@ -419,7 +420,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 		return ret;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
-	    memtype == TTM_PL_FLAG_VRAM && contig) {
+	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
 		if (!nvbo->contig) {
 			nvbo->contig = true;
 			force = true;
@@ -428,10 +429,22 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 	}
 
 	if (nvbo->pin_refcnt) {
-		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+		bool error = evict;
+
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+			break;
+		case TTM_PL_TT:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+		default:
+			break;
+		}
+
+		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				      1 << bo->mem.mem_type, memtype);
+				      bo->mem.mem_type, domain);
 			ret = -EBUSY;
 		}
 		nvbo->pin_refcnt++;
@@ -439,14 +452,14 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 	}
 
 	if (evict) {
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
 		ret = nouveau_bo_validate(nvbo, false, false);
 		if (ret)
 			goto out;
 	}
 
 	nvbo->pin_refcnt++;
-	nouveau_bo_placement_set(nvbo, memtype, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
 
 	/* drop pin_refcnt temporarily, so we don't trip the assertion
 	 * in nouveau_bo_move() that makes sure we're not trying to
@@ -492,7 +505,16 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	if (ref)
 		goto out;
 
-	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+		break;
+	case TTM_PL_TT:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+		break;
+	default:
+		break;
+	}
 
 	ret = nouveau_bo_validate(nvbo, false, false);
 	if (ret == 0) {
@@ -702,11 +724,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 NOUVEAU_GEM_DOMAIN_CPU);
 		break;
 	default:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
 		break;
 	}
 
@@ -1214,7 +1236,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+						 0);
 
 			ret = nouveau_bo_validate(nvbo, false, false);
 			if (ret)
@@ -1238,7 +1261,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 		nvbo->busy_placements[i].lpfn = mappable;
 	}
 
-	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, false);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ae90aca33fef..2a23c8207436 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -77,10 +77,10 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 
 void nouveau_bo_move_init(struct nouveau_drm *);
 struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
-				    u32 flags, u32 tile_mode, u32 tile_flags);
-int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+				    u32 domain, u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
 		    struct sg_table *sg, struct dma_resv *robj);
-int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
+int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
 		   u32 tile_mode, u32 tile_flags, struct sg_table *sg,
 		   struct dma_resv *robj, struct nouveau_bo **);
 int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
@@ -122,13 +122,13 @@ nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
 }
 
 static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
 		       struct nouveau_bo **pnvbo)
 {
-	int ret = nouveau_bo_new(cli, size, align, flags,
+	int ret = nouveau_bo_new(cli, size, align, domain,
 				 0, 0, NULL, NULL, pnvbo);
 	if (ret == 0) {
-		ret = nouveau_bo_pin(*pnvbo, flags, true);
+		ret = nouveau_bo_pin(*pnvbo, domain, true);
 		if (ret == 0) {
 			ret = nouveau_bo_map(*pnvbo);
 			if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index b80e4ebf14a6..8f099601d2f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -163,9 +163,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	atomic_set(&chan->killed, 0);
 
 	/* allocate memory for dma push buffer */
-	target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+	target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
 	if (nouveau_vram_pushbuf)
-		target = TTM_PL_FLAG_VRAM;
+		target = NOUVEAU_GEM_DOMAIN_VRAM;
 
 	ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
 			     &chan->push.buffer);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 4e8112fde3e6..af70ef8e5471 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -254,12 +254,12 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 	chunk->pagemap.owner = drm->dev;
 
 	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
-			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
 			     &chunk->bo);
 	if (ret)
 		goto out_release;
 
-	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 	if (ret)
 		goto out_bo_free;
 
@@ -346,7 +346,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
 
 	mutex_lock(&drm->dmem->mutex);
 	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
-		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 		/* FIXME handle pin failure */
 		WARN_ON(ret);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3159a2172db5..24ec5339efb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	if (ret)
 		goto out_unref;
 
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 	if (ret) {
 		NV_ERROR(drm, "failed to pin fb: %d\n", ret);
 		goto out_unref;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 81f111ad3f4f..89adadf4706b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -176,20 +176,12 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
-	u32 flags = 0;
 	int ret;
 
-	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
-		flags |= TTM_PL_FLAG_VRAM;
-	if (domain & NOUVEAU_GEM_DOMAIN_GART)
-		flags |= TTM_PL_FLAG_TT;
-	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
-		flags |= TTM_PL_FLAG_SYSTEM;
+	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
+		domain |= NOUVEAU_GEM_DOMAIN_CPU;
 
-	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
-		flags |= TTM_PL_FLAG_UNCACHED;
-
-	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
@@ -202,7 +194,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 		return ret;
 	}
 
-	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		return ret;
@@ -296,32 +288,28 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	uint32_t domains = valid_domains & nvbo->valid_domains &
 		(write_domains ? write_domains : read_domains);
-	uint32_t pref_flags = 0, valid_flags = 0;
+	uint32_t pref_domains = 0;;
 
 	if (!domains)
 		return -EINVAL;
 
-	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
-		valid_flags |= TTM_PL_FLAG_VRAM;
-
-	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
-		valid_flags |= TTM_PL_FLAG_TT;
+	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 
 	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 	    bo->mem.mem_type == TTM_PL_VRAM)
-		pref_flags |= TTM_PL_FLAG_VRAM;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
 	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
 		 bo->mem.mem_type == TTM_PL_TT)
-		pref_flags |= TTM_PL_FLAG_TT;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
 	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
-		pref_flags |= TTM_PL_FLAG_VRAM;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
 	else
-		pref_flags |= TTM_PL_FLAG_TT;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
-	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
+	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 7766b810653f..b2ecb91f8ddc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -64,14 +64,12 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	struct nouveau_bo *nvbo;
 	struct dma_resv *robj = attach->dmabuf->resv;
 	u64 size = attach->dmabuf->size;
-	u32 flags = 0;
 	int align = 0;
 	int ret;
 
-	flags = TTM_PL_FLAG_TT;
-
 	dma_resv_lock(robj, NULL);
-	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
+	nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
+				NOUVEAU_GEM_DOMAIN_GART, 0, 0);
 	if (IS_ERR(nvbo)) {
 		obj = ERR_CAST(nvbo);
 		goto unlock;
@@ -88,7 +86,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 		goto unlock;
 	}
 
-	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+	ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
+			      sg, robj);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		obj = ERR_PTR(ret);
@@ -108,7 +107,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 	int ret;
 
 	/* pin buffer into GTT */
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
 	if (ret)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6b697ee6bc0e..1253fdec712d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -130,10 +130,11 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.context_del = nv10_fence_context_del;
 	spin_lock_init(&priv->lock);
 
-	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+			     NOUVEAU_GEM_DOMAIN_VRAM,
 			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
-		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+		ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 		if (!ret) {
 			ret = nouveau_bo_map(priv->bo);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 49b46f51073c..447238e3cbe7 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -81,10 +81,11 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.context_del = nv10_fence_context_del;
 	spin_lock_init(&priv->lock);
 
-	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+			     NOUVEAU_GEM_DOMAIN_VRAM,
 			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
-		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+		ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 		if (!ret) {
 			ret = nouveau_bo_map(priv->bo);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 7ed36b3a6b7d..7c9c928c3196 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -209,12 +209,13 @@ nv84_fence_create(struct nouveau_drm *drm)
 	mutex_init(&priv->mutex);
 
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
-	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
-			 /*
-			  * fences created in sysmem must be non-cached or we
-			  * will lose CPU/GPU coherency!
-			  */
-			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+	domain = drm->client.device.info.ram_size != 0 ?
+		NOUVEAU_GEM_DOMAIN_VRAM :
+		/*
+		 * fences created in sysmem must be non-cached or we
+		 * will lose CPU/GPU coherency!
+		 */
+		NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
 	ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
 			     domain, 0, 0, NULL, NULL, &priv->bo);
 	if (ret == 0) {
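
For the pin/unpin paths in nouveau_bo.c above, the direction of the new checks is mem_type -> domain: the TTM memory type the buffer currently occupies is mapped back onto a NOUVEAU_GEM_DOMAIN_* bit and compared with (or substituted for) the caller's requested domain. A condensed sketch of that mapping, using a hypothetical helper name that is not part of the patch:

	/* Hypothetical helper, condensed from the nouveau_bo_pin() and
	 * nouveau_bo_unpin() hunks above: map the bo's current TTM memory
	 * type back to the matching NOUVEAU_GEM_DOMAIN_* bit.  Memory types
	 * the patch does not special-case (e.g. TTM_PL_SYSTEM) yield 0.
	 */
	static uint32_t
	mem_type_to_domain(unsigned int mem_type)
	{
		switch (mem_type) {
		case TTM_PL_VRAM:
			return NOUVEAU_GEM_DOMAIN_VRAM;
		case TTM_PL_TT:
			return NOUVEAU_GEM_DOMAIN_GART;
		default:
			return 0;
		}
	}

	/* nouveau_bo_pin() then reports -EBUSY when the bo is already pinned
	 * in a placement the caller did not ask for, i.e. when
	 * !(domain & mem_type_to_domain(bo->mem.mem_type)).
	 */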