author		Ben Skeggs <bskeggs@redhat.com>		2015-08-20 14:54:14 +1000
committer	Ben Skeggs <bskeggs@redhat.com>		2015-08-28 12:40:28 +1000
commit		142ea05f49b9517929f8b27ee800160e7ebf3a02 (patch)
tree		ab36039aa5c9c593772243dcdddfd22470cfffb1 /drivers/gpu/drm/nouveau/nvkm/engine
parent		5444e770e3991ddb5a9583d622fc18bbf414b551 (diff)
drm/nouveau/gr: switch to gpuobj accessor macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
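
The conversion is mechanical throughout: reads and writes that previously went through the old nv_ro32()/nv_wo32() helpers now use the nvkm_ro32()/nvkm_wo32() accessor macros, with each run of accesses bracketed by nvkm_kmap()/nvkm_done() on the gpuobj being touched. A minimal before/after sketch of that shape, lifted from the gf100 context setup in the diff below (illustrative only, not a standalone compilable unit):

	/* before: raw accessors on the object, no explicit map/unmap bracket */
	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));

	/* after: map the gpuobj, write through the nvkm_* macros, then release it */
	nvkm_kmap(chan);
	nvkm_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
	nvkm_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
	nvkm_done(chan);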
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 37
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c  | 10
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h  | 11
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c  | 16
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 47
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c     | 45
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c     | 84
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c     | 92
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c     | 74
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c     | 88
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c     | 86
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c     | 86
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c     | 16
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c     | 10
14 files changed, 385 insertions, 317 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 7854baff3374..f36e0896ae9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1289,27 +1289,28 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	}
 	/* PGD pointer */
-	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0208, 0xffffffff);
-	nv_wo32(chan, 0x020c, 0x000000ff);
+	nvkm_kmap(chan);
+	nvkm_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
+	nvkm_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
+	nvkm_wo32(chan, 0x0208, 0xffffffff);
+	nvkm_wo32(chan, 0x020c, 0x000000ff);
 	/* PGT[0] pointer */
-	nv_wo32(chan, 0x1000, 0x00000000);
-	nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+	nvkm_wo32(chan, 0x1000, 0x00000000);
+	nvkm_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
 	/* identity-map the whole "channel" into its own vm */
 	for (i = 0; i < chan->size / 4096; i++) {
 		u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
-		nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
-		nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+		nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+		nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
 	}
 	/* context pointer (virt) */
-	nv_wo32(chan, 0x0210, 0x00080004);
-	nv_wo32(chan, 0x0214, 0x00000000);
-
+	nvkm_wo32(chan, 0x0210, 0x00080004);
+	nvkm_wo32(chan, 0x0214, 0x00000000);
 	bar->flush(bar);
+	nvkm_done(chan);
 	nvkm_wr32(device, 0x100cb8, (chan->addr + 0x1000) >> 8);
 	nvkm_wr32(device, 0x100cbc, 0x80000001);
@@ -1335,11 +1336,13 @@ gf100_grctx_generate(struct gf100_gr *gr)
 				break;
 		);
-		nv_wo32(chan, 0x8001c, 1);
-		nv_wo32(chan, 0x80020, 0);
-		nv_wo32(chan, 0x80028, 0);
-		nv_wo32(chan, 0x8002c, 0);
+		nvkm_kmap(chan);
+		nvkm_wo32(chan, 0x8001c, 1);
+		nvkm_wo32(chan, 0x80020, 0);
+		nvkm_wo32(chan, 0x80028, 0);
+		nvkm_wo32(chan, 0x8002c, 0);
 		bar->flush(bar);
+		nvkm_done(chan);
 	} else {
 		nvkm_wr32(device, 0x409840, 0x80000000);
 		nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
@@ -1367,8 +1370,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	gr->data = kmalloc(gr->size, GFP_KERNEL);
 	if (gr->data) {
+		nvkm_kmap(chan);
 		for (i = 0; i < gr->size; i += 4)
-			gr->data[i / 4] = nv_ro32(chan, 0x80000 + i);
+			gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
+		nvkm_done(chan);
 		ret = 0;
 	} else {
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index 0c717084b44f..8ecdc94e3538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -580,16 +580,18 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
 	if (ctx->mode != NVKM_GRCTX_VALS)
 		return;
+	nvkm_kmap(obj);
 	offset += 0x0280/4;
 	for (i = 0; i < 16; i++, offset += 2)
-		nv_wo32(obj, offset * 4, 0x3f800000);
+		nvkm_wo32(obj, offset * 4, 0x3f800000);
 	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
 		for (i = 0; i < vs_nr_b0 * 6; i += 6)
-			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+			nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
 		for (i = 0; i < vs_nr_b1 * 4; i += 4)
-			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+			nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
 	}
+	nvkm_done(obj);
 }
 static void
@@ -674,7 +676,7 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 256,
 	};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 8a89961956af..6170b21b50cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -9,7 +9,8 @@ struct nvkm_grctx {
 		NVKM_GRCTX_PROG,
 		NVKM_GRCTX_VALS
 	} mode;
-	void *data;
+	u32 *ucode;
+	struct nvkm_gpuobj *data;
 	u32 ctxprog_max;
 	u32 ctxprog_len;
@@ -22,7 +23,7 @@ struct nvkm_grctx {
 static inline void
 cp_out(struct nvkm_grctx *ctx, u32 inst)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 	if (ctx->mode != NVKM_GRCTX_PROG)
 		return;
@@ -56,7 +57,7 @@ cp_ctx(struct nvkm_grctx *ctx, u32 reg, u32 length)
 static inline void
 cp_name(struct nvkm_grctx *ctx, int name)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 	int i;
 	if (ctx->mode != NVKM_GRCTX_PROG)
@@ -124,6 +125,8 @@ gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
-	nv_wo32(ctx->data, reg * 4, val);
+	nvkm_kmap(ctx->data);
+	nvkm_wo32(ctx->data, reg * 4, val);
+	nvkm_done(ctx->data);
 }
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index e76bf4a217dc..69561ee4e115 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -268,7 +268,7 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 512,
 	};
@@ -783,9 +783,12 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
 static void
 dd_emit(struct nvkm_grctx *ctx, int num, u32 val)
 {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
+		nvkm_kmap(ctx->data);
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+		nvkm_done(ctx->data);
+	}
 	ctx->ctxvals_pos += num;
 }
@@ -1155,9 +1158,12 @@ nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx)
 static void
 xf_emit(struct nvkm_grctx *ctx, int num, u32 val)
 {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
+		nvkm_kmap(ctx->data);
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+		nvkm_done(ctx->data);
+	}
 	ctx->ctxvals_pos += num << 3;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index a27daf986ed7..a7141e08930c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -283,6 +283,7 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	struct gf100_gr_data *data = gr->mmio_data;
 	struct gf100_gr_mmio *mmio = gr->mmio_list;
 	struct gf100_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	/* allocate memory for context, and fill with default values */
@@ -324,6 +325,7 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	}
 	/* finally, fill in the mmio list and point the context at it */
+	nvkm_kmap(chan->mmio);
 	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
 		u32 addr = mmio->addr;
 		u32 data = mmio->data;
@@ -333,28 +335,32 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			data |= info >> mmio->shift;
 		}
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
 		mmio++;
 	}
+	nvkm_done(chan->mmio);
+	image = &chan->base.base.gpuobj;
+	nvkm_kmap(image);
 	for (i = 0; i < gr->size; i += 4)
-		nv_wo32(chan, i, gr->data[i / 4]);
+		nvkm_wo32(image, i, gr->data[i / 4]);
 	if (!gr->firmware) {
-		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
+		nvkm_wo32(image, 0x00, chan->mmio_nr / 2);
+		nvkm_wo32(image, 0x04, chan->mmio_vma.offset >> 8);
 	} else {
-		nv_wo32(chan, 0xf4, 0);
-		nv_wo32(chan, 0xf8, 0);
-		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x1c, 1);
-		nv_wo32(chan, 0x20, 0);
-		nv_wo32(chan, 0x28, 0);
-		nv_wo32(chan, 0x2c, 0);
+		nvkm_wo32(image, 0xf4, 0);
+		nvkm_wo32(image, 0xf8, 0);
+		nvkm_wo32(image, 0x10, chan->mmio_nr / 2);
+		nvkm_wo32(image, 0x14, lower_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(image, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(image, 0x1c, 1);
+		nvkm_wo32(image, 0x20, 0);
+		nvkm_wo32(image, 0x28, 0);
+		nvkm_wo32(image, 0x2c, 0);
 	}
+	nvkm_done(image);
 	return 0;
 }
@@ -1679,10 +1685,15 @@ gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
-	for (i = 0; i < 0x1000; i += 4) {
-		nv_wo32(gr->unk4188b4, i, 0x00000010);
-		nv_wo32(gr->unk4188b8, i, 0x00000010);
-	}
+	nvkm_kmap(gr->unk4188b4);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b4, i, 0x00000010);
+	nvkm_done(gr->unk4188b4);
+
+	nvkm_kmap(gr->unk4188b8);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b8, i, 0x00000010);
+	nvkm_done(gr->unk4188b8);
 	gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
 	gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 7295a915949e..323f020166da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -443,36 +443,42 @@ nv04_gr(struct nv04_gr_chan *chan)
  */
 static void
-nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_object *obj, u32 mask, u32 value)
 {
-	struct nv04_gr *gr = (void *)object->engine;
+	struct nvkm_gpuobj *object = container_of(obj, typeof(*object), object);
+	struct nv04_gr *gr = (void *)object->object.engine;
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
 	u32 tmp;
-	tmp = nv_ro32(object, 0x00);
+	nvkm_kmap(object);
+	tmp = nvkm_ro32(object, 0x00);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x00, tmp);
+	nvkm_wo32(object, 0x00, tmp);
+	nvkm_done(object);
 	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
 	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
 }
 static void
-nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_object *obj, u32 mask, u32 value)
 {
+	struct nvkm_gpuobj *object = container_of(obj, typeof(*object), object);
 	int class, op, valid = 1;
 	u32 tmp, ctx1;
-	ctx1 = nv_ro32(object, 0x00);
+	nvkm_kmap(object);
+	ctx1 = nvkm_ro32(object, 0x00);
 	class = ctx1 & 0xff;
 	op = (ctx1 >> 15) & 7;
-	tmp = nv_ro32(object, 0x0c);
+	tmp = nvkm_ro32(object, 0x0c);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x0c, tmp);
+	nvkm_wo32(object, 0x0c, tmp);
+	nvkm_done(object);
 	/* check for valid surf2d/surf_dst/surf_color */
 	if (!(tmp & 0x02000000))
@@ -504,23 +510,24 @@ nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
 		break;
 	}
-	nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
+	nv04_gr_set_ctx1(obj, 0x01000000, valid << 24);
 }
 static int
-nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
+nv04_gr_mthd_set_operation(struct nvkm_object *obj, u32 mthd,
 			   void *args, u32 size)
 {
-	u32 class = nv_ro32(object, 0) & 0xff;
+	struct nvkm_gpuobj *object = container_of(obj, typeof(*object), object);
+	u32 class = nvkm_ro32(object, 0) & 0xff;
 	u32 data = *(u32 *)args;
 	if (data > 5)
 		return 1;
 	/* Old versions of the objects only accept first three operations. */
 	if (data > 2 && class < 0x40)
 		return 1;
-	nv04_gr_set_ctx1(object, 0x00038000, data << 15);
+	nv04_gr_set_ctx1(obj, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
-	nv04_gr_set_ctx_val(object, 0, 0);
+	nv04_gr_set_ctx_val(obj, 0, 0);
 	return 0;
 }
@@ -963,13 +970,15 @@ nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
-	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nvkm_kmap(obj);
+	nvkm_wo32(obj, 0x00, nv_mclass(obj));
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+	nvkm_mo32(obj, 0x00, 0x00080000, 0x00080000);
 #endif
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
+	nvkm_wo32(obj, 0x04, 0x00000000);
+	nvkm_wo32(obj, 0x08, 0x00000000);
+	nvkm_wo32(obj, 0x0c, 0x00000000);
+	nvkm_done(obj);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index dab64540e69f..f4b8eaced1b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -41,6 +41,7 @@ nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_object **pobject)
 {
 	struct nv20_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
@@ -50,51 +51,54 @@ nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	image = &chan->base.base.gpuobj;
+
+	nvkm_kmap(image);
+	nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(image, 0x033c, 0xffff0000);
+	nvkm_wo32(image, 0x03a0, 0x0fff0000);
+	nvkm_wo32(image, 0x03a4, 0x0fff0000);
+	nvkm_wo32(image, 0x047c, 0x00000101);
+	nvkm_wo32(image, 0x0490, 0x00000111);
+	nvkm_wo32(image, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(image, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(image, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(image, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(image, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(image, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(image, i, 0x07ff0000);
+	nvkm_wo32(image, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(image, 0x05fc, 0x00000001);
+	nvkm_wo32(image, 0x0604, 0x00004000);
+	nvkm_wo32(image, 0x0610, 0x00000001);
+	nvkm_wo32(image, 0x0618, 0x00040000);
+	nvkm_wo32(image, 0x061c, 0x00010000);
 	for (i = 0x1c1c; i <= 0x248c; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(image, (i + 0), 0x10700ff9);
+		nvkm_wo32(image, (i + 4), 0x0436086c);
+		nvkm_wo32(image, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x281c, 0x3f800000);
-	nv_wo32(chan, 0x2830, 0x3f800000);
-	nv_wo32(chan, 0x285c, 0x40000000);
-	nv_wo32(chan, 0x2860, 0x3f800000);
-	nv_wo32(chan, 0x2864, 0x3f000000);
-	nv_wo32(chan, 0x286c, 0x40000000);
-	nv_wo32(chan, 0x2870, 0x3f800000);
-	nv_wo32(chan, 0x2878, 0xbf800000);
-	nv_wo32(chan, 0x2880, 0xbf800000);
-	nv_wo32(chan, 0x34a4, 0x000fe000);
-	nv_wo32(chan, 0x3530, 0x000003f8);
-	nv_wo32(chan, 0x3540, 0x002fe000);
+	nvkm_wo32(image, 0x281c, 0x3f800000);
+	nvkm_wo32(image, 0x2830, 0x3f800000);
+	nvkm_wo32(image, 0x285c, 0x40000000);
+	nvkm_wo32(image, 0x2860, 0x3f800000);
+	nvkm_wo32(image, 0x2864, 0x3f000000);
+	nvkm_wo32(image, 0x286c, 0x40000000);
+	nvkm_wo32(image, 0x2870, 0x3f800000);
+	nvkm_wo32(image, 0x2878, 0xbf800000);
+	nvkm_wo32(image, 0x2880, 0xbf800000);
+	nvkm_wo32(image, 0x34a4, 0x000fe000);
+	nvkm_wo32(image, 0x3530, 0x000003f8);
+	nvkm_wo32(image, 0x3540, 0x002fe000);
 	for (i = 0x355c; i <= 0x3578; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(image, i, 0x001c527c);
+	nvkm_done(image);
 	return 0;
 }
@@ -109,7 +113,9 @@ nv20_gr_context_init(struct nvkm_object *object)
 	if (ret)
 		return ret;
-	nv_wo32(gr->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+	nvkm_done(gr->ctxtab);
 	return 0;
 }
@@ -136,7 +142,9 @@ nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
 	}
 	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
-	nv_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+	nvkm_done(gr->ctxtab);
 	return nvkm_gr_context_fini(&chan->base, suspend);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 50bad48a3eab..1a186bd93f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -37,6 +37,7 @@ nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_object **pobject)
 {
 	struct nv20_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
@@ -46,60 +47,63 @@ nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	chan->chid = nvkm_fifo_chan(parent)->chid;
+	image = &chan->base.base.gpuobj;
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x035c, 0xffff0000);
-	nv_wo32(chan, 0x03c0, 0x0fff0000);
-	nv_wo32(chan, 0x03c4, 0x0fff0000);
-	nv_wo32(chan, 0x049c, 0x00000101);
-	nv_wo32(chan, 0x04b0, 0x00000111);
-	nv_wo32(chan, 0x04c8, 0x00000080);
-	nv_wo32(chan, 0x04cc, 0xffff0000);
-	nv_wo32(chan, 0x04d0, 0x00000001);
-	nv_wo32(chan, 0x04e4, 0x44400000);
-	nv_wo32(chan, 0x04fc, 0x4b800000);
+	nvkm_kmap(image);
+	nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(image, 0x035c, 0xffff0000);
+	nvkm_wo32(image, 0x03c0, 0x0fff0000);
+	nvkm_wo32(image, 0x03c4, 0x0fff0000);
+	nvkm_wo32(image, 0x049c, 0x00000101);
+	nvkm_wo32(image, 0x04b0, 0x00000111);
+	nvkm_wo32(image, 0x04c8, 0x00000080);
+	nvkm_wo32(image, 0x04cc, 0xffff0000);
+	nvkm_wo32(image, 0x04d0, 0x00000001);
+	nvkm_wo32(image, 0x04e4, 0x44400000);
+	nvkm_wo32(image, 0x04fc, 0x4b800000);
 	for (i = 0x0510; i <= 0x051c; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(image, i, 0x00030303);
 	for (i = 0x0530; i <= 0x053c; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(image, i, 0x00080000);
 	for (i = 0x0548; i <= 0x0554; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(image, i, 0x01012000);
 	for (i = 0x0558; i <= 0x0564; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(image, i, 0x000105b8);
 	for (i = 0x0568; i <= 0x0574; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(image, i, 0x00080008);
 	for (i = 0x0598; i <= 0x05d4; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05e0, 0x4b7fffff);
-	nv_wo32(chan, 0x0620, 0x00000080);
-	nv_wo32(chan, 0x0624, 0x30201000);
-	nv_wo32(chan, 0x0628, 0x70605040);
-	nv_wo32(chan, 0x062c, 0xb0a09080);
-	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
-	nv_wo32(chan, 0x0664, 0x00000001);
-	nv_wo32(chan, 0x066c, 0x00004000);
-	nv_wo32(chan, 0x0678, 0x00000001);
-	nv_wo32(chan, 0x0680, 0x00040000);
-	nv_wo32(chan, 0x0684, 0x00010000);
+		nvkm_wo32(image, i, 0x07ff0000);
+	nvkm_wo32(image, 0x05e0, 0x4b7fffff);
+	nvkm_wo32(image, 0x0620, 0x00000080);
+	nvkm_wo32(image, 0x0624, 0x30201000);
+	nvkm_wo32(image, 0x0628, 0x70605040);
+	nvkm_wo32(image, 0x062c, 0xb0a09080);
+	nvkm_wo32(image, 0x0630, 0xf0e0d0c0);
+	nvkm_wo32(image, 0x0664, 0x00000001);
+	nvkm_wo32(image, 0x066c, 0x00004000);
+	nvkm_wo32(image, 0x0678, 0x00000001);
+	nvkm_wo32(image, 0x0680, 0x00040000);
+	nvkm_wo32(image, 0x0684, 0x00010000);
 	for (i = 0x1b04; i <= 0x2374; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(image, (i + 0), 0x10700ff9);
+		nvkm_wo32(image, (i + 4), 0x0436086c);
+		nvkm_wo32(image, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x2704, 0x3f800000);
-	nv_wo32(chan, 0x2718, 0x3f800000);
-	nv_wo32(chan, 0x2744, 0x40000000);
-	nv_wo32(chan, 0x2748, 0x3f800000);
-	nv_wo32(chan, 0x274c, 0x3f000000);
-	nv_wo32(chan, 0x2754, 0x40000000);
-	nv_wo32(chan, 0x2758, 0x3f800000);
-	nv_wo32(chan, 0x2760, 0xbf800000);
-	nv_wo32(chan, 0x2768, 0xbf800000);
-	nv_wo32(chan, 0x308c, 0x000fe000);
-	nv_wo32(chan, 0x3108, 0x000003f8);
-	nv_wo32(chan, 0x3468, 0x002fe000);
+	nvkm_wo32(image, 0x2704, 0x3f800000);
+	nvkm_wo32(image, 0x2718, 0x3f800000);
+	nvkm_wo32(image, 0x2744, 0x40000000);
+	nvkm_wo32(image, 0x2748, 0x3f800000);
+	nvkm_wo32(image, 0x274c, 0x3f000000);
+	nvkm_wo32(image, 0x2754, 0x40000000);
+	nvkm_wo32(image, 0x2758, 0x3f800000);
+	nvkm_wo32(image, 0x2760, 0xbf800000);
+	nvkm_wo32(image, 0x2768, 0xbf800000);
+	nvkm_wo32(image, 0x308c, 0x000fe000);
+	nvkm_wo32(image, 0x3108, 0x000003f8);
+	nvkm_wo32(image, 0x3468, 0x002fe000);
 	for (i = 0x3484; i <= 0x34a0; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(image, i, 0x001c527c);
+	nvkm_done(image);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 5c8ae50ee8e7..dfb62dc6b3b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -13,6 +13,7 @@ nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_object **pobject)
 {
 	struct nv20_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
@@ -22,51 +23,54 @@ nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	chan->chid = nvkm_fifo_chan(parent)->chid;
+	image = &chan->base.base.gpuobj;
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	nvkm_kmap(image);
+	nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(image, 0x033c, 0xffff0000);
+	nvkm_wo32(image, 0x03a0, 0x0fff0000);
+	nvkm_wo32(image, 0x03a4, 0x0fff0000);
+	nvkm_wo32(image, 0x047c, 0x00000101);
+	nvkm_wo32(image, 0x0490, 0x00000111);
+	nvkm_wo32(image, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(image, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(image, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(image, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(image, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(image, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(image, i, 0x07ff0000);
+	nvkm_wo32(image, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(image, 0x05fc, 0x00000001);
+	nvkm_wo32(image, 0x0604, 0x00004000);
+	nvkm_wo32(image, 0x0610, 0x00000001);
+	nvkm_wo32(image, 0x0618, 0x00040000);
+	nvkm_wo32(image, 0x061c, 0x00010000);
 	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(image, (i + 0), 0x10700ff9);
+		nvkm_wo32(image, (i + 4), 0x0436086c);
+		nvkm_wo32(image, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x269c, 0x3f800000);
-	nv_wo32(chan, 0x26b0, 0x3f800000);
-	nv_wo32(chan, 0x26dc, 0x40000000);
-	nv_wo32(chan, 0x26e0, 0x3f800000);
-	nv_wo32(chan, 0x26e4, 0x3f000000);
-	nv_wo32(chan, 0x26ec, 0x40000000);
-	nv_wo32(chan, 0x26f0, 0x3f800000);
-	nv_wo32(chan, 0x26f8, 0xbf800000);
-	nv_wo32(chan, 0x2700, 0xbf800000);
-	nv_wo32(chan, 0x3024, 0x000fe000);
-	nv_wo32(chan, 0x30a0, 0x000003f8);
-	nv_wo32(chan, 0x33fc, 0x002fe000);
+	nvkm_wo32(image, 0x269c, 0x3f800000);
+	nvkm_wo32(image, 0x26b0, 0x3f800000);
+	nvkm_wo32(image, 0x26dc, 0x40000000);
+	nvkm_wo32(image, 0x26e0, 0x3f800000);
+	nvkm_wo32(image, 0x26e4, 0x3f000000);
+	nvkm_wo32(image, 0x26ec, 0x40000000);
+	nvkm_wo32(image, 0x26f0, 0x3f800000);
+	nvkm_wo32(image, 0x26f8, 0xbf800000);
+	nvkm_wo32(image, 0x2700, 0xbf800000);
+	nvkm_wo32(image, 0x3024, 0x000fe000);
+	nvkm_wo32(image, 0x30a0, 0x000003f8);
+	nvkm_wo32(image, 0x33fc, 0x002fe000);
 	for (i = 0x341c; i <= 0x3438; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(image, i, 0x001c527c);
+	nvkm_done(image);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 8be77b4f15ad..51573736bb48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -40,6 +40,7 @@ nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_object **pobject)
 {
 	struct nv20_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
@@ -49,59 +50,62 @@ nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x0410, 0x00000101);
-	nv_wo32(chan, 0x0424, 0x00000111);
-	nv_wo32(chan, 0x0428, 0x00000060);
-	nv_wo32(chan, 0x0444, 0x00000080);
-	nv_wo32(chan, 0x0448, 0xffff0000);
-	nv_wo32(chan, 0x044c, 0x00000001);
-	nv_wo32(chan, 0x0460, 0x44400000);
-	nv_wo32(chan, 0x048c, 0xffff0000);
+	image = &chan->base.base.gpuobj;
+
+	nvkm_kmap(image);
+	nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(image, 0x0410, 0x00000101);
+	nvkm_wo32(image, 0x0424, 0x00000111);
+	nvkm_wo32(image, 0x0428, 0x00000060);
+	nvkm_wo32(image, 0x0444, 0x00000080);
+	nvkm_wo32(image, 0x0448, 0xffff0000);
+	nvkm_wo32(image, 0x044c, 0x00000001);
+	nvkm_wo32(image, 0x0460, 0x44400000);
+	nvkm_wo32(image, 0x048c, 0xffff0000);
 	for (i = 0x04e0; i < 0x04e8; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04ec, 0x00011100);
+		nvkm_wo32(image, i, 0x0fff0000);
+	nvkm_wo32(image, 0x04ec, 0x00011100);
 	for (i = 0x0508; i < 0x0548; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0550, 0x4b7fffff);
-	nv_wo32(chan, 0x058c, 0x00000080);
-	nv_wo32(chan, 0x0590, 0x30201000);
-	nv_wo32(chan, 0x0594, 0x70605040);
-	nv_wo32(chan, 0x0598, 0xb8a89888);
-	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05b0, 0xb0000000);
+		nvkm_wo32(image, i, 0x07ff0000);
+	nvkm_wo32(image, 0x0550, 0x4b7fffff);
+	nvkm_wo32(image, 0x058c, 0x00000080);
+	nvkm_wo32(image, 0x0590, 0x30201000);
+	nvkm_wo32(image, 0x0594, 0x70605040);
+	nvkm_wo32(image, 0x0598, 0xb8a89888);
+	nvkm_wo32(image, 0x059c, 0xf8e8d8c8);
+	nvkm_wo32(image, 0x05b0, 0xb0000000);
 	for (i = 0x0600; i < 0x0640; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(image, i, 0x00010588);
 	for (i = 0x0640; i < 0x0680; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(image, i, 0x00030303);
 	for (i = 0x06c0; i < 0x0700; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(image, i, 0x0008aae4);
 	for (i = 0x0700; i < 0x0740; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(image, i, 0x01012000);
 	for (i = 0x0740; i < 0x0780; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x085c, 0x00040000);
-	nv_wo32(chan, 0x0860, 0x00010000);
+		nvkm_wo32(image, i, 0x00080008);
+	nvkm_wo32(image, 0x085c, 0x00040000);
+	nvkm_wo32(image, 0x0860, 0x00010000);
 	for (i = 0x0864; i < 0x0874; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(image, i, 0x00040004);
 	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(image, i + 0, 0x10700ff9);
+		nvkm_wo32(image, i + 1, 0x0436086c);
+		nvkm_wo32(image, i + 2, 0x000c001b);
 	}
 	for (i = 0x30b8; i < 0x30c8; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x344c, 0x3f800000);
-	nv_wo32(chan, 0x3808, 0x3f800000);
-	nv_wo32(chan, 0x381c, 0x3f800000);
-	nv_wo32(chan, 0x3848, 0x40000000);
-	nv_wo32(chan, 0x384c, 0x3f800000);
-	nv_wo32(chan, 0x3850, 0x3f000000);
-	nv_wo32(chan, 0x3858, 0x40000000);
-	nv_wo32(chan, 0x385c, 0x3f800000);
-	nv_wo32(chan, 0x3864, 0xbf800000);
-	nv_wo32(chan, 0x386c, 0xbf800000);
+		nvkm_wo32(image, i, 0x0000ffff);
+	nvkm_wo32(image, 0x344c, 0x3f800000);
+	nvkm_wo32(image, 0x3808, 0x3f800000);
+	nvkm_wo32(image, 0x381c, 0x3f800000);
+	nvkm_wo32(image, 0x3848, 0x40000000);
+	nvkm_wo32(image, 0x384c, 0x3f800000);
+	nvkm_wo32(image, 0x3850, 0x3f000000);
+	nvkm_wo32(image, 0x3858, 0x40000000);
+	nvkm_wo32(image, 0x385c, 0x3f800000);
+	nvkm_wo32(image, 0x3864, 0xbf800000);
+	nvkm_wo32(image, 0x386c, 0xbf800000);
+	nvkm_done(image);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index e17eb0b13277..f9d71185ee74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -39,6 +39,7 @@ nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_object **pobject)
 {
 	struct nv20_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
@@ -48,59 +49,62 @@ nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	chan->chid = nvkm_fifo_chan(parent)->chid;
+	image = &chan->base.base.gpuobj;
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x01000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0480, 0xffff0000);
+	nvkm_kmap(image);
+	nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(image, 0x040c, 0x01000101);
+	nvkm_wo32(image, 0x0420, 0x00000111);
+	nvkm_wo32(image, 0x0424, 0x00000060);
+	nvkm_wo32(image, 0x0440, 0x00000080);
+	nvkm_wo32(image, 0x0444, 0xffff0000);
+	nvkm_wo32(image, 0x0448, 0x00000001);
+	nvkm_wo32(image, 0x045c, 0x44400000);
+	nvkm_wo32(image, 0x0480, 0xffff0000);
 	for (i = 0x04d4; i < 0x04dc; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e0, 0x00011100);
+		nvkm_wo32(image, i, 0x0fff0000);
+	nvkm_wo32(image, 0x04e0, 0x00011100);
 	for (i = 0x04fc; i < 0x053c; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0544, 0x4b7fffff);
-	nv_wo32(chan, 0x057c, 0x00000080);
-	nv_wo32(chan, 0x0580, 0x30201000);
-	nv_wo32(chan, 0x0584, 0x70605040);
-	nv_wo32(chan, 0x0588, 0xb8a89888);
-	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05a0, 0xb0000000);
+		nvkm_wo32(image, i, 0x07ff0000);
+	nvkm_wo32(image, 0x0544, 0x4b7fffff);
+	nvkm_wo32(image, 0x057c, 0x00000080);
+	nvkm_wo32(image, 0x0580, 0x30201000);
+	nvkm_wo32(image, 0x0584, 0x70605040);
+	nvkm_wo32(image, 0x0588, 0xb8a89888);
+	nvkm_wo32(image, 0x058c, 0xf8e8d8c8);
+	nvkm_wo32(image, 0x05a0, 0xb0000000);
 	for (i = 0x05f0; i < 0x0630; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(image, i, 0x00010588);
 	for (i = 0x0630; i < 0x0670; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(image, i, 0x00030303);
 	for (i = 0x06b0; i < 0x06f0; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(image, i, 0x0008aae4);
 	for (i = 0x06f0; i < 0x0730; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(image, i, 0x01012000);
 	for (i = 0x0730; i < 0x0770; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0850, 0x00040000);
-	nv_wo32(chan, 0x0854, 0x00010000);
+		nvkm_wo32(image, i, 0x00080008);
+	nvkm_wo32(image, 0x0850, 0x00040000);
+	nvkm_wo32(image, 0x0854, 0x00010000);
 	for (i = 0x0858; i < 0x0868; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(image, i, 0x00040004);
 	for (i = 0x15ac; i <= 0x271c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(image, i + 0, 0x10700ff9);
+		nvkm_wo32(image, i + 1, 0x0436086c);
+		nvkm_wo32(image, i + 2, 0x000c001b);
 	}
 	for (i = 0x274c; i < 0x275c; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x2ae0, 0x3f800000);
-	nv_wo32(chan, 0x2e9c, 0x3f800000);
-	nv_wo32(chan, 0x2eb0, 0x3f800000);
-	nv_wo32(chan, 0x2edc, 0x40000000);
-	nv_wo32(chan, 0x2ee0, 0x3f800000);
-	nv_wo32(chan, 0x2ee4, 0x3f000000);
-	nv_wo32(chan, 0x2eec, 0x40000000);
-	nv_wo32(chan, 0x2ef0, 0x3f800000);
-	nv_wo32(chan, 0x2ef8, 0xbf800000);
-	nv_wo32(chan, 0x2f00, 0xbf800000);
+		nvkm_wo32(image, i, 0x0000ffff);
+	nvkm_wo32(image, 0x2ae0, 0x3f800000);
+	nvkm_wo32(image, 0x2e9c, 0x3f800000);
+	nvkm_wo32(image, 0x2eb0, 0x3f800000);
+	nvkm_wo32(image, 0x2edc, 0x40000000);
+	nvkm_wo32(image, 0x2ee0, 0x3f800000);
+	nvkm_wo32(image, 0x2ee4, 0x3f000000);
+	nvkm_wo32(image, 0x2eec, 0x40000000);
+	nvkm_wo32(image, 0x2ef0, 0x3f800000);
+	nvkm_wo32(image, 0x2ef8, 0xbf800000);
+	nvkm_wo32(image, 0x2f00, 0xbf800000);
+	nvkm_done(image);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 35ba75130f93..c6357f2fdb36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -39,6 +39,7 @@ nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		      struct nvkm_object **pobject)
 {
 	struct nv20_gr_chan *chan;
+	struct nvkm_gpuobj *image;
 	int ret, i;
 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
@@ -48,59 +49,62 @@ nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	chan->chid = nvkm_fifo_chan(parent)->chid;
+	image = &chan->base.base.gpuobj;
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x00000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0488, 0xffff0000);
+	nvkm_kmap(image);
+	nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(image, 0x040c, 0x00000101);
+	nvkm_wo32(image, 0x0420, 0x00000111);
+	nvkm_wo32(image, 0x0424, 0x00000060);
+	nvkm_wo32(image, 0x0440, 0x00000080);
+	nvkm_wo32(image, 0x0444, 0xffff0000);
+	nvkm_wo32(image, 0x0448, 0x00000001);
+	nvkm_wo32(image, 0x045c, 0x44400000);
+	nvkm_wo32(image, 0x0488, 0xffff0000);
 	for (i = 0x04dc; i < 0x04e4; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e8, 0x00011100);
+		nvkm_wo32(image, i, 0x0fff0000);
+	nvkm_wo32(image, 0x04e8, 0x00011100);
 	for (i = 0x0504; i < 0x0544; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x054c, 0x4b7fffff);
-	nv_wo32(chan, 0x0588, 0x00000080);
-	nv_wo32(chan, 0x058c, 0x30201000);
-	nv_wo32(chan, 0x0590, 0x70605040);
-	nv_wo32(chan, 0x0594, 0xb8a89888);
-	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05ac, 0xb0000000);
+		nvkm_wo32(image, i, 0x07ff0000);
+	nvkm_wo32(image, 0x054c, 0x4b7fffff);
+	nvkm_wo32(image, 0x0588, 0x00000080);
+	nvkm_wo32(image, 0x058c, 0x30201000);
+	nvkm_wo32(image, 0x0590, 0x70605040);
+	nvkm_wo32(image, 0x0594, 0xb8a89888);
+	nvkm_wo32(image, 0x0598, 0xf8e8d8c8);
+	nvkm_wo32(image, 0x05ac, 0xb0000000);
 	for (i = 0x0604; i < 0x0644; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(image, i, 0x00010588);
 	for (i = 0x0644; i < 0x0684; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(image, i, 0x00030303);
 	for (i = 0x06c4; i < 0x0704; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(image, i, 0x0008aae4);
 	for (i = 0x0704; i < 0x0744; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(image, i, 0x01012000);
 	for (i = 0x0744; i < 0x0784; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0860, 0x00040000);
-	nv_wo32(chan, 0x0864, 0x00010000);
+		nvkm_wo32(image, i, 0x00080008);
+	nvkm_wo32(image, 0x0860, 0x00040000);
+	nvkm_wo32(image, 0x0864, 0x00010000);
 	for (i = 0x0868; i < 0x0878; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(image, i, 0x00040004);
 	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 4, 0x0436086c);
-		nv_wo32(chan, i + 8, 0x000c001b);
+		nvkm_wo32(image, i + 0, 0x10700ff9);
+		nvkm_wo32(image, i + 4, 0x0436086c);
+		nvkm_wo32(image, i + 8, 0x000c001b);
 	}
 	for (i = 0x30bc; i < 0x30cc; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x3450, 0x3f800000);
-	nv_wo32(chan, 0x380c, 0x3f800000);
-	nv_wo32(chan, 0x3820, 0x3f800000);
-	nv_wo32(chan, 0x384c, 0x40000000);
-	nv_wo32(chan, 0x3850, 0x3f800000);
-	nv_wo32(chan, 0x3854, 0x3f000000);
-	nv_wo32(chan, 0x385c, 0x40000000);
-	nv_wo32(chan, 0x3860, 0x3f800000);
-	nv_wo32(chan, 0x3868, 0xbf800000);
-	nv_wo32(chan, 0x3870, 0xbf800000);
+		nvkm_wo32(image, i, 0x0000ffff);
+	nvkm_wo32(image, 0x3450, 0x3f800000);
+	nvkm_wo32(image, 0x380c, 0x3f800000);
+	nvkm_wo32(image, 0x3820, 0x3f800000);
+	nvkm_wo32(image, 0x384c, 0x40000000);
+	nvkm_wo32(image, 0x3850, 0x3f800000);
+	nvkm_wo32(image, 0x3854, 0x3f000000);
+	nvkm_wo32(image, 0x385c, 0x40000000);
+	nvkm_wo32(image, 0x3860, 0x3f800000);
+	nvkm_wo32(image, 0x3868, 0xbf800000);
+	nvkm_wo32(image, 0x3870, 0xbf800000);
+	nvkm_done(image);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 10ffb676e55e..1ebf2edef4d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -63,14 +63,16 @@ nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
+	nvkm_kmap(obj);
+	nvkm_wo32(obj, 0x00, nv_mclass(obj));
+	nvkm_wo32(obj, 0x04, 0x00000000);
+	nvkm_wo32(obj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+	nvkm_mo32(obj, 0x08, 0x01000000, 0x01000000);
 #endif
-	nv_wo32(obj, 0x0c, 0x00000000);
-	nv_wo32(obj, 0x10, 0x00000000);
+	nvkm_wo32(obj, 0x0c, 0x00000000);
+	nvkm_wo32(obj, 0x10, 0x00000000);
+	nvkm_done(obj);
 	return 0;
 }
@@ -146,7 +148,7 @@ nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;
 	nv40_grctx_fill(nv_device(gr), nv_gpuobj(chan));
-	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
+	nvkm_wo32(&chan->base.base.gpuobj, 0x00000, nv_gpuobj(chan)->addr >> 4);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 4ea7f0938769..c50cfe4875ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -62,10 +62,12 @@ nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
+	nvkm_kmap(obj);
+	nvkm_wo32(obj, 0x00, nv_mclass(obj));
+	nvkm_wo32(obj, 0x04, 0x00000000);
+	nvkm_wo32(obj, 0x08, 0x00000000);
+	nvkm_wo32(obj, 0x0c, 0x00000000);
+	nvkm_done(obj);
 	return 0;
 }