author		Ben Skeggs <bskeggs@redhat.com>		2017-11-01 03:56:19 +1000
committer	Ben Skeggs <bskeggs@redhat.com>		2017-11-02 13:32:18 +1000
commit		07bbc1c5f49b64323d9e5c1e0d5d7d201e1f2627 (patch)
tree		8ef86f7972db2f92d01824787bc9a3c729346354 /drivers
parent		dde59b9c341a1b5ce2229f7d4a57f19435da82d7 (diff)
drm/nouveau/core/memory: split info pointers from accessor pointers
The accessor functions can change as a result of acquire()/release() calls,
and are protected by any refcounting done there.
Other functions must remain constant, as they can be called at any time.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
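
In short, the per-object table is split in two (a condensed sketch of the structures as they stand after this patch; the full definitions are in the memory.h hunk below):

struct nvkm_memory_ptrs {
	u32 (*rd32)(struct nvkm_memory *, u64 offset);
	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
};

struct nvkm_memory {
	const struct nvkm_memory_func *func; /* info/acquire/release/map: must stay valid at any time */
	const struct nvkm_memory_ptrs *ptrs; /* accessors: may be swapped by acquire()/release() */
};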
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h	|   5
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/core/memory.h	|  10
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c	|  17
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c	|  10
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c	|  51
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c	|  47
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c	| 133
7 files changed, 155 insertions, 118 deletions
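
The acquire()-time swap described above can be seen in the instmem code; reconstructed from the base.c hunk below, the slow path switches an object over to its direct accessors once the parent mapping succeeds:

static void __iomem *
nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	iobj->map = nvkm_kmap(iobj->parent);
	if (iobj->map) {
		/* mapping held: use the direct func/ptrs tables until release */
		memory->func = &nvkm_instobj_func;
		memory->ptrs = &nvkm_instobj_ptrs;
	}
	return iobj->map;
}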
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index 1d7ccde7cde8..acd07de3e4ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -9,7 +9,10 @@ struct nvkm_vm;
 #define NVOBJ_FLAG_HEAP 0x00000004
 
 struct nvkm_gpuobj {
-	const struct nvkm_gpuobj_func *func;
+	union {
+		const struct nvkm_gpuobj_func *func;
+		const struct nvkm_gpuobj_func *ptrs;
+	};
 	struct nvkm_gpuobj *parent;
 	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 33ca6769266a..14ce7df9864e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -14,6 +14,7 @@ enum nvkm_memory_target {
 
 struct nvkm_memory {
 	const struct nvkm_memory_func *func;
+	const struct nvkm_memory_ptrs *ptrs;
 };
 
 struct nvkm_memory_func {
@@ -24,9 +25,12 @@ struct nvkm_memory_func {
 	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
 	void __iomem *(*acquire)(struct nvkm_memory *);
 	void (*release)(struct nvkm_memory *);
+	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+};
+
+struct nvkm_memory_ptrs {
 	u32 (*rd32)(struct nvkm_memory *, u64 offset);
 	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
-	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
 };
 
 void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
@@ -43,8 +47,8 @@ void nvkm_memory_del(struct nvkm_memory **);
  * macros to guarantee correct behaviour across all chipsets
  */
 #define nvkm_kmap(o) (o)->func->acquire(o)
-#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_ro32(o,a) (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
 #define nvkm_mo32(o,a,m,d) ({ \
 	u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
 	nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 372f01ba7766..a15125ed455d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -112,9 +112,13 @@ nvkm_instobj_func = {
 	.size = nvkm_instobj_size,
 	.acquire = nvkm_instobj_acquire,
 	.release = nvkm_instobj_release,
+	.map = nvkm_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+nvkm_instobj_ptrs = {
 	.rd32 = nvkm_instobj_rd32,
 	.wr32 = nvkm_instobj_wr32,
-	.map = nvkm_instobj_map,
 };
 
 static void
@@ -137,8 +141,10 @@ nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
 {
 	struct nvkm_instobj *iobj = nvkm_instobj(memory);
 	iobj->map = nvkm_kmap(iobj->parent);
-	if (iobj->map)
+	if (iobj->map) {
 		memory->func = &nvkm_instobj_func;
+		memory->ptrs = &nvkm_instobj_ptrs;
+	}
 	return iobj->map;
 }
 
@@ -165,9 +171,13 @@ nvkm_instobj_func_slow = {
 	.boot = nvkm_instobj_boot,
 	.acquire = nvkm_instobj_acquire_slow,
 	.release = nvkm_instobj_release_slow,
+	.map = nvkm_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+nvkm_instobj_ptrs_slow = {
 	.rd32 = nvkm_instobj_rd32_slow,
 	.wr32 = nvkm_instobj_wr32_slow,
-	.map = nvkm_instobj_map,
 };
 
 int
@@ -196,6 +206,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 	}
 
 	nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+	iobj->memory.ptrs = &nvkm_instobj_ptrs_slow;
 	iobj->parent = memory;
 	iobj->imem = imem;
 	spin_lock(&iobj->imem->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index cd5adbec5e57..41bf8770cfd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -346,8 +346,6 @@ gk20a_instobj_func_dma = {
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_dma,
 	.release = gk20a_instobj_release_dma,
-	.rd32 = gk20a_instobj_rd32,
-	.wr32 = gk20a_instobj_wr32,
 	.map = gk20a_instobj_map,
 };
 
@@ -359,9 +357,13 @@ gk20a_instobj_func_iommu = {
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_iommu,
 	.release = gk20a_instobj_release_iommu,
+	.map = gk20a_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+gk20a_instobj_ptrs = {
 	.rd32 = gk20a_instobj_rd32,
 	.wr32 = gk20a_instobj_wr32,
-	.map = gk20a_instobj_map,
 };
 
 static int
@@ -377,6 +379,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	*_node = &node->base;
 
 	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
+	node->base.memory.ptrs = &gk20a_instobj_ptrs;
 
 	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					   &node->handle, GFP_KERNEL,
@@ -424,6 +427,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->dma_addrs = (void *)(node->pages + npages);
 
 	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
+	node->base.memory.ptrs = &gk20a_instobj_ptrs;
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6133c8bb2d42..d4176cc67708 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -43,22 +43,31 @@ struct nv04_instobj {
 	struct nvkm_mm_node *node;
 };
 
-static enum nvkm_memory_target
-nv04_instobj_target(struct nvkm_memory *memory)
+static void
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_INST;
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
 }
 
-static u64
-nv04_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	return nv04_instobj(memory)->node->offset;
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
 }
 
-static u64
-nv04_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv04_instobj_ptrs = {
+	.rd32 = nv04_instobj_rd32,
+	.wr32 = nv04_instobj_wr32,
+};
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
 {
-	return nv04_instobj(memory)->node->length;
 }
 
 static void __iomem *
@@ -69,25 +78,22 @@ nv04_instobj_acquire(struct nvkm_memory *memory)
 	return device->pri + 0x700000 + iobj->node->offset;
 }
 
-static void
-nv04_instobj_release(struct nvkm_memory *memory)
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
 {
+	return nv04_instobj(memory)->node->length;
 }
 
-static u32
-nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv04_instobj *iobj = nv04_instobj(memory);
-	struct nvkm_device *device = iobj->imem->base.subdev.device;
-	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
+	return nv04_instobj(memory)->node->offset;
 }
 
-static void
-nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv04_instobj *iobj = nv04_instobj(memory);
-	struct nvkm_device *device = iobj->imem->base.subdev.device;
-	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
+	return NVKM_MEM_TARGET_INST;
 }
 
 static void *
@@ -108,8 +114,6 @@ nv04_instobj_func = {
 	.addr = nv04_instobj_addr,
 	.acquire = nv04_instobj_acquire,
 	.release = nv04_instobj_release,
-	.rd32 = nv04_instobj_rd32,
-	.wr32 = nv04_instobj_wr32,
 };
 
 static int
@@ -125,6 +129,7 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	*pmemory = &iobj->memory;
 
 	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+	iobj->memory.ptrs = &nv04_instobj_ptrs;
 	iobj->imem = imem;
 
 	mutex_lock(&imem->base.subdev.mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index c0543875e490..5fa2b7c0decc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -45,22 +45,29 @@ struct nv40_instobj {
 	struct nvkm_mm_node *node;
 };
 
-static enum nvkm_memory_target
-nv40_instobj_target(struct nvkm_memory *memory)
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_INST;
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
 }
 
-static u64
-nv40_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	return nv40_instobj(memory)->node->offset;
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
 }
 
-static u64
-nv40_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv40_instobj_ptrs = {
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
 {
-	return nv40_instobj(memory)->node->length;
 }
 
 static void __iomem *
@@ -70,23 +77,22 @@ nv40_instobj_acquire(struct nvkm_memory *memory)
 	return iobj->imem->iomem + iobj->node->offset;
 }
 
-static void
-nv40_instobj_release(struct nvkm_memory *memory)
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
 {
+	return nv40_instobj(memory)->node->length;
 }
 
-static u32
-nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv40_instobj *iobj = nv40_instobj(memory);
-	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+	return nv40_instobj(memory)->node->offset;
 }
 
-static void
-nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv40_instobj *iobj = nv40_instobj(memory);
-	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+	return NVKM_MEM_TARGET_INST;
 }
 
 static void *
@@ -107,8 +113,6 @@ nv40_instobj_func = {
 	.addr = nv40_instobj_addr,
 	.acquire = nv40_instobj_acquire,
 	.release = nv40_instobj_release,
-	.rd32 = nv40_instobj_rd32,
-	.wr32 = nv40_instobj_wr32,
 };
 
 static int
@@ -124,6 +128,7 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	*pmemory = &iobj->memory;
 
 	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	iobj->memory.ptrs = &nv40_instobj_ptrs;
 	iobj->imem = imem;
 
 	mutex_lock(&imem->base.subdev.mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a3cd3e193d03..e3273aed3381 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -49,50 +49,51 @@ struct nv50_instobj {
 	void *map;
 };
 
-static enum nvkm_memory_target
-nv50_instobj_target(struct nvkm_memory *memory)
+static void
+nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_VRAM;
-}
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 
-static u64
-nv50_instobj_addr(struct nvkm_memory *memory)
-{
-	return nv50_instobj(memory)->mem->offset;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
+	}
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
-static u64
-nv50_instobj_size(struct nvkm_memory *memory)
+static u32
+nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
 {
-	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+	u32 data;
+
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
+	}
+	data = nvkm_rd32(device, 0x700000 + addr);
+	return data;
 }
 
+static const struct nvkm_memory_ptrs
+nv50_instobj_slow = {
+	.rd32 = nv50_instobj_rd32_slow,
+	.wr32 = nv50_instobj_wr32_slow,
+};
+
 static void
-nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
-	struct nvkm_device *device = subdev->device;
-	u64 size = nvkm_memory_size(memory);
-	void __iomem *map;
-	int ret;
-
-	iobj->map = ERR_PTR(-ENOMEM);
-
-	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
-	if (ret == 0) {
-		map = ioremap(device->func->resource_addr(device, 3) +
-			      (u32)iobj->bar.offset, size);
-		if (map) {
-			nvkm_memory_map(memory, &iobj->bar, 0);
-			iobj->map = map;
-		} else {
-			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
-			nvkm_vm_put(&iobj->bar);
-		}
-	} else {
-		nvkm_warn(subdev, "PRAMIN exhausted\n");
-	}
+	nvkm_vm_map_at(vma, offset, iobj->mem);
 }
 
 static void
@@ -120,45 +121,50 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	return NULL;
 }
 
-static u32
-nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static void
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-	u32 data;
+	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int ret;
 
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
+	iobj->map = ERR_PTR(-ENOMEM);
+
+	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
+	if (ret == 0) {
+		map = ioremap(device->func->resource_addr(device, 3) +
+			      (u32)iobj->bar.offset, size);
+		if (map) {
+			nvkm_memory_map(memory, &iobj->bar, 0);
+			iobj->map = map;
+		} else {
+			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+			nvkm_vm_put(&iobj->bar);
+		}
+	} else {
+		nvkm_warn(subdev, "PRAMIN exhausted\n");
 	}
-	data = nvkm_rd32(device, 0x700000 + addr);
-	return data;
 }
 
-static void
-nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
 {
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+}
 
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
-	}
-	nvkm_wr32(device, 0x700000 + addr, data);
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv50_instobj(memory)->mem->offset;
 }
 
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	nvkm_vm_map_at(vma, offset, iobj->mem);
+	return NVKM_MEM_TARGET_VRAM;
 }
 
 static void *
@@ -183,8 +189,6 @@ nv50_instobj_func = {
 	.boot = nv50_instobj_boot,
 	.acquire = nv50_instobj_acquire,
 	.release = nv50_instobj_release,
-	.rd32 = nv50_instobj_rd32,
-	.wr32 = nv50_instobj_wr32,
 	.map = nv50_instobj_map,
 };
 
@@ -202,6 +206,7 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	*pmemory = &iobj->memory;
 
 	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+	iobj->memory.ptrs = &nv50_instobj_slow;
 	iobj->imem = imem;
 
 	size = max((size + 4095) & ~4095, (u32)4096);