author    | Ben Skeggs <bskeggs@redhat.com> | 2015-01-14 09:57:36 +1000
committer | Ben Skeggs <bskeggs@redhat.com> | 2015-01-22 12:17:43 +1000
commit    | 5ce3bf3c72436c49fbd9a5b71d7d278665f4bf55 (patch)
tree      | f2aaba0aa92d945db18b68c93f53723ab4659807 /drivers/gpu/drm/nouveau/nvkm/subdev/mmu
parent    | ebb58dc2ef8c62d1affa28160f57faa7b0e1dc02 (diff)
drm/nouveau/mmu: rename from vmmgr (no binary change)
Switch to NVIDIA's name for the device.
The NVKM namespace is being changed to nvkm_ instead of nouveau_, which
will be reserved for the DRM part of the driver. This is being done to
make it immediately clear which part of the driver a given symbol
belongs to, and as a minor step towards splitting the DRM driver out so
that it can stand on its own (for virt).
Because there's already a large amount of churn here anyway, this is
as good a time as any to also switch to NVIDIA's device and chipset
naming to ease collaboration with them.
A comparison of objdump disassemblies proves no code changes.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 6
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 483
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c | 151
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h | 19
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c | 159
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c | 249
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c | 240
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nvc0.c | 241
8 files changed, 1548 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
new file mode 100644
index 000000000000..729e27f2cbd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -0,0 +1,6 @@
+nvkm-y += nvkm/subdev/mmu/base.o
+nvkm-y += nvkm/subdev/mmu/nv04.o
+nvkm-y += nvkm/subdev/mmu/nv41.o
+nvkm-y += nvkm/subdev/mmu/nv44.o
+nvkm-y += nvkm/subdev/mmu/nv50.o
+nvkm-y += nvkm/subdev/mmu/nvc0.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
new file mode 100644
index 000000000000..e3cb186c440b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/mm.h> + +#include <subdev/fb.h> +#include <subdev/mmu.h> + +void +nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mmu *mmu = vm->mmu; + struct nouveau_mm_node *r; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + u32 end, len; + + delta = 0; + list_for_each_entry(r, &node->regions, rl_entry) { + u64 phys = (u64)r->offset << 12; + u32 num = r->length >> bits; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + mmu->map(vma, pgt, node, pte, len, phys, delta); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + phys += len << (bits + 12); + pde++; + pte = 0; + } + + delta += (u64)len << vma->node->type; + } + } + + mmu->flush(vm); +} + +static void +nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, + struct nouveau_mem *mem) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mmu *mmu = vm->mmu; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + unsigned m, sglen; + u32 end, len; + int i; + struct scatterlist *sg; + + for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + sglen = sg_dma_len(sg) >> PAGE_SHIFT; + + end = pte + sglen; + if (unlikely(end >= max)) + end = max; + len = end - pte; + + for (m = 0; m < len; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + + mmu->map_sg(vma, pgt, mem, pte, 1, &addr); + num--; + pte++; + + if (num == 0) + goto finish; + } + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + if (m < sglen) { + for (; m < sglen; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + + mmu->map_sg(vma, pgt, mem, pte, 1, &addr); + num--; + pte++; + if (num == 0) + goto finish; + } + } + + } +finish: + mmu->flush(vm); +} + +static void +nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, + struct nouveau_mem *mem) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mmu *mmu = vm->mmu; + dma_addr_t *list = mem->pages; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + mmu->map_sg(vma, pgt, mem, pte, len, list); + + num -= len; + pte += len; + list += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + mmu->flush(vm); +} + +void +nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node) +{ + if (node->sg) + nouveau_vm_map_sg_table(vma, 0, node->size << 12, node); + else + if (node->pages) + nouveau_vm_map_sg(vma, 0, 
node->size << 12, node); + else + nouveau_vm_map_at(vma, 0, node); +} + +void +nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mmu *mmu = vm->mmu; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + mmu->unmap(pgt, pte, len); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + mmu->flush(vm); +} + +void +nouveau_vm_unmap(struct nouveau_vma *vma) +{ + nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); +} + +static void +nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde) +{ + struct nouveau_mmu *mmu = vm->mmu; + struct nouveau_vm_pgd *vpgd; + struct nouveau_vm_pgt *vpgt; + struct nouveau_gpuobj *pgt; + u32 pde; + + for (pde = fpde; pde <= lpde; pde++) { + vpgt = &vm->pgt[pde - vm->fpde]; + if (--vpgt->refcount[big]) + continue; + + pgt = vpgt->obj[big]; + vpgt->obj[big] = NULL; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + mmu->map_pgt(vpgd->obj, pde, vpgt->obj); + } + + mutex_unlock(&nv_subdev(mmu)->mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&nv_subdev(mmu)->mutex); + } +} + +static int +nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) +{ + struct nouveau_mmu *mmu = vm->mmu; + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + struct nouveau_vm_pgd *vpgd; + struct nouveau_gpuobj *pgt; + int big = (type != mmu->spg_shift); + u32 pgt_size; + int ret; + + pgt_size = (1 << (mmu->pgt_bits + 12)) >> type; + pgt_size *= 8; + + mutex_unlock(&nv_subdev(mmu)->mutex); + ret = nouveau_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &pgt); + mutex_lock(&nv_subdev(mmu)->mutex); + if (unlikely(ret)) + return ret; + + /* someone beat us to filling the PDE while we didn't have the lock */ + if (unlikely(vpgt->refcount[big]++)) { + mutex_unlock(&nv_subdev(mmu)->mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&nv_subdev(mmu)->mutex); + return 0; + } + + vpgt->obj[big] = pgt; + list_for_each_entry(vpgd, &vm->pgd_list, head) { + mmu->map_pgt(vpgd->obj, pde, vpgt->obj); + } + + return 0; +} + +int +nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, + u32 access, struct nouveau_vma *vma) +{ + struct nouveau_mmu *mmu = vm->mmu; + u32 align = (1 << page_shift) >> 12; + u32 msize = size >> 12; + u32 fpde, lpde, pde; + int ret; + + mutex_lock(&nv_subdev(mmu)->mutex); + ret = nouveau_mm_head(&vm->mm, 0, page_shift, msize, msize, align, + &vma->node); + if (unlikely(ret != 0)) { + mutex_unlock(&nv_subdev(mmu)->mutex); + return ret; + } + + fpde = (vma->node->offset >> mmu->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; + + for (pde = fpde; pde <= lpde; pde++) { + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + int big = (vma->node->type != mmu->spg_shift); + + if (likely(vpgt->refcount[big])) { + vpgt->refcount[big]++; + continue; + } + + ret = nouveau_vm_map_pgt(vm, pde, vma->node->type); + if (ret) { + if (pde != fpde) + nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); + nouveau_mm_free(&vm->mm, &vma->node); 
+ mutex_unlock(&nv_subdev(mmu)->mutex); + return ret; + } + } + mutex_unlock(&nv_subdev(mmu)->mutex); + + vma->vm = NULL; + nouveau_vm_ref(vm, &vma->vm, NULL); + vma->offset = (u64)vma->node->offset << 12; + vma->access = access; + return 0; +} + +void +nouveau_vm_put(struct nouveau_vma *vma) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mmu *mmu = vm->mmu; + u32 fpde, lpde; + + if (unlikely(vma->node == NULL)) + return; + fpde = (vma->node->offset >> mmu->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; + + mutex_lock(&nv_subdev(mmu)->mutex); + nouveau_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde); + nouveau_mm_free(&vm->mm, &vma->node); + mutex_unlock(&nv_subdev(mmu)->mutex); + + nouveau_vm_ref(NULL, &vma->vm, NULL); +} + +int +nouveau_vm_create(struct nouveau_mmu *mmu, u64 offset, u64 length, + u64 mm_offset, u32 block, struct nouveau_vm **pvm) +{ + struct nouveau_vm *vm; + u64 mm_length = (offset + length) - mm_offset; + int ret; + + vm = kzalloc(sizeof(*vm), GFP_KERNEL); + if (!vm) + return -ENOMEM; + + INIT_LIST_HEAD(&vm->pgd_list); + vm->mmu = mmu; + kref_init(&vm->refcount); + vm->fpde = offset >> (mmu->pgt_bits + 12); + vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12); + + vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); + if (!vm->pgt) { + kfree(vm); + return -ENOMEM; + } + + ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, + block >> 12); + if (ret) { + vfree(vm->pgt); + kfree(vm); + return ret; + } + + *pvm = vm; + + return 0; +} + +int +nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **pvm) +{ + struct nouveau_mmu *mmu = nouveau_mmu(device); + return mmu->create(mmu, offset, length, mm_offset, pvm); +} + +static int +nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +{ + struct nouveau_mmu *mmu = vm->mmu; + struct nouveau_vm_pgd *vpgd; + int i; + + if (!pgd) + return 0; + + vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); + if (!vpgd) + return -ENOMEM; + + nouveau_gpuobj_ref(pgd, &vpgd->obj); + + mutex_lock(&nv_subdev(mmu)->mutex); + for (i = vm->fpde; i <= vm->lpde; i++) + mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); + list_add(&vpgd->head, &vm->pgd_list); + mutex_unlock(&nv_subdev(mmu)->mutex); + return 0; +} + +static void +nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) +{ + struct nouveau_mmu *mmu = vm->mmu; + struct nouveau_vm_pgd *vpgd, *tmp; + struct nouveau_gpuobj *pgd = NULL; + + if (!mpgd) + return; + + mutex_lock(&nv_subdev(mmu)->mutex); + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + if (vpgd->obj == mpgd) { + pgd = vpgd->obj; + list_del(&vpgd->head); + kfree(vpgd); + break; + } + } + mutex_unlock(&nv_subdev(mmu)->mutex); + + nouveau_gpuobj_ref(NULL, &pgd); +} + +static void +nouveau_vm_del(struct kref *kref) +{ + struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount); + struct nouveau_vm_pgd *vpgd, *tmp; + + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + nouveau_vm_unlink(vm, vpgd->obj); + } + + nouveau_mm_fini(&vm->mm); + vfree(vm->pgt); + kfree(vm); +} + +int +nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, + struct nouveau_gpuobj *pgd) +{ + if (ref) { + int ret = nouveau_vm_link(ref, pgd); + if (ret) + return ret; + + kref_get(&ref->refcount); + } + + if (*ptr) { + nouveau_vm_unlink(*ptr, pgd); + kref_put(&(*ptr)->refcount, nouveau_vm_del); + } + + *ptr = ref; + return 0; +} diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c new file mode 100644 index 000000000000..a317d8f13570 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c @@ -0,0 +1,151 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> + +#include "nv04.h" + +#define NV04_PDMA_SIZE (128 * 1024 * 1024) +#define NV04_PDMA_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + pte = 0x00008 + (pte * 4); + while (cnt) { + u32 page = PAGE_SIZE / NV04_PDMA_PAGE; + u32 phys = (u32)*list++; + while (cnt && page--) { + nv_wo32(pgt, pte, phys | 3); + phys += NV04_PDMA_PAGE; + pte += 4; + cnt -= 1; + } + } +} + +static void +nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte = 0x00008 + (pte * 4); + while (cnt--) { + nv_wo32(pgt, pte, 0x00000000); + pte += 4; + } +} + +static void +nv04_vm_flush(struct nouveau_vm *vm) +{ +} + +/******************************************************************************* + * VM object + ******************************************************************************/ + +int +nv04_vm_create(struct nouveau_mmu *mmu, u64 offset, u64 length, u64 mmstart, + struct nouveau_vm **pvm) +{ + return -EINVAL; +} + +/******************************************************************************* + * MMU subdev + ******************************************************************************/ + +static int +nv04_mmu_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_mmu_priv *priv; + struct nouveau_gpuobj *dma; + int ret; + + ret = nouveau_mmu_create(parent, engine, oclass, "PCIGART", + "pcigart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV04_PDMA_SIZE; + priv->base.dma_bits = 32; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv04_vm_map_sg; + priv->base.unmap = nv04_vm_unmap; + priv->base.flush = nv04_vm_flush; + + ret = 
nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + + 8, 16, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + dma = priv->vm->pgt[0].obj[0]; + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */ + nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); + return 0; +} + +void +nv04_mmu_dtor(struct nouveau_object *object) +{ + struct nv04_mmu_priv *priv = (void *)object; + if (priv->vm) { + nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]); + nouveau_vm_ref(NULL, &priv->vm, NULL); + } + if (priv->nullp) { + pci_free_consistent(nv_device(priv)->pdev, 16 * 1024, + priv->nullp, priv->null); + } + nouveau_mmu_destroy(&priv->base); +} + +struct nouveau_oclass +nv04_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_mmu_ctor, + .dtor = nv04_mmu_dtor, + .init = _nouveau_mmu_init, + .fini = _nouveau_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h new file mode 100644 index 000000000000..919b254ef6a1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h @@ -0,0 +1,19 @@ +#ifndef __NV04_MMU_PRIV__ +#define __NV04_MMU_PRIV__ + +#include <subdev/mmu.h> + +struct nv04_mmu_priv { + struct nouveau_mmu base; + struct nouveau_vm *vm; + dma_addr_t null; + void *nullp; +}; + +static inline struct nv04_mmu_priv * +nv04_mmu(void *obj) +{ + return (void *)nouveau_mmu(obj); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c new file mode 100644 index 000000000000..61af036f1252 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c @@ -0,0 +1,159 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/option.h> + +#include <subdev/timer.h> +#include <subdev/mmu.h> + +#include "nv04.h" + +#define NV41_GART_SIZE (512 * 1024 * 1024) +#define NV41_GART_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + pte = pte * 4; + while (cnt) { + u32 page = PAGE_SIZE / NV41_GART_PAGE; + u64 phys = (u64)*list++; + while (cnt && page--) { + nv_wo32(pgt, pte, (phys >> 7) | 1); + phys += NV41_GART_PAGE; + pte += 4; + cnt -= 1; + } + } +} + +static void +nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte = pte * 4; + while (cnt--) { + nv_wo32(pgt, pte, 0x00000000); + pte += 4; + } +} + +static void +nv41_vm_flush(struct nouveau_vm *vm) +{ + struct nv04_mmu_priv *priv = (void *)vm->mmu; + + mutex_lock(&nv_subdev(priv)->mutex); + nv_wr32(priv, 0x100810, 0x00000022); + if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) { + nv_warn(priv, "flush timeout, 0x%08x\n", + nv_rd32(priv, 0x100810)); + } + nv_wr32(priv, 0x100810, 0x00000000); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +/******************************************************************************* + * MMU subdev + ******************************************************************************/ + +static int +nv41_mmu_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nv04_mmu_priv *priv; + int ret; + + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { + return nouveau_object_ctor(parent, engine, &nv04_mmu_oclass, + data, size, pobject); + } + + ret = nouveau_mmu_create(parent, engine, oclass, "PCIEGART", + "pciegart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV41_GART_SIZE; + priv->base.dma_bits = 39; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv41_vm_map_sg; + priv->base.unmap = nv41_vm_unmap; + priv->base.flush = nv41_vm_flush; + + ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (NV41_GART_SIZE / NV41_GART_PAGE) * 4, + 16, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + return 0; +} + +static int +nv41_mmu_init(struct nouveau_object *object) +{ + struct nv04_mmu_priv *priv = (void *)object; + struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0]; + int ret; + + ret = nouveau_mmu_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x100800, dma->addr | 0x00000002); + nv_mask(priv, 0x10008c, 0x00000100, 0x00000100); + nv_wr32(priv, 0x100820, 0x00000000); + return 0; +} + +struct nouveau_oclass +nv41_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x41), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv41_mmu_ctor, + .dtor = nv04_mmu_dtor, + .init = nv41_mmu_init, + .fini = _nouveau_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c new file mode 100644 index 000000000000..f5319e3e7fe5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c @@ -0,0 +1,249 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/option.h> + +#include <subdev/timer.h> +#include <subdev/mmu.h> + +#include "nv04.h" + +#define NV44_GART_SIZE (512 * 1024 * 1024) +#define NV44_GART_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null, + dma_addr_t *list, u32 pte, u32 cnt) +{ + u32 base = (pte << 2) & ~0x0000000f; + u32 tmp[4]; + + tmp[0] = nv_ro32(pgt, base + 0x0); + tmp[1] = nv_ro32(pgt, base + 0x4); + tmp[2] = nv_ro32(pgt, base + 0x8); + tmp[3] = nv_ro32(pgt, base + 0xc); + + while (cnt--) { + u32 addr = list ? (*list++ >> 12) : (null >> 12); + switch (pte++ & 0x3) { + case 0: + tmp[0] &= ~0x07ffffff; + tmp[0] |= addr; + break; + case 1: + tmp[0] &= ~0xf8000000; + tmp[0] |= addr << 27; + tmp[1] &= ~0x003fffff; + tmp[1] |= addr >> 5; + break; + case 2: + tmp[1] &= ~0xffc00000; + tmp[1] |= addr << 22; + tmp[2] &= ~0x0001ffff; + tmp[2] |= addr >> 10; + break; + case 3: + tmp[2] &= ~0xfffe0000; + tmp[2] |= addr << 17; + tmp[3] &= ~0x00000fff; + tmp[3] |= addr >> 15; + break; + } + } + + nv_wo32(pgt, base + 0x0, tmp[0]); + nv_wo32(pgt, base + 0x4, tmp[1]); + nv_wo32(pgt, base + 0x8, tmp[2]); + nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000); +} + +static void +nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + struct nv04_mmu_priv *priv = (void *)vma->vm->mmu; + u32 tmp[4]; + int i; + + if (pte & 3) { + u32 max = 4 - (pte & 3); + u32 part = (cnt > max) ? 
max : cnt; + nv44_vm_fill(pgt, priv->null, list, pte, part); + pte += part; + list += part; + cnt -= part; + } + + while (cnt >= 4) { + for (i = 0; i < 4; i++) + tmp[i] = *list++ >> 12; + nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27); + nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22); + nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17); + nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000); + cnt -= 4; + } + + if (cnt) + nv44_vm_fill(pgt, priv->null, list, pte, cnt); +} + +static void +nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + struct nv04_mmu_priv *priv = (void *)nouveau_mmu(pgt); + + if (pte & 3) { + u32 max = 4 - (pte & 3); + u32 part = (cnt > max) ? max : cnt; + nv44_vm_fill(pgt, priv->null, NULL, pte, part); + pte += part; + cnt -= part; + } + + while (cnt >= 4) { + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + cnt -= 4; + } + + if (cnt) + nv44_vm_fill(pgt, priv->null, NULL, pte, cnt); +} + +static void +nv44_vm_flush(struct nouveau_vm *vm) +{ + struct nv04_mmu_priv *priv = (void *)vm->mmu; + nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE); + nv_wr32(priv, 0x100808, 0x00000020); + if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001)) + nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808)); + nv_wr32(priv, 0x100808, 0x00000000); +} + +/******************************************************************************* + * MMU subdev + ******************************************************************************/ + +static int +nv44_mmu_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nv04_mmu_priv *priv; + int ret; + + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { + return nouveau_object_ctor(parent, engine, &nv04_mmu_oclass, + data, size, pobject); + } + + ret = nouveau_mmu_create(parent, engine, oclass, "PCIEGART", + "pciegart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV44_GART_SIZE; + priv->base.dma_bits = 39; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv44_vm_map_sg; + priv->base.unmap = nv44_vm_unmap; + priv->base.flush = nv44_vm_flush; + + priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null); + if (!priv->nullp) { + nv_error(priv, "unable to allocate dummy pages\n"); + return -ENOMEM; + } + + ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (NV44_GART_SIZE / NV44_GART_PAGE) * 4, + 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + return 0; +} + +static int +nv44_mmu_init(struct nouveau_object *object) +{ + struct nv04_mmu_priv *priv = (void *)object; + struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0]; + u32 addr; + int ret; + + ret = nouveau_mmu_init(&priv->base); + if (ret) + return ret; + + /* calculate vram address of this PRAMIN block, object must be + * allocated on 512KiB alignment, and not exceed a total size + * of 512KiB for this to work correctly + */ + addr = nv_rd32(priv, 0x10020c); + addr -= ((gart->addr >> 19) + 1) 
<< 19; + + nv_wr32(priv, 0x100850, 0x80000000); + nv_wr32(priv, 0x100818, priv->null); + nv_wr32(priv, 0x100804, NV44_GART_SIZE); + nv_wr32(priv, 0x100850, 0x00008000); + nv_mask(priv, 0x10008c, 0x00000200, 0x00000200); + nv_wr32(priv, 0x100820, 0x00000000); + nv_wr32(priv, 0x10082c, 0x00000001); + nv_wr32(priv, 0x100800, addr | 0x00000010); + return 0; +} + +struct nouveau_oclass +nv44_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x44), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv44_mmu_ctor, + .dtor = nv04_mmu_dtor, + .init = nv44_mmu_init, + .fini = _nouveau_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c new file mode 100644 index 000000000000..6ddc65dc684d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c @@ -0,0 +1,240 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/gpuobj.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/bar.h> +#include <subdev/mmu.h> + +struct nv50_mmu_priv { + struct nouveau_mmu base; +}; + +static void +nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]) +{ + u64 phys = 0xdeadcafe00000000ULL; + u32 coverage = 0; + + if (pgt[0]) { + phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */ + coverage = (pgt[0]->size >> 3) << 12; + } else + if (pgt[1]) { + phys = 0x00000001 | pgt[1]->addr; /* present */ + coverage = (pgt[1]->size >> 3) << 16; + } + + if (phys & 1) { + if (coverage <= 32 * 1024 * 1024) + phys |= 0x60; + else if (coverage <= 64 * 1024 * 1024) + phys |= 0x40; + else if (coverage <= 128 * 1024 * 1024) + phys |= 0x20; + } + + nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); + nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); +} + +static inline u64 +vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys |= 1; /* present */ + phys |= (u64)memtype << 40; + phys |= target << 4; + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= (1 << 6); + if (!(vma->access & NV_MEM_ACCESS_WO)) + phys |= (1 << 3); + return phys; +} + +static void +nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) +{ + u32 comp = (mem->memtype & 0x180) >> 7; + u32 block, target; + int i; + + /* IGPs don't have real VRAM, re-target to stolen system memory */ + target = 0; + if (nouveau_fb(vma->vm->mmu)->ram->stolen) { + phys += nouveau_fb(vma->vm->mmu)->ram->stolen; + target = 3; + } + + phys = vm_addr(vma, phys, mem->memtype, target); + pte <<= 3; + cnt <<= 3; + + while (cnt) { + u32 offset_h = upper_32_bits(phys); + u32 offset_l = lower_32_bits(phys); + + for (i = 7; i >= 0; i--) { + block = 1 << (i + 3); + if (cnt >= block && !(pte & (block - 1))) + break; + } + offset_l |= (i << 7); + + phys += block << (vma->node->type - 3); + cnt -= block; + if (comp) { + u32 tag = mem->tag->offset + ((delta >> 16) * comp); + offset_h |= (tag << 17); + delta += block << (vma->node->type - 3); + } + + while (block) { + nv_wo32(pgt, pte + 0, offset_l); + nv_wo32(pgt, pte + 4, offset_h); + pte += 8; + block -= 8; + } + } +} + +static void +nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; + pte <<= 3; + while (cnt--) { + u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +static void +nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +static void +nv50_vm_flush(struct nouveau_vm *vm) +{ + struct nv50_mmu_priv *priv = (void *)vm->mmu; + struct nouveau_bar *bar = nouveau_bar(priv); + struct nouveau_engine *engine; + int i, vme; + + bar->flush(bar); + + mutex_lock(&nv_subdev(priv)->mutex); + for (i = 0; i < NVDEV_SUBDEV_NR; i++) { + if (!atomic_read(&vm->engref[i])) + continue; + + /* unfortunate hw bug workaround... 
*/ + engine = nouveau_engine(priv, i); + if (engine && engine->tlb_flush) { + engine->tlb_flush(engine); + continue; + } + + switch (i) { + case NVDEV_ENGINE_GR : vme = 0x00; break; + case NVDEV_ENGINE_VP : vme = 0x01; break; + case NVDEV_SUBDEV_BAR : vme = 0x06; break; + case NVDEV_ENGINE_PPP : + case NVDEV_ENGINE_MPEG : vme = 0x08; break; + case NVDEV_ENGINE_BSP : vme = 0x09; break; + case NVDEV_ENGINE_CRYPT: vme = 0x0a; break; + case NVDEV_ENGINE_COPY0: vme = 0x0d; break; + default: + continue; + } + + nv_wr32(priv, 0x100c80, (vme << 16) | 1); + if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) + nv_error(priv, "vm flush timeout: engine %d\n", vme); + } + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +nv50_vm_create(struct nouveau_mmu *mmu, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **pvm) +{ + u32 block = (1 << (mmu->pgt_bits + 12)); + if (block > length) + block = length; + + return nouveau_vm_create(mmu, offset, length, mm_offset, block, pvm); +} + +static int +nv50_mmu_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_mmu_priv *priv; + int ret; + + ret = nouveau_mmu_create(parent, engine, oclass, "VM", "vm", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.limit = 1ULL << 40; + priv->base.dma_bits = 40; + priv->base.pgt_bits = 29 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 16; + priv->base.create = nv50_vm_create; + priv->base.map_pgt = nv50_vm_map_pgt; + priv->base.map = nv50_vm_map; + priv->base.map_sg = nv50_vm_map_sg; + priv->base.unmap = nv50_vm_unmap; + priv->base.flush = nv50_vm_flush; + return 0; +} + +struct nouveau_oclass +nv50_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_mmu_ctor, + .dtor = _nouveau_mmu_dtor, + .init = _nouveau_mmu_init, + .fini = _nouveau_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nvc0.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nvc0.c new file mode 100644 index 000000000000..bd695c59aac7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nvc0.c @@ -0,0 +1,241 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/gpuobj.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/mmu.h> +#include <subdev/ltc.h> +#include <subdev/bar.h> + +struct nvc0_mmu_priv { + struct nouveau_mmu base; +}; + + +/* Map from compressed to corresponding uncompressed storage type. + * The value 0xff represents an invalid storage type. + */ +const u8 nvc0_pte_storage_type_map[256] = +{ + 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */ + 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */ + 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */ + 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */ + 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27, + 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */ + 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */ + 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */ + 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, + 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */ + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3, + 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */ + 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */ + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */ + 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff +}; + + +static void +nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, + struct nouveau_gpuobj *pgt[2]) +{ + u32 pde[2] = { 0, 0 }; + + if (pgt[0]) + pde[1] = 0x00000001 | (pgt[0]->addr >> 8); + if (pgt[1]) + pde[0] = 0x00000001 | (pgt[1]->addr >> 8); + + nv_wo32(pgd, (index * 8) + 0, pde[0]); + nv_wo32(pgd, (index * 8) + 4, pde[1]); +} + +static inline u64 +nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys >>= 8; + + phys |= 0x00000001; /* present */ + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= 0x00000002; + + phys |= ((u64)target << 32); + phys |= ((u64)memtype << 36); + + return phys; +} + +static void +nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) +{ + u64 next = 1 << (vma->node->type - 8); + + phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); + pte <<= 3; + + if (mem->tag) { + struct nouveau_ltc *ltc = nouveau_ltc(vma->vm->mmu); + u32 tag = mem->tag->offset + (delta >> 17); + phys |= (u64)tag << (32 + 12); + next |= (u64)1 << (32 + 12); + ltc->tags_clear(ltc, tag, cnt); + } + + while (cnt--) { + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + phys += next; + pte += 8; + } +} + +static void +nvc0_vm_map_sg(struct nouveau_vma *vma, 
struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; + /* compressed storage types are invalid for system memory */ + u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff]; + + pte <<= 3; + while (cnt--) { + u64 phys = nvc0_vm_addr(vma, *list++, memtype, target); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +static void +nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +static void +nvc0_vm_flush(struct nouveau_vm *vm) +{ + struct nvc0_mmu_priv *priv = (void *)vm->mmu; + struct nouveau_bar *bar = nouveau_bar(priv); + struct nouveau_vm_pgd *vpgd; + u32 type; + + bar->flush(bar); + + type = 0x00000001; /* PAGE_ALL */ + if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR])) + type |= 0x00000004; /* HUB_ONLY */ + + mutex_lock(&nv_subdev(priv)->mutex); + list_for_each_entry(vpgd, &vm->pgd_list, head) { + /* looks like maybe a "free flush slots" counter, the + * faster you write to 0x100cbc to more it decreases + */ + if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) { + nv_error(priv, "vm timeout 0: 0x%08x %d\n", + nv_rd32(priv, 0x100c80), type); + } + + nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8); + nv_wr32(priv, 0x100cbc, 0x80000000 | type); + + /* wait for flush to be queued? */ + if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) { + nv_error(priv, "vm timeout 1: 0x%08x %d\n", + nv_rd32(priv, 0x100c80), type); + } + } + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +nvc0_vm_create(struct nouveau_mmu *mmu, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **pvm) +{ + return nouveau_vm_create(mmu, offset, length, mm_offset, 4096, pvm); +} + +static int +nvc0_mmu_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_mmu_priv *priv; + int ret; + + ret = nouveau_mmu_create(parent, engine, oclass, "VM", "vm", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.limit = 1ULL << 40; + priv->base.dma_bits = 40; + priv->base.pgt_bits = 27 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 17; + priv->base.create = nvc0_vm_create; + priv->base.map_pgt = nvc0_vm_map_pgt; + priv->base.map = nvc0_vm_map; + priv->base.map_sg = nvc0_vm_map_sg; + priv->base.unmap = nvc0_vm_unmap; + priv->base.flush = nvc0_vm_flush; + return 0; +} + +struct nouveau_oclass +nvc0_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_mmu_ctor, + .dtor = _nouveau_mmu_dtor, + .init = _nouveau_mmu_init, + .fini = _nouveau_mmu_fini, + }, +}; |
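For orientation, below is a minimal usage sketch (illustrative only, not part of this commit) of the address-space interface that base.c above exposes. The device, the backing nouveau_mem node and the NV_MEM_ACCESS_RW access flag are assumptions standing in for whatever a real caller (e.g. the buffer-object layer) would pass, and error handling is abbreviated:

#include <subdev/mmu.h>

static int
example_map_node(struct nouveau_device *device, struct nouveau_mem *node,
		 struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = NULL;
	int ret;

	/* create an address space covering the first 1GiB of VM space */
	ret = nouveau_vm_new(device, 0, 1ULL << 30, 0, &vm);
	if (ret)
		return ret;

	/* reserve room for the node, using 4KiB (small) pages;
	 * NV_MEM_ACCESS_RW is an assumed placeholder access flag
	 */
	ret = nouveau_vm_get(vm, (u64)node->size << 12, 12,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		goto done;

	/* write the PTEs for the node's backing pages and flush the TLBs */
	nouveau_vm_map(vma, node);

	/* ... the mapping is now usable at vma->offset ... */

	/* tear the mapping down again */
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
done:
	/* drop the creation reference taken by nouveau_vm_new() */
	nouveau_vm_ref(NULL, &vm, NULL);
	return ret;
}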