author     Ralph Campbell <rcampbell@nvidia.com>   2020-03-03 16:13:39 -0800
committer  Ben Skeggs <bskeggs@redhat.com>         2020-05-22 11:13:49 +1000
commit     e3d8b08904694e9ccae5163d0bb7d35fa66e5bdc (patch)
tree       8f7d8263200f78977c57bd1bf3f32c1123bfc6ca /drivers/gpu/drm/nouveau/nouveau_svm.c
parent     9c1c08a68dcdea3a933948b4e7927bdbc56aac4c (diff)
drm/nouveau/svm: map pages after migration
When memory is migrated to the GPU, it is likely to be accessed by GPU
code soon afterwards. Instead of waiting for a GPU fault, map the
migrated memory into the GPU page tables with the same access permissions
as the source CPU page table entries. This preserves copy-on-write
semantics.
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
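
The copy-on-write claim above comes down to how each entry of the pfn array
passed to the new PFNMAP method encodes the source CPU PTE's permissions.
Below is a minimal sketch of that encoding, assuming the NVIF_VMM_PFNMAP_V0_*
flag definitions from nvif/if000c.h; the helper name nouveau_encode_pfn() is
hypothetical (upstream, the array is filled inline in nouveau_dmem.c during
migration):

#include <linux/mm.h>		/* PAGE_SHIFT */
#include <nvif/if000c.h>	/* NVIF_VMM_PFNMAP_V0_* pfn-entry flags */

/*
 * Hypothetical helper: encode one migrated page for nouveau_pfns_map().
 * "paddr" is the page's address in VRAM; "writable" mirrors the source
 * CPU PTE's write permission. Leaving NVIF_VMM_PFNMAP_V0_W clear for a
 * read-only (COW-shared) page keeps the GPU mapping read-only too, so
 * a GPU write still faults and takes the normal copy-on-write path.
 */
static u64 nouveau_encode_pfn(u64 paddr, bool writable)
{
	u64 pfn = NVIF_VMM_PFNMAP_V0_V |	/* entry is valid */
		  NVIF_VMM_PFNMAP_V0_VRAM |	/* backed by device memory */
		  ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);

	if (writable)
		pfn |= NVIF_VMM_PFNMAP_V0_W;
	return pfn;
}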
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_svm.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c  59
1 file changed, 58 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 645fedd77e21..fe89abf237a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -70,6 +70,12 @@ struct nouveau_svm {
 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
 
+struct nouveau_pfnmap_args {
+	struct nvif_ioctl_v0 i;
+	struct nvif_ioctl_mthd_v0 m;
+	struct nvif_vmm_pfnmap_v0 p;
+};
+
 struct nouveau_ivmm {
 	struct nouveau_svmm *svmm;
 	u64 inst;
@@ -187,7 +193,8 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
 		addr = max(addr, vma->vm_start);
 		next = min(vma->vm_end, end);
 		/* This is a best effort so we ignore errors */
-		nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
+					 next);
 		addr = next;
 	}
 
@@ -784,6 +791,56 @@ nouveau_svm_fault(struct nvif_notify *notify)
 	return NVIF_NOTIFY_KEEP;
 }
 
+static struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+	return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
+}
+
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+	struct nouveau_pfnmap_args *args;
+
+	args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
+	if (!args)
+		return NULL;
+
+	args->i.type = NVIF_IOCTL_V0_MTHD;
+	args->m.method = NVIF_VMM_V0_PFNMAP;
+	args->p.page = PAGE_SHIFT;
+
+	return args->p.phys;
+}
+
+void
+nouveau_pfns_free(u64 *pfns)
+{
+	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+	kfree(args);
+}
+
+void
+nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+		 unsigned long addr, u64 *pfns, unsigned long npages)
+{
+	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+	int ret;
+
+	args->p.addr = addr;
+	args->p.size = npages << PAGE_SHIFT;
+
+	mutex_lock(&svmm->mutex);
+
+	svmm->vmm->vmm.object.client->super = true;
+	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+				npages * sizeof(args->p.phys[0]), NULL);
+	svmm->vmm->vmm.object.client->super = false;
+
+	mutex_unlock(&svmm->mutex);
+}
+
 static void
 nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
 {
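
Taken together, the three new helpers follow an alloc/fill/map/free pattern.
Here is a minimal usage sketch: the caller example_map_after_migration() is
hypothetical, vram_addrs and writable stand in for per-page results the real
migration loop in nouveau_dmem.c already has on hand, nouveau_encode_pfn() is
the hypothetical helper from the earlier sketch, and the header holding the
new declarations is assumed:

#include <linux/mm.h>
#include "nouveau_svm.h"	/* nouveau_pfns_alloc/map/free (assumed header) */

static void example_map_after_migration(struct nouveau_svmm *svmm,
					struct mm_struct *mm,
					unsigned long addr,
					const u64 *vram_addrs,
					const bool *writable,
					unsigned long npages)
{
	unsigned long i;
	u64 *pfns;

	/* Allocates the pfn array embedded in preformatted ioctl args. */
	pfns = nouveau_pfns_alloc(npages);
	if (!pfns)
		return;	/* non-fatal: the GPU can still fault pages in later */

	for (i = 0; i < npages; i++)
		pfns[i] = nouveau_encode_pfn(vram_addrs[i], writable[i]);

	/* Best effort: errors are ignored, matching the bind path above. */
	nouveau_pfns_map(svmm, mm, addr, pfns, npages);
	nouveau_pfns_free(pfns);
}

Failure at any step is deliberately non-fatal: the eager mapping is an
optimization, and if it is skipped the GPU simply takes a fault on first
access, which is exactly the pre-patch behavior.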