author    Jérôme Glisse <jglisse@redhat.com>    2017-08-08 22:21:33 -0400
committer Jérôme Glisse <jglisse@redhat.com>    2017-08-23 16:41:40 -0400
commit    5d4b6402c3d8ac28abf9260ca0678009b9833381 (patch)
tree      5150699697346cca8d3f31c585f218206356d7bb
parent    18b15be647a05056e4c5f239e67ec00a208afe42 (diff)
drm/nouveau/core/mm: allow creating a bo vma at a fixed offset inside a vm

This allows creating a bo vma (nvkm_vma) at a fixed offset inside a vm,
which is useful when we want to force the same virtual address on the CPU
and the GPU.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
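For illustration only, a minimal sketch of how a caller might use the new
helper to mirror a CPU address on the GPU; nvbo, vm, cpu_addr and size are
assumed to come from surrounding driver code and are not part of this patch:

	struct nvkm_vma vma = {};
	u64 gpu_addr = cpu_addr; /* force GPU VA == CPU VA */
	int ret;

	/* Reserve [gpu_addr, gpu_addr + size) in vm and bind nvbo there;
	 * fails with -ENOSPC if the range is not entirely free. */
	ret = nouveau_bo_vma_add_fix(nvbo, vm, &vma, gpu_addr, size);
	if (ret)
		return ret;

	/* ... use the mapping ... */

	nouveau_bo_vma_del(nvbo, &vma);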
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/core/mm.h    |  3
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h |  2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c              | 24
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.h              |  4
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/core/mm.c            | 54
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c    | 50
6 files changed, 137 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index a1f57ffb4163..edaad5265d52 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -34,6 +34,9 @@ int nvkm_mm_init(struct nvkm_mm *, u64 offset, u64 length, u64 block);
int nvkm_mm_fini(struct nvkm_mm *);
int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u64 size_max,
u64 size_min, u64 align, struct nvkm_mm_node **);
+int nvkm_mm_fix(struct nvkm_mm *mm,
+ u8 type, u64 offset, u64 size,
+ struct nvkm_mm_node **pnode);
int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u64 size_max,
u64 size_min, u64 align, struct nvkm_mm_node **);
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 126e2da3fe22..73e7893ea20b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -46,6 +46,8 @@ int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *inst);
int nvkm_vm_boot(struct nvkm_vm *, u64 size);
int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *);
+int nvkm_vm_get_fix(struct nvkm_vm *vm, u64 offset, u64 size, u32 page_shift,
+ u32 access, struct nvkm_vma *vma);
void nvkm_vm_put(struct nvkm_vma *);
void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e427f80344c4..3009045cdf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1611,6 +1611,30 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
return 0;
}
+int
+nouveau_bo_vma_add_fix(struct nouveau_bo *nvbo,
+ struct nvkm_vm *vm,
+ struct nvkm_vma *vma,
+ u64 offset, u64 size)
+{
+ int ret;
+
+ ret = nvkm_vm_get_fix(vm, offset, size, nvbo->page_shift,
+ NV_MEM_ACCESS_RW, vma);
+ if (ret)
+ return ret;
+
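+ /* Map now if the bo is in VRAM, or in GART with small pages. */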
+ if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->mmu->lpg_shift))
+ nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
+
+ list_add_tail(&vma->head, &nvbo->vma_list);
+ vma->refcount = 1;
+ return 0;
+}
+
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index b06a5385d6dd..b46f3060b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -94,6 +94,10 @@ nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
int nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
struct nvkm_vma *);
+int nouveau_bo_vma_add_fix(struct nouveau_bo *nvbo,
+ struct nvkm_vm *vm,
+ struct nvkm_vma *vma,
+ u64 offset, u64 size);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
/* TODO: submit equivalent to TTM generic API upstream? */
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 02c6cff8f665..a38630804139 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -157,6 +157,60 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u64 size_max, u64 size_min,
return -ENOSPC;
}
+int
+nvkm_mm_fix(struct nvkm_mm *mm,
+ u8 type, u64 offset, u64 size,
+ struct nvkm_mm_node **pnode)
+{
+ struct nvkm_mm_node *prev, *this, *next;
+ u64 splitoff;
+ u64 s, e;
+
+ /* Catch overflow */
+ if ((offset + size) < offset)
+ return -ENOSPC;
+
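+ /* Walk the ordered free list for a node covering [offset, offset + size). */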
+ list_for_each_entry(this, &mm->free, fl_entry) {
+ s = this->offset;
+ e = this->offset + this->length;
+
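+ /* A boundary shared with a node of a different type must stay block aligned. */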
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, mm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type)
+ e = rounddown(e, mm->block_size);
+
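+ /* No overlap with this free node; try the next one. */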
+ if (offset >= e || (offset + size) <= s)
+ continue;
+
+ /* Range only partially covered by this free node */
+ if (offset < s || (offset + size) > e)
+ return -ENOSPC;
+
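+ /* Split off the head so the new node starts exactly at offset, then carve out size. */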
+ splitoff = offset - this->offset;
+ if (splitoff && !region_head(mm, this, splitoff))
+ return -ENOMEM;
+
+ this = region_head(mm, this, size);
+ if (!this)
+ return -ENOMEM;
+
+ this->next = NULL;
+ this->type = type;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
static struct nvkm_mm_node *
region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u64 size)
{
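To make the new allocator entry point concrete, a hedged example with invented
values; offsets and sizes are in the mm's native units (4 KiB pages when
reached through nvkm_vm_get_fix below):

	struct nvkm_mm_node *node;
	int ret;

	/* Suppose the free list holds a single node covering [0x20, 0x80). */
	ret = nvkm_mm_fix(mm, /* type */ 12, /* offset */ 0x30, /* size */ 0x20, &node);

	/* On success, node->offset == 0x30 and node->length == 0x20, and the
	 * free list keeps the two remainders [0x20, 0x30) and [0x50, 0x80). */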
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 4a94f58e7f23..4ee75b9bcd94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,6 +316,56 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
return 0;
}
+int
+nvkm_vm_get_fix(struct nvkm_vm *vm, u64 offset, u64 size, u32 page_shift,
+ u32 access, struct nvkm_vma *vma)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
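+ /* The vm's allocator tracks space in 4 KiB units. */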
+ u64 moffset = offset >> 12;
+ u64 msize = size >> 12;
+ u32 fpde, lpde, pde;
+ int ret;
+
+ mutex_lock(&vm->mutex);
+ ret = nvkm_mm_fix(&vm->mm, page_shift, moffset, msize, &vma->node);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&vm->mutex);
+ return ret;
+ }
+
+ fpde = (vma->node->offset >> mmu->func->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+
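+ /* Reference each page table covering the range, allocating any that are missing. */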
+ for (pde = fpde; pde <= lpde; pde++) {
+ struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ int big = (vma->node->type != mmu->func->spg_shift);
+
+ if (likely(vpgt->refcount[big])) {
+ vpgt->refcount[big]++;
+ continue;
+ }
+
+ ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
+ if (ret) {
+ if (pde != fpde)
+ nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
+ nvkm_mm_free(&vm->mm, &vma->node);
+ mutex_unlock(&vm->mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&vm->mutex);
+
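+ /* Publish the vma: take a vm reference, record byte offset and access flags. */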
+ vma->vm = NULL;
+ nvkm_vm_ref(vm, &vma->vm, NULL);
+ vma->offset = (u64)vma->node->offset << 12;
+ vma->access = access;
+ return 0;
+}
+
void
nvkm_vm_put(struct nvkm_vma *vma)
{
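Finally, a hedged sketch of driving the vm-level primitive directly; vm, mem,
offset and size are assumed, and page_shift 12 (the usual small-page shift)
selects small pages:

	struct nvkm_vma vma = {};
	int ret;

	ret = nvkm_vm_get_fix(vm, offset, size, 12, NV_MEM_ACCESS_RW, &vma);
	if (ret)
		return ret;

	nvkm_vm_map(&vma, mem); /* mem: an nvkm_mem backing the range */
	/* ... */
	nvkm_vm_unmap(&vma);
	nvkm_vm_put(&vma);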