path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	1014
1 file changed, 780 insertions, 234 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bd0d33125c18..0235d7933efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -57,6 +57,8 @@
struct amdgpu_pte_update_params {
/* amdgpu device we do this update for */
struct amdgpu_device *adev;
+ /* optional amdgpu_vm we do this update for */
+ struct amdgpu_vm *vm;
/* address where to copy page table entries from */
uint64_t src;
/* indirect buffer to fill with commands */
@@ -64,33 +66,49 @@ struct amdgpu_pte_update_params {
/* Function which actually does the update */
void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
- uint32_t flags);
+ uint64_t flags);
/* indicate update pt or its shadow */
bool shadow;
};
+/* Helper to disable the partially resident texture feature from a fence callback */
+struct amdgpu_prt_cb {
+ struct amdgpu_device *adev;
+ struct dma_fence_cb cb;
+};
+
/**
- * amdgpu_vm_num_pde - return the number of page directory entries
+ * amdgpu_vm_num_entries - return the number of entries in a PD/PT
*
* @adev: amdgpu_device pointer
+ * @level: level in the page table hierarchy
*
- * Calculate the number of page directory entries.
+ * Calculate the number of entries in a page directory or page table.
*/
-static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
+static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
+ unsigned level)
{
- return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
+ if (level == 0)
+ /* For the root directory */
+ return adev->vm_manager.max_pfn >>
+ (amdgpu_vm_block_size * adev->vm_manager.num_level);
+ else if (level == adev->vm_manager.num_level)
+ /* For the page tables on the leaves */
+ return AMDGPU_VM_PTE_COUNT;
+ else
+ /* Everything in between */
+ return 1 << amdgpu_vm_block_size;
}
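
The address-bit split behind these counts is easy to check in isolation. A minimal userspace model, with example values for max_pfn, the block size and the level count (none of them taken from the driver):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for adev->vm_manager: max_pfn, the per-level
 * block size and the number of levels below the root. */
struct vm_cfg {
	uint64_t max_pfn;
	unsigned block_size;
	unsigned num_level;
};

/* Mirrors the logic above: the root covers whatever max_pfn leaves
 * over once every lower level has consumed block_size bits. */
static unsigned num_entries(const struct vm_cfg *c, unsigned level)
{
	if (level == 0)
		return c->max_pfn >> (c->block_size * c->num_level);
	return 1u << c->block_size;	/* interior levels and leaf PTs */
}

int main(void)
{
	struct vm_cfg c = { 1ull << 27, 9, 2 };	/* example values only */
	unsigned level;

	for (level = 0; level <= c.num_level; level++)
		printf("level %u: %u entries\n", level, num_entries(&c, level));
	return 0;
}
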
/**
- * amdgpu_vm_directory_size - returns the size of the page directory in bytes
+ * amdgpu_vm_bo_size - returns the size of the BOs in bytes
*
* @adev: amdgpu_device pointer
+ * @level: level in the page table hierarchy
*
- * Calculate the size of the page directory in bytes.
+ * Calculate the size of the BO for a page directory or page table in bytes.
*/
-static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
+static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
- return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
+ return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
/**
@@ -107,15 +125,56 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
- entry->robj = vm->page_directory;
+ entry->robj = vm->root.bo;
entry->priority = 0;
- entry->tv.bo = &vm->page_directory->tbo;
+ entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
/**
+ * amdgpu_vm_validate_level - validate a single page table level
+ *
+ * @parent: parent page table level
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
+ *
+ * Validate the page table BOs on command submission if necessary.
+ */
+static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
+ int (*validate)(void *, struct amdgpu_bo *),
+ void *param)
+{
+ unsigned i;
+ int r;
+
+ if (!parent->entries)
+ return 0;
+
+ for (i = 0; i <= parent->last_entry_used; ++i) {
+ struct amdgpu_vm_pt *entry = &parent->entries[i];
+
+ if (!entry->bo)
+ continue;
+
+ r = validate(param, entry->bo);
+ if (r)
+ return r;
+
+ /*
+ * Recurse into the sub directory. This is harmless because we
+ * have only a maximum of 5 layers.
+ */
+ r = amdgpu_vm_validate_level(entry, validate, param);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
@@ -130,8 +189,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t num_evictions;
- unsigned i;
- int r;
/* We only need to validate the page tables
* if they aren't already valid.
@@ -140,19 +197,33 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (num_evictions == vm->last_eviction_counter)
return 0;
- /* add the vm page table to the list */
- for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo *bo = vm->page_tables[i].bo;
+ return amdgpu_vm_validate_level(&vm->root, validate, param);
+}
- if (!bo)
+/**
+ * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
+ *
+ * @parent: parent level providing the BOs
+ *
+ * Move the PT BOs to the tail of the LRU.
+ */
+static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
+{
+ unsigned i;
+
+ if (!parent->entries)
+ return;
+
+ for (i = 0; i <= parent->last_entry_used; ++i) {
+ struct amdgpu_vm_pt *entry = &parent->entries[i];
+
+ if (!entry->bo)
continue;
- r = validate(param, bo);
- if (r)
- return r;
+ ttm_bo_move_to_lru_tail(&entry->bo->tbo);
+ amdgpu_vm_move_level_in_lru(entry);
}
-
- return 0;
}
/**
@@ -167,18 +238,131 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct ttm_bo_global *glob = adev->mman.bdev.glob;
- unsigned i;
spin_lock(&glob->lru_lock);
- for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo *bo = vm->page_tables[i].bo;
+ amdgpu_vm_move_level_in_lru(&vm->root);
+ spin_unlock(&glob->lru_lock);
+}
- if (!bo)
- continue;
+ /**
+ * amdgpu_vm_alloc_levels - allocate the PD/PT levels
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @parent: parent PT/PD to allocate the entries for
+ * @saddr: start of the address range
+ * @eaddr: end of the address range
+ * @level: level of @parent in the page table hierarchy
+ *
+ * Make sure the page directories and page tables are allocated.
+ */
+static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ uint64_t saddr, uint64_t eaddr,
+ unsigned level)
+{
+ unsigned shift = (adev->vm_manager.num_level - level) *
+ amdgpu_vm_block_size;
+ unsigned pt_idx, from, to;
+ int r;
+
+ if (!parent->entries) {
+ unsigned num_entries = amdgpu_vm_num_entries(adev, level);
- ttm_bo_move_to_lru_tail(&bo->tbo);
+ parent->entries = drm_calloc_large(num_entries,
+ sizeof(struct amdgpu_vm_pt));
+ if (!parent->entries)
+ return -ENOMEM;
}
- spin_unlock(&glob->lru_lock);
+
+ from = saddr >> shift;
+ to = eaddr >> shift;
+ if (from >= amdgpu_vm_num_entries(adev, level) ||
+ to >= amdgpu_vm_num_entries(adev, level))
+ return -EINVAL;
+
+ if (to > parent->last_entry_used)
+ parent->last_entry_used = to;
+
+ ++level;
+ saddr = saddr & ((1ULL << shift) - 1);
+ eaddr = eaddr & ((1ULL << shift) - 1);
+
+ /* walk over the address space and allocate the page tables */
+ for (pt_idx = from; pt_idx <= to; ++pt_idx) {
+ struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+ struct amdgpu_bo *pt;
+
+ if (!entry->bo) {
+ r = amdgpu_bo_create(adev,
+ amdgpu_vm_bo_size(adev, level),
+ AMDGPU_GPU_PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED,
+ NULL, resv, &pt);
+ if (r)
+ return r;
+
+ /* Keep a reference to the root directory to avoid
+ * freeing the page tables in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(vm->root.bo);
+
+ entry->bo = pt;
+ entry->addr = 0;
+ }
+
+ if (level < adev->vm_manager.num_level) {
+ uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
+ uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
+ ((1ULL << shift) - 1);
+ r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
+ sub_eaddr, level);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
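
The sub-range narrowing done before the recursive call can likewise be tried on plain numbers. A sketch of just that arithmetic, with an assumed 9-bit shift:

#include <stdio.h>
#include <stdint.h>

/* Model of the narrowing above: the first child starts at the trimmed
 * saddr, the last ends at the trimmed eaddr, and every child in
 * between spans its full (1 << shift) entries. */
static void split_range(uint64_t saddr, uint64_t eaddr, unsigned shift)
{
	uint64_t from = saddr >> shift, to = eaddr >> shift, idx;

	saddr &= (1ull << shift) - 1;
	eaddr &= (1ull << shift) - 1;

	for (idx = from; idx <= to; idx++) {
		uint64_t sub_s = (idx == from) ? saddr : 0;
		uint64_t sub_e = (idx == to) ? eaddr : (1ull << shift) - 1;

		printf("child %llu: [0x%llx, 0x%llx]\n",
		       (unsigned long long)idx,
		       (unsigned long long)sub_s,
		       (unsigned long long)sub_e);
	}
}

int main(void)
{
	split_range(0x1f0, 0x410, 9);	/* crosses three 512-entry children */
	return 0;
}
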
+
+/**
+ * amdgpu_vm_alloc_pts - Allocate page tables.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM to allocate page tables for
+ * @saddr: Start address which needs to be allocated
+ * @size: Size from start address we need.
+ *
+ * Make sure the page tables are allocated.
+ */
+int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t saddr, uint64_t size)
+{
+ uint64_t last_pfn;
+ uint64_t eaddr;
+
+ /* validate the parameters */
+ if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ eaddr = saddr + size - 1;
+ last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
+ if (last_pfn >= adev->vm_manager.max_pfn) {
+ dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
+ last_pfn, adev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr /= AMDGPU_GPU_PAGE_SIZE;
+
+ return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
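
A small model of the parameter checks above, assuming a 4 KiB GPU page size (an assumption for illustration; the real constants come from the amdgpu headers):

#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SIZE 4096ull		/* assumed, for illustration */
#define GPU_PAGE_MASK (GPU_PAGE_SIZE - 1)

/* Mirrors the checks in amdgpu_vm_alloc_pts: both ends must be page
 * aligned and the last pfn must fall inside the VM aperture. */
static int check_range(uint64_t saddr, uint64_t size, uint64_t max_pfn)
{
	uint64_t last_pfn;

	if ((saddr & GPU_PAGE_MASK) || (size & GPU_PAGE_MASK))
		return -1;

	last_pfn = (saddr + size - 1) / GPU_PAGE_SIZE;
	return last_pfn >= max_pfn ? -1 : 0;
}

int main(void)
{
	printf("%d\n", check_range(0x100000, 0x200000, 1ull << 27)); /* ok */
	printf("%d\n", check_range(0x100800, 0x1234, 1ull << 27));   /* bad */
	return 0;
}
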
static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
@@ -369,6 +553,16 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
return false;
}
+static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
+{
+ u64 addr = mc_addr;
+
+ if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr)
+ addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr);
+
+ return addr;
+}
+
/**
* amdgpu_vm_flush - hardware flush the vm
*
@@ -391,41 +585,59 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
id->oa_size != job->oa_size);
int r;
- if (ring->funcs->emit_pipeline_sync && (
- job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)))
- amdgpu_ring_emit_pipeline_sync(ring);
+ if (job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_is_gpu_reset(adev, id) ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)) {
+ unsigned patch_offset = 0;
- if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
- amdgpu_vm_is_gpu_reset(adev, id))) {
- struct dma_fence *fence;
+ if (ring->funcs->init_cond_exec)
+ patch_offset = amdgpu_ring_init_cond_exec(ring);
- trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+ if (ring->funcs->emit_pipeline_sync &&
+ (job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)))
+ amdgpu_ring_emit_pipeline_sync(ring);
- r = amdgpu_fence_emit(ring, &fence);
- if (r)
- return r;
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
+ struct dma_fence *fence;
+ u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
- mutex_lock(&adev->vm_manager.lock);
- dma_fence_put(id->last_flush);
- id->last_flush = fence;
- mutex_unlock(&adev->vm_manager.lock);
- }
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
- if (gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
- }
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
+
+ mutex_lock(&adev->vm_manager.lock);
+ dma_fence_put(id->last_flush);
+ id->last_flush = fence;
+ mutex_unlock(&adev->vm_manager.lock);
+ }
+
+ if (gds_switch_needed) {
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
+ }
+
+ if (ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+ if (ring->funcs->emit_switch_buffer) {
+ amdgpu_ring_emit_switch_buffer(ring);
+ amdgpu_ring_emit_switch_buffer(ring);
+ }
+ }
return 0;
}
@@ -490,7 +702,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
- uint32_t flags)
+ uint64_t flags)
{
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
@@ -519,7 +731,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
- uint32_t flags)
+ uint64_t flags)
{
uint64_t src = (params->src + (addr >> 12) * 8);
@@ -554,24 +766,24 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
}
/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
+ * amdgpu_vm_update_level - update a single level in the hierarchy
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
+ * @parent: parent directory
+ * @level: level of @parent in the page table hierarchy
*
- * Allocates new page tables if necessary
- * and updates the page directory.
+ * Makes sure all entries in @parent are up to date.
* Returns 0 for success, error for failure.
*/
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+static int amdgpu_vm_update_level(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
uint64_t pd_addr, shadow_addr;
- uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
+ uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
@@ -580,16 +792,19 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
int r;
+ if (!parent->entries)
+ return 0;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 6;
+ ndw += parent->last_entry_used * 6;
+
+ pd_addr = amdgpu_bo_gpu_offset(parent->bo);
- pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+ shadow = parent->bo->shadow;
if (shadow) {
r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
if (r)
@@ -608,9 +823,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
params.adev = adev;
params.ib = &job->ibs[0];
- /* walk over the address space and update the page directory */
- for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
+ /* walk over the address space and update the directory */
+ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+ struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
@@ -626,10 +841,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
pt = amdgpu_bo_gpu_offset(bo);
- if (vm->page_tables[pt_idx].addr == pt)
+ if (parent->entries[pt_idx].addr == pt)
continue;
- vm->page_tables[pt_idx].addr = pt;
+ parent->entries[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -637,15 +852,18 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
+ uint64_t pt_addr =
+ amdgpu_vm_adjust_mc_addr(adev, last_pt);
+
if (shadow)
amdgpu_vm_do_set_ptes(&params,
last_shadow,
- last_pt, count,
+ pt_addr, count,
incr,
AMDGPU_PTE_VALID);
amdgpu_vm_do_set_ptes(&params, last_pde,
- last_pt, count, incr,
+ pt_addr, count, incr,
AMDGPU_PTE_VALID);
}
@@ -659,36 +877,51 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count) {
- if (vm->page_directory->shadow)
- amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+ uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
+
+ if (vm->root.bo->shadow)
+ amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
count, incr, AMDGPU_PTE_VALID);
- amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+ amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
count, incr, AMDGPU_PTE_VALID);
}
if (params.ib->length_dw == 0) {
amdgpu_job_free(job);
- return 0;
- }
-
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+ } else {
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
+
+ amdgpu_bo_fence(parent->bo, fence, true);
+ dma_fence_put(vm->last_dir_update);
+ vm->last_dir_update = dma_fence_get(fence);
+ dma_fence_put(fence);
+ }
+ /*
+ * Recurse into the subdirectories. This recursion is harmless because
+ * we only have a maximum of 5 layers.
+ */
+ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- amdgpu_bo_fence(vm->page_directory, fence, true);
- dma_fence_put(vm->page_directory_fence);
- vm->page_directory_fence = dma_fence_get(fence);
- dma_fence_put(fence);
+ if (!entry->bo)
+ continue;
+
+ r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
+ if (r)
+ return r;
+ }
return 0;
@@ -697,6 +930,47 @@ error_free:
return r;
}
+/**
+ * amdgpu_vm_update_directories - make sure that all directories are valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Makes sure all directories are up to date.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
+}
+
+/**
+ * amdgpu_vm_get_pt - find the page table for an address
+ *
+ * @p: see amdgpu_pte_update_params definition
+ * @addr: virtual address in question
+ *
+ * Find the page table BO for a virtual address, return NULL when none found.
+ */
+static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
+ uint64_t addr)
+{
+ struct amdgpu_vm_pt *entry = &p->vm->root;
+ unsigned idx, level = p->adev->vm_manager.num_level;
+
+ while (entry->entries) {
+ idx = addr >> (amdgpu_vm_block_size * level--);
+ idx %= amdgpu_bo_size(entry->bo) / 8;
+ entry = &entry->entries[idx];
+ }
+
+ if (level)
+ return NULL;
+
+ return entry->bo;
+}
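
The walk can be reproduced with a toy tree. The struct below only models the entries/level bookkeeping, not the driver's amdgpu_vm_pt:

#include <stdio.h>
#include <stdint.h>

struct node {
	struct node *entries;	/* NULL once we reach a leaf PT */
	unsigned num_entries;
};

/* Same shape as amdgpu_vm_get_pt: peel block_size bits per level while
 * sub-entries exist; a non-zero level left over means the leaf PT for
 * this address was never allocated. */
static struct node *walk(struct node *root, uint64_t addr,
			 unsigned block_size, unsigned num_level)
{
	struct node *entry = root;
	unsigned level = num_level;

	while (entry->entries) {
		unsigned idx = (addr >> (block_size * level--)) %
			       entry->num_entries;
		entry = &entry->entries[idx];
	}

	return level ? NULL : entry;
}

int main(void)
{
	struct node leaves[2] = { { NULL, 0 }, { NULL, 0 } };
	struct node root = { leaves, 2 };

	/* one-level example: block_size 1, num_level 1 */
	printf("%s\n", walk(&root, 1, 1, 1) ? "found" : "missing");
	return 0;
}
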
+
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
@@ -710,23 +984,25 @@ error_free:
* Update the page tables in the range @start - @end.
*/
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+ uint64_t dst, uint64_t flags)
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
- uint64_t pt_idx;
struct amdgpu_bo *pt;
unsigned nptes; /* next number of ptes to be updated */
uint64_t next_pe_start;
/* initialize the variables */
addr = start;
- pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].bo;
+ pt = amdgpu_vm_get_pt(params, addr);
+ if (!pt) {
+ pr_err("PT not found, aborting update_ptes\n");
+ return;
+ }
+
if (params->shadow) {
if (!pt->shadow)
return;
@@ -748,8 +1024,12 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
- pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].bo;
+ pt = amdgpu_vm_get_pt(params, addr);
+ if (!pt) {
+ pr_err("PT not found, aborting update_ptes\n");
+ return;
+ }
+
if (params->shadow) {
if (!pt->shadow)
return;
@@ -800,9 +1080,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
* @flags: hw mapping flags
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+ uint64_t dst, uint64_t flags)
{
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
@@ -834,25 +1113,25 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
if (params->src || !(flags & AMDGPU_PTE_VALID) ||
(frag_start >= frag_end)) {
- amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
+ amdgpu_vm_update_ptes(params, start, end, dst, flags);
return;
}
/* handle the 4K area at the beginning */
if (start != frag_start) {
- amdgpu_vm_update_ptes(params, vm, start, frag_start,
+ amdgpu_vm_update_ptes(params, start, frag_start,
dst, flags);
dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
}
/* handle the area in the middle */
- amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+ amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != end) {
dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
- amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
+ amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
}
}
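
The head/middle/tail split is plain alignment arithmetic. A sketch assuming a fragment of 2^4 pages (the driver derives the real fragment size elsewhere):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Model of the fragment split above: the aligned middle part gets the
 * fragment flags, the unaligned head and tail stay plain 4K updates. */
static void frag_split(uint64_t start, uint64_t end)
{
	const uint64_t frag_align = 1ull << 4;	/* assumed fragment size */
	uint64_t frag_start = ALIGN_UP(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	if (frag_start >= frag_end) {
		printf("no fragment: [%llu, %llu) plain\n",
		       (unsigned long long)start, (unsigned long long)end);
		return;
	}
	if (start != frag_start)
		printf("head: [%llu, %llu) plain\n",
		       (unsigned long long)start,
		       (unsigned long long)frag_start);
	printf("mid:  [%llu, %llu) fragment flags\n",
	       (unsigned long long)frag_start, (unsigned long long)frag_end);
	if (frag_end != end)
		printf("tail: [%llu, %llu) plain\n",
		       (unsigned long long)frag_end, (unsigned long long)end);
}

int main(void)
{
	frag_split(5, 100);	/* head [5,16), mid [16,96), tail [96,100) */
	return 0;
}
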
@@ -879,7 +1158,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
- uint32_t flags, uint64_t addr,
+ uint64_t flags, uint64_t addr,
struct dma_fence **fence)
{
struct amdgpu_ring *ring;
@@ -892,14 +1171,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
+ params.vm = vm;
params.src = src;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- params.src = src;
-
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_UNDEFINED;
@@ -967,19 +1243,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
owner);
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
+ r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
if (r)
goto error_free;
params.shadow = true;
- amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+ amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
params.shadow = false;
- amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+ amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
amdgpu_ring_pad_ib(ring, params.ib);
WARN_ON(params.ib->length_dw > ndw);
@@ -988,12 +1264,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(vm->page_directory, f, true);
- if (fence) {
- dma_fence_put(*fence);
- *fence = dma_fence_get(f);
- }
- dma_fence_put(f);
+ amdgpu_bo_fence(vm->root.bo, f, true);
+ dma_fence_put(*fence);
+ *fence = f;
return 0;
error_free:
@@ -1020,11 +1293,11 @@ error_free:
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint32_t gtt_flags,
+ uint64_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags,
+ uint64_t flags,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
@@ -1039,6 +1312,12 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
+ flags &= ~AMDGPU_PTE_EXECUTABLE;
+ flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ flags &= ~AMDGPU_PTE_MTYPE_MASK;
+ flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
+
trace_amdgpu_vm_bo_update(mapping);
pfn = mapping->offset >> PAGE_SHIFT;
@@ -1111,13 +1390,13 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
- uint32_t gtt_flags, flags;
+ uint64_t gtt_flags, flags;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
struct dma_fence *exclusive;
int r;
- if (clear) {
+ if (clear || !bo_va->bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
@@ -1134,9 +1413,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
- flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
+ if (bo_va->bo) {
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
+ gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+ adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+ flags : 0;
+ } else {
+ flags = 0x0;
+ gtt_flags = ~0x0;
+ }
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1171,10 +1456,142 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_update_prt_state - update the global PRT state
+ */
+static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
+{
+ unsigned long flags;
+ bool enable;
+
+ spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
+ enable = !!atomic_read(&adev->vm_manager.num_prt_users);
+ adev->gart.gart_funcs->set_prt(adev, enable);
+ spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
+}
+
+/**
+ * amdgpu_vm_prt_get - add a PRT user
+ */
+static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
+{
+ if (!adev->gart.gart_funcs->set_prt)
+ return;
+
+ if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
+ amdgpu_vm_update_prt_state(adev);
+}
+
+/**
+ * amdgpu_vm_prt_put - drop a PRT user
+ */
+static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
+{
+ if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
+ amdgpu_vm_update_prt_state(adev);
+}
+
+/**
+ * amdgpu_vm_prt_cb - callback for updating the PRT status
+ */
+static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
+{
+ struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
+
+ amdgpu_vm_prt_put(cb->adev);
+ kfree(cb);
+}
+
+/**
+ * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ */
+static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
+ struct dma_fence *fence)
+{
+ struct amdgpu_prt_cb *cb;
+
+ if (!adev->gart.gart_funcs->set_prt)
+ return;
+
+ cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
+ if (!cb) {
+ /* Last resort when we are OOM */
+ if (fence)
+ dma_fence_wait(fence, false);
+
+ amdgpu_vm_prt_put(adev);
+ } else {
+ cb->adev = adev;
+ if (!fence || dma_fence_add_callback(fence, &cb->cb,
+ amdgpu_vm_prt_cb))
+ amdgpu_vm_prt_cb(fence, &cb->cb);
+ }
+}
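
Stripped of the fence plumbing, the PRT bookkeeping is an enable-while-users-exist refcount; in the driver the put side runs from the fence callback above. A toy model:

#include <stdio.h>

/* The hardware state only flips when the user count moves between
 * zero and non-zero. */
static int num_prt_users;

static void set_prt(int enable)
{
	printf("PRT %s\n", enable ? "enabled" : "disabled");
}

static void prt_get(void)
{
	if (++num_prt_users == 1)
		set_prt(1);
}

static void prt_put(void)
{
	if (--num_prt_users == 0)
		set_prt(0);
}

int main(void)
{
	prt_get();	/* enables */
	prt_get();	/* no-op */
	prt_put();	/* no-op */
	prt_put();	/* disables */
	return 0;
}
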
+
+/**
+ * amdgpu_vm_free_mapping - free a mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @mapping: mapping to be freed
+ * @fence: fence of the unmap operation
+ *
+ * Free a mapping and make sure we decrease the PRT usage count if applicable.
+ */
+static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va_mapping *mapping,
+ struct dma_fence *fence)
+{
+ if (mapping->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_add_prt_cb(adev, fence);
+ kfree(mapping);
+}
+
+/**
+ * amdgpu_vm_prt_fini - finish all prt mappings
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Register a cleanup callback to disable PRT support after the VM dies.
+ */
+static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct dma_fence *excl, **shared;
+ unsigned i, shared_count;
+ int r;
+
+ r = reservation_object_get_fences_rcu(resv, &excl,
+ &shared_count, &shared);
+ if (r) {
+ /* Not enough memory to grab the fence list, as last resort
+ * block for all the fences to complete.
+ */
+ reservation_object_wait_timeout_rcu(resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ return;
+ }
+
+ /* Add a callback for each fence in the reservation object */
+ amdgpu_vm_prt_get(adev);
+ amdgpu_vm_add_prt_cb(adev, excl);
+
+ for (i = 0; i < shared_count; ++i) {
+ amdgpu_vm_prt_get(adev);
+ amdgpu_vm_add_prt_cb(adev, shared[i]);
+ }
+
+ kfree(shared);
+}
+
+/**
* amdgpu_vm_clear_freed - clear freed BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @fence: optional resulting fence (unchanged if no work needed to be done
+ * or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
* Returns 0 for success.
@@ -1182,9 +1599,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
* PTs have to be reserved and mutex must be locked!
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence)
{
struct amdgpu_bo_va_mapping *mapping;
+ struct dma_fence *f = NULL;
int r;
while (!list_empty(&vm->freed)) {
@@ -1193,12 +1612,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
list_del(&mapping->list);
r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
- 0, 0, NULL);
- kfree(mapping);
- if (r)
+ 0, 0, &f);
+ amdgpu_vm_free_mapping(adev, vm, mapping, f);
+ if (r) {
+ dma_fence_put(f);
return r;
+ }
+ }
+ if (fence && f) {
+ dma_fence_put(*fence);
+ *fence = f;
+ } else {
+ dma_fence_put(f);
}
+
return 0;
}
@@ -1271,7 +1699,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
- list_add_tail(&bo_va->bo_list, &bo->va);
+ if (bo)
+ list_add_tail(&bo_va->bo_list, &bo->va);
return bo_va;
}
@@ -1298,9 +1727,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
struct interval_tree_node *it;
- unsigned last_pfn, pt_idx;
uint64_t eaddr;
- int r;
/* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
@@ -1309,15 +1736,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
- if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
- return -EINVAL;
-
- last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
- if (last_pfn >= adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
- last_pfn, adev->vm_manager.max_pfn);
+ if (saddr >= eaddr ||
+ (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
- }
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1330,15 +1751,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
tmp->it.start, tmp->it.last + 1);
- r = -EINVAL;
- goto error;
+ return -EINVAL;
}
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
- if (!mapping) {
- r = -ENOMEM;
- goto error;
- }
+ if (!mapping)
+ return -ENOMEM;
INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr;
@@ -1349,53 +1767,74 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
- /* Make sure the page tables are allocated */
- saddr >>= amdgpu_vm_block_size;
- eaddr >>= amdgpu_vm_block_size;
-
- BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
+ if (flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
- if (eaddr > vm->max_pde_used)
- vm->max_pde_used = eaddr;
+ return 0;
+}
- /* walk over the address space and allocate the page tables */
- for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
- struct reservation_object *resv = vm->page_directory->tbo.resv;
- struct amdgpu_bo *pt;
+/**
+ * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @saddr: where to map the BO
+ * @offset: requested offset in the BO
+ * @size: size of the mapping in bytes
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add a mapping of the BO at the specified addr into the VM. Replace existing
+ * mappings as we do so.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved and unreserved outside!
+ */
+int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t saddr, uint64_t offset,
+ uint64_t size, uint64_t flags)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_vm *vm = bo_va->vm;
+ uint64_t eaddr;
+ int r;
- if (vm->page_tables[pt_idx].bo)
- continue;
+ /* validate the parameters */
+ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+ size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
- r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
- AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED,
- NULL, resv, &pt);
- if (r)
- goto error_free;
+ /* make sure object fit at this offset */
+ eaddr = saddr + size - 1;
+ if (saddr >= eaddr ||
+ (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ return -EINVAL;
- /* Keep a reference to the page table to avoid freeing
- * them up in the wrong order.
- */
- pt->parent = amdgpu_bo_ref(vm->page_directory);
+ /* Allocate all the needed memory */
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
- vm->page_tables[pt_idx].bo = pt;
- vm->page_tables[pt_idx].addr = 0;
+ r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+ if (r) {
+ kfree(mapping);
+ return r;
}
- return 0;
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr /= AMDGPU_GPU_PAGE_SIZE;
-error_free:
- list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
- trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- kfree(mapping);
+ mapping->it.start = saddr;
+ mapping->it.last = eaddr;
+ mapping->offset = offset;
+ mapping->flags = flags;
-error:
- return r;
+ list_add(&mapping->list, &bo_va->invalids);
+ interval_tree_insert(&mapping->it, &vm->va);
+
+ if (flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+
+ return 0;
}
/**
@@ -1444,7 +1883,109 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
if (valid)
list_add(&mapping->list, &vm->freed);
else
- kfree(mapping);
+ amdgpu_vm_free_mapping(adev, vm, mapping,
+ bo_va->last_pt_update);
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM structure to use
+ * @saddr: start of the range
+ * @size: size of the range
+ *
+ * Remove all mappings in a range, split them as appropriate.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t saddr, uint64_t size)
+{
+ struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
+ struct interval_tree_node *it;
+ LIST_HEAD(removed);
+ uint64_t eaddr;
+
+ eaddr = saddr + size - 1;
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr /= AMDGPU_GPU_PAGE_SIZE;
+
+ /* Allocate all the needed memory */
+ before = kzalloc(sizeof(*before), GFP_KERNEL);
+ if (!before)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&before->list);
+
+ after = kzalloc(sizeof(*after), GFP_KERNEL);
+ if (!after) {
+ kfree(before);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&after->list);
+
+ /* Now gather all removed mappings */
+ it = interval_tree_iter_first(&vm->va, saddr, eaddr);
+ while (it) {
+ tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
+ it = interval_tree_iter_next(it, saddr, eaddr);
+
+ /* Remember mapping split at the start */
+ if (tmp->it.start < saddr) {
+ before->it.start = tmp->it.start;
+ before->it.last = saddr - 1;
+ before->offset = tmp->offset;
+ before->flags = tmp->flags;
+ list_add(&before->list, &tmp->list);
+ }
+
+ /* Remember mapping split at the end */
+ if (tmp->it.last > eaddr) {
+ after->it.start = eaddr + 1;
+ after->it.last = tmp->it.last;
+ after->offset = tmp->offset;
+ after->offset += after->it.start - tmp->it.start;
+ after->flags = tmp->flags;
+ list_add(&after->list, &tmp->list);
+ }
+
+ list_del(&tmp->list);
+ list_add(&tmp->list, &removed);
+ }
+
+ /* And free them up */
+ list_for_each_entry_safe(tmp, next, &removed, list) {
+ interval_tree_remove(&tmp->it, &vm->va);
+ list_del(&tmp->list);
+
+ if (tmp->it.start < saddr)
+ tmp->it.start = saddr;
+ if (tmp->it.last > eaddr)
+ tmp->it.last = eaddr;
+
+ list_add(&tmp->list, &vm->freed);
+ trace_amdgpu_vm_bo_unmap(NULL, tmp);
+ }
+
+ /* Insert partial mapping before the range */
+ if (!list_empty(&before->list)) {
+ interval_tree_insert(&before->it, &vm->va);
+ if (before->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+ } else {
+ kfree(before);
+ }
+
+ /* Insert partial mapping after the range */
+ if (!list_empty(&after->list)) {
+ interval_tree_insert(&after->it, &vm->va);
+ if (after->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+ } else {
+ kfree(after);
+ }
return 0;
}
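
The before/after split is easiest to see on bare interval numbers. A sketch that models only the arithmetic, not the interval tree or list handling:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, last; };

/* Model of the split above: clearing [saddr, eaddr] out of an existing
 * mapping may leave a piece before the range, a piece after it, or
 * both; the cleared middle is what moves to the freed list. */
static void clear_range(struct range m, uint64_t saddr, uint64_t eaddr)
{
	if (m.start < saddr)
		printf("before: [%llu, %llu]\n",
		       (unsigned long long)m.start,
		       (unsigned long long)(saddr - 1));
	if (m.last > eaddr)
		printf("after:  [%llu, %llu]\n",
		       (unsigned long long)(eaddr + 1),
		       (unsigned long long)m.last);

	if (m.start < saddr)
		m.start = saddr;
	if (m.last > eaddr)
		m.last = eaddr;
	printf("freed:  [%llu, %llu]\n",
	       (unsigned long long)m.start, (unsigned long long)m.last);
}

int main(void)
{
	struct range m = { 100, 400 };

	clear_range(m, 150, 300);	/* splits into three pieces */
	return 0;
}
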
@@ -1480,7 +2021,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
- kfree(mapping);
+ amdgpu_vm_free_mapping(adev, vm, mapping,
+ bo_va->last_pt_update);
}
dma_fence_put(bo_va->last_pt_update);
@@ -1521,7 +2063,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8);
- unsigned pd_size, pd_entries;
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
@@ -1536,16 +2077,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
- pd_size = amdgpu_vm_directory_size(adev);
- pd_entries = amdgpu_vm_num_pdes(adev);
-
- /* allocate page table array */
- vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
- if (vm->page_tables == NULL) {
- DRM_ERROR("Cannot allocate memory for page table array\n");
- return -ENOMEM;
- }
-
/* create scheduler entity for page table updates */
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
@@ -1555,44 +2086,64 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amd_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs);
if (r)
- goto err;
+ return r;
- vm->page_directory_fence = NULL;
+ vm->last_dir_update = NULL;
- r = amdgpu_bo_create(adev, pd_size, align, true,
+ r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- NULL, NULL, &vm->page_directory);
+ NULL, NULL, &vm->root.bo);
if (r)
goto error_free_sched_entity;
- r = amdgpu_bo_reserve(vm->page_directory, false);
+ r = amdgpu_bo_reserve(vm->root.bo, false);
if (r)
- goto error_free_page_directory;
+ goto error_free_root;
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
- amdgpu_bo_unreserve(vm->page_directory);
+ amdgpu_bo_unreserve(vm->root.bo);
return 0;
-error_free_page_directory:
- amdgpu_bo_unref(&vm->page_directory->shadow);
- amdgpu_bo_unref(&vm->page_directory);
- vm->page_directory = NULL;
+error_free_root:
+ amdgpu_bo_unref(&vm->root.bo->shadow);
+ amdgpu_bo_unref(&vm->root.bo);
+ vm->root.bo = NULL;
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
-err:
- drm_free_large(vm->page_tables);
-
return r;
}
/**
+ * amdgpu_vm_free_levels - free PD/PT levels
+ *
+ * @level: PD/PT starting level to free
+ *
+ * Free the page directory or page table level and all sub levels.
+ */
+static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+{
+ unsigned i;
+
+ if (level->bo) {
+ amdgpu_bo_unref(&level->bo->shadow);
+ amdgpu_bo_unref(&level->bo);
+ }
+
+ if (level->entries)
+ for (i = 0; i <= level->last_entry_used; i++)
+ amdgpu_vm_free_levels(&level->entries[i]);
+
+ drm_free_large(level->entries);
+}
+
+/**
* amdgpu_vm_fini - tear down a vm instance
*
* @adev: amdgpu_device pointer
@@ -1604,7 +2155,7 @@ err:
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- int i;
+ bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -1617,24 +2168,17 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- list_del(&mapping->list);
- kfree(mapping);
- }
-
- for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
- struct amdgpu_bo *pt = vm->page_tables[i].bo;
-
- if (!pt)
- continue;
+ if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+ amdgpu_vm_prt_fini(adev, vm);
+ prt_fini_needed = false;
+ }
- amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt);
+ list_del(&mapping->list);
+ amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- drm_free_large(vm->page_tables);
- amdgpu_bo_unref(&vm->page_directory->shadow);
- amdgpu_bo_unref(&vm->page_directory);
- dma_fence_put(vm->page_directory_fence);
+ amdgpu_vm_free_levels(&vm->root);
+ dma_fence_put(vm->last_dir_update);
}
/**
@@ -1665,6 +2209,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
+ spin_lock_init(&adev->vm_manager.prt_lock);
+ atomic_set(&adev->vm_manager.num_prt_users, 0);
}
/**