author    Dave Airlie <airlied@redhat.com>  2021-06-10 11:03:50 +1000
committer Dave Airlie <airlied@redhat.com>  2021-06-10 11:28:09 +1000
commit    09b020bb05a514f560979438fa40406bc63d5353 (patch)
tree      2568980e133e47bce3509cab92f7adac99b51cc4 /drivers/gpu/drm/amd/amdgpu
parent    5745d647d5563d3e9d32013ad4e5c629acff04d7 (diff)
parent    5b7a2c92b6102447a973f2f1ef19d660ec329881 (diff)
Merge tag 'drm-misc-next-2021-06-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.14:

UAPI Changes:

 * drm/panfrost: Export AFBC_FEATURES register to userspace

Cross-subsystem Changes:

 * dma-buf: Fix debug printing; Rename dma_resv_*() functions + changes
   in callers; Cleanups

Core Changes:

 * Add prefetching memcpy for WC
 * Avoid circular dependency on CONFIG_FB
 * Cleanups
 * Documentation fixes throughout DRM
 * ttm: Make struct ttm_resource the base of all managers + changes in
   all users of TTM; Add a generic memcpy for page-based iomem; Remove
   use of VM_MIXEDMAP; Cleanups

Driver Changes:

 * drm/bridge: Add TI SN65DSI83 and SN65DSI84 + DT bindings
 * drm/hyperv: Add DRM driver for HyperV graphics output
 * drm/msm: Fix module dependencies
 * drm/panel: KD53T133: Support rotation
 * drm/pl111: Fix module dependencies
 * drm/qxl: Fixes
 * drm/stm: Cleanups
 * drm/sun4i: Be explicit about format modifiers
 * drm/vc4: Use struct gpio_desc; Cleanups
 * drm/vgem: Cleanups
 * drm/vmwgfx: Use ttm_bo_move_null() if there's nothing to copy
 * fbdev/mach64: Cleanups
 * fbdev/mb862xx: Use DEVICE_ATTR_RO

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YMBw3DF2b9udByfT@linux-uq9g
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c        |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c       |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c       |  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c           |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c           |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c       |  92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c           |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c            |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c        |  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h        |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c   |  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h    |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c          |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h         |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c           | 100
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c           |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c            |  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c      |  97
19 files changed, 240 insertions, 214 deletions
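
The headline change for this directory is the dma-buf rename noted in the tag message above: the _rcu suffixes are dropped and the accessors get clearer names. As a minimal sketch of the before/after call sites, using only signatures that appear in this diff (the helper name example_wait_idle is hypothetical, not part of this series):

#include <linux/dma-resv.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/*
 * Renames applied mechanically throughout this diff:
 *   dma_resv_get_excl()          -> dma_resv_excl_fence()
 *   dma_resv_get_list()          -> dma_resv_shared_list()
 *   dma_resv_get_fences_rcu()    -> dma_resv_get_fences()
 *   dma_resv_wait_timeout_rcu()  -> dma_resv_wait_timeout()
 *   dma_resv_test_signaled_rcu() -> dma_resv_test_signaled()
 */
static long example_wait_idle(struct dma_resv *resv, bool intr)
{
	/* wait_all = true: wait for the shared fences as well as the
	 * exclusive one, as amdgpu_vm_wait_idle() does below. */
	return dma_resv_wait_timeout(resv, true, intr, MAX_SCHEDULE_TIMEOUT);
}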
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 141cd297e948..668a28b80a62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
if (!ef)
return -EINVAL;
- old = dma_resv_get_list(resv);
+ old = dma_resv_shared_list(resv);
if (!old)
return 0;
@@ -1668,7 +1668,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the next restore worker
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
- bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
ret = vm_validate_pt_pd_bos(avm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0585442b000e..736b8c121979 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4127,9 +4127,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
/* No need to recover an evicted BO */
- if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
- shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
- shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
r = amdgpu_bo_restore_shadow(shadow, &next);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 49f73b5b89b0..ac7b37dfff5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
- &work->shared_count,
- &work->shared);
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+ &work->shared_count, &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 6ec1312b7389..c3053b83b80c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -49,10 +49,10 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
unsigned int count;
int r;
- if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+ if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
return 0;
- r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences(obj, NULL, &count, &fences);
if (r)
return r;
@@ -226,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
if (r)
return ERR_PTR(r);
- } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
AMDGPU_GEM_DOMAIN_GTT)) {
return ERR_PTR(-EBUSY);
}
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
@@ -245,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
- bo->tbo.base.size, attach->dev, dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+ bo->tbo.base.size, attach->dev,
+ dir, &sgt);
if (r)
return ERR_PTR(r);
break;
@@ -436,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 73c76a3e2b12..1c3e3b608332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- fence = dma_resv_get_excl(bo->tbo.base.resv);
+ fence = dma_resv_excl_fence(bo->tbo.base.resv);
if (fence) {
amdgpu_bo_fence(bo, fence, true);
fence = NULL;
@@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 7061c4a0e421..85b8bfb8857b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -101,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
*addr = bo->tbo.ttm->dma_address[0];
break;
@@ -112,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
*addr = 0;
break;
}
- *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+ *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
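
A pattern repeated in nearly every hunk of this merge: struct ttm_buffer_object no longer embeds its placement as bo->mem; it now points at a manager-allocated struct ttm_resource through bo->resource. A tiny sketch of the access change (the helper is hypothetical):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Before: mem_type = bo->mem.mem_type;        (embedded struct)
 * After:  mem_type = bo->resource->mem_type;  (pointer to a resource
 *         allocated and owned by the resource manager)              */
static bool example_bo_is_vram(struct ttm_buffer_object *bo)
{
	/* The NULL check is a defensive assumption; the call sites in
	 * this diff dereference bo->resource unconditionally. */
	return bo->resource && bo->resource->mem_type == TTM_PL_VRAM;
}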
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 9ab33048923e..ec96e0b26b11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,18 +22,26 @@
* Authors: Christian König
*/
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
+struct amdgpu_gtt_node {
+ struct ttm_buffer_object *tbo;
+ struct ttm_range_mgr_node base;
+};
+
static inline struct amdgpu_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_gtt_mgr, manager);
}
-struct amdgpu_gtt_node {
- struct drm_mm_node node;
- struct ttm_buffer_object *tbo;
-};
+static inline struct amdgpu_gtt_node *
+to_amdgpu_gtt_node(struct ttm_resource *res)
+{
+ return container_of(res, struct amdgpu_gtt_node, base.base);
+}
/**
* DOC: mem_info_gtt_total
@@ -93,13 +101,15 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
/**
* amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
- * @mem: the mem object to check
+ * @res: the mem object to check
*
* Check if a mem object has already address space allocated.
*/
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
- return mem->mm_node != NULL;
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+
+ return drm_mm_node_allocated(&node->base.mm_nodes[0]);
}
/**
@@ -115,54 +125,57 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+ uint32_t num_pages = PFN_UP(tbo->base.size);
struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
- if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
- atomic64_read(&mgr->available) < mem->num_pages) {
+ if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+ atomic64_read(&mgr->available) < num_pages) {
spin_unlock(&mgr->lock);
return -ENOSPC;
}
- atomic64_sub(mem->num_pages, &mgr->available);
+ atomic64_sub(num_pages, &mgr->available);
spin_unlock(&mgr->lock);
- if (!place->lpfn) {
- mem->mm_node = NULL;
- mem->start = AMDGPU_BO_INVALID_OFFSET;
- return 0;
- }
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
if (!node) {
r = -ENOMEM;
goto err_out;
}
node->tbo = tbo;
+ ttm_resource_init(tbo, place, &node->base.base);
- spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
- tbo->page_alignment, 0, place->fpfn,
- place->lpfn, DRM_MM_INSERT_BEST);
- spin_unlock(&mgr->lock);
-
- if (unlikely(r))
- goto err_free;
-
- mem->mm_node = node;
- mem->start = node->node.start;
+ if (place->lpfn) {
+ spin_lock(&mgr->lock);
+ r = drm_mm_insert_node_in_range(&mgr->mm,
+ &node->base.mm_nodes[0],
+ num_pages, tbo->page_alignment,
+ 0, place->fpfn, place->lpfn,
+ DRM_MM_INSERT_BEST);
+ spin_unlock(&mgr->lock);
+ if (unlikely(r))
+ goto err_free;
+
+ node->base.base.start = node->base.mm_nodes[0].start;
+ } else {
+ node->base.mm_nodes[0].start = 0;
+ node->base.mm_nodes[0].size = node->base.base.num_pages;
+ node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+ }
+ *res = &node->base.base;
return 0;
err_free:
kfree(node);
err_out:
- atomic64_add(mem->num_pages, &mgr->available);
+ atomic64_add(num_pages, &mgr->available);
return r;
}
@@ -176,19 +189,18 @@ err_out:
* Free the allocated GTT again.
*/
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- struct amdgpu_gtt_node *node = mem->mm_node;
- if (node) {
- spin_lock(&mgr->lock);
- drm_mm_remove_node(&node->node);
- spin_unlock(&mgr->lock);
- kfree(node);
- }
+ spin_lock(&mgr->lock);
+ if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+ drm_mm_remove_node(&node->base.mm_nodes[0]);
+ spin_unlock(&mgr->lock);
+ atomic64_add(res->num_pages, &mgr->available);
- atomic64_add(mem->num_pages, &mgr->available);
+ kfree(node);
}
/**
@@ -224,7 +236,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
- node = container_of(mm_node, struct amdgpu_gtt_node, node);
+ node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
r = amdgpu_ttm_recover_gart(node->tbo);
if (r)
break;
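
The reworked GTT manager shows the new node layout from the ttm_resource series: the driver node embeds a struct ttm_range_mgr_node, whose base member is the struct ttm_resource that TTM sees and whose mm_nodes[] flexible array carries the drm_mm allocations. A condensed sketch of the containment and the downcast (names are illustrative, not the driver's):

#include <drm/ttm/ttm_range_manager.h>
#include <linux/slab.h>

struct example_gtt_node {
	struct ttm_buffer_object *tbo;
	struct ttm_range_mgr_node base;	/* base.base is the ttm_resource */
};

static struct example_gtt_node *
example_node_from_res(struct ttm_resource *res)
{
	/* One container_of() spanning both embeddings, exactly as
	 * to_amdgpu_gtt_node() does above. */
	return container_of(res, struct example_gtt_node, base.base);
}

static struct example_gtt_node *example_node_alloc(void)
{
	struct example_gtt_node *node;

	/* struct_size() sizes one trailing mm_nodes[] entry, matching
	 * the kzalloc() in amdgpu_gtt_mgr_new() above. */
	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
	return node;
}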
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index c7f3aae23c62..b7fb72bff2c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned count;
int r;
- r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences(resv, NULL, &count, &fences);
if (r)
goto fallback;
@@ -156,8 +156,7 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 2741c28ff1b5..d6c54c7f7679 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a6fa3968463a..c83f116edde2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -364,14 +364,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
- &(*bo_ptr)->tbo.mem, &ctx);
+ &(*bo_ptr)->tbo.resource, &ctx);
if (r)
goto error;
@@ -575,15 +575,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+ bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -777,12 +777,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r)
return r;
@@ -905,8 +905,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
if (bo->tbo.pin_count) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
- uint32_t mem_flags = bo->tbo.mem.placement;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
+ uint32_t mem_flags = bo->tbo.resource->placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
@@ -956,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
ttm_bo_pin(&bo->tbo);
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -1008,11 +1008,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
&adev->visible_pin_size);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
}
@@ -1245,7 +1245,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1256,7 +1256,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
- bo->mem.mem_type != TTM_PL_SYSTEM)
+ bo->resource->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
/* remember the eviction */
@@ -1276,7 +1276,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
{
unsigned int domain;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
*vram_mem += amdgpu_bo_size(bo);
@@ -1318,7 +1318,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+ if (bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1355,10 +1355,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
@@ -1381,9 +1381,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
(offset + bo->base.size) > adev->gmc.visible_vram_size)
return VM_FAULT_SIGBUS;
@@ -1468,11 +1468,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
- WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1490,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
- offset = (bo->tbo.mem.start << PAGE_SHIFT) +
- amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
@@ -1544,7 +1544,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
unsigned int pin_count;
u64 size;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 90eab1c31027..126df03a7066 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -223,10 +223,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
- if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d607f314cc1b..f6aff7ce5160 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -66,14 +66,18 @@ static DEVICE_ATTR_RO(mem_info_preempt_used);
static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
- atomic64_add(mem->num_pages, &mgr->used);
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
- mem->mm_node = NULL;
- mem->start = AMDGPU_BO_INVALID_OFFSET;
+ ttm_resource_init(tbo, place, *res);
+ (*res)->start = AMDGPU_BO_INVALID_OFFSET;
+
+ atomic64_add((*res)->num_pages, &mgr->used);
return 0;
}
@@ -86,11 +90,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
* Free the allocated GTT again.
*/
static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
- atomic64_sub(mem->num_pages, &mgr->used);
+ atomic64_sub(res->num_pages, &mgr->used);
+ kfree(res);
}
/**
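
The preempt manager is the minimal instance of the new allocation contract: new() now allocates the struct ttm_resource itself and hands it back through a struct ttm_resource **, and del() frees it. Stripped of the usage accounting, the pattern looks roughly like this (a sketch under those assumptions, not the driver's exact code):

#include <drm/ttm/ttm_resource.h>
#include <linux/slab.h>

static int example_mgr_new(struct ttm_resource_manager *man,
			   struct ttm_buffer_object *tbo,
			   const struct ttm_place *place,
			   struct ttm_resource **res)
{
	*res = kzalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)	/* note the negation: fail only when kzalloc fails */
		return -ENOMEM;

	ttm_resource_init(tbo, place, *res);
	return 0;
}

static void example_mgr_del(struct ttm_resource_manager *man,
			    struct ttm_resource *res)
{
	/* The manager that allocated the resource frees it; the TTM
	 * core no longer owns the memory. */
	kfree(res);
}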
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 40f2adf305bc..59e0fefb15aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
{
struct drm_mm_node *node;
- if (!res || !res->mm_node) {
+ if (!res) {
cur->start = start;
cur->size = size;
cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
- node = res->mm_node;
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
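
With mm_node gone from struct ttm_resource, the cursor above instead starts from the mm_nodes[] array of the embedding ttm_range_mgr_node. Callers are unchanged; a sketch of the usual walk, mirroring the counting loop in amdgpu_vram_mgr_alloc_sgt() further down (the helper name is hypothetical):

#include "amdgpu_res_cursor.h"

static unsigned int example_count_ranges(struct ttm_resource *res,
					 u64 num_bytes)
{
	struct amdgpu_res_cursor cursor;
	unsigned int ranges = 0;

	/* Position the cursor at byte 0 and step through each
	 * contiguous drm_mm range backing the resource. */
	amdgpu_res_first(res, 0, num_bytes, &cursor);
	while (cursor.remaining) {
		ranges++;
		amdgpu_res_next(&cursor, cursor.size);
	}
	return ranges;
}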
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4e558632a5d2..1b2ceccaf5b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,10 +210,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -EINVAL;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
r = amdgpu_sync_fence(sync, f);
- flist = dma_resv_get_list(resv);
+ flist = dma_resv_shared_list(resv);
if (!flist || r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 792d20261846..0527772fe1b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.mem.num_pages;
- __entry->type = bo->tbo.mem.mem_type;
+ __entry->pages = bo->tbo.resource->num_pages;
+ __entry->type = bo->tbo.resource->mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 832970cff64c..5d0c57b9917f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -45,6 +45,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include <drm/amdgpu_drm.h>
@@ -125,7 +126,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
rcu_read_unlock();
return;
}
- switch (bo->mem.mem_type) {
+
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_GDS:
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
@@ -460,7 +462,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int r;
if (new_mem->mem_type == TTM_PL_TT ||
@@ -495,7 +497,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
@@ -605,7 +607,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_res_cursor cursor;
- amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+ &cursor);
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -954,50 +957,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ struct ttm_resource *tmp;
uint64_t addr, flags;
int r;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->mem.start = addr >> PAGE_SHIFT;
- } else {
+ bo->resource->start = addr >> PAGE_SHIFT;
+ return 0;
+ }
- /* allocate GART space */
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.mem_type = TTM_PL_TT;
- placements.flags = bo->mem.placement;
-
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* Bind pages */
- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
- amdgpu_gart_invalidate_tlb(adev);
- ttm_resource_free(bo, &bo->mem);
- bo->mem = tmp;
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_resource_free(bo, &tmp);
+ return r;
}
+ amdgpu_gart_invalidate_tlb(adev);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
@@ -1016,7 +1019,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
if (!tbo->ttm)
return 0;
- flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
r = amdgpu_ttm_gart_bind(adev, tbo, flags);
return r;
@@ -1330,12 +1333,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->mem.num_pages;
+ unsigned long num_pages = bo->resource->num_pages;
struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
+ /* Swapout? */
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
if (bo->type == ttm_bo_type_kernel &&
!amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
@@ -1344,7 +1351,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = dma_resv_get_list(bo->base.resv);
+ flist = dma_resv_shared_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
@@ -1354,7 +1361,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
}
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_PREEMPT:
/* Preemptible BOs don't own system resources managed by the
* driver (pages, VRAM, GART space). They point to resources
@@ -1372,7 +1379,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+ amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
&cursor);
while (cursor.remaining) {
if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1414,10 +1421,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
uint32_t value = 0;
int ret = 0;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return -EIO;
- amdgpu_res_first(&bo->mem, offset, len, &cursor);
+ amdgpu_res_first(bo->resource, offset, len, &cursor);
while (cursor.remaining) {
uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
uint64_t bytes = 4 - (cursor.start & 3);
@@ -1942,21 +1949,21 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
+ if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
DRM_ERROR("Trying to clear preemptible memory.\n");
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ if (bo->tbo.resource->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
- num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+ num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
num_loops = 0;
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
amdgpu_res_next(&cursor, cursor.size);
@@ -1981,12 +1988,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
uint64_t dst_addr = cursor.start;
- dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ dst_addr += amdgpu_ttm_domain_start(adev,
+ bo->tbo.resource->mem_type);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
cur_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index ce8f80a959c6..0f576f294d8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1125,9 +1125,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false,
- msecs_to_jiffies(10));
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
if (r < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8d218c5cfee8..777e8922ecf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
amdgpu_vm_bo_idle(base);
if (bo->preferred_domains &
- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
return;
/*
@@ -658,10 +658,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
if (!bo->parent)
continue;
- ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
&vm->lru_bulk_move);
if (shadow)
- ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
+ ttm_bo_move_to_lru_tail(&shadow->tbo,
+ shadow->tbo.resource,
&vm->lru_bulk_move);
}
spin_unlock(&adev->mman.bdev.lru_lock);
@@ -1858,10 +1859,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
bo = gem_to_amdgpu_bo(gobj);
}
- mem = &bo->tbo.mem;
+ mem = bo->tbo.resource;
if (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT)
pages_addr = bo->tbo.ttm->dma_address;
@@ -1922,7 +1923,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
if (!(bo->preferred_domains &
amdgpu_mem_type_to_domain(mem_type)))
@@ -2063,13 +2064,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned i, shared_count;
int r;
- r = dma_resv_get_fences_rcu(resv, &excl,
- &shared_count, &shared);
+ r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
@@ -2681,7 +2681,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
return false;
/* Try to block ongoing updates */
@@ -2861,8 +2861,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
- true, true, timeout);
+ timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+ true, timeout);
if (timeout <= 0)
return timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 07e007dbff7c..9a6df02477ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,8 @@
*/
#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
@@ -217,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *mem = &bo->tbo.mem;
- struct drm_mm_node *nodes = mem->mm_node;
- unsigned pages = mem->num_pages;
+ struct ttm_resource *res = bo->tbo.resource;
+ unsigned pages = res->num_pages;
+ struct drm_mm_node *mm;
u64 usage;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
- if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
- usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+ mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+ for (usage = 0; pages; pages -= mm->size, mm++)
+ usage += amdgpu_vram_mgr_vis_size(adev, mm);
return usage;
}
@@ -365,15 +368,15 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
uint64_t vis_usage = 0, mem_bytes, max_bytes;
+ struct ttm_range_mgr_node *node;
struct drm_mm *mm = &mgr->mm;
enum drm_mm_insert_mode mode;
- struct drm_mm_node *nodes;
unsigned i;
int r;
@@ -386,10 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
/* bail out quickly if there's likely not enough VRAM for this BO */
- mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+ mem_bytes = tbo->base.size;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
- atomic64_sub(mem_bytes, &mgr->usage);
- return -ENOSPC;
+ r = -ENOSPC;
+ goto error_sub;
}
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -404,22 +407,23 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
#endif
pages_per_node = max_t(uint32_t, pages_per_node,
tbo->page_alignment);
- num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
}
- nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
- GFP_KERNEL | __GFP_ZERO);
- if (!nodes) {
- atomic64_sub(mem_bytes, &mgr->usage);
- return -ENOMEM;
+ node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!node) {
+ r = -ENOMEM;
+ goto error_sub;
}
+ ttm_resource_init(tbo, place, &node->base);
+
mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
- mem->start = 0;
- pages_left = mem->num_pages;
+ pages_left = node->base.num_pages;
/* Limit maximum size to 2GB due to SG table limitations */
pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (pages >= pages_per_node)
alignment = pages_per_node;
- r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
- 0, place->fpfn, lpfn, mode);
+ r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+ alignment, 0, place->fpfn,
+ lpfn, mode);
if (unlikely(r)) {
if (pages > pages_per_node) {
if (is_power_of_2(pages))
@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
pages = rounddown_pow_of_two(pages);
continue;
}
- goto error;
+ goto error_free;
}
- vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
- amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+ amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
pages_left -= pages;
++i;
@@ -456,19 +461,20 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
spin_unlock(&mgr->lock);
if (i == 1)
- mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+ node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
atomic64_add(vis_usage, &mgr->vis_usage);
- mem->mm_node = nodes;
+ *res = &node->base;
return 0;
-error:
+error_free:
while (i--)
- drm_mm_remove_node(&nodes[i]);
+ drm_mm_remove_node(&node->mm_nodes[i]);
spin_unlock(&mgr->lock);
- atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+ kvfree(node);
- kvfree(nodes);
+error_sub:
+ atomic64_sub(mem_bytes, &mgr->usage);
return r;
}
@@ -481,24 +487,22 @@ error:
* Free the allocated VRAM again.
*/
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_mm_node *nodes = mem->mm_node;
uint64_t usage = 0, vis_usage = 0;
- unsigned pages = mem->num_pages;
-
- if (!mem->mm_node)
- return;
+ unsigned i, pages;
spin_lock(&mgr->lock);
- while (pages) {
- pages -= nodes->size;
- drm_mm_remove_node(nodes);
- usage += nodes->size << PAGE_SHIFT;
- vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
- ++nodes;
+ for (i = 0, pages = res->num_pages; pages;
+ pages -= node->mm_nodes[i].size, ++i) {
+ struct drm_mm_node *mm = &node->mm_nodes[i];
+
+ drm_mm_remove_node(mm);
+ usage += mm->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
}
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
@@ -506,8 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
- kvfree(mem->mm_node);
- mem->mm_node = NULL;
+ kvfree(node);
}
/**
@@ -524,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* Allocate and fill a sg table from a VRAM allocation.
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_resource *mem,
+ struct ttm_resource *res,
u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
@@ -540,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
return -ENOMEM;
/* Determine the number of DRM_MM nodes to export */
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
amdgpu_res_next(&cursor, cursor.size);
@@ -560,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
* and the number of bytes from it. Access the following
* DRM_MM node(s) if more buffer needs to exported
*/
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + adev->gmc.aper_base;
size_t size = cursor.size;
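
Finally, the VRAM manager's allocation loop above keeps its shrink-and-retry policy, now operating on node->mm_nodes[i]: when a chunk fails to fit, it drops to the next power of two until it reaches the per-node minimum. The policy in isolation (a hypothetical extraction for illustration, not a function in this series):

#include <linux/log2.h>

/* Returns the next smaller chunk size to retry with, or 0 when the
 * chunk is already at the pages_per_node floor and the allocation
 * must fail, as in the error path of amdgpu_vram_mgr_new() above. */
static unsigned long example_shrink_chunk(unsigned long pages,
					  unsigned long pages_per_node)
{
	if (pages <= pages_per_node)
		return 0;
	/* rounddown_pow_of_two() is a no-op on powers of two, hence
	 * the explicit halving in that case. */
	return is_power_of_2(pages) ? pages / 2 : rounddown_pow_of_two(pages);
}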