summary | refs | log | tree | commit | diff
path: root/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
diff options
context:
space:
mode:
author     Marek Olšák <marek.olsak@amd.com>      2021-01-23 17:21:44 -0500
committer  Marge Bot <eric+marge@anholt.net>      2021-02-03 21:53:33 +0000
commit  965c6445ad419aa49302f75db1d99345708c5aae  (patch)
tree    88f750bf149ee32e74ebbe221b14953ede3e31c0  /src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
parent  7e47fe9a940f3989caf98024fdc584641601fa2e  (diff)
winsys/amdgpu,radeonsi: add HUD counters for how much memory is wasted by slabs
Slabs always allocate the next power of two size from their pools. This wastes memory if the size is not a power of two. bo->base.size is overwritten because the default is the allocated power of two size, but we need the real size to compute the wasted size in amdgpu_bo_slab_destroy. entry_size is added to the hole in pb_slab_entry to hold the real entry size. Like other memory stats, no atomics are used. Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8683>
Diffstat (limited to 'src/gallium/winsys/amdgpu/drm/amdgpu_bo.c')
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 0ab39539182..5108365976d 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -647,6 +647,15 @@ static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size,
return NULL;
}
+static unsigned get_slab_wasted_size(struct amdgpu_winsys_bo *bo)
+{
+ assert(bo->base.size <= bo->u.slab.entry.entry_size);
+ assert(bo->base.size < bo->base.alignment ||
+ bo->base.size < 1 << bo->ws->bo_slabs[0].min_order ||
+ bo->base.size > bo->u.slab.entry.entry_size / 2);
+ return bo->u.slab.entry.entry_size - bo->base.size;
+}
+
static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
@@ -661,6 +670,11 @@ static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
pb_slab_free(get_slabs(bo->ws,
bo->base.size,
0), &bo->u.slab.entry);
+
+ if (bo->base.placement & RADEON_DOMAIN_VRAM)
+ bo->ws->slab_wasted_vram -= get_slab_wasted_size(bo);
+ else
+ bo->ws->slab_wasted_gtt -= get_slab_wasted_size(bo);
}
static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
@@ -737,6 +751,7 @@ static struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
bo->unique_id = base_id + i;
bo->u.slab.entry.slab = &slab->base;
bo->u.slab.entry.group_index = group_index;
+ bo->u.slab.entry.entry_size = entry_size;
if (slab->buffer->bo) {
/* The slab is not suballocated. */
@@ -1331,8 +1346,13 @@ amdgpu_bo_create(struct amdgpu_winsys *ws,
return NULL;
bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
-
pipe_reference_init(&bo->base.reference, 1);
+ bo->base.size = size;
+
+ if (domain & RADEON_DOMAIN_VRAM)
+ ws->slab_wasted_vram += get_slab_wasted_size(bo);
+ else
+ ws->slab_wasted_gtt += get_slab_wasted_size(bo);
return &bo->base;
}