Diffstat (limited to 'src/mesa/drivers/dri/radeon/radeon_dma.c')
-rw-r--r--  src/mesa/drivers/dri/radeon/radeon_dma.c  35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/src/mesa/drivers/dri/radeon/radeon_dma.c b/src/mesa/drivers/dri/radeon/radeon_dma.c
index c6edbae9a1b..b8c65f4ce62 100644
--- a/src/mesa/drivers/dri/radeon/radeon_dma.c
+++ b/src/mesa/drivers/dri/radeon/radeon_dma.c
@@ -151,6 +151,7 @@ void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
aos->components = size;
aos->count = count;
+ radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
switch (size) {
case 1: radeonEmitVec4(out, data, stride, count); break;
@@ -161,6 +162,7 @@ void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
assert(0);
break;
}
+ radeon_bo_unmap(aos->bo);
}
void radeon_init_dma(radeonContextPtr rmesa)
@@ -183,10 +185,6 @@ void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size)
__FUNCTION__, size, rmesa->dma.minimum_size);
- /* unmap old reserved bo */
- if (!is_empty_list(&rmesa->dma.reserved))
- radeon_bo_unmap(first_elem(&rmesa->dma.reserved)->bo);
-
if (is_empty_list(&rmesa->dma.free)
|| last_elem(&rmesa->dma.free)->bo->size < size) {
dma_bo = CALLOC_STRUCT(radeon_dma_bo);
@@ -207,6 +205,7 @@ again_alloc:
counter on unused buffers for later freeing them from
begin of list */
dma_bo = last_elem(&rmesa->dma.free);
+ assert(dma_bo->bo->cref == 1);
remove_from_list(dma_bo);
insert_at_head(&rmesa->dma.reserved, dma_bo);
}
@@ -223,8 +222,6 @@ again_alloc:
/* Cmd buff have been flushed in radeon_revalidate_bos */
goto again_alloc;
}
-
- radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
}
/* Allocates a region from rmesa->dma.current. If there isn't enough
@@ -281,7 +278,6 @@ void radeonFreeDmaRegions(radeonContextPtr rmesa)
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
remove_from_list(dma_bo);
- radeon_bo_unmap(dma_bo->bo);
radeon_bo_unref(dma_bo->bo);
FREE(dma_bo);
}
@@ -306,10 +302,6 @@ static int radeon_bo_is_idle(struct radeon_bo* bo)
WARN_ONCE("Your libdrm or kernel doesn't have support for busy query.\n"
"This may cause small performance drop for you.\n");
}
- /* Protect against bug in legacy bo handling that causes bos stay
- * referenced even after they should be freed */
- if (bo->cref != 1)
- return 0;
return ret != -EBUSY;
}
@@ -346,9 +338,7 @@ void radeonReleaseDmaRegions(radeonContextPtr rmesa)
foreach_s(dma_bo, temp, &rmesa->dma.wait) {
if (dma_bo->expire_counter == time) {
WARN_ONCE("Leaking dma buffer object!\n");
- /* force free of buffer so we don't realy start
- * leaking stuff now*/
- while ((dma_bo->bo = radeon_bo_unref(dma_bo->bo))) {}
+ radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
FREE(dma_bo);
continue;
@@ -367,9 +357,6 @@ void radeonReleaseDmaRegions(radeonContextPtr rmesa)
insert_at_tail(&rmesa->dma.free, dma_bo);
}
- /* unmap the last dma region */
- if (!is_empty_list(&rmesa->dma.reserved))
- radeon_bo_unmap(first_elem(&rmesa->dma.reserved)->bo);
/* move reserved to wait list */
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
/* free objects that are too small to be used because of large request */
@@ -403,11 +390,12 @@ void rcommon_flush_last_swtcl_prim( GLcontext *ctx )
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
struct radeon_dma *dma = &rmesa->dma;
-
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
dma->flush = NULL;
+ radeon_bo_unmap(rmesa->swtcl.bo);
+
if (!is_empty_list(&dma->reserved)) {
GLuint current_offset = dma->current_used;
@@ -422,6 +410,8 @@ void rcommon_flush_last_swtcl_prim( GLcontext *ctx )
}
rmesa->swtcl.numverts = 0;
}
+ radeon_bo_unref(rmesa->swtcl.bo);
+ rmesa->swtcl.bo = NULL;
}
/* Alloc space in the current dma region.
*/
@@ -432,6 +422,7 @@ rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
void *head;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
+
if(is_empty_list(&rmesa->dma.reserved)
||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
if (rmesa->dma.flush) {
@@ -455,7 +446,13 @@ rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
rmesa->dma.current_vertexptr );
- head = (first_elem(&rmesa->dma.reserved)->bo->ptr + rmesa->dma.current_vertexptr);
+ if (!rmesa->swtcl.bo) {
+ rmesa->swtcl.bo = first_elem(&rmesa->dma.reserved)->bo;
+ radeon_bo_ref(rmesa->swtcl.bo);
+ radeon_bo_map(rmesa->swtcl.bo, 1);
+ }
+
+ head = (rmesa->swtcl.bo->ptr + rmesa->dma.current_vertexptr);
rmesa->dma.current_vertexptr += bytes;
rmesa->swtcl.numverts += nverts;
return head;