author | Chris Wilson <chris@chris-wilson.co.uk> | 2012-02-25 09:32:20 +0000
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2012-02-25 12:50:19 +0000
commit | 8d773b88f45594f45174dc6f1a264d968690ce84 (patch)
tree | 2303fec3daaecab82fba12d35c3a4d4b78f9a5c2
parent | 8cb773e7c809e1de23cd64d3db862d1f8e7e955a (diff)
sna/gen3+: Keep the vertex buffer resident between batches
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
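The change, in outline: instead of uploading and destroying the vertex buffer object at the end of every batch, the buffer stays resident across batches and is only released either when it is nearly full or from the kgem retire callback once the GPU has gone idle. A minimal standalone sketch of that pattern follows; the types and names (struct vbo, struct render_ctx, vertex_close, render_retire) are hypothetical stand-ins, not the driver's real kgem/sna API.

```c
/*
 * Hypothetical stand-ins for the driver's kgem/sna structures: a vertex
 * buffer that survives batch submission and is only released from the
 * retire hook once no GPU work is outstanding.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct vbo {
        float *map;        /* persistently mapped vertex data */
        size_t size, used; /* capacity and fill level, in floats */
};

struct render_ctx {
        struct vbo *vbo;   /* kept resident between batches */
        bool gpu_idle;     /* set by the submission layer when all batches retire */
};

/* End of batch: keep the buffer unless it is nearly full, so the next
 * batch can keep appending without allocating and relocating a new one. */
static void vertex_close(struct render_ctx *ctx)
{
        struct vbo *v = ctx->vbo;

        if (v && v->size - v->used < 64) { /* same "nearly full" threshold as the patch */
                free(v->map);
                free(v);
                ctx->vbo = NULL;
        }
}

/* Retire hook: once the GPU is idle and no batch is being built there is
 * no reason to keep holding the buffer, so drop it here instead of at the
 * end of every batch. */
static void render_retire(struct render_ctx *ctx)
{
        if (ctx->gpu_idle && ctx->vbo) {
                free(ctx->vbo->map);
                free(ctx->vbo);
                ctx->vbo = NULL;
        }
}
```

The trade-off is the one the sketch hints at: a buffer held across batches saves an upload and a relocation per batch, at the cost of needing a retire-time path (the gen3/5/6/7 retire callbacks added below) to give the memory back when rendering goes quiet.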
-rw-r--r-- | src/sna/gen3_render.c | 87
-rw-r--r-- | src/sna/gen5_render.c | 56
-rw-r--r-- | src/sna/gen6_render.c | 56
-rw-r--r-- | src/sna/gen7_render.c | 52
4 files changed, 173 insertions, 78 deletions
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b50d0671..8f597cfa 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1612,20 +1612,33 @@ static int gen3_vertex_finish(struct sna *sna)
 
 static void gen3_vertex_close(struct sna *sna)
 {
-        struct kgem_bo *bo;
-        int delta = 0;
+        struct kgem_bo *bo, *free_bo = NULL;
+        unsigned int delta = 0;
+
+        assert(sna->render_state.gen3.vertex_offset == 0);
 
-        if (!sna->render.vertex_used) {
+        DBG(("%s: used=%d, vbo active? %d\n",
+             __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
+        if (sna->render.vertex_used == 0) {
                 assert(sna->render.vbo == NULL);
                 assert(sna->render.vertices == sna->render.vertex_data);
                 assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
                 return;
         }
 
-        DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
-
         bo = sna->render.vbo;
-        if (bo == NULL) {
+        if (bo) {
+                if (IS_CPU_MAP(bo->map) ||
+                    sna->render.vertex_size - sna->render.vertex_used < 64) {
+                        DBG(("%s: discarding vbo (was CPU mapped)\n",
+                             __FUNCTION__));
+                        sna->render.vbo = NULL;
+                        sna->render.vertices = sna->render.vertex_data;
+                        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                        free_bo = bo;
+                }
+        } else {
                 if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
                         DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
                              sna->render.vertex_used, sna->kgem.nbatch));
@@ -1636,36 +1649,37 @@ static void gen3_vertex_close(struct sna *sna)
                         bo = NULL;
                         sna->kgem.nbatch += sna->render.vertex_used;
                 } else {
-                        bo = kgem_create_linear(&sna->kgem,
-                                                4*sna->render.vertex_used);
-                        if (bo && !kgem_bo_write(&sna->kgem, bo,
-                                                 sna->render.vertex_data,
-                                                 4*sna->render.vertex_used)) {
-                                kgem_bo_destroy(&sna->kgem, bo);
-                                goto reset;
-                        }
                         DBG(("%s: new vbo: %d\n",
                              __FUNCTION__, sna->render.vertex_used));
+                        bo = kgem_create_linear(&sna->kgem,
+                                                4*sna->render.vertex_used);
+                        if (bo)
+                                kgem_bo_write(&sna->kgem, bo,
+                                              sna->render.vertex_data,
+                                              4*sna->render.vertex_used);
+                        free_bo = bo;
                 }
         }
 
         DBG(("%s: reloc = %d\n",
              __FUNCTION__, sna->render.vertex_reloc[0]));
 
-        sna->kgem.batch[sna->render.vertex_reloc[0]] =
-                kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
-                               bo, I915_GEM_DOMAIN_VERTEX << 16, delta);
-        if (bo)
-                kgem_bo_destroy(&sna->kgem, bo);
+        if (sna->render.vertex_reloc[0]) {
+                sna->kgem.batch[sna->render.vertex_reloc[0]] =
+                        kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
+                                       bo, I915_GEM_DOMAIN_VERTEX << 16, delta);
+                sna->render.vertex_reloc[0] = 0;
+        }
 
-reset:
-        sna->render.vertex_reloc[0] = 0;
-        sna->render.vertex_used = 0;
-        sna->render.vertex_index = 0;
+        if (sna->render.vbo == NULL) {
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+                assert(sna->render.vertices == sna->render.vertex_data);
+                assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+        }
 
-        sna->render.vbo = NULL;
-        sna->render.vertices = sna->render.vertex_data;
-        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+        if (free_bo)
+                kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 static bool gen3_rectangle_begin(struct sna *sna,
@@ -1885,10 +1899,23 @@ gen3_render_reset(struct sna *sna)
         state->last_floats_per_vertex = 0;
         state->last_vertex_offset = 0;
         state->vertex_offset = 0;
+}
 
-        assert(sna->render.vertex_used == 0);
-        assert(sna->render.vertex_index == 0);
-        assert(sna->render.vertex_reloc[0] == 0);
+static void
+gen3_render_retire(struct kgem *kgem)
+{
+        struct sna *sna;
+
+        sna = container_of(kgem, struct sna, kgem);
+        if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+                DBG(("%s: discarding vbo\n", __FUNCTION__));
+                kgem_bo_destroy(kgem, sna->render.vbo);
+                sna->render.vbo = NULL;
+                sna->render.vertices = sna->render.vertex_data;
+                sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+        }
 }
 
 static Bool gen3_composite_channel_set_format(struct sna_composite_channel *channel,
@@ -4466,5 +4493,7 @@ Bool gen3_render_init(struct sna *sna)
         render->max_3d_size = MAX_3D_SIZE;
         render->max_3d_pitch = MAX_3D_PITCH;
+
+        sna->kgem.retire = gen3_render_retire;
 
         return TRUE;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index a80ce0a4..bcba0d87 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -410,11 +410,14 @@ static int gen5_vertex_finish(struct sna *sna)
 
 static void gen5_vertex_close(struct sna *sna)
 {
-        struct kgem_bo *bo;
+        struct kgem_bo *bo, *free_bo = NULL;
         unsigned int i, delta = 0;
 
         assert(sna->render_state.gen5.vertex_offset == 0);
 
+        DBG(("%s: used=%d, vbo active? %d\n",
+             __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
         if (!sna->render.vertex_used) {
                 assert(sna->render.vbo == NULL);
                 assert(sna->render.vertices == sna->render.vertex_data);
@@ -422,10 +425,18 @@ static void gen5_vertex_close(struct sna *sna)
                 return;
         }
 
-        DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
-
         bo = sna->render.vbo;
-        if (bo == NULL) {
+        if (bo) {
+                if (IS_CPU_MAP(bo->map) ||
+                    sna->render.vertex_size - sna->render.vertex_used < 64) {
+                        DBG(("%s: discarding vbo (was CPU mapped)\n",
+                             __FUNCTION__));
+                        sna->render.vbo = NULL;
+                        sna->render.vertices = sna->render.vertex_data;
+                        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                        free_bo = bo;
+                }
+        } else {
                 if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
                         DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
                              sna->render.vertex_used, sna->kgem.nbatch));
@@ -441,10 +452,11 @@ static void gen5_vertex_close(struct sna *sna)
                                                  sna->render.vertex_data,
                                                  4*sna->render.vertex_used)) {
                                 kgem_bo_destroy(&sna->kgem, bo);
-                                goto reset;
+                                bo = NULL;
                         }
                         DBG(("%s: new vbo: %d\n",
                              __FUNCTION__, sna->render.vertex_used));
+                        free_bo = bo;
                 }
         }
 
@@ -469,17 +481,13 @@ static void gen5_vertex_close(struct sna *sna)
                 }
         }
 
-        if (bo)
-                kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-        sna->render.vertex_used = 0;
-        sna->render.vertex_index = 0;
-        sna->render_state.gen5.vb_id = 0;
+        if (sna->render.vbo == NULL) {
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+        }
 
-        sna->render.vbo = NULL;
-        sna->render.vertices = sna->render.vertex_data;
-        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+        if (free_bo)
+                kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 static uint32_t gen5_get_blend(int op,
@@ -3470,6 +3478,23 @@ gen5_render_context_switch(struct kgem *kgem,
         }
 }
 
+static void
+gen5_render_retire(struct kgem *kgem)
+{
+        struct sna *sna;
+
+        sna = container_of(kgem, struct sna, kgem);
+        if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+                DBG(("%s: discarding vbo\n", __FUNCTION__));
+                kgem_bo_destroy(kgem, sna->render.vbo);
+                sna->render.vbo = NULL;
+                sna->render.vertices = sna->render.vertex_data;
+                sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+        }
+}
+
 static void gen5_render_reset(struct sna *sna)
 {
         sna->render_state.gen5.needs_invariant = TRUE;
@@ -3730,6 +3755,7 @@ Bool gen5_render_init(struct sna *sna)
                 return FALSE;
 
         sna->kgem.context_switch = gen5_render_context_switch;
+        sna->kgem.retire = gen5_render_retire;
 
         sna->render.composite = gen5_render_composite;
 #if !NO_COMPOSITE_SPANS
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index b69b3a21..439fb521 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -989,9 +989,14 @@ static int gen6_vertex_finish(struct sna *sna)
 
 static void gen6_vertex_close(struct sna *sna)
 {
-        struct kgem_bo *bo;
+        struct kgem_bo *bo, *free_bo = NULL;
         unsigned int i, delta = 0;
 
+        assert(sna->render_state.gen6.vertex_offset == 0);
+
+        DBG(("%s: used=%d, vbo active? %d\n",
+             __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
         if (!sna->render.vertex_used) {
                 assert(sna->render.vbo == NULL);
                 assert(sna->render.vertices == sna->render.vertex_data);
@@ -999,13 +1004,16 @@ static void gen6_vertex_close(struct sna *sna)
                 return;
         }
 
-        DBG(("%s: used=%d / %d\n", __FUNCTION__,
-             sna->render.vertex_used, sna->render.vertex_size));
-
         bo = sna->render.vbo;
-        if (bo == NULL) {
-                assert(sna->render.vertices == sna->render.vertex_data);
-                assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data));
+        if (bo) {
+                if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+                        DBG(("%s: discarding vbo (full)\n", __FUNCTION__));
+                        sna->render.vbo = NULL;
+                        sna->render.vertices = sna->render.vertex_data;
+                        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                        free_bo = bo;
+                }
+        } else {
                 if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
                         DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
                              sna->render.vertex_used, sna->kgem.nbatch));
@@ -1021,10 +1029,11 @@ static void gen6_vertex_close(struct sna *sna)
                                                  sna->render.vertex_data,
                                                  4*sna->render.vertex_used)) {
                                 kgem_bo_destroy(&sna->kgem, bo);
-                                goto reset;
+                                bo = NULL;
                         }
                         DBG(("%s: new vbo: %d\n",
                              __FUNCTION__, sna->render.vertex_used));
+                        free_bo = bo;
                 }
         }
 
@@ -1049,17 +1058,15 @@ static void gen6_vertex_close(struct sna *sna)
                 }
         }
 
-        if (bo)
-                kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-        sna->render.vertex_used = 0;
-        sna->render.vertex_index = 0;
-        sna->render_state.gen6.vb_id = 0;
+        if (sna->render.vbo == NULL) {
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+                assert(sna->render.vertices == sna->render.vertex_data);
+                assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+        }
 
-        sna->render.vbo = NULL;
-        sna->render.vertices = sna->render.vertex_data;
-        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+        if (free_bo)
+                kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 typedef struct gen6_surface_state_padded {
@@ -4095,8 +4102,21 @@ gen6_render_context_switch(struct kgem *kgem,
 static void
 gen6_render_retire(struct kgem *kgem)
 {
+        struct sna *sna;
+
         if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
                 kgem->ring = kgem->mode;
+
+        sna = container_of(kgem, struct sna, kgem);
+        if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+                DBG(("%s: discarding vbo\n", __FUNCTION__));
+                kgem_bo_destroy(kgem, sna->render.vbo);
+                sna->render.vbo = NULL;
+                sna->render.vertices = sna->render.vertex_data;
+                sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+        }
 }
 
 static void gen6_render_reset(struct sna *sna)
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 0d913f6f..e3d9757d 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1086,9 +1086,14 @@ static int gen7_vertex_finish(struct sna *sna)
 
 static void gen7_vertex_close(struct sna *sna)
 {
-        struct kgem_bo *bo;
+        struct kgem_bo *bo, *free_bo = NULL;
         unsigned int i, delta = 0;
 
+        assert(sna->render_state.gen7.vertex_offset == 0);
+
+        DBG(("%s: used=%d, vbo active? %d\n",
+             __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
         if (!sna->render.vertex_used) {
                 assert(sna->render.vbo == NULL);
                 assert(sna->render.vertices == sna->render.vertex_data);
@@ -1096,11 +1101,16 @@ static void gen7_vertex_close(struct sna *sna)
                 return;
         }
 
-        DBG(("%s: used=%d / %d\n", __FUNCTION__,
-             sna->render.vertex_used, sna->render.vertex_size));
-
         bo = sna->render.vbo;
-        if (bo == NULL) {
+        if (bo) {
+                if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+                        DBG(("%s: discarding vbo (full)\n", __FUNCTION__));
+                        sna->render.vbo = NULL;
+                        sna->render.vertices = sna->render.vertex_data;
+                        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                        free_bo = bo;
+                }
+        } else {
                 if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
                         DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
                              sna->render.vertex_used, sna->kgem.nbatch));
@@ -1116,10 +1126,11 @@ static void gen7_vertex_close(struct sna *sna)
                                                  sna->render.vertex_data,
                                                  4*sna->render.vertex_used)) {
                                 kgem_bo_destroy(&sna->kgem, bo);
-                                goto reset;
+                                bo = NULL;
                         }
                         DBG(("%s: new vbo: %d\n",
                              __FUNCTION__, sna->render.vertex_used));
+                        free_bo = bo;
                 }
         }
 
@@ -1144,17 +1155,13 @@ static void gen7_vertex_close(struct sna *sna)
                 }
         }
 
-        if (bo)
-                kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-        sna->render.vertex_used = 0;
-        sna->render.vertex_index = 0;
-        sna->render_state.gen7.vb_id = 0;
+        if (sna->render.vbo == NULL) {
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+        }
 
-        sna->render.vbo = NULL;
-        sna->render.vertices = sna->render.vertex_data;
-        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+        if (free_bo)
+                kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 static void null_create(struct sna_static_stream *stream)
@@ -4080,8 +4087,21 @@ gen7_render_context_switch(struct kgem *kgem,
 static void
 gen7_render_retire(struct kgem *kgem)
 {
+        struct sna *sna;
+
         if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
                 kgem->ring = kgem->mode;
+
+        sna = container_of(kgem, struct sna, kgem);
+        if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+                DBG(("%s: discarding vbo\n", __FUNCTION__));
+                kgem_bo_destroy(kgem, sna->render.vbo);
+                sna->render.vbo = NULL;
+                sna->render.vertices = sna->render.vertex_data;
+                sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+                sna->render.vertex_used = 0;
+                sna->render.vertex_index = 0;
+        }
 }
 
 static void gen7_render_reset(struct sna *sna)