author    Chris Wilson <chris@chris-wilson.co.uk>    2012-01-13 19:00:01 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>    2012-01-14 18:13:47 +0000
commit    a62429a1f79b8fa4a5ddaf61b2bc80fc8dbe576c (patch)
tree      589d96fc4b8978b7da3edca53c01e630390877b4
parent    24df8ab9742f771cfeb6d30bd8a61a17a9e22ca7 (diff)
sna: Upload continuation vertices into mmapped buffers
In the common case, we expect a very small number of vertices, which will fit into the batch along with the commands. In full flow, however, we overflow the on-stack buffer and will likely need several continuation buffers. Streaming those straight into the GTT seems like a good idea, with the usual caveats over aperture pressure. (Since these are linear, we could use snoopable bo on the architectures that support such for vertex buffers, if we had kernel support.)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
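In outline: instead of copying the on-stack vertex_data[] into the batch (or into a one-shot bo) only when the batch is flushed, each backend now keeps an optional 256 KiB linear vbo, mapped and written directly, onto which an overflowing emitter chains. A minimal sketch of that continuation step, mirroring the gen3_vertex_finish() hunk below (comments and simplifications are editorial; the kgem calls are as in the patch):

	static int sketch_vertex_continue(struct sna *sna)
	{
		/* Allocate a 256 KiB linear bo and map it through the GTT
		 * so subsequent vertices stream straight into it. */
		sna->render.vertices = NULL;
		sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
		if (sna->render.vbo)
			sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
		if (sna->render.vertices == NULL) {
			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
			sna->render.vbo = NULL;
			return 0; /* no space: caller falls back to a batch flush */
		}

		/* Carry the partially filled on-stack buffer over into the bo. */
		memcpy(sna->render.vertices, sna->render.vertex_data,
		       sizeof(float) * sna->render.vertex_used);

		sna->render.vertex_size = 64 * 1024 - 1; /* capacity in floats */
		return sna->render.vertex_size - sna->render.vertex_used;
	}

The return value is the number of floats still free. The matching *_vertex_close(), called once per batch flush, emits the relocation for whichever buffer the vertices ended up in (batch, one-shot bo, or this vbo) and resets the emitter back onto the on-stack array.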
-rw-r--r--  src/sna/gen3_render.c        | 149
-rw-r--r--  src/sna/gen4_render.c        | 126
-rw-r--r--  src/sna/gen5_render.c        | 147
-rw-r--r--  src/sna/gen6_render.c        | 139
-rw-r--r--  src/sna/gen7_render.c        | 134
-rw-r--r--  src/sna/sna_render.c         |   3
-rw-r--r--  src/sna/sna_render.h         |   6
-rw-r--r--  src/sna/sna_render_inline.h  |  10
8 files changed, 532 insertions(+), 182 deletions(-)
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 457e6942..fc896d91 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -456,7 +456,7 @@ gen3_emit_composite_primitive_identity_source(struct sna *sna,
float h = r->height;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 12;
v[8] = v[4] = r->dst.x + op->dst.x;
@@ -516,7 +516,7 @@ gen3_emit_composite_primitive_constant_identity_mask(struct sna *sna,
float h = r->height;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 12;
v[8] = v[4] = r->dst.x + op->dst.x;
@@ -552,7 +552,7 @@ gen3_emit_composite_primitive_identity_source_mask(struct sna *sna,
w = r->width;
h = r->height;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 18;
v[0] = dst_x + w;
@@ -597,7 +597,7 @@ gen3_emit_composite_primitive_affine_source_mask(struct sna *sna,
w = r->width;
h = r->height;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 18;
v[0] = dst_x + w;
@@ -1512,54 +1512,107 @@ static void gen3_vertex_flush(struct sna *sna)
sna->render_state.gen3.vertex_offset = 0;
}
-static void gen3_vertex_finish(struct sna *sna, Bool last)
+static int gen3_vertex_finish(struct sna *sna)
{
struct kgem_bo *bo;
- int delta;
-
- DBG(("%s: last? %d\n", __FUNCTION__, last));
gen3_vertex_flush(sna);
if (!sna->render.vertex_used)
- return;
+ return sna->render.vertex_size;
+
+ bo = sna->render.vbo;
+ if (bo) {
+ DBG(("%s: reloc = %d\n", __FUNCTION__,
+ sna->render.vertex_reloc[0]));
+
+ sna->kgem.batch[sna->render.vertex_reloc[0]] =
+ kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
+ bo, I915_GEM_DOMAIN_VERTEX << 16, 0);
+
+ sna->render.vertex_reloc[0] = 0;
+ sna->render.vertex_used = 0;
+ sna->render.vertex_index = 0;
+
+ kgem_bo_destroy(&sna->kgem, bo);
+ }
+
+ sna->render.vertices = NULL;
+ sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+ if (sna->render.vbo)
+ sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertices == NULL) {
+ kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+ sna->render.vbo = NULL;
+ return 0;
+ }
- if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
- DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
- sna->render.vertex_used, sna->kgem.nbatch));
- memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ if (sna->render.vertex_used) {
+ memcpy(sna->render.vertices,
sna->render.vertex_data,
- sna->render.vertex_used * 4);
- delta = sna->kgem.nbatch * 4;
- bo = NULL;
- sna->kgem.nbatch += sna->render.vertex_used;
- } else {
- bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
- if (bo && !kgem_bo_write(&sna->kgem, bo,
- sna->render.vertex_data,
- 4*sna->render.vertex_used)) {
- kgem_bo_destroy(&sna->kgem, bo);
- return;
+ sizeof(float)*sna->render.vertex_used);
+ }
+ sna->render.vertex_size = 64 * 1024 - 1;
+ return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen3_vertex_close(struct sna *sna)
+{
+ struct kgem_bo *bo;
+ int delta = 0;
+
+ gen3_vertex_flush(sna);
+
+ if (!sna->render.vertex_used) {
+ assert(sna->render.vbo == NULL);
+ assert(sna->render.vertices == sna->render.vertex_data);
+ assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+ return;
+ }
+
+ DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+ bo = sna->render.vbo;
+ if (bo == NULL) {
+ if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+ DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+ sna->render.vertex_used, sna->kgem.nbatch));
+ memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ sna->render.vertex_data,
+ sna->render.vertex_used * 4);
+ delta = sna->kgem.nbatch * 4;
+ bo = NULL;
+ sna->kgem.nbatch += sna->render.vertex_used;
+ } else {
+ bo = kgem_create_linear(&sna->kgem,
+ 4*sna->render.vertex_used);
+ if (bo && !kgem_bo_write(&sna->kgem, bo,
+ sna->render.vertex_data,
+ 4*sna->render.vertex_used)) {
+ kgem_bo_destroy(&sna->kgem, bo);
+ goto reset;
+ }
+ DBG(("%s: new vbo: %d\n", __FUNCTION__,
+ sna->render.vertex_used));
}
- delta = 0;
- DBG(("%s: new vbo: %d\n", __FUNCTION__,
- sna->render.vertex_used));
}
DBG(("%s: reloc = %d\n", __FUNCTION__,
sna->render.vertex_reloc[0]));
sna->kgem.batch[sna->render.vertex_reloc[0]] =
- kgem_add_reloc(&sna->kgem,
- sna->render.vertex_reloc[0],
- bo,
- I915_GEM_DOMAIN_VERTEX << 16,
- delta);
+ kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
+ bo, I915_GEM_DOMAIN_VERTEX << 16, delta);
+ if (bo)
+ kgem_bo_destroy(&sna->kgem, bo);
+
+reset:
sna->render.vertex_reloc[0] = 0;
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
- if (bo)
- kgem_bo_destroy(&sna->kgem, bo);
+ sna->render.vbo = NULL;
+ sna->render.vertices = sna->render.vertex_data;
+ sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
}
static bool gen3_rectangle_begin(struct sna *sna,
@@ -1612,10 +1665,7 @@ static int gen3_get_rectangles__flush(struct sna *sna, bool ca)
if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
return 0;
- gen3_vertex_finish(sna, FALSE);
- assert(sna->render.vertex_index == 0);
- assert(sna->render.vertex_used == 0);
- return ARRAY_SIZE(sna->render.vertex_data);
+ return gen3_vertex_finish(sna);
}
inline static int gen3_get_rectangles(struct sna *sna,
@@ -1647,7 +1697,7 @@ inline static int gen3_get_rectangles(struct sna *sna,
sna->render.vertex_index += 3*want;
assert(want);
- assert(sna->render.vertex_index * op->floats_per_vertex <= ARRAY_SIZE(sna->render.vertex_data));
+ assert(sna->render.vertex_index * op->floats_per_vertex <= sna->render.vertex_size);
return want;
}
@@ -2205,6 +2255,9 @@ gen3_align_vertex(struct sna *sna,
struct sna_composite_op *op)
{
if (op->floats_per_vertex != sna->render_state.gen3.last_floats_per_vertex) {
+ if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+ gen3_vertex_finish(sna);
+
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
sna->render_state.gen3.last_floats_per_vertex,
op->floats_per_vertex,
@@ -2747,7 +2800,7 @@ gen3_emit_composite_spans_primitive_zero(struct sna *sna,
const BoxRec *box,
float opacity)
{
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 6;
v[0] = op->base.dst.x + box->x2;
@@ -2766,7 +2819,7 @@ gen3_emit_composite_spans_primitive_zero_no_offset(struct sna *sna,
const BoxRec *box,
float opacity)
{
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 6;
v[0] = box->x2;
@@ -2781,7 +2834,7 @@ gen3_emit_composite_spans_primitive_constant(struct sna *sna,
const BoxRec *box,
float opacity)
{
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
v[0] = op->base.dst.x + box->x2;
@@ -2797,7 +2850,7 @@ gen3_emit_composite_spans_primitive_constant_no_offset(struct sna *sna,
const BoxRec *box,
float opacity)
{
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
v[0] = box->x2;
@@ -2813,7 +2866,7 @@ gen3_emit_composite_spans_primitive_identity_source(struct sna *sna,
const BoxRec *box,
float opacity)
{
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
v[0] = op->base.dst.x + box->x2;
@@ -2844,7 +2897,7 @@ gen3_emit_composite_spans_primitive_affine_source(struct sna *sna,
PictTransform *transform = op->base.src.transform;
float x, y, *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
v[0] = op->base.dst.x + box->x2;
@@ -2883,7 +2936,7 @@ gen3_emit_composite_spans_primitive_identity_gradient(struct sna *sna,
const BoxRec *box,
float opacity)
{
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
v[0] = op->base.dst.x + box->x2;
@@ -2912,7 +2965,7 @@ gen3_emit_composite_spans_primitive_affine_gradient(struct sna *sna,
float opacity)
{
PictTransform *transform = op->base.src.transform;
- float *v = sna->render.vertex_data + sna->render.vertex_used;
+ float *v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
v[0] = op->base.dst.x + box->x2;
@@ -4278,7 +4331,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
static void gen3_render_flush(struct sna *sna)
{
- gen3_vertex_finish(sna, TRUE);
+ gen3_vertex_close(sna);
}
static void
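A note on the relocation handling shared by all five backends: the packet that points at the vertex buffer is emitted before the final buffer is known, so only its dword index within the batch is recorded in vertex_reloc[]; finish/close then patch that dword through kgem_add_reloc() once the destination is settled. The emit side is not part of this diff; a hypothetical sketch of what it does (the real code lives in gen3_rectangle_begin() and its per-generation equivalents):

	static void sketch_reserve_vb_reloc(struct sna *sna)
	{
		/* Remember which batch dword names the vertex buffer and
		 * leave it blank; gen3_vertex_finish()/gen3_vertex_close()
		 * patch it later via kgem_add_reloc(). */
		sna->render.vertex_reloc[0] = sna->kgem.nbatch;
		sna->kgem.batch[sna->kgem.nbatch++] = 0;
	}

On gen5 through gen7 the packet also carries an end-address dword, hence the extra patch of vertex_reloc[i]+1 with a delta of vertex_used * 4 - 1 in those hunks; gen4 patches only the single dword.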
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 972b7187..942f8fb0 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -358,37 +358,97 @@ static void gen4_vertex_flush(struct sna *sna)
gen4_magic_ca_pass(sna, sna->render.op);
}
-static void gen4_vertex_finish(struct sna *sna, Bool last)
+static int gen4_vertex_finish(struct sna *sna)
{
struct kgem_bo *bo;
- unsigned int i, delta;
+ unsigned int i;
gen4_vertex_flush(sna);
if (!sna->render.vertex_used)
- return;
+ return sna->render.vertex_size;
/* Note: we only need dword alignment (currently) */
- if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
- DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
- sna->render.vertex_used, sna->kgem.nbatch));
- memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ bo = sna->render.vbo;
+ if (bo) {
+ for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+ if (sna->render.vertex_reloc[i]) {
+ DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+ i, sna->render.vertex_reloc[i]));
+
+ sna->kgem.batch[sna->render.vertex_reloc[i]] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i],
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ 0);
+ sna->render.vertex_reloc[i] = 0;
+ }
+ }
+
+ sna->render.vertex_used = 0;
+ sna->render.vertex_index = 0;
+ sna->render_state.gen4.vb_id = 0;
+
+ kgem_bo_destroy(&sna->kgem, bo);
+ }
+
+ sna->render.vertices = NULL;
+ sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+ if (sna->render.vbo)
+ sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertices == NULL) {
+ kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+ sna->render.vbo = NULL;
+ return 0;
+ }
+
+ if (sna->render.vertex_used) {
+ memcpy(sna->render.vertices,
sna->render.vertex_data,
- sna->render.vertex_used * 4);
- delta = sna->kgem.nbatch * 4;
- bo = NULL;
- sna->kgem.nbatch += sna->render.vertex_used;
- } else {
- bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
- if (bo && !kgem_bo_write(&sna->kgem, bo,
- sna->render.vertex_data,
- 4*sna->render.vertex_used)) {
- kgem_bo_destroy(&sna->kgem, bo);
- return;
+ sizeof(float)*sna->render.vertex_used);
+ }
+ sna->render.vertex_size = 64 * 1024 - 1;
+ return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen4_vertex_close(struct sna *sna)
+{
+ struct kgem_bo *bo;
+ unsigned int i, delta = 0;
+
+ gen4_vertex_flush(sna);
+ if (!sna->render.vertex_used) {
+ assert(sna->render.vbo == NULL);
+ assert(sna->render.vertices == sna->render.vertex_data);
+ assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+ return;
+ }
+
+ DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+ bo = sna->render.vbo;
+ if (bo == NULL) {
+ if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+ DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+ sna->render.vertex_used, sna->kgem.nbatch));
+ memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ sna->render.vertex_data,
+ sna->render.vertex_used * 4);
+ delta = sna->kgem.nbatch * 4;
+ bo = NULL;
+ sna->kgem.nbatch += sna->render.vertex_used;
+ } else {
+ bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+ if (bo && !kgem_bo_write(&sna->kgem, bo,
+ sna->render.vertex_data,
+ 4*sna->render.vertex_used)) {
+ kgem_bo_destroy(&sna->kgem, bo);
+ goto reset;
+ }
+ DBG(("%s: new vbo: %d\n", __FUNCTION__,
+ sna->render.vertex_used));
}
- delta = 0;
- DBG(("%s: new vbo: %d\n", __FUNCTION__,
- sna->render.vertex_used));
}
for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -409,11 +469,17 @@ static void gen4_vertex_finish(struct sna *sna, Bool last)
if (bo)
kgem_bo_destroy(&sna->kgem, bo);
+reset:
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
sna->render_state.gen4.vb_id = 0;
+
+ sna->render.vbo = NULL;
+ sna->render.vertices = sna->render.vertex_data;
+ sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
}
+
static uint32_t gen4_get_blend(int op,
Bool has_component_alpha,
uint32_t dst_format)
@@ -696,7 +762,7 @@ gen4_emit_composite_primitive_solid(struct sna *sna,
float f;
} dst;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -728,7 +794,7 @@ gen4_emit_composite_primitive_identity_source(struct sna *sna,
float f;
} dst;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
sx = r->src.x + op->src.offset[0];
@@ -762,7 +828,7 @@ gen4_emit_composite_primitive_affine_source(struct sna *sna,
} dst;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -815,7 +881,7 @@ gen4_emit_composite_primitive_identity_source_mask(struct sna *sna,
w = r->width;
h = r->height;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
dst.p.x = r->dst.x + r->width;
@@ -1057,10 +1123,7 @@ static int gen4_get_rectangles__flush(struct sna *sna)
if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
return 0;
- gen4_vertex_finish(sna, FALSE);
- sna->render.vertex_index = 0;
-
- return ARRAY_SIZE(sna->render.vertex_data);
+ return gen4_vertex_finish(sna);
}
inline static int gen4_get_rectangles(struct sna *sna,
@@ -1209,6 +1272,9 @@ static void
gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
+ if (sna->render.vertex_size - sna->render.vertex_used < 6*op->floats_per_vertex)
+ gen4_vertex_finish(sna);
+
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
sna->render_state.gen4.floats_per_vertex,
op->floats_per_vertex,
@@ -2843,7 +2909,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
static void
gen4_render_flush(struct sna *sna)
{
- gen4_vertex_finish(sna, TRUE);
+ gen4_vertex_close(sna);
}
static void gen4_render_reset(struct sna *sna)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6347b3c7..573478d3 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -349,37 +349,104 @@ static void gen5_vertex_flush(struct sna *sna)
gen5_magic_ca_pass(sna, sna->render.op);
}
-static void gen5_vertex_finish(struct sna *sna, Bool last)
+static int gen5_vertex_finish(struct sna *sna)
{
struct kgem_bo *bo;
- unsigned int i, delta;
+ unsigned int i;
gen5_vertex_flush(sna);
if (!sna->render.vertex_used)
- return;
+ return sna->render.vertex_size;
/* Note: we only need dword alignment (currently) */
- if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
- DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
- sna->render.vertex_used, sna->kgem.nbatch));
- memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ bo = sna->render.vbo;
+ if (bo) {
+ for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+ if (sna->render.vertex_reloc[i]) {
+ DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+ i, sna->render.vertex_reloc[i]));
+
+ sna->kgem.batch[sna->render.vertex_reloc[i]] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i],
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ 0);
+ sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i]+1,
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ sna->render.vertex_used * 4 - 1);
+ sna->render.vertex_reloc[i] = 0;
+ }
+ }
+
+ sna->render.vertex_used = 0;
+ sna->render.vertex_index = 0;
+ sna->render_state.gen5.vb_id = 0;
+
+ kgem_bo_destroy(&sna->kgem, bo);
+ }
+
+ sna->render.vertices = NULL;
+ sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+ if (sna->render.vbo)
+ sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertices == NULL) {
+ kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+ sna->render.vbo = NULL;
+ return 0;
+ }
+
+ if (sna->render.vertex_used) {
+ memcpy(sna->render.vertices,
sna->render.vertex_data,
- sna->render.vertex_used * 4);
- delta = sna->kgem.nbatch * 4;
- bo = NULL;
- sna->kgem.nbatch += sna->render.vertex_used;
- } else {
- bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
- if (bo && !kgem_bo_write(&sna->kgem, bo,
- sna->render.vertex_data,
- 4*sna->render.vertex_used)) {
- kgem_bo_destroy(&sna->kgem, bo);
- return;
+ sizeof(float)*sna->render.vertex_used);
+ }
+ sna->render.vertex_size = 64 * 1024 - 1;
+ return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen5_vertex_close(struct sna *sna)
+{
+ struct kgem_bo *bo;
+ unsigned int i, delta = 0;
+
+ gen5_vertex_flush(sna);
+ if (!sna->render.vertex_used) {
+ assert(sna->render.vbo == NULL);
+ assert(sna->render.vertices == sna->render.vertex_data);
+ assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+ return;
+ }
+
+ DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+ bo = sna->render.vbo;
+ if (bo == NULL) {
+
+ if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+ DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+ sna->render.vertex_used, sna->kgem.nbatch));
+ memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ sna->render.vertex_data,
+ sna->render.vertex_used * 4);
+ delta = sna->kgem.nbatch * 4;
+ bo = NULL;
+ sna->kgem.nbatch += sna->render.vertex_used;
+ } else {
+ bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+ if (bo && !kgem_bo_write(&sna->kgem, bo,
+ sna->render.vertex_data,
+ 4*sna->render.vertex_used)) {
+ kgem_bo_destroy(&sna->kgem, bo);
+ goto reset;
+ }
+ DBG(("%s: new vbo: %d\n", __FUNCTION__,
+ sna->render.vertex_used));
}
- delta = 0;
- DBG(("%s: new vbo: %d\n", __FUNCTION__,
- sna->render.vertex_used));
}
for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -406,9 +473,14 @@ static void gen5_vertex_finish(struct sna *sna, Bool last)
if (bo)
kgem_bo_destroy(&sna->kgem, bo);
+reset:
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
sna->render_state.gen5.vb_id = 0;
+
+ sna->render.vbo = NULL;
+ sna->render.vertices = sna->render.vertex_data;
+ sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
}
static uint32_t gen5_get_blend(int op,
@@ -727,7 +799,7 @@ gen5_emit_composite_primitive_solid(struct sna *sna,
float f;
} dst;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -759,7 +831,7 @@ gen5_emit_composite_primitive_identity_source(struct sna *sna,
float f;
} dst;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
sx = r->src.x + op->src.offset[0];
@@ -793,7 +865,7 @@ gen5_emit_composite_primitive_affine_source(struct sna *sna,
} dst;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -846,7 +918,7 @@ gen5_emit_composite_primitive_identity_source_mask(struct sna *sna,
w = r->width;
h = r->height;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
dst.p.x = r->dst.x + r->width;
@@ -1082,10 +1154,7 @@ static int gen5_get_rectangles__flush(struct sna *sna)
if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
return 0;
- gen5_vertex_finish(sna, FALSE);
- sna->render.vertex_index = 0;
-
- return ARRAY_SIZE(sna->render.vertex_data);
+ return gen5_vertex_finish(sna);
}
inline static int gen5_get_rectangles(struct sna *sna,
@@ -1094,9 +1163,9 @@ inline static int gen5_get_rectangles(struct sna *sna,
{
int rem = vertex_space(sna);
- if (rem < 3*op->floats_per_vertex) {
+ if (rem < op->floats_per_rect) {
DBG(("flushing vbo for %s: %d < %d\n",
- __FUNCTION__, rem, 3*op->floats_per_vertex));
+ __FUNCTION__, rem, op->floats_per_rect));
rem = gen5_get_rectangles__flush(sna);
if (rem == 0)
return 0;
@@ -1105,8 +1174,8 @@ inline static int gen5_get_rectangles(struct sna *sna,
if (!gen5_rectangle_begin(sna, op))
return 0;
- if (want * op->floats_per_vertex*3 > rem)
- want = rem / (3*op->floats_per_vertex);
+ if (want * op->floats_per_rect > rem)
+ want = rem / op->floats_per_rect;
sna->render.vertex_index += 3*want;
return want;
@@ -1233,6 +1302,9 @@ static void
gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
+ if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+ gen5_vertex_finish(sna);
+
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
sna->render_state.gen5.floats_per_vertex,
op->floats_per_vertex,
@@ -1674,6 +1746,7 @@ gen5_render_video(struct sna *sna,
tmp.u.gen5.ve_id = 1;
tmp.is_affine = TRUE;
tmp.floats_per_vertex = 3;
+ tmp.floats_per_rect = 9;
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
kgem_submit(&sna->kgem);
@@ -2226,6 +2299,7 @@ gen5_render_composite(struct sna *sna,
tmp->floats_per_vertex = 3 + !tmp->is_affine;
}
+ tmp->floats_per_rect = 3*tmp->floats_per_vertex;
tmp->u.gen5.wm_kernel =
gen5_choose_composite_kernel(tmp->op,
@@ -2668,6 +2742,7 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
tmp.is_affine = TRUE;
tmp.floats_per_vertex = 3;
+ tmp.floats_per_rect = 9;
tmp.u.gen5.wm_kernel = WM_KERNEL;
tmp.u.gen5.ve_id = 1;
@@ -2810,6 +2885,7 @@ gen5_render_copy(struct sna *sna, uint8_t alu,
op->base.is_affine = true;
op->base.floats_per_vertex = 3;
+ op->base.floats_per_rect = 9;
op->base.u.gen5.wm_kernel = WM_KERNEL;
op->base.u.gen5.ve_id = 1;
@@ -2957,6 +3033,7 @@ gen5_render_fill_boxes(struct sna *sna,
tmp.is_affine = TRUE;
tmp.floats_per_vertex = 3;
+ tmp.floats_per_rect = 9;
tmp.u.gen5.wm_kernel = WM_KERNEL;
tmp.u.gen5.ve_id = 1;
@@ -3144,6 +3221,7 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
op->base.is_affine = TRUE;
op->base.floats_per_vertex = 3;
+ op->base.floats_per_rect = 9;
op->base.u.gen5.wm_kernel = WM_KERNEL;
op->base.u.gen5.ve_id = 1;
@@ -3229,6 +3307,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
tmp.is_affine = TRUE;
tmp.floats_per_vertex = 3;
+ tmp.floats_per_rect = 9;
tmp.has_component_alpha = 0;
tmp.need_magic_ca_pass = FALSE;
@@ -3269,7 +3348,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
static void
gen5_render_flush(struct sna *sna)
{
- gen5_vertex_finish(sna, TRUE);
+ gen5_vertex_close(sna);
}
static void
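One deviation before the gen6/gen7 hunks: on those generations the streaming vbo is mapped through the CPU rather than the GTT, presumably because LLC makes cacheable writes preferable to write-combined writes through the aperture, and the bo must then be synchronised into the CPU domain before it is written. The continuation sketch from above becomes (again mirroring the hunks below; simplifications editorial):

	static int sketch_vertex_continue__cpu(struct sna *sna)
	{
		sna->render.vertices = NULL;
		sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
		if (sna->render.vbo)
			sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
		if (sna->render.vertices == NULL) {
			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
			sna->render.vbo = NULL;
			return 0;
		}

		/* Unlike the GTT path, the CPU mapping must be pulled into
		 * the CPU domain before the staged vertices are copied. */
		kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
		memcpy(sna->render.vertices, sna->render.vertex_data,
		       sizeof(float) * sna->render.vertex_used);

		sna->render.vertex_size = 64 * 1024 - 1;
		return sna->render.vertex_size - sna->render.vertex_used;
	}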
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index cd043c31..1ec6f065 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -910,37 +910,105 @@ static void gen6_vertex_flush(struct sna *sna)
gen6_magic_ca_pass(sna, sna->render.op);
}
-static void gen6_vertex_finish(struct sna *sna, Bool last)
+static int gen6_vertex_finish(struct sna *sna)
{
struct kgem_bo *bo;
- unsigned int i, delta;
+ unsigned int i;
gen6_vertex_flush(sna);
if (!sna->render.vertex_used)
- return;
+ return sna->render.vertex_size;
/* Note: we only need dword alignment (currently) */
- if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
- DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
- sna->render.vertex_used, sna->kgem.nbatch));
- memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ bo = sna->render.vbo;
+ if (bo) {
+ for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+ if (sna->render.vertex_reloc[i]) {
+ DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+ i, sna->render.vertex_reloc[i]));
+
+ sna->kgem.batch[sna->render.vertex_reloc[i]] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i],
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ 0);
+ sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i]+1,
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ 0 + sna->render.vertex_used * 4 - 1);
+ sna->render.vertex_reloc[i] = 0;
+ }
+ }
+
+ sna->render.vertex_used = 0;
+ sna->render.vertex_index = 0;
+ sna->render_state.gen6.vb_id = 0;
+
+ kgem_bo_destroy(&sna->kgem, bo);
+ }
+
+ sna->render.vertices = NULL;
+ sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+ if (sna->render.vbo)
+ sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertices == NULL) {
+ kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+ sna->render.vbo = NULL;
+ return 0;
+ }
+
+ kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertex_used) {
+ memcpy(sna->render.vertices,
sna->render.vertex_data,
- sna->render.vertex_used * 4);
- delta = sna->kgem.nbatch * 4;
- bo = NULL;
- sna->kgem.nbatch += sna->render.vertex_used;
- } else {
- bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
- if (bo && !kgem_bo_write(&sna->kgem, bo,
- sna->render.vertex_data,
- 4*sna->render.vertex_used)) {
- kgem_bo_destroy(&sna->kgem, bo);
- return;
+ sizeof(float)*sna->render.vertex_used);
+ }
+ sna->render.vertex_size = 64 * 1024 - 1;
+ return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen6_vertex_close(struct sna *sna)
+{
+ struct kgem_bo *bo;
+ unsigned int i, delta = 0;
+
+ gen6_vertex_flush(sna);
+ if (!sna->render.vertex_used) {
+ assert(sna->render.vbo == NULL);
+ assert(sna->render.vertices == sna->render.vertex_data);
+ assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+ return;
+ }
+
+ DBG(("%s: used=%d / %d\n", __FUNCTION__,
+ sna->render.vertex_used, sna->render.vertex_size));
+
+ bo = sna->render.vbo;
+ if (bo == NULL) {
+ if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+ DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+ sna->render.vertex_used, sna->kgem.nbatch));
+ memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ sna->render.vertex_data,
+ sna->render.vertex_used * 4);
+ delta = sna->kgem.nbatch * 4;
+ bo = NULL;
+ sna->kgem.nbatch += sna->render.vertex_used;
+ } else {
+ bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+ if (bo && !kgem_bo_write(&sna->kgem, bo,
+ sna->render.vertex_data,
+ 4*sna->render.vertex_used)) {
+ kgem_bo_destroy(&sna->kgem, bo);
+ goto reset;
+ }
+ DBG(("%s: new vbo: %d\n", __FUNCTION__,
+ sna->render.vertex_used));
}
- delta = 0;
- DBG(("%s: new vbo: %d\n", __FUNCTION__,
- sna->render.vertex_used));
}
for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -967,9 +1035,14 @@ static void gen6_vertex_finish(struct sna *sna, Bool last)
if (bo)
kgem_bo_destroy(&sna->kgem, bo);
+reset:
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
sna->render_state.gen6.vb_id = 0;
+
+ sna->render.vbo = NULL;
+ sna->render.vertices = sna->render.vertex_data;
+ sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
}
typedef struct gen6_surface_state_padded {
@@ -1134,7 +1207,7 @@ gen6_emit_composite_primitive_solid(struct sna *sna,
float f;
} dst;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -1165,7 +1238,7 @@ gen6_emit_composite_primitive_identity_source(struct sna *sna,
} dst;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -1194,7 +1267,7 @@ gen6_emit_composite_primitive_affine_source(struct sna *sna,
} dst;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -1247,7 +1320,7 @@ gen6_emit_composite_primitive_identity_source_mask(struct sna *sna,
w = r->width;
h = r->height;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
dst.p.x = r->dst.x + r->width;
@@ -1477,10 +1550,7 @@ static int gen6_get_rectangles__flush(struct sna *sna, bool ca)
if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
return 0;
- gen6_vertex_finish(sna, FALSE);
- sna->render.vertex_index = 0;
-
- return ARRAY_SIZE(sna->render.vertex_data);
+ return gen6_vertex_finish(sna);
}
inline static int gen6_get_rectangles(struct sna *sna,
@@ -1595,6 +1665,9 @@ gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
assert (sna->render_state.gen6.vertex_offset == 0);
if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) {
+ if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+ gen6_vertex_finish(sna);
+
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
sna->render_state.gen6.floats_per_vertex,
op->floats_per_vertex,
@@ -2621,7 +2694,7 @@ gen6_emit_composite_spans_identity(struct sna *sna,
int16_t tx = op->base.src.offset[0];
int16_t ty = op->base.src.offset[1];
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 3*5;
dst.p.x = box->x2;
@@ -2663,7 +2736,7 @@ gen6_emit_composite_spans_simple(struct sna *sna,
int16_t tx = op->base.src.offset[0];
int16_t ty = op->base.src.offset[1];
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 3*5;
dst.p.x = box->x2;
@@ -3061,7 +3134,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
}
n -= n_this_time;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9 * n_this_time;
do {
@@ -3746,7 +3819,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
static void gen6_render_flush(struct sna *sna)
{
- gen6_vertex_finish(sna, TRUE);
+ gen6_vertex_close(sna);
}
static void
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 7a5ee842..16c389c6 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1013,37 +1013,104 @@ static void gen7_vertex_flush(struct sna *sna)
gen7_magic_ca_pass(sna, sna->render.op);
}
-static void gen7_vertex_finish(struct sna *sna, Bool last)
+static int gen7_vertex_finish(struct sna *sna)
{
struct kgem_bo *bo;
- unsigned int i, delta;
+ unsigned int i;
gen7_vertex_flush(sna);
if (!sna->render.vertex_used)
- return;
+ return sna->render.vertex_size;
/* Note: we only need dword alignment (currently) */
- if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
- DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
- sna->render.vertex_used, sna->kgem.nbatch));
- memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ bo = sna->render.vbo;
+ if (bo) {
+ for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+ if (sna->render.vertex_reloc[i]) {
+ DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+ i, sna->render.vertex_reloc[i]));
+
+ sna->kgem.batch[sna->render.vertex_reloc[i]] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i],
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ 0);
+ sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+ kgem_add_reloc(&sna->kgem,
+ sna->render.vertex_reloc[i]+1,
+ bo,
+ I915_GEM_DOMAIN_VERTEX << 16,
+ sna->render.vertex_used * 4 - 1);
+ sna->render.vertex_reloc[i] = 0;
+ }
+ }
+
+ sna->render.vertex_used = 0;
+ sna->render.vertex_index = 0;
+ sna->render_state.gen7.vb_id = 0;
+
+ kgem_bo_destroy(&sna->kgem, bo);
+ }
+
+ sna->render.vertices = NULL;
+ sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+ if (sna->render.vbo)
+ sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertices == NULL) {
+ kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+ sna->render.vbo = NULL;
+ return 0;
+ }
+
+ kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
+ if (sna->render.vertex_used) {
+ memcpy(sna->render.vertices,
sna->render.vertex_data,
- sna->render.vertex_used * 4);
- delta = sna->kgem.nbatch * 4;
- bo = NULL;
- sna->kgem.nbatch += sna->render.vertex_used;
- } else {
- bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
- if (bo && !kgem_bo_write(&sna->kgem, bo,
- sna->render.vertex_data,
- 4*sna->render.vertex_used)) {
- kgem_bo_destroy(&sna->kgem, bo);
- return;
+ sizeof(float)*sna->render.vertex_used);
+ }
+ sna->render.vertex_size = 64 * 1024 - 1;
+ return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen7_vertex_close(struct sna *sna)
+{
+ struct kgem_bo *bo;
+ unsigned int i, delta = 0;
+
+ gen7_vertex_flush(sna);
+ if (!sna->render.vertex_used) {
+ assert(sna->render.vbo == NULL);
+ assert(sna->render.vertices == sna->render.vertex_data);
+ assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+ return;
+ }
+
+ DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+ bo = sna->render.vbo;
+ if (bo == NULL) {
+ if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+ DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+ sna->render.vertex_used, sna->kgem.nbatch));
+ memcpy(sna->kgem.batch + sna->kgem.nbatch,
+ sna->render.vertex_data,
+ sna->render.vertex_used * 4);
+ delta = sna->kgem.nbatch * 4;
+ bo = NULL;
+ sna->kgem.nbatch += sna->render.vertex_used;
+ } else {
+ bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+ if (bo && !kgem_bo_write(&sna->kgem, bo,
+ sna->render.vertex_data,
+ 4*sna->render.vertex_used)) {
+ kgem_bo_destroy(&sna->kgem, bo);
+ goto reset;
+ }
+ DBG(("%s: new vbo: %d\n", __FUNCTION__,
+ sna->render.vertex_used));
}
- delta = 0;
- DBG(("%s: new vbo: %d\n", __FUNCTION__,
- sna->render.vertex_used));
}
for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -1070,9 +1137,14 @@ static void gen7_vertex_finish(struct sna *sna, Bool last)
if (bo)
kgem_bo_destroy(&sna->kgem, bo);
+reset:
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
sna->render_state.gen7.vb_id = 0;
+
+ sna->render.vbo = NULL;
+ sna->render.vertices = sna->render.vertex_data;
+ sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
}
static void null_create(struct sna_static_stream *stream)
@@ -1231,7 +1303,7 @@ gen7_emit_composite_primitive_solid(struct sna *sna,
float f;
} dst;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -1262,7 +1334,7 @@ gen7_emit_composite_primitive_identity_source(struct sna *sna,
} dst;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -1291,7 +1363,7 @@ gen7_emit_composite_primitive_affine_source(struct sna *sna,
} dst;
float *v;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9;
dst.p.x = r->dst.x + r->width;
@@ -1344,7 +1416,7 @@ gen7_emit_composite_primitive_identity_source_mask(struct sna *sna,
w = r->width;
h = r->height;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 15;
dst.p.x = r->dst.x + r->width;
@@ -1573,10 +1645,7 @@ static int gen7_get_rectangles__flush(struct sna *sna, bool ca)
if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
return 0;
- gen7_vertex_finish(sna, FALSE);
- sna->render.vertex_index = 0;
-
- return ARRAY_SIZE(sna->render.vertex_data);
+ return gen7_vertex_finish(sna);
}
inline static int gen7_get_rectangles(struct sna *sna,
@@ -1690,6 +1759,9 @@ static void
gen7_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
if (op->floats_per_vertex != sna->render_state.gen7.floats_per_vertex) {
+ if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+ gen7_vertex_finish(sna);
+
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
sna->render_state.gen7.floats_per_vertex,
op->floats_per_vertex,
@@ -3022,7 +3094,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
}
n -= n_this_time;
- v = sna->render.vertex_data + sna->render.vertex_used;
+ v = sna->render.vertices + sna->render.vertex_used;
sna->render.vertex_used += 9 * n_this_time;
do {
@@ -3713,7 +3785,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
static void gen7_render_flush(struct sna *sna)
{
- gen7_vertex_finish(sna, TRUE);
+ gen7_vertex_close(sna);
}
static void
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index abad1ee5..979b2b0c 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -239,6 +239,9 @@ void no_render_init(struct sna *sna)
memset (render,0, sizeof (*render));
+ render->vertices = render->vertex_data;
+ render->vertex_size = ARRAY_SIZE(render->vertex_data);
+
render->composite = no_render_composite;
render->copy_boxes = no_render_copy_boxes;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 19dfdfba..abb19dce 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -271,9 +271,13 @@ struct sna_render {
uint16_t vertex_start;
uint16_t vertex_index;
uint16_t vertex_used;
+ uint16_t vertex_size;
uint16_t vertex_reloc[8];
- float vertex_data[16*1024];
+ struct kgem_bo *vbo;
+ float *vertices;
+
+ float vertex_data[1024];
const struct sna_composite_op *op;
};
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index ee55db7d..27f4909b 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -19,17 +19,17 @@ static inline bool need_redirect(struct sna *sna, PixmapPtr dst)
static inline int vertex_space(struct sna *sna)
{
- return ARRAY_SIZE(sna->render.vertex_data) - sna->render.vertex_used;
+ return sna->render.vertex_size - sna->render.vertex_used;
}
static inline void vertex_emit(struct sna *sna, float v)
{
- assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data));
- sna->render.vertex_data[sna->render.vertex_used++] = v;
+ assert(sna->render.vertex_used < sna->render.vertex_size);
+ sna->render.vertices[sna->render.vertex_used++] = v;
}
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
- int16_t *v = (int16_t *)&sna->render.vertex_data[sna->render.vertex_used++];
- assert(sna->render.vertex_used <= ARRAY_SIZE(sna->render.vertex_data));
+ int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
+ assert(sna->render.vertex_used <= sna->render.vertex_size);
v[0] = x;
v[1] = y;
}
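Finally, the inline helpers now address the current mapping (vertices) and its size (vertex_size) rather than the fixed array. A hypothetical caller, to make the accounting concrete: vertex_emit_2s() packs two int16 coordinates into a single float slot, and a RECTLIST rectangle costs three vertices, matching the vertex_index += 3*want bookkeeping above (helper name and vertex ordering are illustrative only):

	static void sketch_emit_rect(struct sna *sna,
				     int16_t x1, int16_t y1,
				     int16_t x2, int16_t y2)
	{
		/* One rectangle == three vertices; each vertex_emit_2s()
		 * call consumes one float slot of the current buffer. */
		assert(vertex_space(sna) >= 3);
		vertex_emit_2s(sna, x2, y2);
		vertex_emit_2s(sna, x1, y2);
		vertex_emit_2s(sna, x1, y1);
	}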