summaryrefslogtreecommitdiff
path: root/src/gallium/auxiliary/cso_cache/cso_context.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/gallium/auxiliary/cso_cache/cso_context.c')
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_context.c861
1 files changed, 504 insertions, 357 deletions
diff --git a/src/gallium/auxiliary/cso_cache/cso_context.c b/src/gallium/auxiliary/cso_cache/cso_context.c
index 97b4da0afdb..e71e400c424 100644
--- a/src/gallium/auxiliary/cso_cache/cso_context.c
+++ b/src/gallium/auxiliary/cso_cache/cso_context.c
@@ -43,13 +43,13 @@
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
-#include "tgsi/tgsi_parse.h"
#include "cso_cache/cso_context.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"
#include "cso_context.h"
-
+#include "driver_trace/tr_dump.h"
+#include "util/u_threaded_context.h"
/**
* Per-shader sampler information.
@@ -62,17 +62,19 @@ struct sampler_info
-struct cso_context {
- struct pipe_context *pipe;
+struct cso_context_priv {
+ struct cso_context base;
struct u_vbuf *vbuf;
struct u_vbuf *vbuf_current;
bool always_use_vbuf;
+ bool sampler_format;
- boolean has_geometry_shader;
- boolean has_tessellation;
- boolean has_compute_shader;
- boolean has_streamout;
+ bool has_geometry_shader;
+ bool has_tessellation;
+ bool has_compute_shader;
+ bool has_task_mesh_shader;
+ bool has_streamout;
uint32_t max_fs_samplerviews : 16;
@@ -81,7 +83,7 @@ struct cso_context {
struct sampler_info fragment_samplers_saved;
struct sampler_info compute_samplers_saved;
- struct sampler_info samplers[PIPE_SHADER_TYPES];
+ struct sampler_info samplers[PIPE_SHADER_MESH_TYPES];
/* Temporary number until cso_single_sampler_done is called.
* It tracks the highest sampler seen in cso_single_sampler.
@@ -108,8 +110,8 @@ struct cso_context {
void *compute_shader, *compute_shader_saved;
void *velements, *velements_saved;
struct pipe_query *render_condition, *render_condition_saved;
- uint render_condition_mode, render_condition_mode_saved;
- boolean render_condition_cond, render_condition_cond_saved;
+ enum pipe_render_cond_flag render_condition_mode, render_condition_mode_saved;
+ bool render_condition_cond, render_condition_cond_saved;
bool flatshade_first, flatshade_first_saved;
struct pipe_framebuffer_state fb, fb_saved;
@@ -122,29 +124,30 @@ struct cso_context {
struct cso_cache cache;
};
-struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
-{
- return cso->pipe;
-}
-static inline boolean delete_cso(struct cso_context *ctx,
- void *state, enum cso_cache_type type)
+static inline bool
+delete_cso(struct cso_context_priv *ctx,
+ void *state, enum cso_cache_type type)
{
switch (type) {
case CSO_BLEND:
- if (ctx->blend == ((struct cso_blend*)state)->data)
+ if (ctx->blend == ((struct cso_blend*)state)->data ||
+ ctx->blend_saved == ((struct cso_blend*)state)->data)
return false;
break;
case CSO_DEPTH_STENCIL_ALPHA:
- if (ctx->depth_stencil == ((struct cso_depth_stencil_alpha*)state)->data)
+ if (ctx->depth_stencil == ((struct cso_depth_stencil_alpha*)state)->data ||
+ ctx->depth_stencil_saved == ((struct cso_depth_stencil_alpha*)state)->data)
return false;
break;
case CSO_RASTERIZER:
- if (ctx->rasterizer == ((struct cso_rasterizer*)state)->data)
+ if (ctx->rasterizer == ((struct cso_rasterizer*)state)->data ||
+ ctx->rasterizer_saved == ((struct cso_rasterizer*)state)->data)
return false;
break;
case CSO_VELEMENTS:
- if (ctx->velements == ((struct cso_velements*)state)->data)
+ if (ctx->velements == ((struct cso_velements*)state)->data ||
+ ctx->velements_saved == ((struct cso_velements*)state)->data)
return false;
break;
case CSO_SAMPLER:
@@ -154,21 +157,21 @@ static inline boolean delete_cso(struct cso_context *ctx,
assert(0);
}
- cso_delete_state(ctx->pipe, state, type);
+ cso_delete_state(ctx->base.pipe, state, type);
return true;
}
+
static inline void
sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
int max_size, void *user_data)
{
- struct cso_context *ctx = (struct cso_context *)user_data;
+ struct cso_context_priv *ctx = (struct cso_context_priv *)user_data;
/* if we're approach the maximum size, remove fourth of the entries
* otherwise every subsequent call will go through the same */
- int hash_size = cso_hash_size(hash);
- int max_entries = (max_size > hash_size) ? max_size : hash_size;
+ const int hash_size = cso_hash_size(hash);
+ const int max_entries = (max_size > hash_size) ? max_size : hash_size;
int to_remove = (max_size < max_entries) * max_entries/4;
- struct cso_hash_iter iter;
struct cso_sampler **samplers_to_restore = NULL;
unsigned to_restore = 0;
@@ -179,25 +182,35 @@ sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
return;
if (type == CSO_SAMPLER) {
- int i, j;
-
- samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
+ samplers_to_restore = MALLOC((PIPE_SHADER_MESH_TYPES + 2) * PIPE_MAX_SAMPLERS *
sizeof(*samplers_to_restore));
/* Temporarily remove currently bound sampler states from the hash
* table, to prevent them from being deleted
*/
- for (i = 0; i < PIPE_SHADER_TYPES; i++) {
- for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
+ for (int i = 0; i < PIPE_SHADER_MESH_TYPES; i++) {
+ for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
if (sampler && cso_hash_take(hash, sampler->hash_key))
samplers_to_restore[to_restore++] = sampler;
}
}
+ for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
+ struct cso_sampler *sampler = ctx->fragment_samplers_saved.cso_samplers[j];
+
+ if (sampler && cso_hash_take(hash, sampler->hash_key))
+ samplers_to_restore[to_restore++] = sampler;
+ }
+ for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
+ struct cso_sampler *sampler = ctx->compute_samplers_saved.cso_samplers[j];
+
+ if (sampler && cso_hash_take(hash, sampler->hash_key))
+ samplers_to_restore[to_restore++] = sampler;
+ }
}
- iter = cso_hash_first_node(hash);
+ struct cso_hash_iter iter = cso_hash_first_node(hash);
while (to_remove) {
/*remove elements until we're good */
/*fixme: currently we pick the nodes to remove at random*/
@@ -209,8 +222,9 @@ sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
if (delete_cso(ctx, cso, type)) {
iter = cso_hash_erase(hash, iter);
--to_remove;
- } else
+ } else {
iter = cso_hash_iter_next(iter);
+ }
}
if (type == CSO_SAMPLER) {
@@ -225,50 +239,84 @@ sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
}
}
-static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
+
+static void
+cso_init_vbuf(struct cso_context_priv *cso, unsigned flags)
{
struct u_vbuf_caps caps;
bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
bool needs64b = !(flags & CSO_NO_64B_VERTEX_BUFFERS);
- u_vbuf_get_caps(cso->pipe->screen, &caps, needs64b);
+ u_vbuf_get_caps(cso->base.pipe->screen, &caps, needs64b);
/* Enable u_vbuf if needed. */
if (caps.fallback_always ||
(uses_user_vertex_buffers &&
caps.fallback_only_for_user_vbuffers)) {
- cso->vbuf = u_vbuf_create(cso->pipe, &caps);
- cso->vbuf_current = cso->vbuf;
+ assert(!cso->base.pipe->vbuf);
+ cso->vbuf = u_vbuf_create(cso->base.pipe, &caps);
+ cso->base.pipe->vbuf = cso->vbuf;
cso->always_use_vbuf = caps.fallback_always;
+ cso->vbuf_current = cso->base.pipe->vbuf =
+ caps.fallback_always ? cso->vbuf : NULL;
}
}
+static void
+cso_draw_vbo_default(struct pipe_context *pipe,
+ const struct pipe_draw_info *info,
+ unsigned drawid_offset,
+ const struct pipe_draw_indirect_info *indirect,
+ const struct pipe_draw_start_count_bias *draws,
+ unsigned num_draws)
+{
+ if (pipe->vbuf)
+ u_vbuf_draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
+ else
+ pipe->draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
+}
+
struct cso_context *
cso_create_context(struct pipe_context *pipe, unsigned flags)
{
- struct cso_context *ctx = CALLOC_STRUCT(cso_context);
+ struct cso_context_priv *ctx = CALLOC_STRUCT(cso_context_priv);
if (!ctx)
return NULL;
cso_cache_init(&ctx->cache, pipe);
cso_cache_set_sanitize_callback(&ctx->cache, sanitize_hash, ctx);
- ctx->pipe = pipe;
+ ctx->base.pipe = pipe;
ctx->sample_mask = ~0;
if (!(flags & CSO_NO_VBUF))
cso_init_vbuf(ctx, flags);
+ /* Only drivers using u_threaded_context benefit from the direct call.
+ * This is because drivers can change draw_vbo, but u_threaded_context
+ * never changes it.
+ */
+ if (pipe->draw_vbo == tc_draw_vbo) {
+ if (ctx->vbuf_current)
+ ctx->base.draw_vbo = u_vbuf_draw_vbo;
+ else
+ ctx->base.draw_vbo = pipe->draw_vbo;
+ } else if (ctx->always_use_vbuf) {
+ ctx->base.draw_vbo = u_vbuf_draw_vbo;
+ } else {
+ ctx->base.draw_vbo = cso_draw_vbo_default;
+ }
+
/* Enable for testing: */
if (0) cso_set_maximum_cache_size(&ctx->cache, 4);
if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
- ctx->has_geometry_shader = TRUE;
+ ctx->has_geometry_shader = true;
}
if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
- ctx->has_tessellation = TRUE;
+ ctx->has_tessellation = true;
}
if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
@@ -277,36 +325,52 @@ cso_create_context(struct pipe_context *pipe, unsigned flags)
PIPE_SHADER_CAP_SUPPORTED_IRS);
if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
(1 << PIPE_SHADER_IR_NIR))) {
- ctx->has_compute_shader = TRUE;
+ ctx->has_compute_shader = true;
}
}
+ if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_MESH,
+ PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
+ ctx->has_task_mesh_shader = true;
+ }
if (pipe->screen->get_param(pipe->screen,
PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
- ctx->has_streamout = TRUE;
+ ctx->has_streamout = true;
}
- ctx->max_fs_samplerviews = pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_FRAGMENT,
- PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
+ if (pipe->screen->get_param(pipe->screen,
+ PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
+ PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO)
+ ctx->sampler_format = true;
+
+ ctx->max_fs_samplerviews =
+ pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_FRAGMENT,
+ PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
ctx->max_sampler_seen = -1;
- return ctx;
+ return &ctx->base;
}
-void cso_unbind_context(struct cso_context *ctx)
+
+void
+cso_unbind_context(struct cso_context *cso)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
unsigned i;
- if (ctx->pipe) {
- ctx->pipe->bind_blend_state( ctx->pipe, NULL );
- ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
+ bool dumping = trace_dumping_enabled_locked();
+ if (dumping)
+ trace_dumping_stop_locked();
+ if (ctx->base.pipe) {
+ ctx->base.pipe->bind_blend_state(ctx->base.pipe, NULL);
+ ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, NULL);
{
static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
static struct pipe_shader_buffer ssbos[PIPE_MAX_SHADER_BUFFERS] = { 0 };
static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
- struct pipe_screen *scr = ctx->pipe->screen;
+ struct pipe_screen *scr = ctx->base.pipe->screen;
enum pipe_shader_type sh;
- for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ for (sh = 0; sh < PIPE_SHADER_MESH_TYPES; sh++) {
switch (sh) {
case PIPE_SHADER_GEOMETRY:
if (!ctx->has_geometry_shader)
@@ -321,6 +385,11 @@ void cso_unbind_context(struct cso_context *ctx)
if (!ctx->has_compute_shader)
continue;
break;
+ case PIPE_SHADER_MESH:
+ case PIPE_SHADER_TASK:
+ if (!ctx->has_task_mesh_shader)
+ continue;
+ break;
default:
break;
}
@@ -341,44 +410,51 @@ void cso_unbind_context(struct cso_context *ctx)
assert(maxcb <= PIPE_MAX_CONSTANT_BUFFERS);
assert(maximg <= PIPE_MAX_SHADER_IMAGES);
if (maxsam > 0) {
- ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
+ ctx->base.pipe->bind_sampler_states(ctx->base.pipe, sh, 0, maxsam, zeros);
}
if (maxview > 0) {
- ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, 0, false, views);
+ ctx->base.pipe->set_sampler_views(ctx->base.pipe, sh, 0, maxview, 0, false, views);
}
if (maxssbo > 0) {
- ctx->pipe->set_shader_buffers(ctx->pipe, sh, 0, maxssbo, ssbos, 0);
+ ctx->base.pipe->set_shader_buffers(ctx->base.pipe, sh, 0, maxssbo, ssbos, 0);
}
if (maximg > 0) {
- ctx->pipe->set_shader_images(ctx->pipe, sh, 0, 0, maximg, NULL);
+ ctx->base.pipe->set_shader_images(ctx->base.pipe, sh, 0, 0, maximg, NULL);
}
for (int i = 0; i < maxcb; i++) {
- ctx->pipe->set_constant_buffer(ctx->pipe, sh, i, false, NULL);
+ ctx->base.pipe->set_constant_buffer(ctx->base.pipe, sh, i, false, NULL);
}
}
}
- ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
+ ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe, NULL);
struct pipe_stencil_ref sr = {0};
- ctx->pipe->set_stencil_ref(ctx->pipe, sr);
- ctx->pipe->bind_fs_state( ctx->pipe, NULL );
- ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
- ctx->pipe->bind_vs_state( ctx->pipe, NULL );
- ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
+ ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
+ ctx->base.pipe->bind_fs_state(ctx->base.pipe, NULL);
+ ctx->base.pipe->set_constant_buffer(ctx->base.pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
+ ctx->base.pipe->bind_vs_state(ctx->base.pipe, NULL);
+ ctx->base.pipe->set_constant_buffer(ctx->base.pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
if (ctx->has_geometry_shader) {
- ctx->pipe->bind_gs_state(ctx->pipe, NULL);
+ ctx->base.pipe->bind_gs_state(ctx->base.pipe, NULL);
}
if (ctx->has_tessellation) {
- ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
- ctx->pipe->bind_tes_state(ctx->pipe, NULL);
+ ctx->base.pipe->bind_tcs_state(ctx->base.pipe, NULL);
+ ctx->base.pipe->bind_tes_state(ctx->base.pipe, NULL);
}
if (ctx->has_compute_shader) {
- ctx->pipe->bind_compute_state(ctx->pipe, NULL);
+ ctx->base.pipe->bind_compute_state(ctx->base.pipe, NULL);
+ }
+ if (ctx->has_task_mesh_shader) {
+ ctx->base.pipe->bind_ts_state(ctx->base.pipe, NULL);
+ ctx->base.pipe->bind_ms_state(ctx->base.pipe, NULL);
}
- ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );
+ ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, NULL);
if (ctx->has_streamout)
- ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
+ ctx->base.pipe->set_stream_output_targets(ctx->base.pipe, 0, NULL, NULL);
+
+ struct pipe_framebuffer_state fb = {0};
+ ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, &fb);
}
util_unreference_framebuffer_state(&ctx->fb);
@@ -390,28 +466,38 @@ void cso_unbind_context(struct cso_context *ctx)
}
memset(&ctx->samplers, 0, sizeof(ctx->samplers));
- memset(&ctx->nr_so_targets, 0, offsetof(struct cso_context, cache) - offsetof(struct cso_context, nr_so_targets));
+ memset(&ctx->nr_so_targets, 0,
+ offsetof(struct cso_context_priv, cache)
+ - offsetof(struct cso_context_priv, nr_so_targets));
ctx->sample_mask = ~0;
/*
* If the cso context is reused (with the same pipe context),
* need to really make sure the context state doesn't get out of sync.
*/
- ctx->pipe->set_sample_mask(ctx->pipe, ctx->sample_mask);
- if (ctx->pipe->set_min_samples)
- ctx->pipe->set_min_samples(ctx->pipe, ctx->min_samples);
+ ctx->base.pipe->set_sample_mask(ctx->base.pipe, ctx->sample_mask);
+ if (ctx->base.pipe->set_min_samples)
+ ctx->base.pipe->set_min_samples(ctx->base.pipe, ctx->min_samples);
+ if (dumping)
+ trace_dumping_start_locked();
}
+
/**
* Free the CSO context.
*/
-void cso_destroy_context( struct cso_context *ctx )
+void
+cso_destroy_context(struct cso_context *cso)
{
- cso_unbind_context(ctx);
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
+ cso_unbind_context(cso);
cso_cache_delete(&ctx->cache);
if (ctx->vbuf)
u_vbuf_destroy(ctx->vbuf);
- FREE( ctx );
+
+ ctx->base.pipe->vbuf = NULL;
+ FREE(ctx);
}
@@ -420,24 +506,38 @@ void cso_destroy_context( struct cso_context *ctx )
* template, insert it in the cache and return it.
*/
+#define CSO_BLEND_KEY_SIZE_RT0 offsetof(struct pipe_blend_state, rt[1])
+#define CSO_BLEND_KEY_SIZE_ALL_RT sizeof(struct pipe_blend_state)
+
/*
* If the driver returns 0 from the create method then they will assign
* the data member of the cso to be the template itself.
*/
-enum pipe_error cso_set_blend(struct cso_context *ctx,
- const struct pipe_blend_state *templ)
+enum pipe_error
+cso_set_blend(struct cso_context *cso,
+ const struct pipe_blend_state *templ)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
unsigned key_size, hash_key;
struct cso_hash_iter iter;
void *handle;
- key_size = templ->independent_blend_enable ?
- sizeof(struct pipe_blend_state) :
- (char *)&(templ->rt[1]) - (char *)templ;
- hash_key = cso_construct_key((void*)templ, key_size);
- iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
- (void*)templ, key_size);
+ if (templ->independent_blend_enable) {
+ /* This is duplicated with the else block below because we want key_size
+ * to be a literal constant, so that memcpy and the hash computation can
+ * be inlined and unrolled.
+ */
+ hash_key = cso_construct_key(templ, CSO_BLEND_KEY_SIZE_ALL_RT);
+ iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
+ templ, CSO_BLEND_KEY_SIZE_ALL_RT);
+ key_size = CSO_BLEND_KEY_SIZE_ALL_RT;
+ } else {
+ hash_key = cso_construct_key(templ, CSO_BLEND_KEY_SIZE_RT0);
+ iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
+ templ, CSO_BLEND_KEY_SIZE_RT0);
+ key_size = CSO_BLEND_KEY_SIZE_RT0;
+ }
if (cso_hash_iter_is_null(iter)) {
struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
@@ -446,7 +546,7 @@ enum pipe_error cso_set_blend(struct cso_context *ctx,
memset(&cso->state, 0, sizeof cso->state);
memcpy(&cso->state, templ, key_size);
- cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
+ cso->data = ctx->base.pipe->create_blend_state(ctx->base.pipe, &cso->state);
iter = cso_insert_state(&ctx->cache, hash_key, CSO_BLEND, cso);
if (cso_hash_iter_is_null(iter)) {
@@ -455,47 +555,48 @@ enum pipe_error cso_set_blend(struct cso_context *ctx,
}
handle = cso->data;
- }
- else {
+ } else {
handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
}
if (ctx->blend != handle) {
ctx->blend = handle;
- ctx->pipe->bind_blend_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_blend_state(ctx->base.pipe, handle);
}
return PIPE_OK;
}
+
static void
-cso_save_blend(struct cso_context *ctx)
+cso_save_blend(struct cso_context_priv *ctx)
{
assert(!ctx->blend_saved);
ctx->blend_saved = ctx->blend;
}
+
static void
-cso_restore_blend(struct cso_context *ctx)
+cso_restore_blend(struct cso_context_priv *ctx)
{
if (ctx->blend != ctx->blend_saved) {
ctx->blend = ctx->blend_saved;
- ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
+ ctx->base.pipe->bind_blend_state(ctx->base.pipe, ctx->blend_saved);
}
ctx->blend_saved = NULL;
}
-
enum pipe_error
-cso_set_depth_stencil_alpha(struct cso_context *ctx,
+cso_set_depth_stencil_alpha(struct cso_context *cso,
const struct pipe_depth_stencil_alpha_state *templ)
{
- unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
- unsigned hash_key = cso_construct_key((void*)templ, key_size);
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+ const unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
+ const unsigned hash_key = cso_construct_key(templ, key_size);
struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
hash_key,
CSO_DEPTH_STENCIL_ALPHA,
- (void*)templ, key_size);
+ templ, key_size);
void *handle;
if (cso_hash_iter_is_null(iter)) {
@@ -505,7 +606,7 @@ cso_set_depth_stencil_alpha(struct cso_context *ctx,
return PIPE_ERROR_OUT_OF_MEMORY;
memcpy(&cso->state, templ, sizeof(*templ));
- cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
+ cso->data = ctx->base.pipe->create_depth_stencil_alpha_state(ctx->base.pipe,
&cso->state);
iter = cso_insert_state(&ctx->cache, hash_key,
@@ -516,48 +617,50 @@ cso_set_depth_stencil_alpha(struct cso_context *ctx,
}
handle = cso->data;
- }
- else {
+ } else {
handle = ((struct cso_depth_stencil_alpha *)
cso_hash_iter_data(iter))->data;
}
if (ctx->depth_stencil != handle) {
ctx->depth_stencil = handle;
- ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe, handle);
}
return PIPE_OK;
}
+
static void
-cso_save_depth_stencil_alpha(struct cso_context *ctx)
+cso_save_depth_stencil_alpha(struct cso_context_priv *ctx)
{
assert(!ctx->depth_stencil_saved);
ctx->depth_stencil_saved = ctx->depth_stencil;
}
+
static void
-cso_restore_depth_stencil_alpha(struct cso_context *ctx)
+cso_restore_depth_stencil_alpha(struct cso_context_priv *ctx)
{
if (ctx->depth_stencil != ctx->depth_stencil_saved) {
ctx->depth_stencil = ctx->depth_stencil_saved;
- ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
+ ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe,
ctx->depth_stencil_saved);
}
ctx->depth_stencil_saved = NULL;
}
-
-enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
- const struct pipe_rasterizer_state *templ)
+enum pipe_error
+cso_set_rasterizer(struct cso_context *cso,
+ const struct pipe_rasterizer_state *templ)
{
- unsigned key_size = sizeof(struct pipe_rasterizer_state);
- unsigned hash_key = cso_construct_key((void*)templ, key_size);
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+ const unsigned key_size = sizeof(struct pipe_rasterizer_state);
+ const unsigned hash_key = cso_construct_key(templ, key_size);
struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
hash_key,
CSO_RASTERIZER,
- (void*)templ, key_size);
+ templ, key_size);
void *handle = NULL;
/* We can't have both point_quad_rasterization (sprites) and point_smooth
@@ -571,7 +674,7 @@ enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
return PIPE_ERROR_OUT_OF_MEMORY;
memcpy(&cso->state, templ, sizeof(*templ));
- cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
+ cso->data = ctx->base.pipe->create_rasterizer_state(ctx->base.pipe, &cso->state);
iter = cso_insert_state(&ctx->cache, hash_key, CSO_RASTERIZER, cso);
if (cso_hash_iter_is_null(iter)) {
@@ -580,8 +683,7 @@ enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
}
handle = cso->data;
- }
- else {
+ } else {
handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
}
@@ -590,127 +692,148 @@ enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
ctx->flatshade_first = templ->flatshade_first;
if (ctx->vbuf)
u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
- ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, handle);
}
return PIPE_OK;
}
+
static void
-cso_save_rasterizer(struct cso_context *ctx)
+cso_save_rasterizer(struct cso_context_priv *ctx)
{
assert(!ctx->rasterizer_saved);
ctx->rasterizer_saved = ctx->rasterizer;
ctx->flatshade_first_saved = ctx->flatshade_first;
}
+
static void
-cso_restore_rasterizer(struct cso_context *ctx)
+cso_restore_rasterizer(struct cso_context_priv *ctx)
{
if (ctx->rasterizer != ctx->rasterizer_saved) {
ctx->rasterizer = ctx->rasterizer_saved;
ctx->flatshade_first = ctx->flatshade_first_saved;
if (ctx->vbuf)
u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
- ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
+ ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, ctx->rasterizer_saved);
}
ctx->rasterizer_saved = NULL;
}
-void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
+void
+cso_set_fragment_shader_handle(struct cso_context *cso, void *handle)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
if (ctx->fragment_shader != handle) {
ctx->fragment_shader = handle;
- ctx->pipe->bind_fs_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_fs_state(ctx->base.pipe, handle);
}
}
+
static void
-cso_save_fragment_shader(struct cso_context *ctx)
+cso_save_fragment_shader(struct cso_context_priv *ctx)
{
assert(!ctx->fragment_shader_saved);
ctx->fragment_shader_saved = ctx->fragment_shader;
}
+
static void
-cso_restore_fragment_shader(struct cso_context *ctx)
+cso_restore_fragment_shader(struct cso_context_priv *ctx)
{
if (ctx->fragment_shader_saved != ctx->fragment_shader) {
- ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
+ ctx->base.pipe->bind_fs_state(ctx->base.pipe, ctx->fragment_shader_saved);
ctx->fragment_shader = ctx->fragment_shader_saved;
}
ctx->fragment_shader_saved = NULL;
}
-void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
+void
+cso_set_vertex_shader_handle(struct cso_context *cso, void *handle)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
if (ctx->vertex_shader != handle) {
ctx->vertex_shader = handle;
- ctx->pipe->bind_vs_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_vs_state(ctx->base.pipe, handle);
}
}
+
static void
-cso_save_vertex_shader(struct cso_context *ctx)
+cso_save_vertex_shader(struct cso_context_priv *ctx)
{
assert(!ctx->vertex_shader_saved);
ctx->vertex_shader_saved = ctx->vertex_shader;
}
+
static void
-cso_restore_vertex_shader(struct cso_context *ctx)
+cso_restore_vertex_shader(struct cso_context_priv *ctx)
{
if (ctx->vertex_shader_saved != ctx->vertex_shader) {
- ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
+ ctx->base.pipe->bind_vs_state(ctx->base.pipe, ctx->vertex_shader_saved);
ctx->vertex_shader = ctx->vertex_shader_saved;
}
ctx->vertex_shader_saved = NULL;
}
-void cso_set_framebuffer(struct cso_context *ctx,
- const struct pipe_framebuffer_state *fb)
+void
+cso_set_framebuffer(struct cso_context *cso,
+ const struct pipe_framebuffer_state *fb)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
util_copy_framebuffer_state(&ctx->fb, fb);
- ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
+ ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, fb);
}
}
+
static void
-cso_save_framebuffer(struct cso_context *ctx)
+cso_save_framebuffer(struct cso_context_priv *ctx)
{
util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
}
+
static void
-cso_restore_framebuffer(struct cso_context *ctx)
+cso_restore_framebuffer(struct cso_context_priv *ctx)
{
if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
- ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
+ ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, &ctx->fb);
util_unreference_framebuffer_state(&ctx->fb_saved);
}
}
-void cso_set_viewport(struct cso_context *ctx,
- const struct pipe_viewport_state *vp)
+void
+cso_set_viewport(struct cso_context *cso,
+ const struct pipe_viewport_state *vp)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
ctx->vp = *vp;
- ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
+ ctx->base.pipe->set_viewport_states(ctx->base.pipe, 0, 1, vp);
}
}
+
/**
* Setup viewport state for given width and height (position is always (0,0)).
* Invert the Y axis if 'invert' is true.
*/
void
cso_set_viewport_dims(struct cso_context *ctx,
- float width, float height, boolean invert)
+ float width, float height, bool invert)
{
struct pipe_viewport_state vp;
vp.scale[0] = width * 0.5f;
@@ -726,94 +849,115 @@ cso_set_viewport_dims(struct cso_context *ctx,
cso_set_viewport(ctx, &vp);
}
+
static void
-cso_save_viewport(struct cso_context *ctx)
+cso_save_viewport(struct cso_context_priv *ctx)
{
ctx->vp_saved = ctx->vp;
}
static void
-cso_restore_viewport(struct cso_context *ctx)
+cso_restore_viewport(struct cso_context_priv *ctx)
{
if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
ctx->vp = ctx->vp_saved;
- ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
+ ctx->base.pipe->set_viewport_states(ctx->base.pipe, 0, 1, &ctx->vp);
}
}
-void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
+
+void
+cso_set_sample_mask(struct cso_context *cso, unsigned sample_mask)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
if (ctx->sample_mask != sample_mask) {
ctx->sample_mask = sample_mask;
- ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
+ ctx->base.pipe->set_sample_mask(ctx->base.pipe, sample_mask);
}
}
+
static void
-cso_save_sample_mask(struct cso_context *ctx)
+cso_save_sample_mask(struct cso_context_priv *ctx)
{
ctx->sample_mask_saved = ctx->sample_mask;
}
+
static void
-cso_restore_sample_mask(struct cso_context *ctx)
+cso_restore_sample_mask(struct cso_context_priv *ctx)
{
- cso_set_sample_mask(ctx, ctx->sample_mask_saved);
+ cso_set_sample_mask(&ctx->base, ctx->sample_mask_saved);
}
-void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
+
+void
+cso_set_min_samples(struct cso_context *cso, unsigned min_samples)
{
- if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
+ if (ctx->min_samples != min_samples && ctx->base.pipe->set_min_samples) {
ctx->min_samples = min_samples;
- ctx->pipe->set_min_samples(ctx->pipe, min_samples);
+ ctx->base.pipe->set_min_samples(ctx->base.pipe, min_samples);
}
}
+
static void
-cso_save_min_samples(struct cso_context *ctx)
+cso_save_min_samples(struct cso_context_priv *ctx)
{
ctx->min_samples_saved = ctx->min_samples;
}
+
static void
-cso_restore_min_samples(struct cso_context *ctx)
+cso_restore_min_samples(struct cso_context_priv *ctx)
{
- cso_set_min_samples(ctx, ctx->min_samples_saved);
+ cso_set_min_samples(&ctx->base, ctx->min_samples_saved);
}
-void cso_set_stencil_ref(struct cso_context *ctx,
- const struct pipe_stencil_ref sr)
+
+void
+cso_set_stencil_ref(struct cso_context *cso,
+ const struct pipe_stencil_ref sr)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
if (memcmp(&ctx->stencil_ref, &sr, sizeof(ctx->stencil_ref))) {
ctx->stencil_ref = sr;
- ctx->pipe->set_stencil_ref(ctx->pipe, sr);
+ ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
}
}
+
static void
-cso_save_stencil_ref(struct cso_context *ctx)
+cso_save_stencil_ref(struct cso_context_priv *ctx)
{
ctx->stencil_ref_saved = ctx->stencil_ref;
}
static void
-cso_restore_stencil_ref(struct cso_context *ctx)
+cso_restore_stencil_ref(struct cso_context_priv *ctx)
{
if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
sizeof(ctx->stencil_ref))) {
ctx->stencil_ref = ctx->stencil_ref_saved;
- ctx->pipe->set_stencil_ref(ctx->pipe, ctx->stencil_ref);
+ ctx->base.pipe->set_stencil_ref(ctx->base.pipe, ctx->stencil_ref);
}
}
-void cso_set_render_condition(struct cso_context *ctx,
- struct pipe_query *query,
- boolean condition,
- enum pipe_render_cond_flag mode)
+
+void
+cso_set_render_condition(struct cso_context *cso,
+ struct pipe_query *query,
+ bool condition,
+ enum pipe_render_cond_flag mode)
{
- struct pipe_context *pipe = ctx->pipe;
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+ struct pipe_context *pipe = ctx->base.pipe;
if (ctx->render_condition != query ||
ctx->render_condition_mode != mode ||
@@ -825,34 +969,40 @@ void cso_set_render_condition(struct cso_context *ctx,
}
}
+
static void
-cso_save_render_condition(struct cso_context *ctx)
+cso_save_render_condition(struct cso_context_priv *ctx)
{
ctx->render_condition_saved = ctx->render_condition;
ctx->render_condition_cond_saved = ctx->render_condition_cond;
ctx->render_condition_mode_saved = ctx->render_condition_mode;
}
+
static void
-cso_restore_render_condition(struct cso_context *ctx)
+cso_restore_render_condition(struct cso_context_priv *ctx)
{
- cso_set_render_condition(ctx, ctx->render_condition_saved,
+ cso_set_render_condition(&ctx->base, ctx->render_condition_saved,
ctx->render_condition_cond_saved,
ctx->render_condition_mode_saved);
}
-void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
+
+void
+cso_set_geometry_shader_handle(struct cso_context *cso, void *handle)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
assert(ctx->has_geometry_shader || !handle);
if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
ctx->geometry_shader = handle;
- ctx->pipe->bind_gs_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_gs_state(ctx->base.pipe, handle);
}
}
+
static void
-cso_save_geometry_shader(struct cso_context *ctx)
+cso_save_geometry_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_geometry_shader) {
return;
@@ -862,32 +1012,37 @@ cso_save_geometry_shader(struct cso_context *ctx)
ctx->geometry_shader_saved = ctx->geometry_shader;
}
+
static void
-cso_restore_geometry_shader(struct cso_context *ctx)
+cso_restore_geometry_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_geometry_shader) {
return;
}
if (ctx->geometry_shader_saved != ctx->geometry_shader) {
- ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
+ ctx->base.pipe->bind_gs_state(ctx->base.pipe, ctx->geometry_shader_saved);
ctx->geometry_shader = ctx->geometry_shader_saved;
}
ctx->geometry_shader_saved = NULL;
}
-void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
+
+void
+cso_set_tessctrl_shader_handle(struct cso_context *cso, void *handle)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
assert(ctx->has_tessellation || !handle);
if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
ctx->tessctrl_shader = handle;
- ctx->pipe->bind_tcs_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_tcs_state(ctx->base.pipe, handle);
}
}
+
static void
-cso_save_tessctrl_shader(struct cso_context *ctx)
+cso_save_tessctrl_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_tessellation) {
return;
@@ -897,32 +1052,38 @@ cso_save_tessctrl_shader(struct cso_context *ctx)
ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
}
+
static void
-cso_restore_tessctrl_shader(struct cso_context *ctx)
+cso_restore_tessctrl_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_tessellation) {
return;
}
if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
- ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
+ ctx->base.pipe->bind_tcs_state(ctx->base.pipe, ctx->tessctrl_shader_saved);
ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
}
ctx->tessctrl_shader_saved = NULL;
}
-void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
+
+void
+cso_set_tesseval_shader_handle(struct cso_context *cso, void *handle)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
assert(ctx->has_tessellation || !handle);
if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
ctx->tesseval_shader = handle;
- ctx->pipe->bind_tes_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_tes_state(ctx->base.pipe, handle);
}
}
+
static void
-cso_save_tesseval_shader(struct cso_context *ctx)
+cso_save_tesseval_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_tessellation) {
return;
@@ -932,32 +1093,37 @@ cso_save_tesseval_shader(struct cso_context *ctx)
ctx->tesseval_shader_saved = ctx->tesseval_shader;
}
+
static void
-cso_restore_tesseval_shader(struct cso_context *ctx)
+cso_restore_tesseval_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_tessellation) {
return;
}
if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
- ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
+ ctx->base.pipe->bind_tes_state(ctx->base.pipe, ctx->tesseval_shader_saved);
ctx->tesseval_shader = ctx->tesseval_shader_saved;
}
ctx->tesseval_shader_saved = NULL;
}
-void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
+
+void
+cso_set_compute_shader_handle(struct cso_context *cso, void *handle)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
assert(ctx->has_compute_shader || !handle);
if (ctx->has_compute_shader && ctx->compute_shader != handle) {
ctx->compute_shader = handle;
- ctx->pipe->bind_compute_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_compute_state(ctx->base.pipe, handle);
}
}
+
static void
-cso_save_compute_shader(struct cso_context *ctx)
+cso_save_compute_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_compute_shader) {
return;
@@ -967,15 +1133,16 @@ cso_save_compute_shader(struct cso_context *ctx)
ctx->compute_shader_saved = ctx->compute_shader;
}
+
static void
-cso_restore_compute_shader(struct cso_context *ctx)
+cso_restore_compute_shader(struct cso_context_priv *ctx)
{
if (!ctx->has_compute_shader) {
return;
}
if (ctx->compute_shader_saved != ctx->compute_shader) {
- ctx->pipe->bind_compute_state(ctx->pipe, ctx->compute_shader_saved);
+ ctx->base.pipe->bind_compute_state(ctx->base.pipe, ctx->compute_shader_saved);
ctx->compute_shader = ctx->compute_shader_saved;
}
ctx->compute_shader_saved = NULL;
@@ -983,7 +1150,7 @@ cso_restore_compute_shader(struct cso_context *ctx)
static void
-cso_save_compute_samplers(struct cso_context *ctx)
+cso_save_compute_samplers(struct cso_context_priv *ctx)
{
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
struct sampler_info *saved = &ctx->compute_samplers_saved;
@@ -995,7 +1162,7 @@ cso_save_compute_samplers(struct cso_context *ctx)
static void
-cso_restore_compute_samplers(struct cso_context *ctx)
+cso_restore_compute_samplers(struct cso_context_priv *ctx)
{
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
struct sampler_info *saved = &ctx->compute_samplers_saved;
@@ -1011,28 +1178,26 @@ cso_restore_compute_samplers(struct cso_context *ctx)
}
}
- cso_single_sampler_done(ctx, PIPE_SHADER_COMPUTE);
+ cso_single_sampler_done(&ctx->base, PIPE_SHADER_COMPUTE);
}
static void
-cso_set_vertex_elements_direct(struct cso_context *ctx,
+cso_set_vertex_elements_direct(struct cso_context_priv *ctx,
const struct cso_velems_state *velems)
{
- unsigned key_size, hash_key;
- struct cso_hash_iter iter;
- void *handle;
-
/* Need to include the count into the stored state data too.
* Otherwise first few count pipe_vertex_elements could be identical
* even if count is different, and there's no guarantee the hash would
* be different in that case neither.
*/
- key_size = sizeof(struct pipe_vertex_element) * velems->count +
- sizeof(unsigned);
- hash_key = cso_construct_key((void*)velems, key_size);
- iter = cso_find_state_template(&ctx->cache, hash_key, CSO_VELEMENTS,
- (void*)velems, key_size);
+ const unsigned key_size =
+ sizeof(struct pipe_vertex_element) * velems->count + sizeof(unsigned);
+ const unsigned hash_key = cso_construct_key((void*)velems, key_size);
+ struct cso_hash_iter iter =
+ cso_find_state_template(&ctx->cache, hash_key, CSO_VELEMENTS,
+ velems, key_size);
+ void *handle;
if (cso_hash_iter_is_null(iter)) {
struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
@@ -1047,7 +1212,7 @@ cso_set_vertex_elements_direct(struct cso_context *ctx,
struct pipe_vertex_element tmp[PIPE_MAX_ATTRIBS];
util_lower_uint64_vertex_elements(&new_elems, &new_count, tmp);
- cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, new_count,
+ cso->data = ctx->base.pipe->create_vertex_elements_state(ctx->base.pipe, new_count,
new_elems);
iter = cso_insert_state(&ctx->cache, hash_key, CSO_VELEMENTS, cso);
@@ -1057,21 +1222,22 @@ cso_set_vertex_elements_direct(struct cso_context *ctx,
}
handle = cso->data;
- }
- else {
+ } else {
handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
}
if (ctx->velements != handle) {
ctx->velements = handle;
- ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
+ ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, handle);
}
}
+
enum pipe_error
-cso_set_vertex_elements(struct cso_context *ctx,
+cso_set_vertex_elements(struct cso_context *cso,
const struct cso_velems_state *velems)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
struct u_vbuf *vbuf = ctx->vbuf_current;
if (vbuf) {
@@ -1083,8 +1249,9 @@ cso_set_vertex_elements(struct cso_context *ctx,
return PIPE_OK;
}
+
static void
-cso_save_vertex_elements(struct cso_context *ctx)
+cso_save_vertex_elements(struct cso_context_priv *ctx)
{
struct u_vbuf *vbuf = ctx->vbuf_current;
@@ -1097,8 +1264,9 @@ cso_save_vertex_elements(struct cso_context *ctx)
ctx->velements_saved = ctx->velements;
}
+
static void
-cso_restore_vertex_elements(struct cso_context *ctx)
+cso_restore_vertex_elements(struct cso_context_priv *ctx)
{
struct u_vbuf *vbuf = ctx->vbuf_current;
@@ -1109,31 +1277,31 @@ cso_restore_vertex_elements(struct cso_context *ctx)
if (ctx->velements != ctx->velements_saved) {
ctx->velements = ctx->velements_saved;
- ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
+ ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, ctx->velements_saved);
}
ctx->velements_saved = NULL;
}
/* vertex buffers */
-void cso_set_vertex_buffers(struct cso_context *ctx,
- unsigned start_slot, unsigned count,
- const struct pipe_vertex_buffer *buffers)
+void
+cso_set_vertex_buffers(struct cso_context *cso,
+ unsigned count,
+ bool take_ownership,
+ const struct pipe_vertex_buffer *buffers)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
struct u_vbuf *vbuf = ctx->vbuf_current;
- if (!count)
- return;
-
if (vbuf) {
- u_vbuf_set_vertex_buffers(vbuf, start_slot, count, 0, false, buffers);
+ u_vbuf_set_vertex_buffers(vbuf, count, take_ownership, buffers);
return;
}
- struct pipe_context *pipe = ctx->pipe;
- pipe->set_vertex_buffers(pipe, start_slot, count, 0, false, buffers);
+ util_set_vertex_buffers(ctx->base.pipe, count, take_ownership, buffers);
}
+
/**
* Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
* for user vertex buffers and user vertex buffers are not set by this call.
@@ -1147,69 +1315,54 @@ void cso_set_vertex_buffers(struct cso_context *ctx,
* u_vbuf, not both.
*/
void
-cso_set_vertex_buffers_and_elements(struct cso_context *ctx,
+cso_set_vertex_buffers_and_elements(struct cso_context *cso,
const struct cso_velems_state *velems,
unsigned vb_count,
- unsigned unbind_trailing_vb_count,
- bool take_ownership,
bool uses_user_vertex_buffers,
const struct pipe_vertex_buffer *vbuffers)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
struct u_vbuf *vbuf = ctx->vbuf;
- struct pipe_context *pipe = ctx->pipe;
+ struct pipe_context *pipe = ctx->base.pipe;
if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
if (!ctx->vbuf_current) {
- /* Unbind all buffers in cso_context, because we'll use u_vbuf. */
- unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
- if (unbind_vb_count)
- pipe->set_vertex_buffers(pipe, 0, 0, unbind_vb_count, false, NULL);
-
/* Unset this to make sure the CSO is re-bound on the next use. */
ctx->velements = NULL;
- ctx->vbuf_current = vbuf;
- unbind_trailing_vb_count = 0;
+ ctx->vbuf_current = pipe->vbuf = vbuf;
+ if (pipe->draw_vbo == tc_draw_vbo)
+ ctx->base.draw_vbo = u_vbuf_draw_vbo;
}
- if (vb_count || unbind_trailing_vb_count) {
- u_vbuf_set_vertex_buffers(vbuf, 0, vb_count,
- unbind_trailing_vb_count,
- take_ownership, vbuffers);
- }
u_vbuf_set_vertex_elements(vbuf, velems);
+ u_vbuf_set_vertex_buffers(vbuf, vb_count, true, vbuffers);
return;
}
if (ctx->vbuf_current) {
- /* Unbind all buffers in u_vbuf, because we'll use cso_context. */
- unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
- if (unbind_vb_count)
- u_vbuf_set_vertex_buffers(vbuf, 0, 0, unbind_vb_count, false, NULL);
-
/* Unset this to make sure the CSO is re-bound on the next use. */
u_vbuf_unset_vertex_elements(vbuf);
- ctx->vbuf_current = NULL;
- unbind_trailing_vb_count = 0;
+ ctx->vbuf_current = pipe->vbuf = NULL;
+ if (pipe->draw_vbo == tc_draw_vbo)
+ ctx->base.draw_vbo = pipe->draw_vbo;
}
- if (vb_count || unbind_trailing_vb_count) {
- pipe->set_vertex_buffers(pipe, 0, vb_count, unbind_trailing_vb_count,
- take_ownership, vbuffers);
- }
cso_set_vertex_elements_direct(ctx, velems);
+ pipe->set_vertex_buffers(pipe, vb_count, vbuffers);
}
-static bool
-cso_set_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
- unsigned idx, const struct pipe_sampler_state *templ)
+
+ALWAYS_INLINE static struct cso_sampler *
+set_sampler(struct cso_context_priv *ctx, enum pipe_shader_type shader_stage,
+ unsigned idx, const struct pipe_sampler_state *templ,
+ size_t key_size)
{
- unsigned key_size = sizeof(struct pipe_sampler_state);
- unsigned hash_key = cso_construct_key((void*)templ, key_size);
+ unsigned hash_key = cso_construct_key(templ, key_size);
struct cso_sampler *cso;
struct cso_hash_iter iter =
cso_find_state_template(&ctx->cache,
hash_key, CSO_SAMPLER,
- (void *) templ, key_size);
+ templ, key_size);
if (cso_hash_iter_is_null(iter)) {
cso = MALLOC(sizeof(struct cso_sampler));
@@ -1217,7 +1370,7 @@ cso_set_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
return false;
memcpy(&cso->state, templ, sizeof(*templ));
- cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
+ cso->data = ctx->base.pipe->create_sampler_state(ctx->base.pipe, &cso->state);
cso->hash_key = hash_key;
iter = cso_insert_state(&ctx->cache, hash_key, CSO_SAMPLER, cso);
@@ -1228,52 +1381,72 @@ cso_set_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
} else {
cso = cso_hash_iter_data(iter);
}
+ return cso;
+}
+
+ALWAYS_INLINE static bool
+cso_set_sampler(struct cso_context_priv *ctx, enum pipe_shader_type shader_stage,
+ unsigned idx, const struct pipe_sampler_state *templ,
+ size_t size)
+{
+ struct cso_sampler *cso = set_sampler(ctx, shader_stage, idx, templ, size);
ctx->samplers[shader_stage].cso_samplers[idx] = cso;
ctx->samplers[shader_stage].samplers[idx] = cso->data;
return true;
}
+
void
-cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
+cso_single_sampler(struct cso_context *cso, enum pipe_shader_type shader_stage,
unsigned idx, const struct pipe_sampler_state *templ)
{
- if (cso_set_sampler(ctx, shader_stage, idx, templ))
- ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+
+   /* The reason both blocks are duplicated is that we want the size parameter
+ * to be a constant expression to inline and unroll memcmp and hash key
+ * computations.
+ */
+ if (ctx->sampler_format) {
+ if (cso_set_sampler(ctx, shader_stage, idx, templ,
+ sizeof(struct pipe_sampler_state)))
+ ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
+ } else {
+ if (cso_set_sampler(ctx, shader_stage, idx, templ,
+ offsetof(struct pipe_sampler_state, border_color_format)))
+ ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
+ }
}
+
/**
* Send staged sampler state to the driver.
*/
void
-cso_single_sampler_done(struct cso_context *ctx,
+cso_single_sampler_done(struct cso_context *cso,
enum pipe_shader_type shader_stage)
{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
struct sampler_info *info = &ctx->samplers[shader_stage];
if (ctx->max_sampler_seen == -1)
return;
- ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
+ ctx->base.pipe->bind_sampler_states(ctx->base.pipe, shader_stage, 0,
ctx->max_sampler_seen + 1,
info->samplers);
ctx->max_sampler_seen = -1;
}
-/*
- * If the function encouters any errors it will return the
- * last one. Done to always try to set as many samplers
- * as possible.
- */
-void
-cso_set_samplers(struct cso_context *ctx,
- enum pipe_shader_type shader_stage,
- unsigned nr,
- const struct pipe_sampler_state **templates)
+ALWAYS_INLINE static int
+set_samplers(struct cso_context_priv *ctx,
+ enum pipe_shader_type shader_stage,
+ unsigned nr,
+ const struct pipe_sampler_state **templates,
+ size_t key_size)
{
int last = -1;
-
for (unsigned i = 0; i < nr; i++) {
if (!templates[i])
continue;
@@ -1293,25 +1466,52 @@ cso_set_samplers(struct cso_context *ctx,
*/
if (last >= 0 &&
!memcmp(templates[i], templates[last],
- sizeof(struct pipe_sampler_state))) {
+ key_size)) {
ctx->samplers[shader_stage].cso_samplers[i] =
ctx->samplers[shader_stage].cso_samplers[last];
ctx->samplers[shader_stage].samplers[i] =
ctx->samplers[shader_stage].samplers[last];
} else {
/* Look up the sampler state CSO. */
- cso_set_sampler(ctx, shader_stage, i, templates[i]);
+ cso_set_sampler(ctx, shader_stage, i, templates[i], key_size);
}
last = i;
}
+ return last;
+}
+
+
+/*
+ * If the function encounters any errors it will return the
+ * last one. Done to always try to set as many samplers
+ * as possible.
+ */
+void
+cso_set_samplers(struct cso_context *cso,
+ enum pipe_shader_type shader_stage,
+ unsigned nr,
+ const struct pipe_sampler_state **templates)
+{
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+ int last;
+
+ /* ensure sampler size is a constant for memcmp */
+ if (ctx->sampler_format) {
+ last = set_samplers(ctx, shader_stage, nr, templates,
+ sizeof(struct pipe_sampler_state));
+ } else {
+ last = set_samplers(ctx, shader_stage, nr, templates,
+ offsetof(struct pipe_sampler_state, border_color_format));
+ }
ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, last);
- cso_single_sampler_done(ctx, shader_stage);
+ cso_single_sampler_done(&ctx->base, shader_stage);
}
+
static void
-cso_save_fragment_samplers(struct cso_context *ctx)
+cso_save_fragment_samplers(struct cso_context_priv *ctx)
{
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
struct sampler_info *saved = &ctx->fragment_samplers_saved;
@@ -1323,7 +1523,7 @@ cso_save_fragment_samplers(struct cso_context *ctx)
static void
-cso_restore_fragment_samplers(struct cso_context *ctx)
+cso_restore_fragment_samplers(struct cso_context_priv *ctx)
{
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
struct sampler_info *saved = &ctx->fragment_samplers_saved;
@@ -1339,18 +1539,19 @@ cso_restore_fragment_samplers(struct cso_context *ctx)
}
}
- cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
+ cso_single_sampler_done(&ctx->base, PIPE_SHADER_FRAGMENT);
}
void
-cso_set_stream_outputs(struct cso_context *ctx,
+cso_set_stream_outputs(struct cso_context *cso,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offsets)
{
- struct pipe_context *pipe = ctx->pipe;
- uint i;
+ struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
+ struct pipe_context *pipe = ctx->base.pipe;
+ unsigned i;
if (!ctx->has_streamout) {
assert(num_targets == 0);
@@ -1376,28 +1577,28 @@ cso_set_stream_outputs(struct cso_context *ctx,
ctx->nr_so_targets = num_targets;
}
+
static void
-cso_save_stream_outputs(struct cso_context *ctx)
+cso_save_stream_outputs(struct cso_context_priv *ctx)
{
- uint i;
-
if (!ctx->has_streamout) {
return;
}
ctx->nr_so_targets_saved = ctx->nr_so_targets;
- for (i = 0; i < ctx->nr_so_targets; i++) {
+ for (unsigned i = 0; i < ctx->nr_so_targets; i++) {
assert(!ctx->so_targets_saved[i]);
pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
}
}
+
static void
-cso_restore_stream_outputs(struct cso_context *ctx)
+cso_restore_stream_outputs(struct cso_context_priv *ctx)
{
- struct pipe_context *pipe = ctx->pipe;
- uint i;
+ struct pipe_context *pipe = ctx->base.pipe;
+ unsigned i;
unsigned offset[PIPE_MAX_SO_BUFFERS];
if (!ctx->has_streamout) {
@@ -1435,8 +1636,9 @@ cso_restore_stream_outputs(struct cso_context *ctx)
* of CSO_BIT_x flags.
*/
void
-cso_save_state(struct cso_context *cso, unsigned state_mask)
+cso_save_state(struct cso_context *ctx, unsigned state_mask)
{
+ struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
assert(cso->saved_state == 0);
cso->saved_state = state_mask;
@@ -1476,7 +1678,7 @@ cso_save_state(struct cso_context *cso, unsigned state_mask)
if (state_mask & CSO_BIT_VIEWPORT)
cso_save_viewport(cso);
if (state_mask & CSO_BIT_PAUSE_QUERIES)
- cso->pipe->set_active_query_state(cso->pipe, false);
+ cso->base.pipe->set_active_query_state(cso->base.pipe, false);
}
@@ -1484,8 +1686,9 @@ cso_save_state(struct cso_context *cso, unsigned state_mask)
* Restore the state which was saved by cso_save_state().
*/
void
-cso_restore_state(struct cso_context *cso, unsigned unbind)
+cso_restore_state(struct cso_context *ctx, unsigned unbind)
{
+ struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
unsigned state_mask = cso->saved_state;
assert(state_mask);
@@ -1505,15 +1708,15 @@ cso_restore_state(struct cso_context *cso, unsigned unbind)
if (state_mask & CSO_BIT_VERTEX_SHADER)
cso_restore_vertex_shader(cso);
if (unbind & CSO_UNBIND_FS_SAMPLERVIEWS)
- cso->pipe->set_sampler_views(cso->pipe, PIPE_SHADER_FRAGMENT, 0, 0,
+ cso->base.pipe->set_sampler_views(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0,
cso->max_fs_samplerviews, false, NULL);
if (unbind & CSO_UNBIND_FS_SAMPLERVIEW0)
- cso->pipe->set_sampler_views(cso->pipe, PIPE_SHADER_FRAGMENT, 0, 0,
+ cso->base.pipe->set_sampler_views(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0,
1, false, NULL);
if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
cso_restore_fragment_samplers(cso);
if (unbind & CSO_UNBIND_FS_IMAGE0)
- cso->pipe->set_shader_images(cso->pipe, PIPE_SHADER_FRAGMENT, 0, 0, 1, NULL);
+ cso->base.pipe->set_shader_images(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0, 1, NULL);
if (state_mask & CSO_BIT_FRAMEBUFFER)
cso_restore_framebuffer(cso);
if (state_mask & CSO_BIT_BLEND)
@@ -1529,28 +1732,28 @@ cso_restore_state(struct cso_context *cso, unsigned unbind)
if (state_mask & CSO_BIT_VIEWPORT)
cso_restore_viewport(cso);
if (unbind & CSO_UNBIND_VS_CONSTANTS)
- cso->pipe->set_constant_buffer(cso->pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
+ cso->base.pipe->set_constant_buffer(cso->base.pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
if (unbind & CSO_UNBIND_FS_CONSTANTS)
- cso->pipe->set_constant_buffer(cso->pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
+ cso->base.pipe->set_constant_buffer(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
cso_restore_vertex_elements(cso);
- if (unbind & CSO_UNBIND_VERTEX_BUFFER0)
- cso->pipe->set_vertex_buffers(cso->pipe, 0, 0, 1, false, NULL);
if (state_mask & CSO_BIT_STREAM_OUTPUTS)
cso_restore_stream_outputs(cso);
if (state_mask & CSO_BIT_PAUSE_QUERIES)
- cso->pipe->set_active_query_state(cso->pipe, true);
+ cso->base.pipe->set_active_query_state(cso->base.pipe, true);
cso->saved_state = 0;
}
+
/**
* Save all the CSO state items specified by the state_mask bitmask
* of CSO_BIT_COMPUTE_x flags.
*/
void
-cso_save_compute_state(struct cso_context *cso, unsigned state_mask)
+cso_save_compute_state(struct cso_context *ctx, unsigned state_mask)
{
+ struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
assert(cso->saved_compute_state == 0);
cso->saved_compute_state = state_mask;
@@ -1567,8 +1770,9 @@ cso_save_compute_state(struct cso_context *cso, unsigned state_mask)
* Restore the state which was saved by cso_save_compute_state().
*/
void
-cso_restore_compute_state(struct cso_context *cso)
+cso_restore_compute_state(struct cso_context *ctx)
{
+ struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
unsigned state_mask = cso->saved_compute_state;
assert(state_mask);
@@ -1587,65 +1791,7 @@ cso_restore_compute_state(struct cso_context *cso)
/* drawing */
void
-cso_draw_vbo(struct cso_context *cso,
- const struct pipe_draw_info *info,
- unsigned drawid_offset,
- const struct pipe_draw_indirect_info *indirect,
- const struct pipe_draw_start_count_bias draw)
-{
- struct u_vbuf *vbuf = cso->vbuf_current;
-
- /* We can't have both indirect drawing and SO-vertex-count drawing */
- assert(!indirect ||
- indirect->buffer == NULL ||
- indirect->count_from_stream_output == NULL);
-
- /* We can't have SO-vertex-count drawing with an index buffer */
- assert(info->index_size == 0 ||
- !indirect ||
- indirect->count_from_stream_output == NULL);
-
- if (vbuf) {
- u_vbuf_draw_vbo(vbuf, info, drawid_offset, indirect, draw);
- } else {
- struct pipe_context *pipe = cso->pipe;
- pipe->draw_vbo(pipe, info, drawid_offset, indirect, &draw, 1);
- }
-}
-
-/* info->draw_id can be changed by the callee if increment_draw_id is true. */
-void
-cso_multi_draw(struct cso_context *cso,
- struct pipe_draw_info *info,
- unsigned drawid_offset,
- const struct pipe_draw_start_count_bias *draws,
- unsigned num_draws)
-{
- struct u_vbuf *vbuf = cso->vbuf_current;
-
- if (vbuf) {
- /* Increase refcount to be able to use take_index_buffer_ownership with
- * all draws.
- */
- if (num_draws > 1 && info->take_index_buffer_ownership)
- p_atomic_add(&info->index.resource->reference.count, num_draws - 1);
-
- unsigned drawid = drawid_offset;
- for (unsigned i = 0; i < num_draws; i++) {
- u_vbuf_draw_vbo(vbuf, info, drawid, NULL, draws[i]);
-
- if (info->increment_draw_id)
- drawid++;
- }
- } else {
- struct pipe_context *pipe = cso->pipe;
-
- pipe->draw_vbo(pipe, info, drawid_offset, NULL, draws, num_draws);
- }
-}
-
-void
-cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
+cso_draw_arrays(struct cso_context *ctx, unsigned mode, unsigned start, unsigned count)
{
struct pipe_draw_info info;
struct pipe_draw_start_count_bias draw;
@@ -1661,13 +1807,14 @@ cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
draw.count = count;
draw.index_bias = 0;
- cso_draw_vbo(cso, &info, 0, NULL, draw);
+ cso_draw_vbo(ctx, &info, 0, NULL, &draw, 1);
}
+
void
-cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
- uint start, uint count,
- uint start_instance, uint instance_count)
+cso_draw_arrays_instanced(struct cso_context *ctx, unsigned mode,
+ unsigned start, unsigned count,
+ unsigned start_instance, unsigned instance_count)
{
struct pipe_draw_info info;
struct pipe_draw_start_count_bias draw;
@@ -1685,5 +1832,5 @@ cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
draw.count = count;
draw.index_bias = 0;
- cso_draw_vbo(cso, &info, 0, NULL, draw);
+ cso_draw_vbo(ctx, &info, 0, NULL, &draw, 1);
}