Diffstat (limited to 'src/compiler/spirv/vtn_private.h')
-rw-r--r--  src/compiler/spirv/vtn_private.h  369
1 file changed, 228 insertions(+), 141 deletions(-)
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index d95e3c72e81..dcb905dc561 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -19,10 +19,6 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
- *
- * Authors:
- * Jason Ekstrand (jason@jlekstrand.net)
- *
*/
#ifndef _VTN_PRIVATE_H_
@@ -37,11 +33,21 @@
#include "spirv.h"
#include "vtn_generator_ids.h"
+extern uint32_t mesa_spirv_debug;
+
+#ifndef NDEBUG
+#define MESA_SPIRV_DEBUG(flag) unlikely(mesa_spirv_debug & (MESA_SPIRV_DEBUG_ ## flag))
+#else
+#define MESA_SPIRV_DEBUG(flag) false
+#endif
+
+#define MESA_SPIRV_DEBUG_STRUCTURED (1u << 0)
+
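A minimal usage sketch of the new debug toggle (illustrative only; the `func` variable and the message text are assumptions, not taken from this header):

   if (MESA_SPIRV_DEBUG(STRUCTURED)) {
      /* Only reached when the "structured" bit is set in mesa_spirv_debug in a
       * debug build; with NDEBUG the macro folds to false and the compiler
       * drops this block entirely.
       */
      fprintf(stderr, "structured CFG: %u ordered blocks\n",
              func->ordered_blocks_count);
   }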
struct vtn_builder;
struct vtn_decoration;
/* setjmp/longjmp is broken on MinGW: https://sourceforge.net/p/mingw-w64/bugs/406/ */
-#ifdef __MINGW32__
+#if defined(__MINGW32__) && !defined(_UCRT)
#define vtn_setjmp __builtin_setjmp
#define vtn_longjmp __builtin_longjmp
#else
@@ -115,6 +121,17 @@ _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
vtn_fail("%s", #expr); \
} while (0)
+/* These are used to allocate data that can be dropped at the end of
+ * parsing. Any NIR data structures should keep using ralloc, since
+ * they will outlive the parsing.
+ */
+#define vtn_alloc(B, TYPE) linear_alloc(B->lin_ctx, TYPE)
+#define vtn_zalloc(B, TYPE) linear_zalloc(B->lin_ctx, TYPE)
+#define vtn_alloc_array(B, TYPE, ELEMS) linear_alloc_array(B->lin_ctx, TYPE, ELEMS)
+#define vtn_zalloc_array(B, TYPE, ELEMS) linear_zalloc_array(B->lin_ctx, TYPE, ELEMS)
+#define vtn_alloc_size(B, SIZE) linear_alloc_child(B->lin_ctx, SIZE)
+#define vtn_zalloc_size(B, SIZE) linear_zalloc_child(B->lin_ctx, SIZE)
+
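A minimal allocation sketch (illustrative; assumes the builder's nir_shader pointer `b->shader` and a `func`, neither of which is shown in this hunk): parse-only data comes from the linear context, while anything the NIR shader keeps referencing stays on ralloc.

   /* Dropped in bulk when the linear context is freed after parsing. */
   struct vtn_case *cse = vtn_zalloc(b, struct vtn_case);
   struct vtn_block **ordered =
      vtn_zalloc_array(b, struct vtn_block *, func->block_count);

   /* Outlives parsing: allocate from the NIR shader's ralloc context. */
   nir_variable *var = rzalloc(b->shader, nir_variable);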
enum vtn_value_type {
vtn_value_type_invalid = 0,
vtn_value_type_undef,
@@ -130,80 +147,13 @@ enum vtn_value_type {
vtn_value_type_image_pointer,
};
-enum vtn_branch_type {
- vtn_branch_type_none,
- vtn_branch_type_if_merge,
- vtn_branch_type_switch_break,
- vtn_branch_type_switch_fallthrough,
- vtn_branch_type_loop_break,
- vtn_branch_type_loop_continue,
- vtn_branch_type_loop_back_edge,
- vtn_branch_type_discard,
- vtn_branch_type_terminate_invocation,
- vtn_branch_type_ignore_intersection,
- vtn_branch_type_terminate_ray,
- vtn_branch_type_return,
-};
-
-enum vtn_cf_node_type {
- vtn_cf_node_type_block,
- vtn_cf_node_type_if,
- vtn_cf_node_type_loop,
- vtn_cf_node_type_case,
- vtn_cf_node_type_switch,
- vtn_cf_node_type_function,
-};
-
-struct vtn_cf_node {
- struct list_head link;
- struct vtn_cf_node *parent;
- enum vtn_cf_node_type type;
-};
-
-struct vtn_loop {
- struct vtn_cf_node node;
-
- /* The main body of the loop */
- struct list_head body;
-
- /* The "continue" part of the loop. This gets executed after the body
- * and is where you go when you hit a continue.
- */
- struct list_head cont_body;
-
- struct vtn_block *header_block;
- struct vtn_block *cont_block;
- struct vtn_block *break_block;
-
- SpvLoopControlMask control;
-};
-
-struct vtn_if {
- struct vtn_cf_node node;
-
- enum vtn_branch_type then_type;
- struct list_head then_body;
-
- enum vtn_branch_type else_type;
- struct list_head else_body;
-
- struct vtn_block *header_block;
- struct vtn_block *merge_block;
-
- SpvSelectionControlMask control;
-};
+const char *vtn_value_type_to_string(enum vtn_value_type t);
struct vtn_case {
- struct vtn_cf_node node;
+ struct list_head link;
struct vtn_block *block;
- enum vtn_branch_type type;
- struct list_head body;
-
- /* The fallthrough case, if any */
- struct vtn_case *fallthrough;
-
/* The uint32_t values that map to this case */
struct util_dynarray values;
@@ -214,18 +164,8 @@ struct vtn_case {
bool visited;
};
-struct vtn_switch {
- struct vtn_cf_node node;
-
- uint32_t selector;
-
- struct list_head cases;
-
- struct vtn_block *break_block;
-};
-
struct vtn_block {
- struct vtn_cf_node node;
+ struct list_head link;
/** A pointer to the label instruction */
const uint32_t *label;
@@ -236,19 +176,6 @@ struct vtn_block {
/** A pointer to the branch instruction that ends this block */
const uint32_t *branch;
- enum vtn_branch_type branch_type;
-
- /* The CF node for which this is a merge target
- *
- * The SPIR-V spec requires that any given block can be the merge target
- * for at most one merge instruction. If this block is a merge target,
- * this points back to the block containing that merge instruction.
- */
- struct vtn_cf_node *merge_cf_node;
-
- /** Points to the loop that this block starts (if it starts a loop) */
- struct vtn_loop *loop;
-
/** Points to the switch case started by this block (if any) */
struct vtn_case *switch_case;
@@ -257,10 +184,22 @@ struct vtn_block {
/** attached nir_block */
struct nir_block *block;
+
+ /* Innermost construct that this block is part of. */
+ struct vtn_construct *parent;
+
+ /* Blocks that succeed this block. Used by structured control flow. */
+ struct vtn_successor *successors;
+ unsigned successors_count;
+
+ /* Position of this block in the structured post-order traversal. */
+ unsigned pos;
+
+ bool visited;
};
struct vtn_function {
- struct vtn_cf_node node;
+ struct list_head link;
struct vtn_type *type;
@@ -274,26 +213,29 @@ struct vtn_function {
const uint32_t *end;
+ SpvLinkageType linkage;
SpvFunctionControlMask control;
+
+ unsigned block_count;
+
+ /* Ordering of blocks to be processed by structured control flow. See
+ * vtn_structured_cfg.c for details.
+ */
+ unsigned ordered_blocks_count;
+ struct vtn_block **ordered_blocks;
+
+ /* Structured control flow constructs. See struct vtn_construct. */
+ struct list_head constructs;
};
-#define VTN_DECL_CF_NODE_CAST(_type) \
-static inline struct vtn_##_type * \
-vtn_cf_node_as_##_type(struct vtn_cf_node *node) \
-{ \
- assert(node->type == vtn_cf_node_type_##_type); \
- return (struct vtn_##_type *)node; \
-}
+#define vtn_foreach_function(func, func_list) \
+ list_for_each_entry(struct vtn_function, func, func_list, link)
-VTN_DECL_CF_NODE_CAST(block)
-VTN_DECL_CF_NODE_CAST(loop)
-VTN_DECL_CF_NODE_CAST(if)
-VTN_DECL_CF_NODE_CAST(case)
-VTN_DECL_CF_NODE_CAST(switch)
-VTN_DECL_CF_NODE_CAST(function)
+#define vtn_foreach_case(cse, case_list) \
+ list_for_each_entry(struct vtn_case, cse, case_list, link)
-#define vtn_foreach_cf_node(node, cf_list) \
- list_for_each_entry(struct vtn_cf_node, node, cf_list, link)
+#define vtn_foreach_case_safe(cse, case_list) \
+ list_for_each_entry_safe(struct vtn_case, cse, case_list, link)
typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp,
const uint32_t *, unsigned);
@@ -305,13 +247,26 @@ void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
void vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
+bool vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+void vtn_emit_cf_func_structured(struct vtn_builder *b, struct vtn_function *func,
+ vtn_instruction_handler handler);
+bool vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+void vtn_emit_ret_store(struct vtn_builder *b, const struct vtn_block *block);
+void vtn_build_structured_cfg(struct vtn_builder *b, const uint32_t *words,
+ const uint32_t *end);
+
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
const uint32_t *end, vtn_instruction_handler handler);
struct vtn_ssa_value {
+ bool is_variable;
+
union {
- nir_ssa_def *def;
+ nir_def *def;
+ nir_variable *var;
struct vtn_ssa_value **elems;
};
@@ -336,8 +291,10 @@ enum vtn_base_type {
vtn_base_type_sampler,
vtn_base_type_sampled_image,
vtn_base_type_accel_struct,
+ vtn_base_type_ray_query,
vtn_base_type_function,
vtn_base_type_event,
+ vtn_base_type_cooperative_matrix,
};
struct vtn_type {
@@ -446,6 +403,12 @@ struct vtn_type {
/* Return type for functions */
struct vtn_type *return_type;
};
+
+ /* Members for cooperative matrix types. */
+ struct {
+ struct glsl_cmat_description desc;
+ struct vtn_type *component_type;
+ };
};
};
@@ -479,6 +442,8 @@ struct vtn_access_chain {
/* Access qualifiers */
enum gl_access_qualifier access;
+ bool in_bounds;
+
/** Struct elements and array offsets.
*
* This is an array of 1 so that it can conveniently be created on the
@@ -498,6 +463,7 @@ enum vtn_variable_mode {
vtn_variable_mode_push_constant,
vtn_variable_mode_workgroup,
vtn_variable_mode_cross_workgroup,
+ vtn_variable_mode_task_payload,
vtn_variable_mode_generic,
vtn_variable_mode_constant,
vtn_variable_mode_input,
@@ -510,6 +476,7 @@ enum vtn_variable_mode {
vtn_variable_mode_ray_payload_in,
vtn_variable_mode_hit_attrib,
vtn_variable_mode_shader_record,
+ vtn_variable_mode_node_payload,
};
struct vtn_pointer {
@@ -538,8 +505,8 @@ struct vtn_pointer {
nir_deref_instr *deref;
/** A (block_index, offset) pair representing a UBO or SSBO position. */
- struct nir_ssa_def *block_index;
- struct nir_ssa_def *offset;
+ struct nir_def *block_index;
+ struct nir_def *offset;
/* Access qualifiers */
enum gl_access_qualifier access;
@@ -586,11 +553,14 @@ const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
enum vtn_variable_mode mode);
+mesa_scope
+vtn_translate_scope(struct vtn_builder *b, SpvScope scope);
+
struct vtn_image_pointer {
nir_deref_instr *image;
- nir_ssa_def *coord;
- nir_ssa_def *sample;
- nir_ssa_def *lod;
+ nir_def *coord;
+ nir_def *sample;
+ nir_def *lod;
};
struct vtn_value {
@@ -604,6 +574,9 @@ struct vtn_value {
/* Valid for vtn_value_type_constant to indicate the value is OpConstantNull. */
bool is_null_constant:1;
+ /* Valid when all the members of the value are undef. */
+ bool is_undef_constant:1;
+
const char *name;
struct vtn_decoration *decoration;
struct vtn_type *type;
@@ -621,29 +594,42 @@ struct vtn_value {
#define VTN_DEC_DECORATION -1
#define VTN_DEC_EXECUTION_MODE -2
+#define VTN_DEC_STRUCT_MEMBER_NAME0 -3
#define VTN_DEC_STRUCT_MEMBER0 0
struct vtn_decoration {
struct vtn_decoration *next;
- /* Specifies how to apply this decoration. Negative values represent a
- * decoration or execution mode. (See the VTN_DEC_ #defines above.)
- * Non-negative values specify that it applies to a structure member.
+ /* Different kinds of decorations are stored in a value,
+ the scope defines what decoration it refers to:
+
+ - VTN_DEC_DECORATION:
+ decoration associated with the value
+ - VTN_DEC_EXECUTION_MODE:
+ an execution mode associated with an entrypoint value
+ - VTN_DEC_STRUCT_MEMBER0 + m:
+ decoration associated with member m of a struct value
+ - VTN_DEC_STRUCT_MEMBER_NAME0 - m:
+ name of m'th member of a struct value
*/
int scope;
+ uint32_t num_operands;
const uint32_t *operands;
struct vtn_value *group;
union {
SpvDecoration decoration;
SpvExecutionMode exec_mode;
+ const char *member_name;
};
};
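Worked example of the scope encoding (decode sketch, illustrative only): a decoration on struct member 2 is stored with scope = VTN_DEC_STRUCT_MEMBER0 + 2 = 2, while that member's name uses scope = VTN_DEC_STRUCT_MEMBER_NAME0 - 2 = -5, so member names (scope <= -3) never collide with VTN_DEC_DECORATION (-1) or VTN_DEC_EXECUTION_MODE (-2).

   if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
      /* Decoration on a struct member (index = scope - VTN_DEC_STRUCT_MEMBER0). */
      unsigned member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
   } else if (dec->scope <= VTN_DEC_STRUCT_MEMBER_NAME0) {
      /* Name of a struct member (index = VTN_DEC_STRUCT_MEMBER_NAME0 - scope). */
      unsigned member = VTN_DEC_STRUCT_MEMBER_NAME0 - dec->scope;
   } else if (dec->scope == VTN_DEC_EXECUTION_MODE) {
      /* Execution mode attached to an entry point. */
   } else {
      /* VTN_DEC_DECORATION: decoration on the value itself. */
   }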
struct vtn_builder {
nir_builder nb;
+ linear_ctx *lin_ctx;
+
/* Used by vtn_fail to jump back to the beginning of SPIR-V compilation */
jmp_buf fail_jump;
@@ -663,14 +649,6 @@ struct vtn_builder {
int line, col;
/*
- * In SPIR-V, constants are global, whereas in NIR, the load_const
- * instruction we use is per-function. So while we parse each function, we
- * keep a hash table of constants we've resolved to nir_ssa_value's so
- * far, and we lazily resolve them when we see them used in a function.
- */
- struct hash_table *const_table;
-
- /*
* Map from phi instructions (pointer to the start of the instruction)
* to the variable corresponding to it.
*/
@@ -696,8 +674,16 @@ struct vtn_builder {
/* True if we need to fix up CS OpControlBarrier */
bool wa_glslang_cs_barrier;
- /* Workaround discard bugs in HLSL -> SPIR-V compilers */
+ /* True if we need to ignore undef workgroup-variable initializers (llvm-spirv workaround) */
+ bool wa_llvm_spirv_ignore_workgroup_initializer;
+
+ /* True if we need to ignore OpReturn after OpEmitMeshTasksEXT. */
+ bool wa_ignore_return_after_emit_mesh_tasks;
+
+ /* True if DemoteToHelperInvocation capability is used by the shader. */
bool uses_demote_to_helper_invocation;
+
+ /* Workaround discard bugs in HLSL -> SPIR-V compilers */
bool convert_discard_to_demote;
gl_shader_stage entry_point_stage;
@@ -705,6 +691,7 @@ struct vtn_builder {
struct vtn_value *entry_point;
struct vtn_value *workgroup_size_builtin;
bool variable_pointers;
+ bool image_gather_bias_lod;
uint32_t *interface_ids;
size_t interface_ids_count;
@@ -725,12 +712,20 @@ struct vtn_builder {
unsigned mem_model;
};
-nir_ssa_def *
+const char *
+vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
+ unsigned word_count, unsigned *words_used);
+
+nir_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr);
struct vtn_pointer *
-vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
+vtn_pointer_from_ssa(struct vtn_builder *b, nir_def *ssa,
struct vtn_type *ptr_type);
+struct vtn_ssa_value *
+vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
+ const struct glsl_type *type);
+
static inline struct vtn_value *
vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
{
@@ -771,16 +766,54 @@ vtn_push_value(struct vtn_builder *b, uint32_t value_id,
return &b->values[value_id];
}
+/* These out-of-line fail functions exist so that helpers like vtn_value()
+ * can be inlined with minimal code-size impact, while the failure paths
+ * can still produce detailed error output.
+ */
+
+void _vtn_fail_value_type_mismatch(struct vtn_builder *b, uint32_t value_id,
+ enum vtn_value_type value_type);
+void _vtn_fail_value_not_pointer(struct vtn_builder *b, uint32_t value_id);
+
static inline struct vtn_value *
vtn_value(struct vtn_builder *b, uint32_t value_id,
enum vtn_value_type value_type)
{
struct vtn_value *val = vtn_untyped_value(b, value_id);
- vtn_fail_if(val->value_type != value_type,
- "SPIR-V id %u is the wrong kind of value", value_id);
+ if (unlikely(val->value_type != value_type))
+ _vtn_fail_value_type_mismatch(b, value_id, value_type);
+ return val;
+}
+
+static inline struct vtn_value *
+vtn_pointer_value(struct vtn_builder *b, uint32_t value_id)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ if (unlikely(val->value_type != vtn_value_type_pointer &&
+ !val->is_null_constant))
+ _vtn_fail_value_not_pointer(b, value_id);
return val;
}
+static inline struct vtn_pointer *
+vtn_value_to_pointer(struct vtn_builder *b, struct vtn_value *value)
+{
+ if (value->is_null_constant) {
+ vtn_assert(glsl_type_is_vector_or_scalar(value->type->type));
+ nir_def *const_ssa =
+ vtn_const_ssa_value(b, value->constant, value->type->type)->def;
+ return vtn_pointer_from_ssa(b, const_ssa, value->type);
+ }
+ vtn_assert(value->value_type == vtn_value_type_pointer);
+ return value->pointer;
+}
+
+static inline struct vtn_pointer *
+vtn_pointer(struct vtn_builder *b, uint32_t value_id)
+{
+ return vtn_value_to_pointer(b, vtn_pointer_value(b, value_id));
+}
+
bool
vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
@@ -835,13 +868,23 @@ vtn_get_type(struct vtn_builder *b, uint32_t value_id)
return vtn_value(b, value_id, vtn_value_type_type)->type;
}
+static inline struct vtn_block *
+vtn_block(struct vtn_builder *b, uint32_t value_id)
+{
+ return vtn_value(b, value_id, vtn_value_type_block)->block;
+}
+
struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
struct vtn_value *vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
struct vtn_ssa_value *ssa);
-nir_ssa_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id);
+nir_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id);
struct vtn_value *vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id,
- nir_ssa_def *def);
+ nir_def *def);
+nir_deref_instr *vtn_get_deref_for_id(struct vtn_builder *b, uint32_t value_id);
+nir_deref_instr *vtn_get_deref_for_ssa_value(struct vtn_builder *b, struct vtn_ssa_value *ssa);
+struct vtn_value *vtn_push_var_ssa(struct vtn_builder *b, uint32_t value_id,
+ nir_variable *var);
struct vtn_value *vtn_push_pointer(struct vtn_builder *b,
uint32_t value_id,
@@ -852,7 +895,7 @@ struct vtn_sampled_image {
nir_deref_instr *sampler;
};
-nir_ssa_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
+nir_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
struct vtn_sampled_image si);
void
@@ -861,6 +904,7 @@ vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
const struct glsl_type *type);
+void vtn_set_ssa_value_var(struct vtn_builder *b, struct vtn_ssa_value *ssa, nir_variable *var);
struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
struct vtn_ssa_value *src);
@@ -869,9 +913,9 @@ nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
struct vtn_pointer *ptr);
-nir_ssa_def *
+nir_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
- nir_ssa_def **index_out);
+ nir_def **index_out);
nir_deref_instr *
vtn_get_call_payload_for_location(struct vtn_builder *b, uint32_t location_id);
@@ -927,6 +971,8 @@ void vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w,
void vtn_handle_no_contraction(struct vtn_builder *b, struct vtn_value *val);
+void vtn_handle_fp_fast_math(struct vtn_builder *b, struct vtn_value *val);
+
void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
@@ -945,6 +991,9 @@ struct vtn_builder* vtn_create_builder(const uint32_t *words, size_t word_count,
void vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
unsigned count);
+void vtn_handle_debug_text(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+
void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
@@ -991,6 +1040,13 @@ SpvMemorySemanticsMask vtn_mode_to_memory_semantics(enum vtn_variable_mode mode)
void vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
SpvMemorySemanticsMask semantics);
+bool vtn_value_is_relaxed_precision(struct vtn_builder *b, struct vtn_value *val);
+nir_def *
+vtn_mediump_downconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_def *def);
+struct vtn_ssa_value *
+vtn_mediump_downconvert_value(struct vtn_builder *b, struct vtn_ssa_value *src);
+void vtn_mediump_upconvert_value(struct vtn_builder *b, struct vtn_ssa_value *value);
+
static inline int
cmp_uint32_t(const void *pa, const void *pb)
{
@@ -1003,4 +1059,35 @@ cmp_uint32_t(const void *pa, const void *pb)
return 0;
}
+void
+vtn_parse_switch(struct vtn_builder *b,
+ const uint32_t *branch,
+ struct list_head *case_list);
+
+bool vtn_get_mem_operands(struct vtn_builder *b, const uint32_t *w, unsigned count,
+ unsigned *idx, SpvMemoryAccessMask *access, unsigned *alignment,
+ SpvScope *dest_scope, SpvScope *src_scope);
+void vtn_emit_make_visible_barrier(struct vtn_builder *b, SpvMemoryAccessMask access,
+ SpvScope scope, enum vtn_variable_mode mode);
+void vtn_emit_make_available_barrier(struct vtn_builder *b, SpvMemoryAccessMask access,
+ SpvScope scope, enum vtn_variable_mode mode);
+
+
+void vtn_handle_cooperative_type(struct vtn_builder *b, struct vtn_value *val,
+ SpvOp opcode, const uint32_t *w, unsigned count);
+void vtn_handle_cooperative_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+void vtn_handle_cooperative_alu(struct vtn_builder *b, struct vtn_value *dest_val,
+ const struct glsl_type *dest_type, SpvOp opcode,
+ const uint32_t *w, unsigned count);
+struct vtn_ssa_value *vtn_cooperative_matrix_extract(struct vtn_builder *b, struct vtn_ssa_value *mat,
+ const uint32_t *indices, unsigned num_indices);
+struct vtn_ssa_value *vtn_cooperative_matrix_insert(struct vtn_builder *b, struct vtn_ssa_value *mat,
+ struct vtn_ssa_value *insert,
+ const uint32_t *indices, unsigned num_indices);
+nir_deref_instr *vtn_create_cmat_temporary(struct vtn_builder *b,
+ const struct glsl_type *t, const char *name);
+
+gl_shader_stage vtn_stage_for_execution_model(SpvExecutionModel model);
+
#endif /* _VTN_PRIVATE_H_ */