Diffstat (limited to 'src/compiler/nir/nir_builder.h')
 -rw-r--r--  src/compiler/nir/nir_builder.h | 1638
 1 file changed, 928 insertions(+), 710 deletions(-)
diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
index 962fdf0fafd..0aeff58861a 100644
--- a/src/compiler/nir/nir_builder.h
+++ b/src/compiler/nir/nir_builder.h
@@ -24,9 +24,13 @@
#ifndef NIR_BUILDER_H
#define NIR_BUILDER_H
-#include "nir_control_flow.h"
#include "util/bitscan.h"
#include "util/half_float.h"
+#include "nir_control_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
struct exec_list;
@@ -44,43 +48,71 @@ typedef struct nir_builder {
nir_function_impl *impl;
} nir_builder;
-static inline void
-nir_builder_init(nir_builder *build, nir_function_impl *impl)
+static inline nir_builder
+nir_builder_create(nir_function_impl *impl)
{
- memset(build, 0, sizeof(*build));
- build->exact = false;
- build->impl = impl;
- build->shader = impl->function->shader;
+ nir_builder b;
+ memset(&b, 0, sizeof(b));
+ b.exact = false;
+ b.impl = impl;
+ b.shader = impl->function->shader;
+ return b;
}
-static inline nir_builder MUST_CHECK PRINTFLIKE(3, 4)
-nir_builder_init_simple_shader(gl_shader_stage stage,
- const nir_shader_compiler_options *options,
- const char *name, ...)
+/* Requires the cursor to be inside a nir_function_impl. */
+static inline nir_builder
+nir_builder_at(nir_cursor cursor)
{
- nir_builder b;
+ nir_cf_node *current_block = &nir_cursor_current_block(cursor)->cf_node;
- memset(&b, 0, sizeof(b));
- b.shader = nir_shader_create(NULL, stage, options, NULL);
+ nir_builder b = nir_builder_create(nir_cf_node_get_function(current_block));
+ b.cursor = cursor;
+ return b;
+}
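A minimal usage sketch (not part of the patch; `instr` and `val` are hypothetical): a pass can now construct a builder directly at the point it wants to emit code, e.g. right before an existing instruction.

   nir_builder b = nir_builder_at(nir_before_instr(instr));
   nir_def *biased = nir_iadd_imm(&b, val, 1); /* emitted before instr */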
+
+nir_builder MUST_CHECK PRINTFLIKE(3, 4)
+ nir_builder_init_simple_shader(gl_shader_stage stage,
+ const nir_shader_compiler_options *options,
+ const char *name, ...);
- if (name) {
- va_list args;
- va_start(args, name);
- b.shader->info.name = ralloc_vasprintf(b.shader, name, args);
- va_end(args);
+typedef bool (*nir_instr_pass_cb)(struct nir_builder *, nir_instr *, void *);
+typedef bool (*nir_intrinsic_pass_cb)(struct nir_builder *,
+ nir_intrinsic_instr *, void *);
+
+/**
+ * Iterates over all the instructions in a NIR function and calls the given pass
+ * on them.
+ *
+ * The pass should return true if it modified the function. In that case, only
+ * the metadata flags given in "preserved" are kept on the function impl.
+ *
+ * The builder will be initialized to point at the function impl, but its
+ * cursor is unset.
+ */
+static inline bool
+nir_function_instructions_pass(nir_function_impl *impl,
+ nir_instr_pass_cb pass,
+ nir_metadata preserved,
+ void *cb_data)
+{
+ bool progress = false;
+ nir_builder b = nir_builder_create(impl);
+
+ nir_foreach_block_safe(block, impl) {
+ nir_foreach_instr_safe(instr, block) {
+ progress |= pass(&b, instr, cb_data);
+ }
}
- nir_function *func = nir_function_create(b.shader, "main");
- func->is_entrypoint = true;
- b.exact = false;
- b.impl = nir_function_impl_create(func);
- b.cursor = nir_after_cf_list(&b.impl->body);
+ if (progress) {
+ nir_metadata_preserve(impl, preserved);
+ } else {
+ nir_metadata_preserve(impl, nir_metadata_all);
+ }
- return b;
+ return progress;
}
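For illustration, a typical caller pairs this with a callback of the nir_instr_pass_cb signature; a sketch with a hypothetical lower_foo():

   static bool
   lower_foo(nir_builder *b, nir_instr *instr, void *data)
   {
      if (instr->type != nir_instr_type_alu)
         return false;
      /* ...rewrite the instruction through the builder... */
      return true;
   }

   /* from the pass entrypoint: */
   bool progress =
      nir_function_instructions_pass(impl, lower_foo,
                                     nir_metadata_block_index |
                                     nir_metadata_dominance, NULL);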
-typedef bool (*nir_instr_pass_cb)(struct nir_builder *, nir_instr *, void *);
-
/**
* Iterates over all the instructions in a NIR shader and calls the given pass
* on them.
@@ -99,42 +131,58 @@ nir_shader_instructions_pass(nir_shader *shader,
{
bool progress = false;
- nir_foreach_function(function, shader) {
- if (!function->impl)
- continue;
+ nir_foreach_function_impl(impl, shader) {
+ progress |= nir_function_instructions_pass(impl, pass,
+ preserved, cb_data);
+ }
+
+ return progress;
+}
+
+/**
+ * Iterates over all the intrinsics in a NIR shader and calls the given pass on
+ * them.
+ *
+ * The pass should return true if it modified the shader. In that case, only
+ * the metadata flags given in "preserved" are kept on each function impl.
+ *
+ * The builder will be initialized to point at the function impl, but its
+ * cursor is unset.
+ */
+static inline bool
+nir_shader_intrinsics_pass(nir_shader *shader,
+ nir_intrinsic_pass_cb pass,
+ nir_metadata preserved,
+ void *cb_data)
+{
+ bool progress = false;
+ nir_foreach_function_impl(impl, shader) {
bool func_progress = false;
- nir_builder b;
- nir_builder_init(&b, function->impl);
+ nir_builder b = nir_builder_create(impl);
- nir_foreach_block_safe(block, function->impl) {
+ nir_foreach_block_safe(block, impl) {
nir_foreach_instr_safe(instr, block) {
- func_progress |= pass(&b, instr, cb_data);
+ if (instr->type == nir_instr_type_intrinsic) {
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ func_progress |= pass(&b, intr, cb_data);
+ }
}
}
if (func_progress) {
- nir_metadata_preserve(function->impl, preserved);
+ nir_metadata_preserve(impl, preserved);
progress = true;
} else {
- nir_metadata_preserve(function->impl, nir_metadata_all);
+ nir_metadata_preserve(impl, nir_metadata_all);
}
}
return progress;
}
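A matching sketch for the intrinsics variant (hypothetical lower_ubo_load()); note the callback sets the cursor itself, since it starts out unset:

   static bool
   lower_ubo_load(nir_builder *b, nir_intrinsic_instr *intr, void *data)
   {
      if (intr->intrinsic != nir_intrinsic_load_ubo)
         return false;
      b->cursor = nir_before_instr(&intr->instr);
      /* ...emit the replacement sequence... */
      return true;
   }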
-static inline void
-nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
-{
- nir_instr_insert(build->cursor, instr);
-
- if (build->update_divergence)
- nir_update_instr_divergence(build->shader, instr);
-
- /* Move the cursor forward. */
- build->cursor = nir_after_instr(instr);
-}
+void nir_builder_instr_insert(nir_builder *build, nir_instr *instr);
+void nir_builder_instr_insert_at_top(nir_builder *build, nir_instr *instr);
static inline nir_instr *
nir_builder_last_instr(nir_builder *build)
@@ -143,121 +191,72 @@ nir_builder_last_instr(nir_builder *build)
return build->cursor.instr;
}
-static inline void
-nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
-{
- nir_cf_node_insert(build->cursor, cf);
-}
+/* General nir_build_alu() that takes up to four sources; pass NULL for any
+ * unused trailing sources. */
+nir_def *
+nir_build_alu(nir_builder *build, nir_op op, nir_def *src0,
+ nir_def *src1, nir_def *src2, nir_def *src3);
-static inline bool
-nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
-{
- nir_block *block = nir_cursor_current_block(build->cursor);
- for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
- if (n == cf_node)
- return true;
- }
- return false;
-}
+/* Fixed-arg-count variants to reduce size of codegen. */
+nir_def *
+nir_build_alu1(nir_builder *build, nir_op op, nir_def *src0);
+nir_def *
+nir_build_alu2(nir_builder *build, nir_op op, nir_def *src0,
+ nir_def *src1);
+nir_def *
+nir_build_alu3(nir_builder *build, nir_op op, nir_def *src0,
+ nir_def *src1, nir_def *src2);
+nir_def *
+nir_build_alu4(nir_builder *build, nir_op op, nir_def *src0,
+ nir_def *src1, nir_def *src2, nir_def *src3);
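Each fixed-arity variant behaves like nir_build_alu() with NULL trailing sources; e.g. (sketch, with hypothetical defs x and y):

   nir_def *sum = nir_build_alu2(b, nir_op_iadd, x, y);
   /* equivalent to nir_build_alu(b, nir_op_iadd, x, y, NULL, NULL) */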
-static inline nir_if *
-nir_push_if_src(nir_builder *build, nir_src condition)
-{
- nir_if *nif = nir_if_create(build->shader);
- nif->condition = condition;
- nir_builder_cf_insert(build, &nif->cf_node);
- build->cursor = nir_before_cf_list(&nif->then_list);
- return nif;
-}
+nir_def *nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs);
-static inline nir_if *
-nir_push_if(nir_builder *build, nir_ssa_def *condition)
-{
- return nir_push_if_src(build, nir_src_for_ssa(condition));
-}
+nir_def *
+nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
+ nir_deref_instr *texture,
+ nir_deref_instr *sampler,
+ unsigned num_extra_srcs,
+ const nir_tex_src *extra_srcs);
-static inline nir_if *
-nir_push_else(nir_builder *build, nir_if *nif)
-{
- if (nif) {
- assert(nir_builder_is_inside_cf(build, &nif->cf_node));
- } else {
- nir_block *block = nir_cursor_current_block(build->cursor);
- nif = nir_cf_node_as_if(block->cf_node.parent);
- }
- build->cursor = nir_before_cf_list(&nif->else_list);
- return nif;
-}
-static inline void
-nir_pop_if(nir_builder *build, nir_if *nif)
-{
- if (nif) {
- assert(nir_builder_is_inside_cf(build, &nif->cf_node));
- } else {
- nir_block *block = nir_cursor_current_block(build->cursor);
- nif = nir_cf_node_as_if(block->cf_node.parent);
- }
- build->cursor = nir_after_cf_node(&nif->cf_node);
-}
+void nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf);
-static inline nir_ssa_def *
-nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
-{
- nir_block *block = nir_cursor_current_block(build->cursor);
- nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));
+bool nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node);
- nir_phi_instr *phi = nir_phi_instr_create(build->shader);
- nir_phi_instr_add_src(phi, nir_if_last_then_block(nif), nir_src_for_ssa(then_def));
- nir_phi_instr_add_src(phi, nir_if_last_else_block(nif), nir_src_for_ssa(else_def));
+nir_if *
+nir_push_if(nir_builder *build, nir_def *condition);
- assert(then_def->num_components == else_def->num_components);
- assert(then_def->bit_size == else_def->bit_size);
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- then_def->num_components, then_def->bit_size, NULL);
+nir_if *
+nir_push_else(nir_builder *build, nir_if *nif);
- nir_builder_instr_insert(build, &phi->instr);
+void nir_pop_if(nir_builder *build, nir_if *nif);
- return &phi->dest.ssa;
-}
+nir_def *
+nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def);
-static inline nir_loop *
-nir_push_loop(nir_builder *build)
-{
- nir_loop *loop = nir_loop_create(build->shader);
- nir_builder_cf_insert(build, &loop->cf_node);
- build->cursor = nir_before_cf_list(&loop->body);
- return loop;
-}
+nir_loop *
+nir_push_loop(nir_builder *build);
-static inline void
-nir_pop_loop(nir_builder *build, nir_loop *loop)
-{
- if (loop) {
- assert(nir_builder_is_inside_cf(build, &loop->cf_node));
- } else {
- nir_block *block = nir_cursor_current_block(build->cursor);
- loop = nir_cf_node_as_loop(block->cf_node.parent);
- }
- build->cursor = nir_after_cf_node(&loop->cf_node);
-}
+nir_loop *
+nir_push_continue(nir_builder *build, nir_loop *loop);
-static inline nir_ssa_def *
-nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
+void nir_pop_loop(nir_builder *build, nir_loop *loop);
+
+static inline nir_def *
+nir_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
- nir_ssa_undef_instr *undef =
- nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
+ nir_undef_instr *undef =
+ nir_undef_instr_create(build->shader, num_components, bit_size);
if (!undef)
return NULL;
- nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);
- if (build->update_divergence)
- nir_update_instr_divergence(build->shader, &undef->instr);
+ nir_builder_instr_insert_at_top(build, &undef->instr);
return &undef->def;
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_build_imm(nir_builder *build, unsigned num_components,
unsigned bit_size, const nir_const_value *value)
{
@@ -273,7 +272,7 @@ nir_build_imm(nir_builder *build, unsigned num_components,
return &load_const->def;
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
{
nir_load_const_instr *load_const =
@@ -286,57 +285,57 @@ nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
return &load_const->def;
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
{
nir_const_value v = nir_const_value_for_bool(x, bit_size);
return nir_build_imm(build, 1, bit_size, &v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_bool(nir_builder *build, bool x)
{
return nir_imm_boolN_t(build, x, 1);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_true(nir_builder *build)
{
return nir_imm_bool(build, true);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_false(nir_builder *build)
{
return nir_imm_bool(build, false);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
{
nir_const_value v = nir_const_value_for_float(x, bit_size);
return nir_build_imm(build, 1, bit_size, &v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_float16(nir_builder *build, float x)
{
return nir_imm_floatN_t(build, x, 16);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_float(nir_builder *build, float x)
{
return nir_imm_floatN_t(build, x, 32);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_double(nir_builder *build, double x)
{
return nir_imm_floatN_t(build, x, 64);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_vec2(nir_builder *build, float x, float y)
{
nir_const_value v[2] = {
@@ -346,7 +345,7 @@ nir_imm_vec2(nir_builder *build, float x, float y)
return nir_build_imm(build, 2, 32, v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_vec3(nir_builder *build, float x, float y, float z)
{
nir_const_value v[3] = {
@@ -357,7 +356,7 @@ nir_imm_vec3(nir_builder *build, float x, float y, float z)
return nir_build_imm(build, 3, 32, v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
nir_const_value v[4] = {
@@ -370,7 +369,7 @@ nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
return nir_build_imm(build, 4, 32, v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
{
nir_const_value v[4] = {
@@ -383,26 +382,26 @@ nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
return nir_build_imm(build, 4, 16, v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
{
nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
return nir_build_imm(build, 1, bit_size, &v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_int(nir_builder *build, int x)
{
return nir_imm_intN_t(build, x, 32);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_int64(nir_builder *build, int64_t x)
{
return nir_imm_intN_t(build, x, 64);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_ivec2(nir_builder *build, int x, int y)
{
nir_const_value v[2] = {
@@ -413,7 +412,7 @@ nir_imm_ivec2(nir_builder *build, int x, int y)
return nir_build_imm(build, 2, 32, v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_ivec3(nir_builder *build, int x, int y, int z)
{
nir_const_value v[3] = {
@@ -425,7 +424,7 @@ nir_imm_ivec3(nir_builder *build, int x, int y, int z)
return nir_build_imm(build, 3, 32, v);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
nir_const_value v[4] = {
@@ -438,134 +437,129 @@ nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
return nir_build_imm(build, 4, 32, v);
}
-static inline nir_ssa_def *
-nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
-{
- const nir_op_info *op_info = &nir_op_infos[instr->op];
-
- instr->exact = build->exact;
+nir_def *
+nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr);
- /* Guess the number of components the destination temporary should have
- * based on our input sizes, if it's not fixed for the op.
- */
- unsigned num_components = op_info->output_size;
- if (num_components == 0) {
- for (unsigned i = 0; i < op_info->num_inputs; i++) {
- if (op_info->input_sizes[i] == 0)
- num_components = MAX2(num_components,
- instr->src[i].src.ssa->num_components);
- }
- }
- assert(num_components != 0);
+/* For the couple of special cases with more than 4 source args: */
+nir_def *
+nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs);
- /* Figure out the bitwidth based on the source bitwidth if the instruction
- * is variable-width.
- */
- unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
- if (bit_size == 0) {
- for (unsigned i = 0; i < op_info->num_inputs; i++) {
- unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
- if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
- if (bit_size)
- assert(src_bit_size == bit_size);
- else
- bit_size = src_bit_size;
- } else {
- assert(src_bit_size ==
- nir_alu_type_get_type_size(op_info->input_types[i]));
- }
- }
- }
+/* Generic builder for system values. */
+nir_def *
+nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
+ unsigned num_components, unsigned bit_size);
- /* When in doubt, assume 32. */
- if (bit_size == 0)
- bit_size = 32;
+#include "nir_builder_opcodes.h"
+#undef nir_deref_mode_is
- /* Make sure we don't swizzle from outside of our source vector (like if a
- * scalar value was passed into a multiply with a vector).
- */
- for (unsigned i = 0; i < op_info->num_inputs; i++) {
- for (unsigned j = instr->src[i].src.ssa->num_components;
- j < NIR_MAX_VEC_COMPONENTS; j++) {
- instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
- }
- }
+nir_def *
+nir_type_convert(nir_builder *b,
+ nir_def *src,
+ nir_alu_type src_type,
+ nir_alu_type dest_type,
+ nir_rounding_mode rnd);
- nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
- bit_size, NULL);
- instr->dest.write_mask = (1 << num_components) - 1;
+static inline nir_def *
+nir_convert_to_bit_size(nir_builder *b,
+ nir_def *src,
+ nir_alu_type type,
+ unsigned bit_size)
+{
+ return nir_type_convert(b, src, type, (nir_alu_type)(type | bit_size),
+ nir_rounding_mode_undef);
+}
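For example (sketch), widening an unsigned value to 64 bits, which is exactly what the nir_u2uN() wrapper below expands to:

   nir_def *wide = nir_convert_to_bit_size(b, val, nir_type_uint, 64);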
- nir_builder_instr_insert(build, &instr->instr);
+static inline nir_def *
+nir_i2iN(nir_builder *b, nir_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
+}
- return &instr->dest.dest.ssa;
+static inline nir_def *
+nir_u2uN(nir_builder *b, nir_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
}
-static inline nir_ssa_def *
-nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
- nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
+static inline nir_def *
+nir_b2bN(nir_builder *b, nir_def *src, unsigned bit_size)
{
- nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
- if (!instr)
- return NULL;
+ return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
+}
- instr->src[0].src = nir_src_for_ssa(src0);
- if (src1)
- instr->src[1].src = nir_src_for_ssa(src1);
- if (src2)
- instr->src[2].src = nir_src_for_ssa(src2);
- if (src3)
- instr->src[3].src = nir_src_for_ssa(src3);
+static inline nir_def *
+nir_f2fN(nir_builder *b, nir_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
+}
- return nir_builder_alu_instr_finish_and_insert(build, instr);
+static inline nir_def *
+nir_i2b(nir_builder *b, nir_def *src)
+{
+ return nir_ine_imm(b, src, 0);
}
-/* for the couple special cases with more than 4 src args: */
-static inline nir_ssa_def *
-nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
+static inline nir_def *
+nir_b2iN(nir_builder *b, nir_def *src, uint32_t bit_size)
{
- const nir_op_info *op_info = &nir_op_infos[op];
- nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
- if (!instr)
- return NULL;
+ return nir_type_convert(b, src, nir_type_bool,
+ (nir_alu_type)(nir_type_int | bit_size),
+ nir_rounding_mode_undef);
+}
- for (unsigned i = 0; i < op_info->num_inputs; i++)
- instr->src[i].src = nir_src_for_ssa(srcs[i]);
+static inline nir_def *
+nir_b2fN(nir_builder *b, nir_def *src, uint32_t bit_size)
+{
+ return nir_type_convert(b, src, nir_type_bool,
+ (nir_alu_type)(nir_type_float | bit_size),
+ nir_rounding_mode_undef);
+}
- return nir_builder_alu_instr_finish_and_insert(build, instr);
+static inline nir_def *
+nir_i2fN(nir_builder *b, nir_def *src, unsigned bit_size)
+{
+ return nir_type_convert(b, src, nir_type_int,
+ (nir_alu_type)(nir_type_float | bit_size),
+ nir_rounding_mode_undef);
}
-/* Generic builder for system values. */
-static inline nir_ssa_def *
-nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
- unsigned num_components, unsigned bit_size)
+static inline nir_def *
+nir_u2fN(nir_builder *b, nir_def *src, unsigned bit_size)
{
- nir_intrinsic_instr *load = nir_intrinsic_instr_create(build->shader, op);
- if (nir_intrinsic_infos[op].dest_components > 0)
- assert(num_components == nir_intrinsic_infos[op].dest_components);
- else
- load->num_components = num_components;
- load->const_index[0] = index;
+ return nir_type_convert(b, src, nir_type_uint,
+ (nir_alu_type)(nir_type_float | bit_size),
+ nir_rounding_mode_undef);
+}
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
- nir_builder_instr_insert(build, &load->instr);
- return &load->dest.ssa;
+static inline nir_def *
+nir_f2uN(nir_builder *b, nir_def *src, unsigned bit_size)
+{
+ return nir_type_convert(b, src, nir_type_float,
+ (nir_alu_type)(nir_type_uint | bit_size),
+ nir_rounding_mode_undef);
}
-#include "nir_builder_opcodes.h"
-#undef nir_deref_mode_is
+static inline nir_def *
+nir_f2iN(nir_builder *b, nir_def *src, unsigned bit_size)
+{
+ return nir_type_convert(b, src, nir_type_float,
+ (nir_alu_type)(nir_type_int | bit_size),
+ nir_rounding_mode_undef);
+}
-static inline nir_ssa_def *
-nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
+static inline nir_def *
+nir_vec(nir_builder *build, nir_def **comp, unsigned num_components)
{
return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
}
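Typical use (sketch, with hypothetical scalar defs x, y, z):

   nir_def *comps[3] = { x, y, z };
   nir_def *pos = nir_vec(b, comps, 3);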
-static inline nir_ssa_def *
+nir_def *
+nir_vec_scalars(nir_builder *build, nir_scalar *comp, unsigned num_components);
+
+static inline nir_def *
nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
- assert(!src.abs && !src.negate);
- if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
+ if (src.src.ssa->num_components == num_components) {
bool any_swizzles = false;
for (unsigned i = 0; i < num_components; i++) {
if (src.swizzle[i] != i)
@@ -576,21 +570,20 @@ nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
}
nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
- nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
- nir_src_bit_size(src.src), NULL);
+ nir_def_init(&mov->instr, &mov->def, num_components,
+ nir_src_bit_size(src.src));
mov->exact = build->exact;
- mov->dest.write_mask = (1 << num_components) - 1;
mov->src[0] = src;
nir_builder_instr_insert(build, &mov->instr);
- return &mov->dest.dest.ssa;
+ return &mov->def;
}
/**
* Construct a mov that reswizzles the source's components.
*/
-static inline nir_ssa_def *
-nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
+static inline nir_def *
+nir_swizzle(nir_builder *build, nir_def *src, const unsigned *swiz,
unsigned num_components)
{
assert(num_components <= NIR_MAX_VEC_COMPONENTS);
@@ -611,18 +604,25 @@ nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
}
/* Selects the right fdot given the number of components in each source. */
-static inline nir_ssa_def *
-nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
+static inline nir_def *
+nir_fdot(nir_builder *build, nir_def *src0, nir_def *src1)
{
assert(src0->num_components == src1->num_components);
switch (src0->num_components) {
- case 1: return nir_fmul(build, src0, src1);
- case 2: return nir_fdot2(build, src0, src1);
- case 3: return nir_fdot3(build, src0, src1);
- case 4: return nir_fdot4(build, src0, src1);
- case 5: return nir_fdot5(build, src0, src1);
- case 8: return nir_fdot8(build, src0, src1);
- case 16: return nir_fdot16(build, src0, src1);
+ case 1:
+ return nir_fmul(build, src0, src1);
+ case 2:
+ return nir_fdot2(build, src0, src1);
+ case 3:
+ return nir_fdot3(build, src0, src1);
+ case 4:
+ return nir_fdot4(build, src0, src1);
+ case 5:
+ return nir_fdot5(build, src0, src1);
+ case 8:
+ return nir_fdot8(build, src0, src1);
+ case 16:
+ return nir_fdot16(build, src0, src1);
default:
unreachable("bad component size");
}
@@ -630,58 +630,72 @@ nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
return NULL;
}
-static inline nir_ssa_def *
-nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+static inline nir_def *
+nir_ball_iequal(nir_builder *b, nir_def *src0, nir_def *src1)
{
switch (src0->num_components) {
- case 1: return nir_ieq(b, src0, src1);
- case 2: return nir_ball_iequal2(b, src0, src1);
- case 3: return nir_ball_iequal3(b, src0, src1);
- case 4: return nir_ball_iequal4(b, src0, src1);
- case 5: return nir_ball_iequal5(b, src0, src1);
- case 8: return nir_ball_iequal8(b, src0, src1);
- case 16: return nir_ball_iequal16(b, src0, src1);
+ case 1:
+ return nir_ieq(b, src0, src1);
+ case 2:
+ return nir_ball_iequal2(b, src0, src1);
+ case 3:
+ return nir_ball_iequal3(b, src0, src1);
+ case 4:
+ return nir_ball_iequal4(b, src0, src1);
+ case 5:
+ return nir_ball_iequal5(b, src0, src1);
+ case 8:
+ return nir_ball_iequal8(b, src0, src1);
+ case 16:
+ return nir_ball_iequal16(b, src0, src1);
default:
unreachable("bad component size");
}
}
-static inline nir_ssa_def *
-nir_ball(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_ball(nir_builder *b, nir_def *src)
{
return nir_ball_iequal(b, src, nir_imm_true(b));
}
-static inline nir_ssa_def *
-nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+static inline nir_def *
+nir_bany_inequal(nir_builder *b, nir_def *src0, nir_def *src1)
{
switch (src0->num_components) {
- case 1: return nir_ine(b, src0, src1);
- case 2: return nir_bany_inequal2(b, src0, src1);
- case 3: return nir_bany_inequal3(b, src0, src1);
- case 4: return nir_bany_inequal4(b, src0, src1);
- case 5: return nir_bany_inequal5(b, src0, src1);
- case 8: return nir_bany_inequal8(b, src0, src1);
- case 16: return nir_bany_inequal16(b, src0, src1);
+ case 1:
+ return nir_ine(b, src0, src1);
+ case 2:
+ return nir_bany_inequal2(b, src0, src1);
+ case 3:
+ return nir_bany_inequal3(b, src0, src1);
+ case 4:
+ return nir_bany_inequal4(b, src0, src1);
+ case 5:
+ return nir_bany_inequal5(b, src0, src1);
+ case 8:
+ return nir_bany_inequal8(b, src0, src1);
+ case 16:
+ return nir_bany_inequal16(b, src0, src1);
default:
unreachable("bad component size");
}
}
-static inline nir_ssa_def *
-nir_bany(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_bany(nir_builder *b, nir_def *src)
{
return nir_bany_inequal(b, src, nir_imm_false(b));
}
-static inline nir_ssa_def *
-nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
+static inline nir_def *
+nir_channel(nir_builder *b, nir_def *def, unsigned c)
{
return nir_swizzle(b, def, &c, 1);
}
-static inline nir_ssa_def *
-nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
+static inline nir_def *
+nir_channels(nir_builder *b, nir_def *def, nir_component_mask_t mask)
{
unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
@@ -694,30 +708,30 @@ nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
return nir_swizzle(b, def, swizzle, num_channels);
}
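For example (sketch), mask 0x5 (0b0101) selects the .x and .z channels of a vec4:

   nir_def *xz = nir_channels(b, vec4_val, 0x5);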
-static inline nir_ssa_def *
-_nir_select_from_array_helper(nir_builder *b, nir_ssa_def **arr,
- nir_ssa_def *idx,
+static inline nir_def *
+_nir_select_from_array_helper(nir_builder *b, nir_def **arr,
+ nir_def *idx,
unsigned start, unsigned end)
{
if (start == end - 1) {
return arr[start];
} else {
unsigned mid = start + (end - start) / 2;
- return nir_bcsel(b, nir_ilt(b, idx, nir_imm_intN_t(b, mid, idx->bit_size)),
+ return nir_bcsel(b, nir_ilt_imm(b, idx, mid),
_nir_select_from_array_helper(b, arr, idx, start, mid),
_nir_select_from_array_helper(b, arr, idx, mid, end));
}
}
-static inline nir_ssa_def *
-nir_select_from_ssa_def_array(nir_builder *b, nir_ssa_def **arr,
- unsigned arr_len, nir_ssa_def *idx)
+static inline nir_def *
+nir_select_from_ssa_def_array(nir_builder *b, nir_def **arr,
+ unsigned arr_len, nir_def *idx)
{
return _nir_select_from_array_helper(b, arr, idx, 0, arr_len);
}
-static inline nir_ssa_def *
-nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
+static inline nir_def *
+nir_vector_extract(nir_builder *b, nir_def *vec, nir_def *c)
{
nir_src c_src = nir_src_for_ssa(c);
if (nir_src_is_const(c_src)) {
@@ -725,9 +739,9 @@ nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
if (c_const < vec->num_components)
return nir_channel(b, vec, c_const);
else
- return nir_ssa_undef(b, 1, vec->bit_size);
+ return nir_undef(b, 1, vec->bit_size);
} else {
- nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+ nir_def *comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < vec->num_components; i++)
comps[i] = nir_channel(b, vec, i);
return nir_select_from_ssa_def_array(b, comps, vec->num_components, c);
@@ -735,9 +749,9 @@ nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
}
/** Replaces the component of `vec` specified by `c` with `scalar` */
-static inline nir_ssa_def *
-nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
- nir_ssa_def *scalar, unsigned c)
+static inline nir_def *
+nir_vector_insert_imm(nir_builder *b, nir_def *vec,
+ nir_def *scalar, unsigned c)
{
assert(scalar->num_components == 1);
assert(c < vec->num_components);
@@ -759,9 +773,9 @@ nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
}
/** Replaces the component of `vec` specified by `c` with `scalar` */
-static inline nir_ssa_def *
-nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
- nir_ssa_def *c)
+static inline nir_def *
+nir_vector_insert(nir_builder *b, nir_def *vec, nir_def *scalar,
+ nir_def *c)
{
assert(scalar->num_components == 1);
assert(c->num_components == 1);
@@ -777,7 +791,7 @@ nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
- nir_ssa_def *per_comp_idx =
+ nir_def *per_comp_idx =
nir_build_imm(b, vec->num_components,
c->bit_size, per_comp_idx_const);
@@ -789,38 +803,21 @@ nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
}
}
-static inline nir_ssa_def *
-nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
+static inline nir_def *
+nir_replicate(nir_builder *b, nir_def *scalar, unsigned num_components)
{
- if (x->bit_size == dest_bit_size)
- return x;
-
- switch (dest_bit_size) {
- case 64: return nir_i2i64(build, x);
- case 32: return nir_i2i32(build, x);
- case 16: return nir_i2i16(build, x);
- case 8: return nir_i2i8(build, x);
- default: unreachable("Invalid bit size");
- }
-}
+ assert(scalar->num_components == 1);
+ assert(num_components <= NIR_MAX_VEC_COMPONENTS);
-static inline nir_ssa_def *
-nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
-{
- if (x->bit_size == dest_bit_size)
- return x;
+ nir_def *copies[NIR_MAX_VEC_COMPONENTS] = { NULL };
+ for (unsigned i = 0; i < num_components; ++i)
+ copies[i] = scalar;
- switch (dest_bit_size) {
- case 64: return nir_u2u64(build, x);
- case 32: return nir_u2u32(build, x);
- case 16: return nir_u2u16(build, x);
- case 8: return nir_u2u8(build, x);
- default: unreachable("Invalid bit size");
- }
+ return nir_vec(b, copies, num_components);
}
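Sketch: splatting a hypothetical scalar def across four components:

   nir_def *splat = nir_replicate(b, scalar, 4);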
-static inline nir_ssa_def *
-nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_iadd_imm(nir_builder *build, nir_def *x, uint64_t y)
{
assert(x->bit_size <= 64);
y &= BITFIELD64_MASK(x->bit_size);
@@ -832,38 +829,44 @@ nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
}
}
-static inline nir_ssa_def *
-nir_iadd_imm_nuw(nir_builder *b, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_iadd_imm_nuw(nir_builder *b, nir_def *x, uint64_t y)
{
- nir_ssa_def *d = nir_iadd_imm(b, x, y);
+ nir_def *d = nir_iadd_imm(b, x, y);
if (d != x && d->parent_instr->type == nir_instr_type_alu)
nir_instr_as_alu(d->parent_instr)->no_unsigned_wrap = true;
return d;
}
-static inline nir_ssa_def *
-nir_iadd_nuw(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_iadd_nuw(nir_builder *b, nir_def *x, nir_def *y)
{
- nir_ssa_def *d = nir_iadd(b, x, y);
+ nir_def *d = nir_iadd(b, x, y);
nir_instr_as_alu(d->parent_instr)->no_unsigned_wrap = true;
return d;
}
-static inline nir_ssa_def *
-nir_ieq_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_fgt_imm(nir_builder *build, nir_def *src1, double src2)
+{
+ return nir_flt(build, nir_imm_floatN_t(build, src2, src1->bit_size), src1);
+}
+
+static inline nir_def *
+nir_fle_imm(nir_builder *build, nir_def *src1, double src2)
{
- return nir_ieq(build, x, nir_imm_intN_t(build, y, x->bit_size));
+ return nir_fge(build, nir_imm_floatN_t(build, src2, src1->bit_size), src1);
}
/* Use nir_iadd(x, -y) for reversing parameter ordering */
-static inline nir_ssa_def *
-nir_isub_imm(nir_builder *build, uint64_t y, nir_ssa_def *x)
+static inline nir_def *
+nir_isub_imm(nir_builder *build, uint64_t y, nir_def *x)
{
return nir_isub(build, nir_imm_intN_t(build, y, x->bit_size), x);
}
-static inline nir_ssa_def *
-_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
+static inline nir_def *
+_nir_mul_imm(nir_builder *build, nir_def *x, uint64_t y, bool amul)
{
assert(x->bit_size <= 64);
y &= BITFIELD64_MASK(x->bit_size);
@@ -872,7 +875,8 @@ _nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
return nir_imm_intN_t(build, 0, x->bit_size);
} else if (y == 1) {
return x;
- } else if (!build->shader->options->lower_bitops &&
+ } else if ((!build->shader->options ||
+ !build->shader->options->lower_bitops) &&
util_is_power_of_two_or_zero64(y)) {
return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
} else if (amul) {
@@ -882,32 +886,44 @@ _nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
}
}
-static inline nir_ssa_def *
-nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_imul_imm(nir_builder *build, nir_def *x, uint64_t y)
{
return _nir_mul_imm(build, x, y, false);
}
-static inline nir_ssa_def *
-nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_amul_imm(nir_builder *build, nir_def *x, uint64_t y)
{
return _nir_mul_imm(build, x, y, true);
}
-static inline nir_ssa_def *
-nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
+static inline nir_def *
+nir_fadd_imm(nir_builder *build, nir_def *x, double y)
{
return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
-static inline nir_ssa_def *
-nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
+static inline nir_def *
+nir_fsub_imm(nir_builder *build, double x, nir_def *y)
+{
+ return nir_fsub(build, nir_imm_floatN_t(build, x, y->bit_size), y);
+}
+
+static inline nir_def *
+nir_fmul_imm(nir_builder *build, nir_def *x, double y)
{
return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
-static inline nir_ssa_def *
-nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_fdiv_imm(nir_builder *build, nir_def *x, double y)
+{
+ return nir_fdiv(build, x, nir_imm_floatN_t(build, y, x->bit_size));
+}
+
+static inline nir_def *
+nir_iand_imm(nir_builder *build, nir_def *x, uint64_t y)
{
assert(x->bit_size <= 64);
y &= BITFIELD64_MASK(x->bit_size);
@@ -921,8 +937,40 @@ nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
}
}
-static inline nir_ssa_def *
-nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+static inline nir_def *
+nir_test_mask(nir_builder *build, nir_def *x, uint64_t mask)
+{
+ assert(mask <= BITFIELD64_MASK(x->bit_size));
+ return nir_ine_imm(build, nir_iand_imm(build, x, mask), 0);
+}
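Sketch: testing a single flag bit of a hypothetical flags def yields a boolean:

   nir_def *is_set = nir_test_mask(b, flags, 1ull << 3);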
+
+static inline nir_def *
+nir_ior_imm(nir_builder *build, nir_def *x, uint64_t y)
+{
+ assert(x->bit_size <= 64);
+ y &= BITFIELD64_MASK(x->bit_size);
+
+ if (y == 0) {
+ return x;
+ } else if (y == BITFIELD64_MASK(x->bit_size)) {
+ return nir_imm_intN_t(build, y, x->bit_size);
+ } else
+ return nir_ior(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
+static inline nir_def *
+nir_ishl_imm(nir_builder *build, nir_def *x, uint32_t y)
+{
+ if (y == 0) {
+ return x;
+ } else {
+ assert(y < x->bit_size);
+ return nir_ishl(build, x, nir_imm_int(build, y));
+ }
+}
+
+static inline nir_def *
+nir_ishr_imm(nir_builder *build, nir_def *x, uint32_t y)
{
if (y == 0) {
return x;
@@ -931,8 +979,8 @@ nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
}
}
-static inline nir_ssa_def *
-nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+static inline nir_def *
+nir_ushr_imm(nir_builder *build, nir_def *x, uint32_t y)
{
if (y == 0) {
return x;
@@ -941,88 +989,145 @@ nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
}
}
-static inline nir_ssa_def *
-nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_imod_imm(nir_builder *build, nir_def *x, uint64_t y)
+{
+ return nir_imod(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
+static inline nir_def *
+nir_udiv_imm(nir_builder *build, nir_def *x, uint64_t y)
{
assert(x->bit_size <= 64);
y &= BITFIELD64_MASK(x->bit_size);
if (y == 1) {
return x;
- } else if (util_is_power_of_two_nonzero(y)) {
+ } else if (util_is_power_of_two_nonzero64(y)) {
return nir_ushr_imm(build, x, ffsll(y) - 1);
} else {
return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
}
}
-static inline nir_ssa_def *
+static inline nir_def *
+nir_umod_imm(nir_builder *build, nir_def *x, uint64_t y)
+{
+ assert(y > 0 && y <= u_uintN_max(x->bit_size));
+
+ if (util_is_power_of_two_nonzero64(y)) {
+ return nir_iand_imm(build, x, y - 1);
+ } else {
+ return nir_umod(build, x, nir_imm_intN_t(build, y, x->bit_size));
+ }
+}
+
+static inline nir_def *
+nir_ibfe_imm(nir_builder *build, nir_def *x, uint32_t offset, uint32_t size)
+{
+ return nir_ibfe(build, x, nir_imm_int(build, offset), nir_imm_int(build, size));
+}
+
+static inline nir_def *
+nir_ubfe_imm(nir_builder *build, nir_def *x, uint32_t offset, uint32_t size)
+{
+ return nir_ubfe(build, x, nir_imm_int(build, offset), nir_imm_int(build, size));
+}
+
+static inline nir_def *
+nir_ubitfield_extract_imm(nir_builder *build, nir_def *x, uint32_t offset, uint32_t size)
+{
+ return nir_ubitfield_extract(build, x, nir_imm_int(build, offset), nir_imm_int(build, size));
+}
+
+static inline nir_def *
+nir_extract_u8_imm(nir_builder *b, nir_def *a, unsigned i)
+{
+ return nir_extract_u8(b, a, nir_imm_intN_t(b, i, a->bit_size));
+}
+
+static inline nir_def *
+nir_extract_i8_imm(nir_builder *b, nir_def *a, unsigned i)
+{
+ return nir_extract_i8(b, a, nir_imm_intN_t(b, i, a->bit_size));
+}
+
+static inline nir_def *
nir_fclamp(nir_builder *b,
- nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+ nir_def *x, nir_def *min_val, nir_def *max_val)
{
return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_iclamp(nir_builder *b,
- nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+ nir_def *x, nir_def *min_val, nir_def *max_val)
{
return nir_imin(b, nir_imax(b, x, min_val), max_val);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_uclamp(nir_builder *b,
- nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+ nir_def *x, nir_def *min_val, nir_def *max_val)
{
return nir_umin(b, nir_umax(b, x, min_val), max_val);
}
-static inline nir_ssa_def *
-nir_ffma_imm12(nir_builder *build, nir_ssa_def *src0, double src1, double src2)
+static inline nir_def *
+nir_ffma_imm12(nir_builder *build, nir_def *src0, double src1, double src2)
{
- if (build->shader->options->avoid_ternary_with_two_constants)
+ if (build->shader->options &&
+ build->shader->options->avoid_ternary_with_two_constants)
return nir_fadd_imm(build, nir_fmul_imm(build, src0, src1), src2);
else
return nir_ffma(build, src0, nir_imm_floatN_t(build, src1, src0->bit_size),
- nir_imm_floatN_t(build, src2, src0->bit_size));
+ nir_imm_floatN_t(build, src2, src0->bit_size));
}
-static inline nir_ssa_def *
-nir_ffma_imm1(nir_builder *build, nir_ssa_def *src0, double src1, nir_ssa_def *src2)
+static inline nir_def *
+nir_ffma_imm1(nir_builder *build, nir_def *src0, double src1, nir_def *src2)
{
return nir_ffma(build, src0, nir_imm_floatN_t(build, src1, src0->bit_size), src2);
}
-static inline nir_ssa_def *
-nir_ffma_imm2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1, double src2)
+static inline nir_def *
+nir_ffma_imm2(nir_builder *build, nir_def *src0, nir_def *src1, double src2)
{
return nir_ffma(build, src0, src1, nir_imm_floatN_t(build, src2, src0->bit_size));
}
-static inline nir_ssa_def *
-nir_a_minus_bc(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1,
- nir_ssa_def *src2)
+static inline nir_def *
+nir_a_minus_bc(nir_builder *build, nir_def *src0, nir_def *src1,
+ nir_def *src2)
{
return nir_ffma(build, nir_fneg(build, src1), src2, src0);
}
-static inline nir_ssa_def *
-nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
+static inline nir_def *
+nir_pack_bits(nir_builder *b, nir_def *src, unsigned dest_bit_size)
{
assert(src->num_components * src->bit_size == dest_bit_size);
switch (dest_bit_size) {
case 64:
switch (src->bit_size) {
- case 32: return nir_pack_64_2x32(b, src);
- case 16: return nir_pack_64_4x16(b, src);
- default: break;
+ case 32:
+ return nir_pack_64_2x32(b, src);
+ case 16:
+ return nir_pack_64_4x16(b, src);
+ default:
+ break;
}
break;
case 32:
- if (src->bit_size == 16)
- return nir_pack_32_2x16(b, src);
+ switch (src->bit_size) {
+ case 32: return src;
+ case 16: return nir_pack_32_2x16(b, src);
+ case 8: return nir_pack_32_4x8(b, src);
+ default: break;
+ }
+
break;
default:
@@ -1030,35 +1135,43 @@ nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
}
   /* If we got here, we have no dedicated pack opcode. */
- nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
+ nir_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
for (unsigned i = 0; i < src->num_components; i++) {
- nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
+ nir_def *val = nir_u2uN(b, nir_channel(b, src, i), dest_bit_size);
val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
dest = nir_ior(b, dest, val);
}
return dest;
}
-static inline nir_ssa_def *
-nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
+static inline nir_def *
+nir_unpack_bits(nir_builder *b, nir_def *src, unsigned dest_bit_size)
{
assert(src->num_components == 1);
- assert(src->bit_size > dest_bit_size);
+ assert(src->bit_size >= dest_bit_size);
const unsigned dest_num_components = src->bit_size / dest_bit_size;
assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);
switch (src->bit_size) {
case 64:
switch (dest_bit_size) {
- case 32: return nir_unpack_64_2x32(b, src);
- case 16: return nir_unpack_64_4x16(b, src);
- default: break;
+ case 32:
+ return nir_unpack_64_2x32(b, src);
+ case 16:
+ return nir_unpack_64_4x16(b, src);
+ default:
+ break;
}
break;
case 32:
- if (dest_bit_size == 16)
- return nir_unpack_32_2x16(b, src);
+ switch (dest_bit_size) {
+ case 32: return src;
+ case 16: return nir_unpack_32_2x16(b, src);
+ case 8: return nir_unpack_32_4x8(b, src);
+ default: break;
+ }
+
break;
default:
@@ -1066,10 +1179,10 @@ nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
}
/* If we got here, we have no dedicated unpack opcode. */
- nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
+ nir_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < dest_num_components; i++) {
- nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
- dest_comps[i] = nir_u2u(b, val, dest_bit_size);
+ nir_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
+ dest_comps[i] = nir_u2uN(b, val, dest_bit_size);
}
return nir_vec(b, dest_comps, dest_num_components);
}
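Sketch: for sizes with dedicated opcodes the two helpers are inverses, e.g. splitting a hypothetical 64-bit scalar x64 into 2x32 and repacking:

   nir_def *halves = nir_unpack_bits(b, x64, 32); /* vec2 of 32-bit */
   nir_def *whole = nir_pack_bits(b, halves, 64); /* 64-bit scalar  */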
@@ -1087,8 +1200,8 @@ nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
* values are involved because that would require pack/unpack to/from a vec8
* which NIR currently does not support.
*/
-static inline nir_ssa_def *
-nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
+static inline nir_def *
+nir_extract_bits(nir_builder *b, nir_def **srcs, unsigned num_srcs,
unsigned first_bit,
unsigned dest_num_components, unsigned dest_bit_size)
{
@@ -1104,7 +1217,7 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
/* We don't want to have to deal with 1-bit values */
assert(common_bit_size >= 8);
- nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
+ nir_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));
/* First, unpack to the common bit size and select the components from the
@@ -1117,7 +1230,7 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
const unsigned bit = first_bit + (i * common_bit_size);
while (bit >= src_end_bit) {
src_idx++;
- assert(src_idx < (int) num_srcs);
+ assert(src_idx < (int)num_srcs);
src_start_bit = src_end_bit;
src_end_bit += srcs[src_idx]->bit_size *
srcs[src_idx]->num_components;
@@ -1127,12 +1240,11 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
const unsigned rel_bit = bit - src_start_bit;
const unsigned src_bit_size = srcs[src_idx]->bit_size;
- nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
- rel_bit / src_bit_size);
+ nir_def *comp = nir_channel(b, srcs[src_idx],
+ rel_bit / src_bit_size);
if (srcs[src_idx]->bit_size > common_bit_size) {
- nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
- comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
- common_bit_size);
+ nir_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
+ comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) / common_bit_size);
}
common_comps[i] = comp;
}
@@ -1140,10 +1252,10 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
/* Now, re-pack the destination if we have to */
if (dest_bit_size > common_bit_size) {
unsigned common_per_dest = dest_bit_size / common_bit_size;
- nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
+ nir_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < dest_num_components; i++) {
- nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
- common_per_dest);
+ nir_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
+ common_per_dest);
dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
}
return nir_vec(b, dest_comps, dest_num_components);
@@ -1153,8 +1265,8 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
}
}
-static inline nir_ssa_def *
-nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
+static inline nir_def *
+nir_bitcast_vector(nir_builder *b, nir_def *src, unsigned dest_bit_size)
{
assert((src->bit_size * src->num_components) % dest_bit_size == 0);
const unsigned dest_num_components =
@@ -1164,26 +1276,36 @@ nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
}
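Sketch: reinterpreting a hypothetical 4x16-bit vector as 2x32-bit; the total bit count must be unchanged:

   nir_def *v32 = nir_bitcast_vector(b, v16, 32); /* 4x16 -> 2x32 */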
+static inline nir_def *
+nir_trim_vector(nir_builder *b, nir_def *src, unsigned num_components)
+{
+ assert(src->num_components >= num_components);
+ if (src->num_components == num_components)
+ return src;
+
+ return nir_channels(b, src, nir_component_mask(num_components));
+}
+
/**
* Pad a value to N components with undefs of matching bit size.
* If the value already contains >= num_components, it is returned without change.
*/
-static inline nir_ssa_def *
-nir_pad_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
+static inline nir_def *
+nir_pad_vector(nir_builder *b, nir_def *src, unsigned num_components)
{
assert(src->num_components <= num_components);
if (src->num_components == num_components)
return src;
- nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS];
- nir_ssa_def *undef = nir_ssa_undef(b, 1, src->bit_size);
+ nir_scalar components[NIR_MAX_VEC_COMPONENTS];
+ nir_scalar undef = nir_get_scalar(nir_undef(b, 1, src->bit_size), 0);
unsigned i = 0;
for (; i < src->num_components; i++)
- components[i] = nir_channel(b, src, i);
+ components[i] = nir_get_scalar(src, i);
for (; i < num_components; i++)
components[i] = undef;
- return nir_vec(b, components, num_components);
+ return nir_vec_scalars(b, components, num_components);
}
/**
@@ -1191,69 +1313,52 @@ nir_pad_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
* bit size. If the value already contains >= num_components, it is returned
* without change.
*/
-static inline nir_ssa_def *
-nir_pad_vector_imm_int(nir_builder *b, nir_ssa_def *src, uint64_t imm_val,
+static inline nir_def *
+nir_pad_vector_imm_int(nir_builder *b, nir_def *src, uint64_t imm_val,
unsigned num_components)
{
assert(src->num_components <= num_components);
if (src->num_components == num_components)
return src;
- nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS];
- nir_ssa_def *imm = nir_imm_intN_t(b, imm_val, src->bit_size);
+ nir_scalar components[NIR_MAX_VEC_COMPONENTS];
+ nir_scalar imm = nir_get_scalar(nir_imm_intN_t(b, imm_val, src->bit_size), 0);
unsigned i = 0;
for (; i < src->num_components; i++)
- components[i] = nir_channel(b, src, i);
+ components[i] = nir_get_scalar(src, i);
for (; i < num_components; i++)
components[i] = imm;
- return nir_vec(b, components, num_components);
+ return nir_vec_scalars(b, components, num_components);
}
/**
* Pad a value to 4 components with undefs of matching bit size.
* If the value already contains >= 4 components, it is returned without change.
*/
-static inline nir_ssa_def *
-nir_pad_vec4(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_pad_vec4(nir_builder *b, nir_def *src)
{
return nir_pad_vector(b, src, 4);
}
/**
- * Turns a nir_src into a nir_ssa_def * so it can be passed to
- * nir_build_alu()-based builder calls.
- *
- * See nir_ssa_for_alu_src() for alu instructions.
+ * Resizes a vector by either trimming off components or adding undef
+ * components, as needed. Only use this helper if it's actually what you
+ * need. Prefer nir_pad_vector() or nir_trim_vector() instead if you know a
+ * priori which direction you're resizing.
*/
-static inline nir_ssa_def *
-nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
+static inline nir_def *
+nir_resize_vector(nir_builder *b, nir_def *src, unsigned num_components)
{
- if (src.is_ssa && src.ssa->num_components == num_components)
- return src.ssa;
-
- nir_alu_src alu = { NIR_SRC_INIT };
- alu.src = src;
- for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
- alu.swizzle[j] = j;
-
- return nir_mov_alu(build, alu, num_components);
+ if (src->num_components < num_components)
+ return nir_pad_vector(b, src, num_components);
+ else
+ return nir_trim_vector(b, src, num_components);
}
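Sketch: forcing a source to exactly four components, padding with undefs or trimming as needed:

   nir_def *vec4 = nir_resize_vector(b, src, 4);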
-/**
- * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
- * nir_alu_src's swizzle.
- */
-static inline nir_ssa_def *
-nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
-{
- if (nir_alu_src_is_trivial_ssa(instr, srcn))
- return instr->src[srcn].src.ssa;
-
- nir_alu_src *src = &instr->src[srcn];
- unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
- return nir_mov_alu(build, *src, num_components);
-}
+nir_def *
+nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn);
static inline unsigned
nir_get_ptr_bitsize(nir_shader *shader)
@@ -1273,8 +1378,8 @@ nir_build_deref_var(nir_builder *build, nir_variable *var)
deref->type = var->type;
deref->var = var;
- nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
- nir_get_ptr_bitsize(build->shader), NULL);
+ nir_def_init(&deref->instr, &deref->def, 1,
+ nir_get_ptr_bitsize(build->shader));
nir_builder_instr_insert(build, &deref->instr);
@@ -1283,25 +1388,24 @@ nir_build_deref_var(nir_builder *build, nir_variable *var)
static inline nir_deref_instr *
nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
- nir_ssa_def *index)
+ nir_def *index)
{
assert(glsl_type_is_array(parent->type) ||
glsl_type_is_matrix(parent->type) ||
glsl_type_is_vector(parent->type));
- assert(index->bit_size == parent->dest.ssa.bit_size);
+ assert(index->bit_size == parent->def.bit_size);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_array);
deref->modes = parent->modes;
deref->type = glsl_get_array_element(parent->type);
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->arr.index = nir_src_for_ssa(index);
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@@ -1312,34 +1416,32 @@ static inline nir_deref_instr *
nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
int64_t index)
{
- assert(parent->dest.is_ssa);
- nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
- parent->dest.ssa.bit_size);
+ nir_def *idx_ssa = nir_imm_intN_t(build, index,
+ parent->def.bit_size);
return nir_build_deref_array(build, parent, idx_ssa);
}
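Sketch: building a deref chain for var[3], assuming a hypothetical array-typed nir_variable *var:

   nir_deref_instr *base = nir_build_deref_var(b, var);
   nir_deref_instr *elem = nir_build_deref_array_imm(b, base, 3);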
static inline nir_deref_instr *
nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
- nir_ssa_def *index)
+ nir_def *index)
{
assert(parent->deref_type == nir_deref_type_array ||
parent->deref_type == nir_deref_type_ptr_as_array ||
parent->deref_type == nir_deref_type_cast);
- assert(index->bit_size == parent->dest.ssa.bit_size);
+ assert(index->bit_size == parent->def.bit_size);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);
deref->modes = parent->modes;
deref->type = parent->type;
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->arr.index = nir_src_for_ssa(index);
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@@ -1357,11 +1459,10 @@ nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
deref->modes = parent->modes;
deref->type = glsl_get_array_element(parent->type);
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@@ -1379,12 +1480,11 @@ nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
deref->modes = parent->modes;
deref->type = glsl_get_struct_field(parent->type, index);
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->strct.index = index;
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@@ -1392,9 +1492,12 @@ nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
}
static inline nir_deref_instr *
-nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
- nir_variable_mode modes, const struct glsl_type *type,
- unsigned ptr_stride)
+nir_build_deref_cast_with_alignment(nir_builder *build, nir_def *parent,
+ nir_variable_mode modes,
+ const struct glsl_type *type,
+ unsigned ptr_stride,
+ unsigned align_mul,
+ unsigned align_offset)
{
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_cast);
@@ -1402,10 +1505,12 @@ nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
deref->modes = modes;
deref->type = type;
deref->parent = nir_src_for_ssa(parent);
+ deref->cast.align_mul = align_mul;
+ deref->cast.align_offset = align_offset;
deref->cast.ptr_stride = ptr_stride;
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->num_components, parent->bit_size, NULL);
+ nir_def_init(&deref->instr, &deref->def, parent->num_components,
+ parent->bit_size);
nir_builder_instr_insert(build, &deref->instr);
@@ -1413,6 +1518,15 @@ nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
}
static inline nir_deref_instr *
+nir_build_deref_cast(nir_builder *build, nir_def *parent,
+ nir_variable_mode modes, const struct glsl_type *type,
+ unsigned ptr_stride)
+{
+ return nir_build_deref_cast_with_alignment(build, parent, modes, type,
+ ptr_stride, 0, 0);
+}
+
+static inline nir_deref_instr *
nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent,
uint32_t align_mul, uint32_t align_offset)
{
@@ -1421,14 +1535,13 @@ nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent,
deref->modes = parent->modes;
deref->type = parent->type;
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->cast.ptr_stride = nir_deref_instr_array_stride(deref);
deref->cast.align_mul = align_mul;
deref->cast.align_offset = align_offset;
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@@ -1446,8 +1559,7 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
nir_deref_instr *leader)
{
/* If the derefs would have the same parent, don't make a new one */
- assert(leader->parent.is_ssa);
- if (leader->parent.ssa == &parent->dest.ssa)
+ if (leader->parent.ssa == &parent->def)
return leader;
UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);
@@ -1467,9 +1579,8 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
glsl_get_length(leader_parent->type));
if (leader->deref_type == nir_deref_type_array) {
- assert(leader->arr.index.is_ssa);
- nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
- parent->dest.ssa.bit_size);
+ nir_def *index = nir_i2iN(b, leader->arr.index.ssa,
+ parent->def.bit_size);
return nir_build_deref_array(b, parent, index);
} else {
return nir_build_deref_array_wildcard(b, parent);
@@ -1482,42 +1593,40 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
return nir_build_deref_struct(b, parent, leader->strct.index);
+ case nir_deref_type_cast:
+ return nir_build_deref_cast_with_alignment(b, &parent->def,
+ leader->modes,
+ leader->type,
+ leader->cast.ptr_stride,
+ leader->cast.align_mul,
+ leader->cast.align_offset);
+
+ case nir_deref_type_ptr_as_array: {
+ assert(parent->deref_type == nir_deref_type_array ||
+ parent->deref_type == nir_deref_type_ptr_as_array ||
+ parent->deref_type == nir_deref_type_cast);
+ nir_def *index = nir_i2iN(b, leader->arr.index.ssa,
+ parent->def.bit_size);
+ return nir_build_deref_ptr_as_array(b, parent, index);
+ }
+
default:
unreachable("Invalid deref instruction type");
}
+ return NULL;
}
-static inline nir_ssa_def *
-nir_load_reg(nir_builder *build, nir_register *reg)
-{
- return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
-}
-
-static inline void
-nir_store_reg(nir_builder *build, nir_register *reg,
- nir_ssa_def *def, nir_component_mask_t write_mask)
-{
- assert(reg->num_components == def->num_components);
- assert(reg->bit_size == def->bit_size);
-
- nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
- mov->src[0].src = nir_src_for_ssa(def);
- mov->dest.dest = nir_dest_for_reg(reg);
- mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
- nir_builder_instr_insert(build, &mov->instr);
-}
-
-static inline nir_ssa_def *
+static inline nir_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
enum gl_access_qualifier access)
{
return nir_build_load_deref(build, glsl_get_vector_elements(deref->type),
- glsl_get_bit_size(deref->type), &deref->dest.ssa,
+ glsl_get_bit_size(deref->type), &deref->def,
access);
}
#undef nir_load_deref
-static inline nir_ssa_def *
+static inline nir_def *
nir_load_deref(nir_builder *build, nir_deref_instr *deref)
{
return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
@@ -1525,29 +1634,60 @@ nir_load_deref(nir_builder *build, nir_deref_instr *deref)
static inline void
nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
- nir_ssa_def *value, unsigned writemask,
+ nir_def *value, unsigned writemask,
enum gl_access_qualifier access)
{
writemask &= (1u << value->num_components) - 1u;
- nir_build_store_deref(build, &deref->dest.ssa, value, writemask, access);
+ nir_build_store_deref(build, &deref->def, value, writemask, access);
}
#undef nir_store_deref
static inline void
nir_store_deref(nir_builder *build, nir_deref_instr *deref,
- nir_ssa_def *value, unsigned writemask)
+ nir_def *value, unsigned writemask)
{
nir_store_deref_with_access(build, deref, value, writemask,
(enum gl_access_qualifier)0);
}
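+/* Writes a single scalar `value` into component `component` of the vector
+ * pointed to by vec_deref, by storing an otherwise-undef vector with a
+ * single-bit write mask.
+ */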
static inline void
+nir_build_write_masked_store(nir_builder *b, nir_deref_instr *vec_deref,
+ nir_def *value, unsigned component)
+{
+ assert(value->num_components == 1);
+ unsigned num_components = glsl_get_components(vec_deref->type);
+ assert(num_components > 1 && num_components <= NIR_MAX_VEC_COMPONENTS);
+
+ nir_def *vec =
+ nir_vector_insert_imm(b, nir_undef(b, num_components, value->bit_size),
+ value, component);
+ nir_store_deref(b, vec_deref, vec, (1u << component));
+}
+
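+/* Like nir_build_write_masked_store(), but for a dynamic component index:
+ * recursively emits an if-ladder that binary-searches `index` over
+ * [start, end) down to a single masked store.  Illustrative call (editor's
+ * sketch, not part of this patch):
+ *
+ *    nir_build_write_masked_stores(b, vec_deref, value, index,
+ *                                  0, glsl_get_components(vec_deref->type));
+ */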
+static inline void
+nir_build_write_masked_stores(nir_builder *b, nir_deref_instr *vec_deref,
+ nir_def *value, nir_def *index,
+ unsigned start, unsigned end)
+{
+ if (start == end - 1) {
+ nir_build_write_masked_store(b, vec_deref, value, start);
+ } else {
+ unsigned mid = start + (end - start) / 2;
+ nir_push_if(b, nir_ilt_imm(b, index, mid));
+ nir_build_write_masked_stores(b, vec_deref, value, index, start, mid);
+ nir_push_else(b, NULL);
+ nir_build_write_masked_stores(b, vec_deref, value, index, mid, end);
+ nir_pop_if(b, NULL);
+ }
+}
+
+static inline void
nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
nir_deref_instr *src,
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
- nir_build_copy_deref(build, &dest->dest.ssa, &src->dest.ssa, dest_access, src_access);
+ nir_build_copy_deref(build, &dest->def, &src->def, dest_access, src_access);
}
#undef nir_copy_deref
@@ -1555,38 +1695,38 @@ static inline void
nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
{
nir_copy_deref_with_access(build, dest, src,
- (enum gl_access_qualifier) 0,
- (enum gl_access_qualifier) 0);
+ (enum gl_access_qualifier)0,
+ (enum gl_access_qualifier)0);
}
static inline void
nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
- nir_deref_instr *src, nir_ssa_def *size,
+ nir_deref_instr *src, nir_def *size,
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
- nir_build_memcpy_deref(build, &dest->dest.ssa, &src->dest.ssa,
+ nir_build_memcpy_deref(build, &dest->def, &src->def,
size, dest_access, src_access);
}
#undef nir_memcpy_deref
static inline void
nir_memcpy_deref(nir_builder *build, nir_deref_instr *dest,
- nir_deref_instr *src, nir_ssa_def *size)
+ nir_deref_instr *src, nir_def *size)
{
nir_memcpy_deref_with_access(build, dest, src, size,
(enum gl_access_qualifier)0,
(enum gl_access_qualifier)0);
}
-static inline nir_ssa_def *
+static inline nir_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
return nir_load_deref(build, nir_build_deref_var(build, var));
}
static inline void
-nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
+nir_store_var(nir_builder *build, nir_variable *var, nir_def *value,
unsigned writemask)
{
nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
@@ -1596,12 +1736,46 @@ static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{
nir_copy_deref(build, nir_build_deref_var(build, dest),
- nir_build_deref_var(build, src));
+ nir_build_deref_var(build, src));
+}
+
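+/* Convenience wrappers that load from / store to var[index], building the
+ * variable and array derefs internally.
+ */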
+static inline nir_def *
+nir_load_array_var(nir_builder *build, nir_variable *var, nir_def *index)
+{
+ nir_deref_instr *deref =
+ nir_build_deref_array(build, nir_build_deref_var(build, var), index);
+ return nir_load_deref(build, deref);
+}
+
+static inline nir_def *
+nir_load_array_var_imm(nir_builder *build, nir_variable *var, int64_t index)
+{
+ nir_deref_instr *deref =
+ nir_build_deref_array_imm(build, nir_build_deref_var(build, var), index);
+ return nir_load_deref(build, deref);
+}
+
+static inline void
+nir_store_array_var(nir_builder *build, nir_variable *var, nir_def *index,
+ nir_def *value, unsigned writemask)
+{
+ nir_deref_instr *deref =
+ nir_build_deref_array(build, nir_build_deref_var(build, var), index);
+ nir_store_deref(build, deref, value, writemask);
+}
+
+static inline void
+nir_store_array_var_imm(nir_builder *build, nir_variable *var, int64_t index,
+ nir_def *value, unsigned writemask)
+{
+ nir_deref_instr *deref =
+ nir_build_deref_array_imm(build, nir_build_deref_var(build, var), index);
+ nir_store_deref(build, deref, value, writemask);
}
#undef nir_load_global
-static inline nir_ssa_def *
-nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
+static inline nir_def *
+nir_load_global(nir_builder *build, nir_def *addr, unsigned align,
unsigned num_components, unsigned bit_size)
{
nir_intrinsic_instr *load =
@@ -1609,16 +1783,15 @@ nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
#undef nir_store_global
static inline void
-nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
- nir_ssa_def *value, nir_component_mask_t write_mask)
+nir_store_global(nir_builder *build, nir_def *addr, unsigned align,
+ nir_def *value, nir_component_mask_t write_mask)
{
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
@@ -1626,14 +1799,14 @@ nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
store->src[0] = nir_src_for_ssa(value);
store->src[1] = nir_src_for_ssa(addr);
nir_intrinsic_set_write_mask(store,
- write_mask & BITFIELD_MASK(value->num_components));
+ write_mask & BITFIELD_MASK(value->num_components));
nir_intrinsic_set_align(store, align, 0);
nir_builder_instr_insert(build, &store->instr);
}
#undef nir_load_global_constant
-static inline nir_ssa_def *
-nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
+static inline nir_def *
+nir_load_global_constant(nir_builder *build, nir_def *addr, unsigned align,
unsigned num_components, unsigned bit_size)
{
nir_intrinsic_instr *load =
@@ -1641,14 +1814,13 @@ nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
#undef nir_load_param
-static inline nir_ssa_def *
+static inline nir_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
assert(param_idx < build->impl->function->num_params);
@@ -1656,86 +1828,198 @@ nir_load_param(nir_builder *build, uint32_t param_idx)
return nir_build_load_param(build, param->num_components, param->bit_size, param_idx);
}
-/**
- * This function takes an I/O intrinsic like load/store_input,
- * and emits a sequence that calculates the full offset of that instruction,
- * including a stride to the base and component offsets.
- */
-static inline nir_ssa_def *
-nir_build_calc_io_offset(nir_builder *b,
- nir_intrinsic_instr *intrin,
- nir_ssa_def *base_stride,
- unsigned component_stride)
-{
- /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
- nir_ssa_def *base_op = nir_imul_imm(b, base_stride, nir_intrinsic_base(intrin));
-
- /* offset should be interpreted in relation to the base,
- * so the instruction effectively reads/writes another input/output
- * when it has an offset
- */
- nir_ssa_def *offset_op = nir_imul(b, base_stride, nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));
+#undef nir_decl_reg
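+/* Declares a NIR register; the decl_reg intrinsic's 1x32 result is the
+ * handle passed to nir_load_reg()/nir_store_reg().  Illustrative use
+ * (editor's sketch, not part of this patch):
+ *
+ *    nir_def *reg = nir_decl_reg(b, 4, 32, 0);
+ *    nir_store_reg(b, value, reg);
+ *    nir_def *loaded = nir_load_reg(b, reg);
+ */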
+static inline nir_def *
+nir_decl_reg(nir_builder *b, unsigned num_components, unsigned bit_size,
+ unsigned num_array_elems)
+{
+ nir_intrinsic_instr *decl =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_decl_reg);
+ nir_intrinsic_set_num_components(decl, num_components);
+ nir_intrinsic_set_bit_size(decl, bit_size);
+ nir_intrinsic_set_num_array_elems(decl, num_array_elems);
+ nir_intrinsic_set_divergent(decl, true);
+ nir_def_init(&decl->instr, &decl->def, 1, 32);
- /* component is in bytes */
- unsigned const_op = nir_intrinsic_component(intrin) * component_stride;
+ nir_builder_instr_insert_at_top(b, &decl->instr);
- return nir_iadd_imm_nuw(b, nir_iadd_nuw(b, base_op, offset_op), const_op);
+ return &decl->def;
}
-/* calculate a `(1 << value) - 1` in ssa without overflows */
-static inline nir_ssa_def *
-nir_mask(nir_builder *b, nir_ssa_def *bits, unsigned dst_bit_size)
+#undef nir_load_reg
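+/* Loads a register's current value; the result's shape and divergence are
+ * taken from the corresponding decl_reg intrinsic.
+ */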
+static inline nir_def *
+nir_load_reg(nir_builder *b, nir_def *reg)
{
- return nir_ushr(b, nir_imm_intN_t(b, -1, dst_bit_size),
- nir_isub_imm(b, dst_bit_size, nir_u2u32(b, bits)));
+ nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
+ unsigned num_components = nir_intrinsic_num_components(decl);
+ unsigned bit_size = nir_intrinsic_bit_size(decl);
+
+ nir_def *res = nir_build_load_reg(b, num_components, bit_size, reg);
+ res->divergent = nir_intrinsic_divergent(decl);
+
+ return res;
}
-static inline nir_ssa_def *
-nir_f2b(nir_builder *build, nir_ssa_def *f)
+#undef nir_store_reg
+static inline void
+nir_store_reg(nir_builder *b, nir_def *value, nir_def *reg)
{
- return nir_f2b1(build, f);
+ ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
+ ASSERTED unsigned num_components = nir_intrinsic_num_components(decl);
+ ASSERTED unsigned bit_size = nir_intrinsic_bit_size(decl);
+
+ assert(value->num_components == num_components);
+ assert(value->bit_size == bit_size);
+
+ nir_build_store_reg(b, value, reg);
}
-static inline nir_ssa_def *
-nir_i2b(nir_builder *build, nir_ssa_def *i)
+static inline nir_tex_src
+nir_tex_src_for_ssa(nir_tex_src_type src_type, nir_def *def)
{
- return nir_i2b1(build, i);
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(def);
+ src.src_type = src_type;
+ return src;
}
-static inline nir_ssa_def *
-nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
+/*
+ * Find a texture source, remove it, and return its nir_def. If the texture
+ * source does not exist, return NULL. This is useful for texture lowering
+ * passes that consume their input sources and produce new lowered sources.
+ */
+static inline nir_def *
+nir_steal_tex_src(nir_tex_instr *tex, nir_tex_src_type type_)
{
- switch (bit_size) {
- case 64: return nir_b2f64(build, b);
- case 32: return nir_b2f32(build, b);
- case 16: return nir_b2f16(build, b);
- default:
- unreachable("Invalid bit-size");
+ int idx = nir_tex_instr_src_index(tex, type_);
+ if (idx < 0)
+ return NULL;
+
+ nir_def *ssa = tex->src[idx].src.ssa;
+ nir_tex_instr_remove_src(tex, idx);
+ return ssa;
+}
+
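+/* Convenience builders for deref-based texture instructions.  `t` and `s`
+ * are the texture and sampler derefs; ops that do not use a sampler
+ * (txf, txs, etc.) pass NULL instead.
+ */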
+static inline nir_def *
+nir_tex_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
+ nir_def *coord)
+{
+ nir_tex_src srcs[] = { nir_tex_src_for_ssa(nir_tex_src_coord, coord) };
+
+ return nir_build_tex_deref_instr(b, nir_texop_tex, t, s,
+ ARRAY_SIZE(srcs), srcs);
+}
+
+static inline nir_def *
+nir_txl_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
+ nir_def *coord, nir_def *lod)
+{
+ nir_tex_src srcs[] = {
+ nir_tex_src_for_ssa(nir_tex_src_coord, coord),
+ nir_tex_src_for_ssa(nir_tex_src_lod, lod),
};
+
+ return nir_build_tex_deref_instr(b, nir_texop_txl, t, s,
+ ARRAY_SIZE(srcs), srcs);
}
-static inline nir_ssa_def *
-nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
+static inline nir_def *
+nir_txl_zero_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
+ nir_def *coord)
{
- switch (bit_size) {
- case 64: return nir_b2i64(build, b);
- case 32: return nir_b2i32(build, b);
- case 16: return nir_b2i16(build, b);
- case 8: return nir_b2i8(build, b);
+ return nir_txl_deref(b, t, s, coord, nir_imm_float(b, 0));
+}
+
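+/* Whether this sampler dimension takes an explicit LOD; used below to
+ * default a NULL lod argument to zero.
+ */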
+static inline bool
+nir_tex_type_has_lod(const struct glsl_type *tex_type)
+{
+ switch (glsl_get_sampler_dim(tex_type)) {
+ case GLSL_SAMPLER_DIM_1D:
+ case GLSL_SAMPLER_DIM_2D:
+ case GLSL_SAMPLER_DIM_3D:
+ case GLSL_SAMPLER_DIM_CUBE:
+ return true;
default:
- unreachable("Invalid bit-size");
+ return false;
+ }
+}
+
+static inline nir_def *
+nir_txf_deref(nir_builder *b, nir_deref_instr *t,
+ nir_def *coord, nir_def *lod)
+{
+ nir_tex_src srcs[2];
+ unsigned num_srcs = 0;
+
+ srcs[num_srcs++] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
+
+ if (lod == NULL && nir_tex_type_has_lod(t->type))
+ lod = nir_imm_int(b, 0);
+
+ if (lod != NULL)
+ srcs[num_srcs++] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
+
+ return nir_build_tex_deref_instr(b, nir_texop_txf, t, NULL,
+ num_srcs, srcs);
+}
+
+static inline nir_def *
+nir_txf_ms_deref(nir_builder *b, nir_deref_instr *t,
+ nir_def *coord, nir_def *ms_index)
+{
+ nir_tex_src srcs[] = {
+ nir_tex_src_for_ssa(nir_tex_src_coord, coord),
+ nir_tex_src_for_ssa(nir_tex_src_ms_index, ms_index),
};
+
+ return nir_build_tex_deref_instr(b, nir_texop_txf_ms, t, NULL,
+ ARRAY_SIZE(srcs), srcs);
+}
+
+static inline nir_def *
+nir_txs_deref(nir_builder *b, nir_deref_instr *t, nir_def *lod)
+{
+ nir_tex_src srcs[1];
+ unsigned num_srcs = 0;
+
+ if (lod == NULL && nir_tex_type_has_lod(t->type))
+ lod = nir_imm_int(b, 0);
+
+ if (lod != NULL)
+ srcs[num_srcs++] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
+
+ return nir_build_tex_deref_instr(b, nir_texop_txs, t, NULL,
+ num_srcs, srcs);
}
-static inline nir_ssa_def *
+
+static inline nir_def *
+nir_samples_identical_deref(nir_builder *b, nir_deref_instr *t,
+ nir_def *coord)
+{
+ nir_tex_src srcs[] = { nir_tex_src_for_ssa(nir_tex_src_coord, coord) };
+
+ return nir_build_tex_deref_instr(b, nir_texop_samples_identical, t, NULL,
+ ARRAY_SIZE(srcs), srcs);
+}
+
+/* calculate a `(1 << value) - 1` in ssa without overflows */
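+/* (Shifting ~0 right avoids the out-of-range `1 << bits` that a naive
+ * `(1 << bits) - 1` would hit when bits == dst_bit_size.)
+ */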
+static inline nir_def *
+nir_mask(nir_builder *b, nir_def *bits, unsigned dst_bit_size)
+{
+ return nir_ushr(b, nir_imm_intN_t(b, -1, dst_bit_size),
+ nir_isub_imm(b, dst_bit_size, nir_u2u32(b, bits)));
+}
+
+static inline nir_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
unsigned interp_mode)
{
unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
- nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
+ nir_def_init(&bary->instr, &bary->def, num_components, 32);
nir_intrinsic_set_interp_mode(bary, interp_mode);
nir_builder_instr_insert(build, &bary->instr);
- return &bary->dest.ssa;
+ return &bary->def;
}
static inline void
@@ -1756,127 +2040,61 @@ nir_goto(nir_builder *build, struct nir_block *target)
}
static inline void
-nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
+nir_goto_if(nir_builder *build, struct nir_block *target, nir_def *cond,
struct nir_block *else_target)
{
assert(!build->impl->structured);
nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
- jump->condition = cond;
+ jump->condition = nir_src_for_ssa(cond);
jump->target = target;
jump->else_target = else_target;
nir_builder_instr_insert(build, &jump->instr);
}
-static inline nir_ssa_def *
-nir_compare_func(nir_builder *b, enum compare_func func,
- nir_ssa_def *src0, nir_ssa_def *src1)
-{
- switch (func) {
- case COMPARE_FUNC_NEVER:
- return nir_imm_int(b, 0);
- case COMPARE_FUNC_ALWAYS:
- return nir_imm_int(b, ~0);
- case COMPARE_FUNC_EQUAL:
- return nir_feq(b, src0, src1);
- case COMPARE_FUNC_NOTEQUAL:
- return nir_fneu(b, src0, src1);
- case COMPARE_FUNC_GREATER:
- return nir_flt(b, src1, src0);
- case COMPARE_FUNC_GEQUAL:
- return nir_fge(b, src0, src1);
- case COMPARE_FUNC_LESS:
- return nir_flt(b, src0, src1);
- case COMPARE_FUNC_LEQUAL:
- return nir_fge(b, src1, src0);
- }
- unreachable("bad compare func");
-}
-
static inline void
-nir_scoped_memory_barrier(nir_builder *b,
- nir_scope scope,
- nir_memory_semantics semantics,
- nir_variable_mode modes)
-{
- nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
-}
-
-static inline nir_ssa_def *
-nir_type_convert(nir_builder *b,
- nir_ssa_def *src,
- nir_alu_type src_type,
- nir_alu_type dest_type)
-{
- assert(nir_alu_type_get_type_size(src_type) == 0 ||
- nir_alu_type_get_type_size(src_type) == src->bit_size);
-
- src_type = (nir_alu_type) (src_type | src->bit_size);
-
- nir_op opcode =
- nir_type_conversion_op(src_type, dest_type, nir_rounding_mode_undef);
-
- return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
-}
-
-static inline nir_ssa_def *
-nir_convert_to_bit_size(nir_builder *b,
- nir_ssa_def *src,
- nir_alu_type type,
- unsigned bit_size)
-{
- return nir_type_convert(b, src, type, (nir_alu_type) (type | bit_size));
-}
-
-static inline nir_ssa_def *
-nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+nir_build_call(nir_builder *build, nir_function *func, size_t count,
+ nir_def **args)
{
- return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
-}
+ assert(count == func->num_params && "parameter count must match");
+ nir_call_instr *call = nir_call_instr_create(build->shader, func);
-static inline nir_ssa_def *
-nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
-{
- return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
-}
+ for (unsigned i = 0; i < count; ++i) {
+ call->params[i] = nir_src_for_ssa(args[i]);
+ }
-static inline nir_ssa_def *
-nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
-{
- return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
+ nir_builder_instr_insert(build, &call->instr);
}
-static inline nir_ssa_def *
-nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
-{
- return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
-}
+/*
+ * Call a given nir_function * with a variadic number of nir_def * arguments.
+ *
+ * Defined with __VA_ARGS__ instead of va_list so we can assert that the
+ * correct number of parameters is passed in.
+ */
+#define nir_call(build, func, ...) \
+ do { \
+ nir_def *args[] = { __VA_ARGS__ }; \
+ nir_build_call(build, func, ARRAY_SIZE(args), args); \
+ } while (0)
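+
+/* Illustrative use, with hypothetical `my_func`, `arg0` and `arg1`:
+ *
+ *    nir_call(b, my_func, arg0, arg1);
+ */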
-static inline nir_ssa_def *
-nir_i2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
-{
- return nir_type_convert(b, src, nir_type_int,
- (nir_alu_type) (nir_type_float | bit_size));
-}
+nir_def *
+nir_compare_func(nir_builder *b, enum compare_func func,
+ nir_def *src0, nir_def *src1);
-static inline nir_ssa_def *
-nir_u2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline void
+nir_scoped_memory_barrier(nir_builder *b,
+ mesa_scope scope,
+ nir_memory_semantics semantics,
+ nir_variable_mode modes)
{
- return nir_type_convert(b, src, nir_type_uint,
- (nir_alu_type) (nir_type_float | bit_size));
+ nir_barrier(b, SCOPE_NONE, scope, semantics, modes);
}
-static inline nir_ssa_def *
-nir_f2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
-{
- return nir_type_convert(b, src, nir_type_float,
- (nir_alu_type) (nir_type_uint | bit_size));
-}
+nir_def *
+nir_gen_rect_vertices(nir_builder *b, nir_def *z, nir_def *w);
-static inline nir_ssa_def *
-nir_f2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
-{
- return nir_type_convert(b, src, nir_type_float,
- (nir_alu_type) (nir_type_int | bit_size));
-}
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
#endif /* NIR_BUILDER_H */