Diffstat (limited to 'src/compiler/nir/nir.h')
-rw-r--r-- src/compiler/nir/nir.h | 3871
1 file changed, 2535 insertions(+), 1336 deletions(-)
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 7ef549c7435..5af53486c75 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -28,56 +28,112 @@
#ifndef NIR_H
#define NIR_H
-#include "util/hash_table.h"
+#include <stdint.h>
+#include "compiler/glsl_types.h"
#include "compiler/glsl/list.h"
-#include "GL/gl.h" /* GLenum */
-#include "util/list.h"
-#include "util/log.h"
-#include "util/ralloc.h"
-#include "util/set.h"
+#include "compiler/shader_enums.h"
+#include "compiler/shader_info.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/compiler.h"
#include "util/enum_operators.h"
-#include "util/macros.h"
#include "util/format/u_format.h"
-#include "compiler/nir_types.h"
-#include "compiler/shader_enums.h"
-#include "compiler/shader_info.h"
+#include "util/hash_table.h"
+#include "util/list.h"
+#include "util/log.h"
+#include "util/macros.h"
+#include "util/ralloc.h"
+#include "util/set.h"
+#include "util/u_printf.h"
#define XXH_INLINE_ALL
-#include "util/xxhash.h"
#include <stdio.h>
+#include "util/xxhash.h"
#ifndef NDEBUG
-#include "util/debug.h"
+#include "util/u_debug.h"
#endif /* NDEBUG */
#include "nir_opcodes.h"
-#if defined(_WIN32) && !defined(snprintf)
-#define snprintf _snprintf
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
-#define NIR_FALSE 0u
-#define NIR_TRUE (~0u)
+extern uint32_t nir_debug;
+extern bool nir_debug_print_shader[MESA_SHADER_KERNEL + 1];
+
+#ifndef NDEBUG
+#define NIR_DEBUG(flag) unlikely(nir_debug & (NIR_DEBUG_##flag))
+#else
+#define NIR_DEBUG(flag) false
+#endif
+
+#define NIR_DEBUG_CLONE (1u << 0)
+#define NIR_DEBUG_SERIALIZE (1u << 1)
+#define NIR_DEBUG_NOVALIDATE (1u << 2)
+#define NIR_DEBUG_VALIDATE_SSA_DOMINANCE (1u << 3)
+#define NIR_DEBUG_TGSI (1u << 4)
+#define NIR_DEBUG_PRINT_VS (1u << 5)
+#define NIR_DEBUG_PRINT_TCS (1u << 6)
+#define NIR_DEBUG_PRINT_TES (1u << 7)
+#define NIR_DEBUG_PRINT_GS (1u << 8)
+#define NIR_DEBUG_PRINT_FS (1u << 9)
+#define NIR_DEBUG_PRINT_CS (1u << 10)
+#define NIR_DEBUG_PRINT_TS (1u << 11)
+#define NIR_DEBUG_PRINT_MS (1u << 12)
+#define NIR_DEBUG_PRINT_RGS (1u << 13)
+#define NIR_DEBUG_PRINT_AHS (1u << 14)
+#define NIR_DEBUG_PRINT_CHS (1u << 15)
+#define NIR_DEBUG_PRINT_MHS (1u << 16)
+#define NIR_DEBUG_PRINT_IS (1u << 17)
+#define NIR_DEBUG_PRINT_CBS (1u << 18)
+#define NIR_DEBUG_PRINT_KS (1u << 19)
+#define NIR_DEBUG_PRINT_NO_INLINE_CONSTS (1u << 20)
+#define NIR_DEBUG_PRINT_INTERNAL (1u << 21)
+#define NIR_DEBUG_PRINT_PASS_FLAGS (1u << 22)
+
+#define NIR_DEBUG_PRINT (NIR_DEBUG_PRINT_VS | \
+ NIR_DEBUG_PRINT_TCS | \
+ NIR_DEBUG_PRINT_TES | \
+ NIR_DEBUG_PRINT_GS | \
+ NIR_DEBUG_PRINT_FS | \
+ NIR_DEBUG_PRINT_CS | \
+ NIR_DEBUG_PRINT_TS | \
+ NIR_DEBUG_PRINT_MS | \
+ NIR_DEBUG_PRINT_RGS | \
+ NIR_DEBUG_PRINT_AHS | \
+ NIR_DEBUG_PRINT_CHS | \
+ NIR_DEBUG_PRINT_MHS | \
+ NIR_DEBUG_PRINT_IS | \
+ NIR_DEBUG_PRINT_CBS | \
+ NIR_DEBUG_PRINT_KS)
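
For illustration, a sketch of how a pass runner might consume these flags; NIR_DEBUG() compiles to false in release builds, so the check costs nothing there (my_optimization_pass is a hypothetical pass, nir_validate_shader is the real validator):

    /* Skip validation when the novalidate debug flag is set. */
    static void
    run_pass_with_validation(nir_shader *shader)
    {
       my_optimization_pass(shader); /* hypothetical pass */

       if (!NIR_DEBUG(NOVALIDATE))
          nir_validate_shader(shader, "after my_optimization_pass");
    }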
+
+#define NIR_FALSE 0u
+#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 16
#define NIR_MAX_MATRIX_COLUMNS 4
-#define NIR_STREAM_PACKED (1 << 8)
+#define NIR_STREAM_PACKED (1 << 8)
typedef uint16_t nir_component_mask_t;
static inline bool
nir_num_components_valid(unsigned num_components)
{
- return (num_components >= 1 &&
+ return (num_components >= 1 &&
num_components <= 5) ||
- num_components == 8 ||
- num_components == 16;
+ num_components == 8 ||
+ num_components == 16;
+}
+
+static inline nir_component_mask_t
+nir_component_mask(unsigned num_components)
+{
+ assert(nir_num_components_valid(num_components));
+ return (1u << num_components) - 1;
}
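
For example, nir_component_mask(3) yields the low three bits, i.e. a mask selecting .xyz:

    nir_component_mask_t mask = nir_component_mask(3);
    assert(mask == 0x7); /* binary 0111: components x, y, z */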
+void
+nir_process_debug_variable(void);
+
bool nir_component_mask_can_reinterpret(nir_component_mask_t mask,
unsigned old_bit_size,
unsigned new_bit_size);
@@ -94,75 +150,139 @@ nir_component_mask_reinterpret(nir_component_mask_t mask,
* Note that you have to be a bit careful as the generated cast function
* destroys constness.
*/
-#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
- type_field, type_value) \
-static inline out_type * \
-name(const in_type *parent) \
-{ \
- assert(parent && parent->type_field == type_value); \
- return exec_node_data(out_type, parent, field); \
-}
+#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
+ type_field, type_value) \
+ static inline out_type * \
+ name(const in_type *parent) \
+ { \
+ assert(parent && parent->type_field == type_value); \
+ return exec_node_data(out_type, parent, field); \
+ }
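
As an illustration of the expansion, the instruction casts later in this header are instances of this macro; an invocation such as NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr, type, nir_instr_type_alu) generates:

    static inline nir_alu_instr *
    nir_instr_as_alu(const nir_instr *parent)
    {
       assert(parent && parent->type == nir_instr_type_alu);
       return exec_node_data(nir_alu_instr, parent, instr);
    }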
struct nir_function;
struct nir_shader;
struct nir_instr;
struct nir_builder;
-
+struct nir_xfb_info;
/**
* Description of built-in state associated with a uniform
*
- * \sa nir_variable::state_slots
+ * :c:member:`nir_variable.state_slots`
*/
typedef struct {
gl_state_index16 tokens[STATE_LENGTH];
- uint16_t swizzle;
} nir_state_slot;
+/* clang-format off */
typedef enum {
- nir_var_shader_in = (1 << 0),
- nir_var_shader_out = (1 << 1),
- nir_var_shader_temp = (1 << 2),
- nir_var_function_temp = (1 << 3),
- nir_var_uniform = (1 << 4),
- nir_var_mem_ubo = (1 << 5),
- nir_var_system_value = (1 << 6),
- nir_var_mem_ssbo = (1 << 7),
- nir_var_mem_shared = (1 << 8),
- nir_var_mem_global = (1 << 9),
- nir_var_mem_generic = (nir_var_shader_temp |
- nir_var_function_temp |
- nir_var_mem_shared |
- nir_var_mem_global),
- nir_var_mem_push_const = (1 << 10), /* not actually used for variables */
- nir_var_mem_constant = (1 << 11),
+ nir_var_system_value = (1 << 0),
+ nir_var_uniform = (1 << 1),
+ nir_var_shader_in = (1 << 2),
+ nir_var_shader_out = (1 << 3),
+ nir_var_image = (1 << 4),
/** Incoming call or ray payload data for ray-tracing shaders */
- nir_var_shader_call_data = (1 << 12),
+ nir_var_shader_call_data = (1 << 5),
/** Ray hit attributes */
- nir_var_ray_hit_attrib = (1 << 13),
- nir_var_read_only_modes = nir_var_shader_in | nir_var_uniform |
- nir_var_system_value | nir_var_mem_constant |
- nir_var_mem_ubo,
- /** Modes where vector derefs can be indexed as arrays */
- nir_var_vec_indexable_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
+ nir_var_ray_hit_attrib = (1 << 6),
+
+ /* Modes named nir_var_mem_* have explicit data layout */
+ nir_var_mem_ubo = (1 << 7),
+ nir_var_mem_push_const = (1 << 8),
+ nir_var_mem_ssbo = (1 << 9),
+ nir_var_mem_constant = (1 << 10),
+ nir_var_mem_task_payload = (1 << 11),
+ nir_var_mem_node_payload = (1 << 12),
+ nir_var_mem_node_payload_in = (1 << 13),
+
+ /* Generic modes intentionally come last. See encode_deref_modes() in
+ * nir_serialize.c for more details.
+ */
+ nir_var_shader_temp = (1 << 14),
+ nir_var_function_temp = (1 << 15),
+ nir_var_mem_shared = (1 << 16),
+ nir_var_mem_global = (1 << 17),
+
+ nir_var_mem_generic = (nir_var_shader_temp |
+ nir_var_function_temp |
+ nir_var_mem_shared |
+ nir_var_mem_global),
+
+ nir_var_read_only_modes = nir_var_shader_in | nir_var_uniform |
+ nir_var_system_value | nir_var_mem_constant |
+ nir_var_mem_ubo,
+ /* Modes where vector derefs can be indexed as arrays. nir_var_shader_out
+ * is only for mesh stages. nir_var_system_value is only for kernel stages.
+ */
+ nir_var_vec_indexable_modes = nir_var_shader_temp | nir_var_function_temp |
+ nir_var_mem_ubo | nir_var_mem_ssbo |
nir_var_mem_shared | nir_var_mem_global |
- nir_var_mem_push_const,
- nir_num_variable_modes = 14,
- nir_var_all = (1 << nir_num_variable_modes) - 1,
+ nir_var_mem_push_const | nir_var_mem_task_payload |
+ nir_var_shader_out | nir_var_system_value,
+ nir_num_variable_modes = 18,
+ nir_var_all = (1 << nir_num_variable_modes) - 1,
} nir_variable_mode;
MESA_DEFINE_CPP_ENUM_BITFIELD_OPERATORS(nir_variable_mode)
+/* clang-format on */
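
Because each mode is a single bit, mode sets compose and test with plain bitwise operations; a sketch (the helper name is hypothetical and the mode set illustrative):

    /* Does this variable live in explicitly laid out memory? A variable has
     * exactly one mode, so an AND against a mode set suffices.
     */
    static inline bool
    var_has_explicit_layout(const nir_variable *var)
    {
       return (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo |
                                 nir_var_mem_push_const |
                                 nir_var_mem_constant)) != 0;
    }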
/**
* Rounding modes.
*/
typedef enum {
nir_rounding_mode_undef = 0,
- nir_rounding_mode_rtne = 1, /* round to nearest even */
- nir_rounding_mode_ru = 2, /* round up */
- nir_rounding_mode_rd = 3, /* round down */
- nir_rounding_mode_rtz = 4, /* round towards zero */
+ nir_rounding_mode_rtne = 1, /* round to nearest even */
+ nir_rounding_mode_ru = 2, /* round up */
+ nir_rounding_mode_rd = 3, /* round down */
+ nir_rounding_mode_rtz = 4, /* round towards zero */
} nir_rounding_mode;
+/**
+ * Ray query values that can read from a RayQueryKHR object.
+ */
+typedef enum {
+ nir_ray_query_value_intersection_type,
+ nir_ray_query_value_intersection_t,
+ nir_ray_query_value_intersection_instance_custom_index,
+ nir_ray_query_value_intersection_instance_id,
+ nir_ray_query_value_intersection_instance_sbt_index,
+ nir_ray_query_value_intersection_geometry_index,
+ nir_ray_query_value_intersection_primitive_index,
+ nir_ray_query_value_intersection_barycentrics,
+ nir_ray_query_value_intersection_front_face,
+ nir_ray_query_value_intersection_object_ray_direction,
+ nir_ray_query_value_intersection_object_ray_origin,
+ nir_ray_query_value_intersection_object_to_world,
+ nir_ray_query_value_intersection_world_to_object,
+ nir_ray_query_value_intersection_candidate_aabb_opaque,
+ nir_ray_query_value_tmin,
+ nir_ray_query_value_flags,
+ nir_ray_query_value_world_ray_direction,
+ nir_ray_query_value_world_ray_origin,
+ nir_ray_query_value_intersection_triangle_vertex_positions
+} nir_ray_query_value;
+
+/**
+ * Intel resource flags
+ */
+typedef enum {
+ nir_resource_intel_bindless = 1u << 0,
+ nir_resource_intel_pushable = 1u << 1,
+ nir_resource_intel_sampler = 1u << 2,
+ nir_resource_intel_non_uniform = 1u << 3,
+ nir_resource_intel_sampler_embedded = 1u << 4,
+} nir_resource_data_intel;
+
+/**
+ * Which components to interpret as signed in cmat_muladd.
+ * See 'Cooperative Matrix Operands' in SPV_KHR_cooperative_matrix.
+ */
+typedef enum {
+ NIR_CMAT_A_SIGNED = 1u << 0,
+ NIR_CMAT_B_SIGNED = 1u << 1,
+ NIR_CMAT_C_SIGNED = 1u << 2,
+ NIR_CMAT_RESULT_SIGNED = 1u << 3,
+} nir_cmat_signed;
+
typedef union {
bool b;
float f32;
@@ -178,10 +298,10 @@ typedef union {
} nir_const_value;
#define nir_const_value_to_array(arr, c, components, m) \
-{ \
- for (unsigned i = 0; i < components; ++i) \
- arr[i] = c[i].m; \
-} while (false)
+ do { \
+ for (unsigned i = 0; i < components; ++i) \
+ arr[i] = c[i].m; \
+ } while (false)
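
The added do/while (false) wrapper is what makes the macro safe as a single statement: in the old brace-only form, the trailing while (false) was a separate statement, so the macro broke under if/else. A sketch of the failure mode the fix addresses:

    /* With the old macro, the expansion left a stray `while (false);`
     * between the if-block and the else, which does not compile:
     */
    if (all_constant)
       nir_const_value_to_array(arr, c, n, u32); /* hypothetical use */
    else
       handle_non_constant(); /* hypothetical fallback */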
static inline nir_const_value
nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
@@ -189,6 +309,7 @@ nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
nir_const_value v;
memset(&v, 0, sizeof(v));
+ /* clang-format off */
switch (bit_size) {
case 1: v.b = x; break;
case 8: v.u8 = x; break;
@@ -198,6 +319,7 @@ nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
default:
unreachable("Invalid bit size");
}
+ /* clang-format on */
return v;
}
@@ -205,9 +327,6 @@ nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
static inline nir_const_value
nir_const_value_for_int(int64_t i, unsigned bit_size)
{
- nir_const_value v;
- memset(&v, 0, sizeof(v));
-
assert(bit_size <= 64);
if (bit_size < 64) {
assert(i >= (-(1ll << (bit_size - 1))));
@@ -220,9 +339,6 @@ nir_const_value_for_int(int64_t i, unsigned bit_size)
static inline nir_const_value
nir_const_value_for_uint(uint64_t u, unsigned bit_size)
{
- nir_const_value v;
- memset(&v, 0, sizeof(v));
-
assert(bit_size <= 64);
if (bit_size < 64)
assert(u < (1ull << bit_size));
@@ -243,6 +359,7 @@ nir_const_value nir_const_value_for_float(double b, unsigned bit_size);
static inline int64_t
nir_const_value_as_int(nir_const_value value, unsigned bit_size)
{
+ /* clang-format off */
switch (bit_size) {
/* int1_t uses 0/-1 convention */
case 1: return -(int)value.b;
@@ -253,11 +370,13 @@ nir_const_value_as_int(nir_const_value value, unsigned bit_size)
default:
unreachable("Invalid bit size");
}
+ /* clang-format on */
}
static inline uint64_t
nir_const_value_as_uint(nir_const_value value, unsigned bit_size)
{
+ /* clang-format off */
switch (bit_size) {
case 1: return value.b;
case 8: return value.u8;
@@ -267,6 +386,7 @@ nir_const_value_as_uint(nir_const_value value, unsigned bit_size)
default:
unreachable("Invalid bit size");
}
+ /* clang-format on */
}
static inline bool
@@ -288,11 +408,14 @@ typedef struct nir_constant {
* Value of the constant.
*
* The field used to back the values supplied by the constant is determined
- * by the type associated with the \c nir_variable. Constants may be
+ * by the type associated with the ``nir_variable``. Constants may be
* scalars, vectors, or matrices.
*/
nir_const_value values[NIR_MAX_VEC_COMPONENTS];
+ /* Indicates that all the values are zero, which can enable some optimizations. */
+ bool is_null_constant;
+
/* we could get this from the var->type but makes clone *much* easier to
* not have to care about the type.
*/
@@ -303,17 +426,18 @@ typedef struct nir_constant {
} nir_constant;
/**
- * \brief Layout qualifiers for gl_FragDepth.
+ * Layout qualifiers for gl_FragDepth.
*
* The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
* with a layout qualifier.
*/
typedef enum {
- nir_depth_layout_none, /**< No depth layout is specified. */
- nir_depth_layout_any,
- nir_depth_layout_greater,
- nir_depth_layout_less,
- nir_depth_layout_unchanged
+ /** No depth layout is specified. */
+ nir_depth_layout_none,
+ nir_depth_layout_any,
+ nir_depth_layout_greater,
+ nir_depth_layout_less,
+ nir_depth_layout_unchanged
} nir_depth_layout;
/**
@@ -326,6 +450,12 @@ typedef enum {
nir_var_declared_normally = 0,
/**
+ * Variable is an implicitly declared built-in that has not been explicitly
+ * re-declared by the shader.
+ */
+ nir_var_declared_implicitly,
+
+ /**
* Variable is implicitly generated by the compiler and should not be
* visible via the API.
*/
@@ -354,23 +484,35 @@ typedef struct nir_variable {
/**
* Storage class of the variable.
*
- * \sa nir_variable_mode
+ * :c:struct:`nir_variable_mode`
*/
- unsigned mode:14;
+ unsigned mode : 18;
/**
* Is the variable read-only?
*
- * This is set for variables declared as \c const, shader inputs,
+ * This is set for variables declared as ``const``, shader inputs,
* and uniforms.
*/
- unsigned read_only:1;
- unsigned centroid:1;
- unsigned sample:1;
- unsigned patch:1;
- unsigned invariant:1;
+ unsigned read_only : 1;
+ unsigned centroid : 1;
+ unsigned sample : 1;
+ unsigned patch : 1;
+ unsigned invariant : 1;
+
+ /**
+ * Was an 'invariant' qualifier explicitly set in the shader?
+ *
+ * This is used to cross-validate GLSL qualifiers.
+ */
+ unsigned explicit_invariant : 1;
+
+ /**
+ * Is the variable a ray query?
+ */
+ unsigned ray_query : 1;
- /**
+ /**
* Precision qualifier.
*
* In desktop GLSL we do not care about precision qualifiers at all, in
@@ -381,7 +523,17 @@ typedef struct nir_variable {
* have the same precision value and the checks we add in the compiler
* for this field will never break a desktop shader compile.
*/
- unsigned precision:2;
+ unsigned precision : 2;
+
+ /**
+ * Has this variable been statically assigned?
+ *
+ * This answers whether the variable was assigned in any path of
+ * the shader during ast_to_hir. This doesn't answer whether it is
+ * still written after dead code removal, nor is it maintained in
+ * non-ast_to_hir.cpp (GLSL parsing) paths.
+ */
+ unsigned assigned : 1;
/**
* Can this variable be coalesced with another?
@@ -392,7 +544,7 @@ typedef struct nir_variable {
* result in a load/store of the variable with an indirect offset which
* the backend may not be able to handle.
*/
- unsigned cannot_coalesce:1;
+ unsigned cannot_coalesce : 1;
/**
* When separate shader programs are enabled, only input/outputs between
@@ -402,83 +554,140 @@ typedef struct nir_variable {
* This is also used to make sure xfb varyings that are unused by the
* fragment shader are not removed.
*/
- unsigned always_active_io:1;
+ unsigned always_active_io : 1;
/**
* Interpolation mode for shader inputs / outputs
*
- * \sa glsl_interp_mode
+ * :c:enum:`glsl_interp_mode`
*/
- unsigned interpolation:3;
+ unsigned interpolation : 3;
/**
* If non-zero, then this variable may be packed along with other variables
* into a single varying slot, so this offset should be applied when
* accessing components. For example, an offset of 1 means that the x
* component of this variable is actually stored in component y of the
- * location specified by \c location.
+ * location specified by ``location``.
*/
- unsigned location_frac:2;
+ unsigned location_frac : 2;
/**
* If true, this variable represents an array of scalars that should
* be tightly packed. In other words, consecutive array elements
* should be stored one component apart, rather than one slot apart.
*/
- unsigned compact:1;
+ unsigned compact : 1;
/**
* Whether this is a fragment shader output implicitly initialized with
* the previous contents of the specified render target at the
* framebuffer location corresponding to this shader invocation.
*/
- unsigned fb_fetch_output:1;
+ unsigned fb_fetch_output : 1;
/**
* Non-zero if this variable is considered bindless as defined by
* ARB_bindless_texture.
*/
- unsigned bindless:1;
+ unsigned bindless : 1;
/**
* Was an explicit binding set in the shader?
*/
- unsigned explicit_binding:1;
+ unsigned explicit_binding : 1;
/**
* Was the location explicitly set in the shader?
*
- * If the location is explicitly set in the shader, it \b cannot be changed
- * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
+ * If the location is explicitly set in the shader, it **cannot** be changed
+ * by the linker or by the API (e.g., calls to ``glBindAttribLocation`` have
* no effect).
*/
- unsigned explicit_location:1;
+ unsigned explicit_location : 1;
+
+ /* Was the array implicitly sized during linking */
+ unsigned implicit_sized_array : 1;
+
+ /**
+ * Highest element accessed with a constant array index
+ *
+ * Not used for non-array variables. -1 if the array was never accessed.
+ */
+ int max_array_access;
+
+ /**
+ * Does this variable have an initializer?
+ *
+ * This is used by the linker to cross-validate initializers of global
+ * variables.
+ */
+ unsigned has_initializer : 1;
+
+ /**
+ * Was the initializer created by the compiler (glsl_zero_init)?
+ */
+ unsigned is_implicit_initializer : 1;
+
+ /**
+ * Is this varying used by transform feedback?
+ *
+ * This is used by the linker to decide if it's safe to pack the varying.
+ */
+ unsigned is_xfb : 1;
+
+ /**
+ * Is this varying used only by transform feedback?
+ *
+ * This is used by the linker to decide if it's safe to pack the varying.
+ */
+ unsigned is_xfb_only : 1;
/**
* Was a transform feedback buffer set in the shader?
*/
- unsigned explicit_xfb_buffer:1;
+ unsigned explicit_xfb_buffer : 1;
/**
* Was a transform feedback stride set in the shader?
*/
- unsigned explicit_xfb_stride:1;
+ unsigned explicit_xfb_stride : 1;
/**
* Was an explicit offset set in the shader?
*/
- unsigned explicit_offset:1;
+ unsigned explicit_offset : 1;
/**
* Layout of the matrix. Uses glsl_matrix_layout values.
*/
- unsigned matrix_layout:2;
+ unsigned matrix_layout : 2;
/**
* Non-zero if this variable was created by lowering a named interface
* block.
*/
- unsigned from_named_ifc_block:1;
+ unsigned from_named_ifc_block : 1;
+
+ /**
+ * Unsized array buffer variable.
+ */
+ unsigned from_ssbo_unsized_array : 1;
+
+ /**
+ * Non-zero if the variable must be a shader input. This is useful for
+ * constraints on function parameters.
+ */
+ unsigned must_be_shader_input : 1;
+
+ /**
+ * Has this variable been used for reading or writing?
+ *
+ * Several GLSL semantic checks require knowledge of whether or not a
+ * variable has been used. For example, it is an error to redeclare a
+ * variable as invariant after it has been used.
+ */
+ unsigned used : 1;
/**
* How the variable was declared. See nir_var_declaration_type.
@@ -486,27 +695,33 @@ typedef struct nir_variable {
* This is used to detect variables generated by the compiler, so should
* not be visible via the API.
*/
- unsigned how_declared:2;
+ unsigned how_declared : 2;
/**
* Is this variable per-view? If so, we know it must be an array with
* size corresponding to the number of views.
*/
- unsigned per_view:1;
+ unsigned per_view : 1;
/**
* Whether the variable is per-primitive.
* Can be used by Mesh Shader outputs and corresponding Fragment Shader inputs.
*/
- unsigned per_primitive:1;
+ unsigned per_primitive : 1;
+
+ /**
+ * Whether the variable is declared to indicate that a fragment shader
+ * input will not have interpolated values.
+ */
+ unsigned per_vertex : 1;
/**
- * \brief Layout qualifier for gl_FragDepth. See nir_depth_layout.
+ * Layout qualifier for gl_FragDepth. See nir_depth_layout.
*
- * This is not equal to \c ir_depth_layout_none if and only if this
- * variable is \c gl_FragDepth and a layout qualifier is specified.
+ * This is not equal to ``ir_depth_layout_none`` if and only if this
+ * variable is ``gl_FragDepth`` and a layout qualifier is specified.
*/
- unsigned depth_layout:3;
+ unsigned depth_layout : 3;
/**
* Vertex stream output identifier.
@@ -514,7 +729,7 @@ typedef struct nir_variable {
* For packed outputs, NIR_STREAM_PACKED is set and bits [2*i+1,2*i]
* indicate the stream of the i-th component.
*/
- unsigned stream:9;
+ unsigned stream : 9;
/**
* See gl_access_qualifier.
@@ -522,12 +737,12 @@ typedef struct nir_variable {
* Access flags for memory variables (SSBO/global), image uniforms, and
* bindless images in uniforms/inputs/outputs.
*/
- unsigned access:8;
+ unsigned access : 9;
/**
* Descriptor set binding for sampler or UBO.
*/
- unsigned descriptor_set:5;
+ unsigned descriptor_set : 5;
/**
* output index for dual source blending.
@@ -546,15 +761,15 @@ typedef struct nir_variable {
*
* The precise meaning of this field depends on the nature of the variable.
*
- * - Vertex shader input: one of the values from \c gl_vert_attrib.
- * - Vertex shader output: one of the values from \c gl_varying_slot.
- * - Geometry shader input: one of the values from \c gl_varying_slot.
- * - Geometry shader output: one of the values from \c gl_varying_slot.
- * - Fragment shader input: one of the values from \c gl_varying_slot.
- * - Fragment shader output: one of the values from \c gl_frag_result.
- * - Task shader output: one of the values from \c gl_varying_slot.
- * - Mesh shader input: one of the values from \c gl_varying_slot.
- * - Mesh shader output: one of the values from \c gl_varying_slot.
+ * - Vertex shader input: one of the values from ``gl_vert_attrib``.
+ * - Vertex shader output: one of the values from ``gl_varying_slot``.
+ * - Geometry shader input: one of the values from ``gl_varying_slot``.
+ * - Geometry shader output: one of the values from ``gl_varying_slot``.
+ * - Fragment shader input: one of the values from ``gl_varying_slot``.
+ * - Fragment shader output: one of the values from ``gl_frag_result``.
+ * - Task shader output: one of the values from ``gl_varying_slot``.
+ * - Mesh shader input: one of the values from ``gl_varying_slot``.
+ * - Mesh shader output: one of the values from ``gl_varying_slot``.
* - Uniforms: Per-stage uniform slot number for default uniform block.
* - Uniforms: Index within the uniform block definition for UBO members.
* - Non-UBO Uniforms: uniform slot number.
@@ -565,6 +780,9 @@ typedef struct nir_variable {
*/
int location;
+ /** Required alignment of this variable */
+ unsigned alignment;
+
/**
* The actual location of the variable in the IR. Only valid for inputs,
* outputs, uniforms (including samplers and images), and for UBO and SSBO
@@ -597,7 +815,7 @@ typedef struct nir_variable {
/**
* Transform feedback buffer.
*/
- uint16_t buffer:2;
+ uint16_t buffer : 2;
/**
* Transform feedback stride.
@@ -605,6 +823,9 @@ typedef struct nir_variable {
uint16_t stride;
} xfb;
};
+
+ /** Name of the node this payload will be enqueued to. */
+ const char *node_name;
} data;
/**
@@ -619,18 +840,19 @@ typedef struct nir_variable {
/**
* Built-in state that backs this uniform
*
- * Once set at variable creation, \c state_slots must remain invariant.
+ * Once set at variable creation, ``state_slots`` must remain invariant.
* This is because, ideally, this array would be shared by all clones of
* this variable in the IR tree. In other words, we'd really like for it
* to be a fly-weight.
*
- * If the variable is not a uniform, \c num_state_slots will be zero and
- * \c state_slots will be \c NULL.
+ * If the variable is not a uniform, ``num_state_slots`` will be zero and
+ * ``state_slots`` will be ``NULL``.
+ *
+ * Number of state slots used.
*/
- /*@{*/
- uint16_t num_state_slots; /**< Number of state slots used */
- nir_state_slot *state_slots; /**< State descriptors. */
- /*@}*/
+ uint16_t num_state_slots;
+ /** State descriptors. */
+ nir_state_slot *state_slots;
/**
* Constant expression assigned in the initializer of the variable
@@ -651,9 +873,9 @@ typedef struct nir_variable {
/**
* For variables that are in an interface block or are an instance of an
- * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
+ * interface block, this is the ``GLSL_TYPE_INTERFACE`` type for that block.
*
- * \sa ir_variable::location
+ * ``ir_variable.location``
*/
const struct glsl_type *interface_type;
@@ -689,11 +911,11 @@ _nir_shader_variable_has_mode(nir_variable *var, unsigned modes)
nir_foreach_variable_in_list_safe(var, &(shader)->variables)
#define nir_foreach_variable_with_modes(var, shader, modes) \
- nir_foreach_variable_in_shader(var, shader) \
+ nir_foreach_variable_in_shader(var, shader) \
if (_nir_shader_variable_has_mode(var, modes))
#define nir_foreach_variable_with_modes_safe(var, shader, modes) \
- nir_foreach_variable_in_shader_safe(var, shader) \
+ nir_foreach_variable_in_shader_safe(var, shader) \
if (_nir_shader_variable_has_mode(var, modes))
#define nir_foreach_shader_in_variable(var, shader) \
@@ -714,46 +936,19 @@ _nir_shader_variable_has_mode(nir_variable *var, unsigned modes)
#define nir_foreach_uniform_variable_safe(var, shader) \
nir_foreach_variable_with_modes_safe(var, shader, nir_var_uniform)
+#define nir_foreach_image_variable(var, shader) \
+ nir_foreach_variable_with_modes(var, shader, nir_var_image)
+
+#define nir_foreach_image_variable_safe(var, shader) \
+ nir_foreach_variable_with_modes_safe(var, shader, nir_var_image)
+
static inline bool
nir_variable_is_global(const nir_variable *var)
{
return var->data.mode != nir_var_function_temp;
}
-typedef struct nir_register {
- struct exec_node node;
-
- unsigned num_components; /** < number of vector components */
- unsigned num_array_elems; /** < size of array (0 for no array) */
-
- /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
- uint8_t bit_size;
-
- /**
- * True if this register may have different values in different SIMD
- * invocations of the shader.
- */
- bool divergent;
-
- /** generic register index. */
- unsigned index;
-
- /** set of nir_srcs where this register is used (read from) */
- struct list_head uses;
-
- /** set of nir_dests where this register is defined (written to) */
- struct list_head defs;
-
- /** set of nir_ifs where this register is used as a condition */
- struct list_head if_uses;
-} nir_register;
-
-#define nir_foreach_register(reg, reg_list) \
- foreach_list_typed(nir_register, reg, node, reg_list)
-#define nir_foreach_register_safe(reg, reg_list) \
- foreach_list_typed_safe(nir_register, reg, node, reg_list)
-
-typedef enum PACKED {
+typedef enum ENUM_PACKED {
nir_instr_type_alu,
nir_instr_type_deref,
nir_instr_type_call,
@@ -761,14 +956,13 @@ typedef enum PACKED {
nir_instr_type_intrinsic,
nir_instr_type_load_const,
nir_instr_type_jump,
- nir_instr_type_ssa_undef,
+ nir_instr_type_undef,
nir_instr_type_phi,
nir_instr_type_parallel_copy,
} nir_instr_type;
typedef struct nir_instr {
struct exec_node node;
- struct list_head gc_node;
struct nir_block *block;
nir_instr_type type;
@@ -813,22 +1007,19 @@ nir_instr_is_last(const nir_instr *instr)
return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}
-typedef struct nir_ssa_def {
+typedef struct nir_def {
/** Instruction which produces this SSA value. */
nir_instr *parent_instr;
/** set of nir_instrs where this register is used (read from) */
struct list_head uses;
- /** set of nir_ifs where this register is used as a condition */
- struct list_head if_uses;
-
/** generic SSA definition index. */
unsigned index;
uint8_t num_components;
- /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
+ /* The bit-size of each channel; must be one of 1, 8, 16, 32, or 64 */
uint8_t bit_size;
/**
@@ -836,175 +1027,164 @@ typedef struct nir_ssa_def {
* invocations of the shader. This is set by nir_divergence_analysis.
*/
bool divergent;
-} nir_ssa_def;
+} nir_def;
struct nir_src;
+struct nir_if;
-typedef struct {
- nir_register *reg;
- struct nir_src *indirect; /** < NULL for no indirect offset */
- unsigned base_offset;
+typedef struct nir_src {
+ /* Instruction or if-statement that consumes this value as a source. This
+ * should only be accessed through nir_src_* helpers.
+ *
+ * Internally, it is a tagged pointer to a nir_instr or nir_if.
+ */
+ uintptr_t _parent;
- /* TODO use-def chain goes here */
-} nir_reg_src;
+ struct list_head use_link;
+ nir_def *ssa;
+} nir_src;
-typedef struct {
- nir_instr *parent_instr;
- struct list_head def_link;
+/* Layout of the _parent pointer. Bottom bit is set for nir_if parents (clear
+ * for nir_instr parents). Remaining bits are the pointer.
+ */
+#define NIR_SRC_PARENT_IS_IF (0x1)
+#define NIR_SRC_PARENT_MASK (~((uintptr_t) NIR_SRC_PARENT_IS_IF))
- nir_register *reg;
- struct nir_src *indirect; /** < NULL for no indirect offset */
- unsigned base_offset;
+static inline bool
+nir_src_is_if(const nir_src *src)
+{
+ return src->_parent & NIR_SRC_PARENT_IS_IF;
+}
- /* TODO def-use chain goes here */
-} nir_reg_dest;
+static inline nir_instr *
+nir_src_parent_instr(const nir_src *src)
+{
+ assert(!nir_src_is_if(src));
-struct nir_if;
+ /* Because it is not an if, the tag is 0, therefore we do not need to mask */
+ return (nir_instr *)(src->_parent);
+}
-typedef struct nir_src {
- union {
- /** Instruction that consumes this value as a source. */
- nir_instr *parent_instr;
- struct nir_if *parent_if;
- };
+static inline struct nir_if *
+nir_src_parent_if(const nir_src *src)
+{
+ assert(nir_src_is_if(src));
- struct list_head use_link;
+ /* Because it is an if, the tag is 1, so we need to mask */
+ return (struct nir_if *)(src->_parent & NIR_SRC_PARENT_MASK);
+}
- union {
- nir_reg_src reg;
- nir_ssa_def *ssa;
- };
+static inline void
+_nir_src_set_parent(nir_src *src, void *parent, bool is_if)
+{
+ uintptr_t ptr = (uintptr_t) parent;
+ assert((ptr & ~NIR_SRC_PARENT_MASK) == 0 && "pointer must be aligned");
- bool is_ssa;
-} nir_src;
+ if (is_if)
+ ptr |= NIR_SRC_PARENT_IS_IF;
+
+ src->_parent = ptr;
+}
+
+static inline void
+nir_src_set_parent_instr(nir_src *src, nir_instr *parent_instr)
+{
+ _nir_src_set_parent(src, parent_instr, false);
+}
+
+static inline void
+nir_src_set_parent_if(nir_src *src, struct nir_if *parent_if)
+{
+ _nir_src_set_parent(src, parent_if, true);
+}
static inline nir_src
nir_src_init(void)
{
- nir_src src = { { NULL } };
+ nir_src src = { 0 };
return src;
}
#define NIR_SRC_INIT nir_src_init()
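
A sketch of the round trip: nir_instr and nir_if allocations are at least 2-byte aligned, so bit 0 is free to carry the instr/if tag (my_if is a hypothetical nir_if pointer):

    nir_src src = NIR_SRC_INIT;
    nir_src_set_parent_if(&src, my_if);       /* stores pointer | 1 */
    assert(nir_src_is_if(&src));
    assert(nir_src_parent_if(&src) == my_if); /* masks the tag back off */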
-#define nir_foreach_use(src, reg_or_ssa_def) \
+#define nir_foreach_use_including_if(src, reg_or_ssa_def) \
list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
-#define nir_foreach_use_safe(src, reg_or_ssa_def) \
+#define nir_foreach_use_including_if_safe(src, reg_or_ssa_def) \
list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
-#define nir_foreach_if_use(src, reg_or_ssa_def) \
- list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
+#define nir_foreach_use(src, reg_or_ssa_def) \
+ nir_foreach_use_including_if(src, reg_or_ssa_def) \
+ if (!nir_src_is_if(src))
-#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
- list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
+#define nir_foreach_use_safe(src, reg_or_ssa_def) \
+ nir_foreach_use_including_if_safe(src, reg_or_ssa_def) \
+ if (!nir_src_is_if(src))
-typedef struct {
- union {
- nir_reg_dest reg;
- nir_ssa_def ssa;
- };
+#define nir_foreach_if_use(src, reg_or_ssa_def) \
+ nir_foreach_use_including_if(src, reg_or_ssa_def) \
+ if (nir_src_is_if(src))
- bool is_ssa;
-} nir_dest;
+#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
+ nir_foreach_use_including_if_safe(src, reg_or_ssa_def) \
+ if (nir_src_is_if(src))
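
A sketch of the split iteration these macros provide; the _safe variants additionally allow removing the use being visited:

    /* Count instruction uses and if-condition uses of a def separately. */
    unsigned instr_uses = 0, if_uses = 0;

    nir_foreach_use(src, def)
       instr_uses++; /* src's parent is a nir_instr */

    nir_foreach_if_use(src, def)
       if_uses++;    /* src feeds a nir_if condition */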
-static inline nir_dest
-nir_dest_init(void)
+static inline bool
+nir_def_used_by_if(const nir_def *def)
{
- nir_dest dest = { { { NULL } } };
- return dest;
-}
-
-#define NIR_DEST_INIT nir_dest_init()
-
-#define nir_foreach_def(dest, reg) \
- list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)
+ nir_foreach_if_use(_, def)
+ return true;
-#define nir_foreach_def_safe(dest, reg) \
- list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
+ return false;
+}
-static inline nir_src
-nir_src_for_ssa(nir_ssa_def *def)
+static inline bool
+nir_def_only_used_by_if(const nir_def *def)
{
- nir_src src = NIR_SRC_INIT;
-
- src.is_ssa = true;
- src.ssa = def;
+ nir_foreach_use(_, def)
+ return false;
- return src;
+ return true;
}
static inline nir_src
-nir_src_for_reg(nir_register *reg)
+nir_src_for_ssa(nir_def *def)
{
nir_src src = NIR_SRC_INIT;
- src.is_ssa = false;
- src.reg.reg = reg;
- src.reg.indirect = NULL;
- src.reg.base_offset = 0;
+ src.ssa = def;
return src;
}
-static inline nir_dest
-nir_dest_for_reg(nir_register *reg)
-{
- nir_dest dest = NIR_DEST_INIT;
-
- dest.reg.reg = reg;
-
- return dest;
-}
-
static inline unsigned
nir_src_bit_size(nir_src src)
{
- return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
+ return src.ssa->bit_size;
}
static inline unsigned
nir_src_num_components(nir_src src)
{
- return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
+ return src.ssa->num_components;
}
static inline bool
nir_src_is_const(nir_src src)
{
- return src.is_ssa &&
- src.ssa->parent_instr->type == nir_instr_type_load_const;
+ return src.ssa->parent_instr->type == nir_instr_type_load_const;
}
static inline bool
nir_src_is_undef(nir_src src)
{
- return src.is_ssa &&
- src.ssa->parent_instr->type == nir_instr_type_ssa_undef;
+ return src.ssa->parent_instr->type == nir_instr_type_undef;
}
static inline bool
nir_src_is_divergent(nir_src src)
{
- return src.is_ssa ? src.ssa->divergent : src.reg.reg->divergent;
-}
-
-static inline unsigned
-nir_dest_bit_size(nir_dest dest)
-{
- return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
-}
-
-static inline unsigned
-nir_dest_num_components(nir_dest dest)
-{
- return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
-}
-
-static inline bool
-nir_dest_is_divergent(nir_dest dest)
-{
- return dest.is_ssa ? dest.ssa.divergent : dest.reg.reg->divergent;
+ return src.ssa->divergent;
}
/* Are all components the same, ie. .xxxx */
@@ -1027,33 +1207,12 @@ nir_is_sequential_comp_swizzle(uint8_t *swiz, unsigned nr_comp)
return true;
}
-void nir_src_copy(nir_src *dest, const nir_src *src);
-void nir_dest_copy(nir_dest *dest, const nir_dest *src);
-
-typedef struct {
+/***/
+typedef struct nir_alu_src {
/** Base source */
nir_src src;
/**
- * \name input modifiers
- */
- /*@{*/
- /**
- * For inputs interpreted as floating point, flips the sign bit. For
- * inputs interpreted as integers, performs the two's complement negation.
- */
- bool negate;
-
- /**
- * Clears the sign bit for floating point values, and computes the integer
- * absolute value for integers. Note that the negate modifier acts after
- * the absolute value modifier, therefore if both are set then all inputs
- * will become negative.
- */
- bool abs;
- /*@}*/
-
- /**
* For each input component, says which component of the register it is
* chosen from.
*
@@ -1065,33 +1224,14 @@ typedef struct {
uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;
-typedef struct {
- /** Base destination */
- nir_dest dest;
-
- /**
- * Saturate output modifier
- *
- * Only valid for opcodes that output floating-point numbers. Clamps the
- * output to between 0.0 and 1.0 inclusive.
- */
- bool saturate;
-
- /**
- * Write-mask
- *
- * Ignored if dest.is_ssa is true
- */
- unsigned write_mask : NIR_MAX_VEC_COMPONENTS;
-} nir_alu_dest;
-
/** NIR sized and unsized types
*
* The values in this enum are carefully chosen so that the sized type is
* just the unsized type OR the number of bits.
*/
-typedef enum PACKED {
- nir_type_invalid = 0, /* Not a valid type */
+/* clang-format off */
+typedef enum ENUM_PACKED {
+ nir_type_invalid = 0, /* Not a valid type */
nir_type_int = 2,
nir_type_uint = 4,
nir_type_bool = 6,
@@ -1114,8 +1254,9 @@ typedef enum PACKED {
nir_type_float32 = 32 | nir_type_float,
nir_type_float64 = 64 | nir_type_float,
} nir_alu_type;
+/* clang-format on */
-#define NIR_ALU_TYPE_SIZE_MASK 0x79
+#define NIR_ALU_TYPE_SIZE_MASK 0x79
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86
static inline unsigned
@@ -1130,60 +1271,8 @@ nir_alu_type_get_base_type(nir_alu_type type)
return (nir_alu_type)(type & NIR_ALU_TYPE_BASE_TYPE_MASK);
}
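
Since a sized type is the unsized base type OR'd with the bit width, both halves are recovered with the masks above; for example:

    assert(nir_alu_type_get_type_size(nir_type_float32) == 32);
    assert(nir_alu_type_get_base_type(nir_type_float32) == nir_type_float);
    assert((nir_type_float | 16) == nir_type_float16);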
-static inline nir_alu_type
-nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
-{
- switch (base_type) {
- case GLSL_TYPE_BOOL:
- return nir_type_bool1;
- break;
- case GLSL_TYPE_UINT:
- return nir_type_uint32;
- break;
- case GLSL_TYPE_INT:
- return nir_type_int32;
- break;
- case GLSL_TYPE_UINT16:
- return nir_type_uint16;
- break;
- case GLSL_TYPE_INT16:
- return nir_type_int16;
- break;
- case GLSL_TYPE_UINT8:
- return nir_type_uint8;
- case GLSL_TYPE_INT8:
- return nir_type_int8;
- case GLSL_TYPE_UINT64:
- return nir_type_uint64;
- break;
- case GLSL_TYPE_INT64:
- return nir_type_int64;
- break;
- case GLSL_TYPE_FLOAT:
- return nir_type_float32;
- break;
- case GLSL_TYPE_FLOAT16:
- return nir_type_float16;
- break;
- case GLSL_TYPE_DOUBLE:
- return nir_type_float64;
- break;
-
- case GLSL_TYPE_SAMPLER:
- case GLSL_TYPE_IMAGE:
- case GLSL_TYPE_ATOMIC_UINT:
- case GLSL_TYPE_STRUCT:
- case GLSL_TYPE_INTERFACE:
- case GLSL_TYPE_ARRAY:
- case GLSL_TYPE_VOID:
- case GLSL_TYPE_SUBROUTINE:
- case GLSL_TYPE_FUNCTION:
- case GLSL_TYPE_ERROR:
- return nir_type_invalid;
- }
-
- unreachable("unknown type");
-}
+nir_alu_type
+nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type);
static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
@@ -1191,128 +1280,172 @@ nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}
-static inline enum glsl_base_type
-nir_get_glsl_base_type_for_nir_type(nir_alu_type base_type)
-{
- switch (base_type) {
- case nir_type_bool1:
- return GLSL_TYPE_BOOL;
- case nir_type_uint32:
- return GLSL_TYPE_UINT;
- case nir_type_int32:
- return GLSL_TYPE_INT;
- case nir_type_uint16:
- return GLSL_TYPE_UINT16;
- case nir_type_int16:
- return GLSL_TYPE_INT16;
- case nir_type_uint8:
- return GLSL_TYPE_UINT8;
- case nir_type_int8:
- return GLSL_TYPE_INT8;
- case nir_type_uint64:
- return GLSL_TYPE_UINT64;
- case nir_type_int64:
- return GLSL_TYPE_INT64;
- case nir_type_float32:
- return GLSL_TYPE_FLOAT;
- case nir_type_float16:
- return GLSL_TYPE_FLOAT16;
- case nir_type_float64:
- return GLSL_TYPE_DOUBLE;
-
- default: unreachable("Not a sized nir_alu_type");
- }
-}
+enum glsl_base_type
+nir_get_glsl_base_type_for_nir_type(nir_alu_type base_type);
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
nir_rounding_mode rnd);
-static inline nir_op
-nir_op_vec(unsigned components)
-{
- switch (components) {
- case 1: return nir_op_mov;
- case 2: return nir_op_vec2;
- case 3: return nir_op_vec3;
- case 4: return nir_op_vec4;
- case 5: return nir_op_vec5;
- case 8: return nir_op_vec8;
- case 16: return nir_op_vec16;
- default: unreachable("bad component count");
+/**
+ * Atomic intrinsics perform different operations depending on the value of
+ * their atomic_op constant index. nir_atomic_op defines the operations.
+ */
+typedef enum {
+ nir_atomic_op_iadd,
+ nir_atomic_op_imin,
+ nir_atomic_op_umin,
+ nir_atomic_op_imax,
+ nir_atomic_op_umax,
+ nir_atomic_op_iand,
+ nir_atomic_op_ior,
+ nir_atomic_op_ixor,
+ nir_atomic_op_xchg,
+ nir_atomic_op_fadd,
+ nir_atomic_op_fmin,
+ nir_atomic_op_fmax,
+ nir_atomic_op_cmpxchg,
+ nir_atomic_op_fcmpxchg,
+ nir_atomic_op_inc_wrap,
+ nir_atomic_op_dec_wrap,
+} nir_atomic_op;
+
+static inline nir_alu_type
+nir_atomic_op_type(nir_atomic_op op)
+{
+ switch (op) {
+ case nir_atomic_op_imin:
+ case nir_atomic_op_imax:
+ return nir_type_int;
+
+ case nir_atomic_op_fadd:
+ case nir_atomic_op_fmin:
+ case nir_atomic_op_fmax:
+ case nir_atomic_op_fcmpxchg:
+ return nir_type_float;
+
+ case nir_atomic_op_iadd:
+ case nir_atomic_op_iand:
+ case nir_atomic_op_ior:
+ case nir_atomic_op_ixor:
+ case nir_atomic_op_xchg:
+ case nir_atomic_op_cmpxchg:
+ case nir_atomic_op_umin:
+ case nir_atomic_op_umax:
+ case nir_atomic_op_inc_wrap:
+ case nir_atomic_op_dec_wrap:
+ return nir_type_uint;
}
+
+ unreachable("Invalid nir_atomic_op");
}
+/** Returns nir_op_vec<num_components> or nir_op_mov if num_components == 1
+ *
+ * This is subtly different from nir_op_is_vec() which returns false for
+ * nir_op_mov. Returning nir_op_mov from nir_op_vec() when num_components == 1
+ * makes sense under the assumption that the num_components of the resulting
+ * nir_def will be the same as what is passed in here, because a
+ * single-component mov is effectively a vec1. However, if
+ * alu->def.num_components > 1, nir_op_mov has different semantics from
+ * nir_op_vec*, so code which detects "is this a vec?" typically needs to
+ * handle nir_op_mov separately from nir_op_vecN.
+ *
+ * In the unlikely case where you can handle nir_op_vecN and nir_op_mov
+ * together, use nir_op_is_vec_or_mov().
+ */
+nir_op
+nir_op_vec(unsigned num_components);
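
For instance:

    assert(nir_op_vec(4) == nir_op_vec4);
    assert(nir_op_vec(1) == nir_op_mov); /* a vec1 is just a mov */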
+
+/** Returns true if this op is one of nir_op_vec*
+ *
+ * Returns false for nir_op_mov. See nir_op_vec() for more details.
+ */
+bool
+nir_op_is_vec(nir_op op);
+
static inline bool
-nir_op_is_vec(nir_op op)
+nir_op_is_vec_or_mov(nir_op op)
{
- switch (op) {
- case nir_op_mov:
- case nir_op_vec2:
- case nir_op_vec3:
- case nir_op_vec4:
- case nir_op_vec5:
- case nir_op_vec8:
- case nir_op_vec16:
- return true;
- default:
- return false;
- }
+ return op == nir_op_mov || nir_op_is_vec(op);
+}
+
+static inline bool
+nir_is_float_control_signed_zero_preserve(unsigned execution_mode, unsigned bit_size)
+{
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_PRESERVE_FP64);
+}
+
+static inline bool
+nir_is_float_control_inf_preserve(unsigned execution_mode, unsigned bit_size)
+{
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_INF_PRESERVE_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_INF_PRESERVE_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_INF_PRESERVE_FP64);
+}
+
+static inline bool
+nir_is_float_control_nan_preserve(unsigned execution_mode, unsigned bit_size)
+{
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_NAN_PRESERVE_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_NAN_PRESERVE_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_NAN_PRESERVE_FP64);
}
static inline bool
nir_is_float_control_signed_zero_inf_nan_preserve(unsigned execution_mode, unsigned bit_size)
{
- return (16 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16) ||
- (32 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32) ||
- (64 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
}
static inline bool
nir_is_denorm_flush_to_zero(unsigned execution_mode, unsigned bit_size)
{
- return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16) ||
- (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) ||
- (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
}
static inline bool
nir_is_denorm_preserve(unsigned execution_mode, unsigned bit_size)
{
- return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) ||
- (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) ||
- (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64);
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64);
}
static inline bool
nir_is_rounding_mode_rtne(unsigned execution_mode, unsigned bit_size)
{
- return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
- (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
- (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
}
static inline bool
nir_is_rounding_mode_rtz(unsigned execution_mode, unsigned bit_size)
{
- return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
- (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
- (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
+ return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
+ (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
+ (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
}
static inline bool
nir_has_any_rounding_mode_rtz(unsigned execution_mode)
{
- return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
- (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
- (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
+ return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
+ (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
+ (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
}
static inline bool
nir_has_any_rounding_mode_rtne(unsigned execution_mode)
{
- return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
- (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
- (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
+ return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
+ (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
+ (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
}
static inline nir_rounding_mode
@@ -1354,6 +1487,19 @@ typedef enum {
* Operation is associative
*/
NIR_OP_IS_ASSOCIATIVE = (1 << 1),
+
+ /**
+ * Operation where src[0] is used to select src[1] on true or src[2] false.
+ * src[0] may be Boolean, or it may be another type used in an implicit
+ * comparison.
+ */
+ NIR_OP_IS_SELECTION = (1 << 2),
+
+ /**
+ * Operation where a screen-space derivative is taken of src[0]. Must not be
+ * moved into non-uniform control flow.
+ */
+ NIR_OP_IS_DERIVATIVE = (1 << 3),
} nir_op_algebraic_property;
/* vec16 is the widest ALU op in NIR, making the maximum number of inputs to an
* ALU instruction 16.
*/
*/
#define NIR_ALU_MAX_INPUTS NIR_MAX_VEC_COMPONENTS
+/***/
typedef struct nir_op_info {
/** Name of the NIR ALU opcode */
const char *name;
@@ -1402,9 +1549,7 @@ typedef struct nir_op_info {
uint8_t input_sizes[NIR_ALU_MAX_INPUTS];
/**
- * The type of vector that each input takes. Note that negate and
- * absolute value are only allowed on inputs with int or float type and
- * behave differently on the two.
+ * The type of vector that each input takes.
*/
nir_alu_type input_types[NIR_ALU_MAX_INPUTS];
@@ -1418,6 +1563,19 @@ typedef struct nir_op_info {
/** Metadata for each nir_op, indexed by opcode */
extern const nir_op_info nir_op_infos[nir_num_opcodes];
+static inline bool
+nir_op_is_selection(nir_op op)
+{
+ return (nir_op_infos[op].algebraic_properties & NIR_OP_IS_SELECTION) != 0;
+}
+
+static inline bool
+nir_op_is_derivative(nir_op op)
+{
+ return (nir_op_infos[op].algebraic_properties & NIR_OP_IS_DERIVATIVE) != 0;
+}
+
+/***/
typedef struct nir_alu_instr {
/** Base instruction */
nir_instr instr;
@@ -1433,93 +1591,50 @@ typedef struct nir_alu_instr {
* it must ensure that the resulting value is bit-for-bit identical to the
* original.
*/
- bool exact:1;
+ bool exact : 1;
/**
* Indicates that this instruction does not cause signed integer wrapping
* to occur, in the form of overflow or underflow.
*/
- bool no_signed_wrap:1;
+ bool no_signed_wrap : 1;
/**
* Indicates that this instruction does not cause unsigned integer wrapping
* to occur, in the form of overflow or underflow.
*/
- bool no_unsigned_wrap:1;
+ bool no_unsigned_wrap : 1;
/** Destination */
- nir_alu_dest dest;
+ nir_def def;
/** Sources
*
- * The size of the array is given by nir_op_info::num_inputs.
+ * The size of the array is given by :c:member:`nir_op_info.num_inputs`.
*/
nir_alu_src src[];
} nir_alu_instr;
void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src);
-void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src);
-bool nir_alu_instr_is_copy(nir_alu_instr *instr);
+nir_component_mask_t
+nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src);
+/**
+ * Get the number of channels used for a source
+ */
+unsigned
+nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
unsigned channel)
{
- if (nir_op_infos[instr->op].input_sizes[src] > 0)
- return channel < nir_op_infos[instr->op].input_sizes[src];
-
- return (instr->dest.write_mask >> channel) & 1;
+ return channel < nir_ssa_alu_instr_src_components(instr, src);
}
-static inline nir_component_mask_t
-nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
-{
- nir_component_mask_t read_mask = 0;
- for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
- if (!nir_alu_instr_channel_used(instr, src, c))
- continue;
-
- read_mask |= (1 << instr->src[src].swizzle[c]);
- }
- return read_mask;
-}
-
-/**
- * Get the number of channels used for a source
- */
-static inline unsigned
-nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
-{
- if (nir_op_infos[instr->op].input_sizes[src] > 0)
- return nir_op_infos[instr->op].input_sizes[src];
-
- return nir_dest_num_components(instr->dest.dest);
-}
-
-static inline bool
-nir_alu_instr_is_comparison(const nir_alu_instr *instr)
-{
- switch (instr->op) {
- case nir_op_flt:
- case nir_op_fge:
- case nir_op_feq:
- case nir_op_fneu:
- case nir_op_ilt:
- case nir_op_ult:
- case nir_op_ige:
- case nir_op_uge:
- case nir_op_ieq:
- case nir_op_ine:
- case nir_op_i2b1:
- case nir_op_f2b1:
- case nir_op_inot:
- return true;
- default:
- return false;
- }
-}
+bool
+nir_alu_instr_is_comparison(const nir_alu_instr *instr);
bool nir_const_value_negative_equal(nir_const_value c1, nir_const_value c2,
nir_alu_type full_type);
@@ -1574,6 +1689,7 @@ typedef struct {
union {
struct {
nir_src index;
+ bool in_bounds;
} arr;
struct {
@@ -1588,9 +1704,15 @@ typedef struct {
};
/** Destination to store the resulting "pointer" */
- nir_dest dest;
+ nir_def def;
} nir_deref_instr;
+/**
+ * Returns true if the cast is trivial, i.e. the source and destination type is
+ * the same.
+ */
+bool nir_deref_cast_is_trivial(nir_deref_instr *cast);
+
/** Returns true if deref might have one of the given modes
*
* For multi-mode derefs, this returns true if any of the possible modes for
@@ -1715,7 +1837,15 @@ nir_deref_instr_get_variable(const nir_deref_instr *instr)
bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
bool nir_deref_instr_is_known_out_of_bounds(nir_deref_instr *instr);
-bool nir_deref_instr_has_complex_use(nir_deref_instr *instr);
+
+typedef enum {
+ nir_deref_instr_has_complex_use_allow_memcpy_src = (1 << 0),
+ nir_deref_instr_has_complex_use_allow_memcpy_dst = (1 << 1),
+ nir_deref_instr_has_complex_use_allow_atomics = (1 << 2),
+} nir_deref_instr_has_complex_use_options;
+
+bool nir_deref_instr_has_complex_use(nir_deref_instr *instr,
+ nir_deref_instr_has_complex_use_options opts);
bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
@@ -1732,7 +1862,7 @@ typedef struct {
#include "nir_intrinsics.h"
-#define NIR_INTRINSIC_MAX_CONST_INDEX 5
+#define NIR_INTRINSIC_MAX_CONST_INDEX 8
/** Represents an intrinsic
*
@@ -1765,7 +1895,7 @@ typedef struct {
nir_intrinsic_op intrinsic;
- nir_dest dest;
+ nir_def def;
/** number of components if this is a vectorized intrinsic
*
@@ -1779,42 +1909,35 @@ typedef struct {
int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
+ /* a variable name associated with this instr; cannot be modified or freed */
+ const char *name;
+
nir_src src[];
} nir_intrinsic_instr;
static inline nir_variable *
-nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
+nir_intrinsic_get_var(const nir_intrinsic_instr *intrin, unsigned i)
{
return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
}
typedef enum {
/* Memory ordering. */
- NIR_MEMORY_ACQUIRE = 1 << 0,
- NIR_MEMORY_RELEASE = 1 << 1,
- NIR_MEMORY_ACQ_REL = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,
+ NIR_MEMORY_ACQUIRE = 1 << 0,
+ NIR_MEMORY_RELEASE = 1 << 1,
+ NIR_MEMORY_ACQ_REL = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,
/* Memory visibility operations. */
NIR_MEMORY_MAKE_AVAILABLE = 1 << 2,
- NIR_MEMORY_MAKE_VISIBLE = 1 << 3,
+ NIR_MEMORY_MAKE_VISIBLE = 1 << 3,
} nir_memory_semantics;
-typedef enum {
- NIR_SCOPE_NONE,
- NIR_SCOPE_INVOCATION,
- NIR_SCOPE_SUBGROUP,
- NIR_SCOPE_SHADER_CALL,
- NIR_SCOPE_WORKGROUP,
- NIR_SCOPE_QUEUE_FAMILY,
- NIR_SCOPE_DEVICE,
-} nir_scope;
-
/**
- * \name NIR intrinsics semantic flags
+ * NIR intrinsics semantic flags
*
* information about what the compiler can do with the intrinsics.
*
- * \sa nir_intrinsic_info::flags
+ * :c:member:`nir_intrinsic_info.flags`
*/
typedef enum {
/**
@@ -1839,23 +1962,58 @@ typedef enum {
#define NIR_ALIGN_MUL_MAX 0x40000000
typedef struct nir_io_semantics {
- unsigned location:7; /* gl_vert_attrib, gl_varying_slot, or gl_frag_result */
- unsigned num_slots:6; /* max 32, may be pessimistic with const indexing */
- unsigned dual_source_blend_index:1;
- unsigned fb_fetch_output:1; /* for GL_KHR_blend_equation_advanced */
- unsigned gs_streams:8; /* xxyyzzww: 2-bit stream index for each component */
- unsigned medium_precision:1; /* GLSL mediump qualifier */
- unsigned per_view:1;
- unsigned high_16bits:1; /* whether accessing low or high half of the slot */
- unsigned _pad:6;
+ unsigned location : 7; /* gl_vert_attrib, gl_varying_slot, or gl_frag_result */
+ unsigned num_slots : 6; /* max 32, may be pessimistic with const indexing */
+ unsigned dual_source_blend_index : 1;
+ unsigned fb_fetch_output : 1; /* for GL_KHR_blend_equation_advanced */
+ unsigned gs_streams : 8; /* xxyyzzww: 2-bit stream index for each component */
+ unsigned medium_precision : 1; /* GLSL mediump qualifier */
+ unsigned per_view : 1;
+ unsigned high_16bits : 1; /* whether accessing low or high half of the slot */
+ unsigned invariant : 1; /* The variable has the invariant flag set */
+ unsigned high_dvec2 : 1; /* whether accessing the high half of dvec3/dvec4 */
+ /* CLIP_DISTn, LAYER, VIEWPORT, and TESS_LEVEL_* have up to 3 uses:
+ * - an output consumed by the next stage
+ * - a system value output affecting fixed-func hardware, e.g. the clipper
+ * - a transform feedback output written to memory
+ * The following fields disable the first two. Transform feedback is disabled
+ * by transform feedback info.
+ */
+ unsigned no_varying : 1; /* whether this output isn't consumed by the next stage */
+ unsigned no_sysval_output : 1; /* whether this system value output has no
+ effect due to current pipeline states */
+ unsigned interp_explicit_strict : 1; /* preserve original vertex order */
+ unsigned per_primitive : 1; /* Per-primitive FS input (when FS is used with a mesh shader).
+ Note that per-primitive MS outputs are implied by
+ using a dedicated intrinsic, store_per_primitive_output. */
} nir_io_semantics;
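+
+/* For illustration (hypothetical helper, not part of NIR): gs_streams packs
+ * a 2-bit stream index per component as xxyyzzww, so the stream of a single
+ * component can be decoded as follows.
+ */
+static inline unsigned
+nir_io_semantics_gs_stream(nir_io_semantics sem, unsigned comp)
+{
+   assert(comp < 4);
+   /* Component 0 lives in bits 0-1, component 1 in bits 2-3, and so on. */
+   return (sem.gs_streams >> (2 * comp)) & 0x3;
+}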
+/* Transform feedback info for 2 outputs. nir_intrinsic_store_output contains
+ * this structure twice to support up to 4 outputs. The structure is limited
+ * to 32 bits because it's stored in nir_intrinsic_instr::const_index[].
+ */
+typedef struct nir_io_xfb {
+ struct {
+ /* start_component is equal to the index of out[]; add 2 for io_xfb2 */
+ /* start_component is not relative to nir_intrinsic_component */
+ /* get the stream index from nir_io_semantics */
+ uint8_t num_components : 4; /* max 4; if this is 0, xfb is disabled */
+ uint8_t buffer : 4; /* buffer index, max 3 */
+ uint8_t offset; /* transform feedback buffer offset in dwords,
+ max (1K - 4) bytes */
+ } out[2];
+} nir_io_xfb;
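+
+/* For illustration (hypothetical helper, not part of NIR): out[i] covers
+ * num_components components starting at component i (i + 2 for the second
+ * nir_io_xfb), and offset counts dwords, so the byte offset is:
+ */
+static inline unsigned
+nir_io_xfb_byte_offset(nir_io_xfb xfb, unsigned i)
+{
+   assert(i < 2 && xfb.out[i].num_components != 0); /* 0 means xfb disabled */
+   return xfb.out[i].offset * 4; /* dwords -> bytes */
+}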
+
+unsigned
+nir_instr_xfb_write_mask(nir_intrinsic_instr *instr);
+
#define NIR_INTRINSIC_MAX_INPUTS 11
typedef struct {
const char *name;
- uint8_t num_srcs; /** < number of register/SSA inputs */
+ /** number of register/SSA inputs */
+ uint8_t num_srcs;
/** number of components of each input register
*
@@ -1902,57 +2060,23 @@ typedef struct {
extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
-static inline unsigned
-nir_intrinsic_src_components(const nir_intrinsic_instr *intr, unsigned srcn)
-{
- const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
- assert(srcn < info->num_srcs);
- if (info->src_components[srcn] > 0)
- return info->src_components[srcn];
- else if (info->src_components[srcn] == 0)
- return intr->num_components;
- else
- return nir_src_num_components(intr->src[srcn]);
-}
+unsigned
+nir_intrinsic_src_components(const nir_intrinsic_instr *intr, unsigned srcn);
-static inline unsigned
-nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
-{
- const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
- if (!info->has_dest)
- return 0;
- else if (info->dest_components)
- return info->dest_components;
- else
- return intr->num_components;
-}
+unsigned
+nir_intrinsic_dest_components(nir_intrinsic_instr *intr);
+
+nir_alu_type
+nir_intrinsic_instr_src_type(const nir_intrinsic_instr *intrin, unsigned src);
+
+nir_alu_type
+nir_intrinsic_instr_dest_type(const nir_intrinsic_instr *intrin);
/**
* Helper to copy const_index[] from src to dst, without assuming they
* match in order.
*/
-static inline void
-nir_intrinsic_copy_const_indices(nir_intrinsic_instr *dst, nir_intrinsic_instr *src)
-{
- if (src->intrinsic == dst->intrinsic) {
- memcpy(dst->const_index, src->const_index, sizeof(dst->const_index));
- return;
- }
-
- const nir_intrinsic_info *src_info = &nir_intrinsic_infos[src->intrinsic];
- const nir_intrinsic_info *dst_info = &nir_intrinsic_infos[dst->intrinsic];
-
- for (unsigned i = 0; i < NIR_INTRINSIC_NUM_INDEX_FLAGS; i++) {
- if (src_info->index_map[i] == 0)
- continue;
-
- /* require that dst instruction also uses the same const_index[]: */
- assert(dst_info->index_map[i] > 0);
-
- dst->const_index[dst_info->index_map[i] - 1] =
- src->const_index[src_info->index_map[i] - 1];
- }
-}
+void nir_intrinsic_copy_const_indices(nir_intrinsic_instr *dst, nir_intrinsic_instr *src);
#include "nir_intrinsics_indices.h"
@@ -1966,6 +2090,22 @@ nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
nir_intrinsic_set_align_offset(intrin, align_offset);
}
+/** Returns a simple alignment for an align_mul/offset pair
+ *
+ * This helper converts from the full mul+offset alignment scheme used by
+ * most NIR intrinsics to a simple alignment. The returned value is the
+ * largest power of two which divides both align_mul and align_offset.
+ * For any offset X which satisfies the complex alignment described by
+ * align_mul/offset, X % align == 0.
+ */
+static inline uint32_t
+nir_combined_align(uint32_t align_mul, uint32_t align_offset)
+{
+ assert(util_is_power_of_two_nonzero(align_mul));
+ assert(align_offset < align_mul);
+ return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
+}
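+
+/* Worked example: with align_mul = 16 and align_offset = 4, the possible
+ * offsets are 16*k + 4 = 4, 20, 36, ... Every such offset is divisible by 4
+ * but not always by 8, so nir_combined_align(16, 4) == 1 << (ffs(4) - 1) == 4.
+ * With align_offset == 0, every offset is a multiple of align_mul, and the
+ * combined alignment is align_mul itself.
+ */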
+
/** Returns a simple alignment for a load/store intrinsic offset
*
* Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
@@ -1976,10 +2116,8 @@ nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
static inline unsigned
nir_intrinsic_align(const nir_intrinsic_instr *intrin)
{
- const unsigned align_mul = nir_intrinsic_align_mul(intrin);
- const unsigned align_offset = nir_intrinsic_align_offset(intrin);
- assert(align_offset < align_mul);
- return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
+ return nir_combined_align(nir_intrinsic_align_mul(intrin),
+ nir_intrinsic_align_offset(intrin));
}
static inline bool
@@ -1994,12 +2132,16 @@ nir_image_intrinsic_coord_components(const nir_intrinsic_instr *instr);
/* Converts a image_deref_* intrinsic into a image_* one */
void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
- nir_ssa_def *handle, bool bindless);
+ nir_def *handle, bool bindless);
/* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
static inline bool
nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
{
+ if (nir_intrinsic_has_access(instr) &&
+ nir_intrinsic_access(instr) & ACCESS_VOLATILE)
+ return false;
+
if (instr->intrinsic == nir_intrinsic_load_deref) {
nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
return nir_deref_mode_is_in_set(deref, nir_var_read_only_modes) ||
@@ -2007,7 +2149,9 @@ nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
} else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
instr->intrinsic == nir_intrinsic_bindless_image_load ||
instr->intrinsic == nir_intrinsic_image_deref_load ||
- instr->intrinsic == nir_intrinsic_image_load) {
+ instr->intrinsic == nir_intrinsic_image_load ||
+ instr->intrinsic == nir_intrinsic_ald_nv ||
+ instr->intrinsic == nir_intrinsic_load_sysval_nv) {
return nir_intrinsic_access(instr) & ACCESS_CAN_REORDER;
} else {
const nir_intrinsic_info *info =
@@ -2019,11 +2163,27 @@ nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
bool nir_intrinsic_writes_external_memory(const nir_intrinsic_instr *instr);
+static inline bool
+nir_intrinsic_is_ray_query(nir_intrinsic_op intrinsic)
+{
+ switch (intrinsic) {
+ case nir_intrinsic_rq_confirm_intersection:
+ case nir_intrinsic_rq_generate_intersection:
+ case nir_intrinsic_rq_initialize:
+ case nir_intrinsic_rq_load:
+ case nir_intrinsic_rq_proceed:
+ case nir_intrinsic_rq_terminate:
+ return true;
+ default:
+ return false;
+ }
+}
+
/** Texture instruction source type */
-typedef enum {
+typedef enum nir_tex_src_type {
/** Texture coordinate
*
- * Must have nir_tex_instr::coord_components components.
+ * Must have :c:member:`nir_tex_instr.coord_components` components.
*/
nir_tex_src_coord,
@@ -2044,7 +2204,8 @@ typedef enum {
* Interpolation happens after this conversion so the actual result may be
* anywhere in the range [0.0, 1.0].
*
- * Only valid if nir_tex_instr::is_shadow and must be a float scalar.
+ * Only valid if :c:member:`nir_tex_instr.is_shadow` and must be a float
+ * scalar.
*/
nir_tex_src_comparator,
@@ -2093,17 +2254,20 @@ typedef enum {
/** Texture index offset
*
- * This is added to nir_tex_instr::texture_index. Unless
- * nir_tex_instr::texture_non_uniform is set, this is guaranteed to be
- * dynamically uniform.
+ * This is added to :c:member:`nir_tex_instr.texture_index`. Unless
+ * :c:member:`nir_tex_instr.texture_non_uniform` is set, this is guaranteed
+ * to be dynamically uniform.
*/
nir_tex_src_texture_offset,
/** Dynamically uniform sampler index offset
*
- * This is added to nir_tex_instr::sampler_index. Unless
- * nir_tex_instr::sampler_non_uniform is set, this is guaranteed to be
- * dynamically uniform.
+ * This is added to :c:member:`nir_tex_instr.sampler_index`. Unless
+ * :c:member:`nir_tex_instr.sampler_non_uniform` is set, this is guaranteed to be
+ * dynamically uniform. This should not be present until GLSL ES 3.20, GLSL
+ * 4.00, or ARB_gpu_shader5, because the GLSL ES 3.10 and GLSL 3.30 specs say
+ * of samplers: "When aggregated into arrays within a shader, samplers can
+ * only be indexed with a constant integral expression."
*/
nir_tex_src_sampler_offset,
@@ -2138,7 +2302,7 @@ typedef enum {
/**
* Backend-specific vec4 tex src argument.
*
- * Can be used to have NIR optimization (copy propagation, lower_vec_to_movs)
+ * Can be used to have NIR optimization (copy propagation, lower_vec_to_regs)
* apply to the packing of the tex srcs. This lowering must only happen
* after nir_lower_tex().
*
@@ -2154,7 +2318,7 @@ typedef enum {
} nir_tex_src_type;
/** A texture instruction source */
-typedef struct {
+typedef struct nir_tex_src {
/** Base source */
nir_src src;
@@ -2163,30 +2327,55 @@ typedef struct {
} nir_tex_src;
/** Texture instruction opcode */
-typedef enum {
- nir_texop_tex, /**< Regular texture look-up */
- nir_texop_txb, /**< Texture look-up with LOD bias */
- nir_texop_txl, /**< Texture look-up with explicit LOD */
- nir_texop_txd, /**< Texture look-up with partial derivatives */
- nir_texop_txf, /**< Texel fetch with explicit LOD */
- nir_texop_txf_ms, /**< Multisample texture fetch */
- nir_texop_txf_ms_fb, /**< Multisample texture fetch from framebuffer */
- nir_texop_txf_ms_mcs_intel, /**< Multisample compression value fetch */
- nir_texop_txs, /**< Texture size */
- nir_texop_lod, /**< Texture lod query */
- nir_texop_tg4, /**< Texture gather */
- nir_texop_query_levels, /**< Texture levels query */
- nir_texop_texture_samples, /**< Texture samples query */
- nir_texop_samples_identical, /**< Query whether all samples are definitely
- * identical.
- */
- nir_texop_tex_prefetch, /**< Regular texture look-up, eligible for pre-dispatch */
- nir_texop_fragment_fetch, /**< Multisample fragment color texture fetch */
- nir_texop_fragment_mask_fetch,/**< Multisample fragment mask texture fetch */
+typedef enum nir_texop {
+ /** Regular texture look-up */
+ nir_texop_tex,
+ /** Texture look-up with LOD bias */
+ nir_texop_txb,
+ /** Texture look-up with explicit LOD */
+ nir_texop_txl,
+ /** Texture look-up with partial derivatives */
+ nir_texop_txd,
+ /** Texel fetch with explicit LOD */
+ nir_texop_txf,
+ /** Multisample texture fetch */
+ nir_texop_txf_ms,
+ /** Multisample texture fetch from framebuffer */
+ nir_texop_txf_ms_fb,
+ /** Multisample compression value fetch */
+ nir_texop_txf_ms_mcs_intel,
+ /** Texture size */
+ nir_texop_txs,
+ /** Texture lod query */
+ nir_texop_lod,
+ /** Texture gather */
+ nir_texop_tg4,
+ /** Texture levels query */
+ nir_texop_query_levels,
+ /** Texture samples query */
+ nir_texop_texture_samples,
+ /** Query whether all samples are definitely identical. */
+ nir_texop_samples_identical,
+ /** Regular texture look-up, eligible for pre-dispatch */
+ nir_texop_tex_prefetch,
+ /** Multisample fragment color texture fetch */
+ nir_texop_fragment_fetch_amd,
+ /** Multisample fragment mask texture fetch */
+ nir_texop_fragment_mask_fetch_amd,
+ /** Returns a buffer or image descriptor. */
+ nir_texop_descriptor_amd,
+ /** Returns a sampler descriptor. */
+ nir_texop_sampler_descriptor_amd,
+ /** Returns the sampler's LOD bias */
+ nir_texop_lod_bias_agx,
+ /** Maps to TXQ.DIMENSION */
+ nir_texop_hdr_dim_nv,
+ /** Maps to TXQ.TEXTURE_TYPE */
+ nir_texop_tex_type_nv,
} nir_texop;
/** Represents a texture instruction */
-typedef struct {
+typedef struct nir_tex_instr {
/** Base instruction */
nir_instr instr;
@@ -2215,11 +2404,11 @@ typedef struct {
nir_texop op;
/** Destination */
- nir_dest dest;
+ nir_def def;
/** Array of sources
*
- * This array has nir_tex_instr::num_srcs elements
+ * This array has :c:member:`nir_tex_instr.num_srcs` elements
*/
nir_tex_src *src;
@@ -2260,13 +2449,27 @@ typedef struct {
/** Validation needs to know this for gradient component count */
unsigned array_is_lowered_cube : 1;
+ /** True if this tg4 instruction has an implicit LOD or LOD bias, instead of using level 0 */
+ unsigned is_gather_implicit_lod : 1;
+
/** Gather offsets */
int8_t tg4_offsets[4][2];
/** True if the texture index or handle is not dynamically uniform */
bool texture_non_uniform;
- /** True if the sampler index or handle is not dynamically uniform */
+ /** True if the sampler index or handle is not dynamically uniform.
+ *
+ * This may be set when VK_EXT_descriptor_indexing is supported and the
+ * appropriate capability is enabled.
+ *
+ * This should always be false in GLSL (GLSL ES 3.20 says "When aggregated
+ * into arrays within a shader, opaque types can only be indexed with a
+ * dynamically uniform integral expression", and GLSL 4.60 says "When
+ * aggregated into arrays within a shader, [texture, sampler, and
+ * samplerShadow] types can only be indexed with a dynamically uniform
+ * expression, or texture lookup will result in undefined values.").
+ */
bool sampler_non_uniform;
/** The texture index
@@ -2280,6 +2483,7 @@ typedef struct {
*
* The following operations do not require a sampler and, as such, this
* field should be ignored:
+ *
* - nir_texop_txf
* - nir_texop_txf_ms
* - nir_texop_txs
@@ -2291,6 +2495,12 @@ typedef struct {
* then the sampler index is given by sampler_index + sampler_offset.
*/
unsigned sampler_index;
+
+ /* Back-end specific flags, intended to be used in combination with
+ * nir_tex_src_backend1/2 to provide additional hw-specific information
+ * to the back-end compiler.
+ */
+ uint32_t backend_flags;
} nir_tex_instr;
/**
@@ -2299,23 +2509,9 @@ typedef struct {
 * Note that the specific hw/driver backend could still require a sampler
 * object/configuration packet in any case, for some other reason.
*
- * @see nir_tex_instr::sampler_index.
+ * See also :c:member:`nir_tex_instr.sampler_index`.
*/
-static inline bool
-nir_tex_instr_need_sampler(const nir_tex_instr *instr)
-{
- switch (instr->op) {
- case nir_texop_txf:
- case nir_texop_txf_ms:
- case nir_texop_txs:
- case nir_texop_query_levels:
- case nir_texop_texture_samples:
- case nir_texop_samples_identical:
- return false;
- default:
- return true;
- }
-}
+bool nir_tex_instr_need_sampler(const nir_tex_instr *instr);
/** Returns the number of components returned by this nir_tex_instr
*
@@ -2323,52 +2519,8 @@ nir_tex_instr_need_sampler(const nir_tex_instr *instr)
* about how many components a particular texture op returns. This does not
* include the sparse residency code.
*/
-static inline unsigned
-nir_tex_instr_result_size(const nir_tex_instr *instr)
-{
- switch (instr->op) {
- case nir_texop_txs: {
- unsigned ret;
- switch (instr->sampler_dim) {
- case GLSL_SAMPLER_DIM_1D:
- case GLSL_SAMPLER_DIM_BUF:
- ret = 1;
- break;
- case GLSL_SAMPLER_DIM_2D:
- case GLSL_SAMPLER_DIM_CUBE:
- case GLSL_SAMPLER_DIM_MS:
- case GLSL_SAMPLER_DIM_RECT:
- case GLSL_SAMPLER_DIM_EXTERNAL:
- case GLSL_SAMPLER_DIM_SUBPASS:
- ret = 2;
- break;
- case GLSL_SAMPLER_DIM_3D:
- ret = 3;
- break;
- default:
- unreachable("not reached");
- }
- if (instr->is_array)
- ret++;
- return ret;
- }
-
- case nir_texop_lod:
- return 2;
-
- case nir_texop_texture_samples:
- case nir_texop_query_levels:
- case nir_texop_samples_identical:
- case nir_texop_fragment_mask_fetch:
- return 1;
-
- default:
- if (instr->is_shadow && instr->is_new_style_shadow)
- return 1;
-
- return 4;
- }
-}
+unsigned
+nir_tex_instr_result_size(const nir_tex_instr *instr);
/**
* Returns the destination size of this nir_tex_instr including the sparse
@@ -2385,149 +2537,27 @@ nir_tex_instr_dest_size(const nir_tex_instr *instr)
* Returns true if this texture operation queries something about the texture
* rather than actually sampling it.
*/
-static inline bool
-nir_tex_instr_is_query(const nir_tex_instr *instr)
-{
- switch (instr->op) {
- case nir_texop_txs:
- case nir_texop_lod:
- case nir_texop_texture_samples:
- case nir_texop_query_levels:
- return true;
- case nir_texop_tex:
- case nir_texop_txb:
- case nir_texop_txl:
- case nir_texop_txd:
- case nir_texop_txf:
- case nir_texop_txf_ms:
- case nir_texop_txf_ms_fb:
- case nir_texop_txf_ms_mcs_intel:
- case nir_texop_tg4:
- return false;
- default:
- unreachable("Invalid texture opcode");
- }
-}
+bool
+nir_tex_instr_is_query(const nir_tex_instr *instr);
/** Returns true if this texture instruction does implicit derivatives
*
* This is important as there are extra control-flow rules around derivatives
* and texture instructions which perform them implicitly.
*/
-static inline bool
-nir_tex_instr_has_implicit_derivative(const nir_tex_instr *instr)
-{
- switch (instr->op) {
- case nir_texop_tex:
- case nir_texop_txb:
- case nir_texop_lod:
- return true;
- default:
- return false;
- }
-}
+bool
+nir_tex_instr_has_implicit_derivative(const nir_tex_instr *instr);
/** Returns the ALU type of the given texture instruction source */
-static inline nir_alu_type
-nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
-{
- switch (instr->src[src].src_type) {
- case nir_tex_src_coord:
- switch (instr->op) {
- case nir_texop_txf:
- case nir_texop_txf_ms:
- case nir_texop_txf_ms_fb:
- case nir_texop_txf_ms_mcs_intel:
- case nir_texop_samples_identical:
- return nir_type_int;
-
- default:
- return nir_type_float;
- }
-
- case nir_tex_src_lod:
- switch (instr->op) {
- case nir_texop_txs:
- case nir_texop_txf:
- case nir_texop_txf_ms:
- return nir_type_int;
-
- default:
- return nir_type_float;
- }
-
- case nir_tex_src_projector:
- case nir_tex_src_comparator:
- case nir_tex_src_bias:
- case nir_tex_src_min_lod:
- case nir_tex_src_ddx:
- case nir_tex_src_ddy:
- case nir_tex_src_backend1:
- case nir_tex_src_backend2:
- return nir_type_float;
-
- case nir_tex_src_offset:
- case nir_tex_src_ms_index:
- case nir_tex_src_plane:
- return nir_type_int;
-
- case nir_tex_src_ms_mcs_intel:
- case nir_tex_src_texture_deref:
- case nir_tex_src_sampler_deref:
- case nir_tex_src_texture_offset:
- case nir_tex_src_sampler_offset:
- case nir_tex_src_texture_handle:
- case nir_tex_src_sampler_handle:
- return nir_type_uint;
-
- case nir_num_tex_src_types:
- unreachable("nir_num_tex_src_types is not a valid source type");
- }
-
- unreachable("Invalid texture source type");
-}
+nir_alu_type
+nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src);
/**
* Returns the number of components required by the given texture instruction
* source
*/
-static inline unsigned
-nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
-{
- if (instr->src[src].src_type == nir_tex_src_coord)
- return instr->coord_components;
-
- /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs_intel */
- if (instr->src[src].src_type == nir_tex_src_ms_mcs_intel)
- return 4;
-
- if (instr->src[src].src_type == nir_tex_src_ddx ||
- instr->src[src].src_type == nir_tex_src_ddy) {
-
- if (instr->is_array && !instr->array_is_lowered_cube)
- return instr->coord_components - 1;
- else
- return instr->coord_components;
- }
-
- /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
- * the offset, since a cube maps to a single face.
- */
- if (instr->src[src].src_type == nir_tex_src_offset) {
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
- return 2;
- else if (instr->is_array)
- return instr->coord_components - 1;
- else
- return instr->coord_components;
- }
-
- if (instr->src[src].src_type == nir_tex_src_backend1 ||
- instr->src[src].src_type == nir_tex_src_backend2)
- return nir_src_num_components(instr->src[src].src);
-
- return 1;
-}
+unsigned
+nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src);
/**
* Returns the index of the texture instruction source with the given
@@ -2538,7 +2568,7 @@ nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
{
for (unsigned i = 0; i < instr->num_srcs; i++)
if (instr->src[i].src_type == type)
- return (int) i;
+ return (int)i;
return -1;
}
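
/* For illustration (hypothetical helper, not part of NIR): the usual pattern
 * is to probe for an optional source and fall back when it is absent, e.g.
 * for an explicit LOD.
 */
static inline nir_def *
nir_tex_instr_get_lod_sketch(const nir_tex_instr *instr)
{
   int idx = nir_tex_instr_src_index(instr, nir_tex_src_lod);
   /* NULL when the instruction carries no explicit LOD source. */
   return idx >= 0 ? instr->src[idx].src.ssa : NULL;
}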
@@ -2546,7 +2576,7 @@ nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
/** Adds a source to a texture instruction */
void nir_tex_instr_add_src(nir_tex_instr *tex,
nir_tex_src_type src_type,
- nir_src src);
+ nir_def *src);
/** Removes a source from a texture instruction */
void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
@@ -2556,7 +2586,7 @@ bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
typedef struct {
nir_instr instr;
- nir_ssa_def def;
+ nir_def def;
nir_const_value value[];
} nir_load_const_instr;
@@ -2622,8 +2652,8 @@ typedef struct {
typedef struct {
nir_instr instr;
- nir_ssa_def def;
-} nir_ssa_undef_instr;
+ nir_def def;
+} nir_undef_instr;
typedef struct {
struct exec_node node;
@@ -2642,9 +2672,10 @@ typedef struct {
typedef struct {
nir_instr instr;
- struct exec_list srcs; /** < list of nir_phi_src */
+ /** list of nir_phi_src */
+ struct exec_list srcs;
- nir_dest dest;
+ nir_def def;
} nir_phi_instr;
static inline nir_phi_src *
@@ -2661,8 +2692,13 @@ nir_phi_get_src_from_block(nir_phi_instr *phi, struct nir_block *block)
typedef struct {
struct exec_node node;
+ bool src_is_reg;
+ bool dest_is_reg;
nir_src src;
- nir_dest dest;
+ union {
+ nir_def def;
+ nir_src reg;
+ } dest;
} nir_parallel_copy_entry;
#define nir_foreach_parallel_copy_entry(entry, pcopy) \
@@ -2693,101 +2729,115 @@ NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
type, nir_instr_type_load_const)
-NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
- type, nir_instr_type_ssa_undef)
+NIR_DEFINE_CAST(nir_instr_as_undef, nir_instr, nir_undef_instr, instr,
+ type, nir_instr_type_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
nir_parallel_copy_instr, instr,
type, nir_instr_type_parallel_copy)
+#define NIR_DEFINE_SRC_AS_CONST(type, suffix) \
+ static inline type \
+ nir_src_comp_as_##suffix(nir_src src, unsigned comp) \
+ { \
+ assert(nir_src_is_const(src)); \
+ nir_load_const_instr *load = \
+ nir_instr_as_load_const(src.ssa->parent_instr); \
+ assert(comp < load->def.num_components); \
+ return nir_const_value_as_##suffix(load->value[comp], \
+ load->def.bit_size); \
+ } \
+ \
+ static inline type \
+ nir_src_as_##suffix(nir_src src) \
+ { \
+ assert(nir_src_num_components(src) == 1); \
+ return nir_src_comp_as_##suffix(src, 0); \
+ }
-#define NIR_DEFINE_SRC_AS_CONST(type, suffix) \
-static inline type \
-nir_src_comp_as_##suffix(nir_src src, unsigned comp) \
-{ \
- assert(nir_src_is_const(src)); \
- nir_load_const_instr *load = \
- nir_instr_as_load_const(src.ssa->parent_instr); \
- assert(comp < load->def.num_components); \
- return nir_const_value_as_##suffix(load->value[comp], \
- load->def.bit_size); \
-} \
- \
-static inline type \
-nir_src_as_##suffix(nir_src src) \
-{ \
- assert(nir_src_num_components(src) == 1); \
- return nir_src_comp_as_##suffix(src, 0); \
-}
-
-NIR_DEFINE_SRC_AS_CONST(int64_t, int)
-NIR_DEFINE_SRC_AS_CONST(uint64_t, uint)
-NIR_DEFINE_SRC_AS_CONST(bool, bool)
-NIR_DEFINE_SRC_AS_CONST(double, float)
+NIR_DEFINE_SRC_AS_CONST(int64_t, int)
+NIR_DEFINE_SRC_AS_CONST(uint64_t, uint)
+NIR_DEFINE_SRC_AS_CONST(bool, bool)
+NIR_DEFINE_SRC_AS_CONST(double, float)
#undef NIR_DEFINE_SRC_AS_CONST
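
/* For illustration: the macro above expands to helpers such as
 * nir_src_as_uint() and nir_src_comp_as_uint(), which must only be called on
 * constant sources, so a typical guarded use is:
 *
 *    if (nir_src_is_const(intrin->src[1]))
 *       offset = nir_src_as_uint(intrin->src[1]);
 */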
-
typedef struct {
- nir_ssa_def *def;
+ nir_def *def;
unsigned comp;
-} nir_ssa_scalar;
+} nir_scalar;
static inline bool
-nir_ssa_scalar_is_const(nir_ssa_scalar s)
+nir_scalar_is_const(nir_scalar s)
{
return s.def->parent_instr->type == nir_instr_type_load_const;
}
+static inline bool
+nir_scalar_is_undef(nir_scalar s)
+{
+ return s.def->parent_instr->type == nir_instr_type_undef;
+}
+
static inline nir_const_value
-nir_ssa_scalar_as_const_value(nir_ssa_scalar s)
+nir_scalar_as_const_value(nir_scalar s)
{
assert(s.comp < s.def->num_components);
nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
return load->value[s.comp];
}
-#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
-static inline type \
-nir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \
-{ \
- return nir_const_value_as_##suffix( \
- nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
-}
+#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
+ static inline type \
+ nir_scalar_as_##suffix(nir_scalar s) \
+ { \
+ return nir_const_value_as_##suffix( \
+ nir_scalar_as_const_value(s), s.def->bit_size); \
+ }
-NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
-NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint)
-NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
-NIR_DEFINE_SCALAR_AS_CONST(double, float)
+NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
+NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint)
+NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
+NIR_DEFINE_SCALAR_AS_CONST(double, float)
#undef NIR_DEFINE_SCALAR_AS_CONST
static inline bool
-nir_ssa_scalar_is_alu(nir_ssa_scalar s)
+nir_scalar_is_alu(nir_scalar s)
{
return s.def->parent_instr->type == nir_instr_type_alu;
}
static inline nir_op
-nir_ssa_scalar_alu_op(nir_ssa_scalar s)
+nir_scalar_alu_op(nir_scalar s)
{
return nir_instr_as_alu(s.def->parent_instr)->op;
}
-static inline nir_ssa_scalar
-nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
+static inline bool
+nir_scalar_is_intrinsic(nir_scalar s)
+{
+ return s.def->parent_instr->type == nir_instr_type_intrinsic;
+}
+
+static inline nir_intrinsic_op
+nir_scalar_intrinsic_op(nir_scalar s)
+{
+ return nir_instr_as_intrinsic(s.def->parent_instr)->intrinsic;
+}
+
+static inline nir_scalar
+nir_scalar_chase_alu_src(nir_scalar s, unsigned alu_src_idx)
{
- nir_ssa_scalar out = { NULL, 0 };
+ nir_scalar out = { NULL, 0 };
nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);
/* Our component must be written */
assert(s.comp < s.def->num_components);
- assert(alu->dest.write_mask & (1u << s.comp));
- assert(alu->src[alu_src_idx].src.is_ssa);
out.def = alu->src[alu_src_idx].src.ssa;
if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
@@ -2808,16 +2858,34 @@ nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
return out;
}
-nir_ssa_scalar nir_ssa_scalar_chase_movs(nir_ssa_scalar s);
+nir_scalar nir_scalar_chase_movs(nir_scalar s);
+
+static inline nir_scalar
+nir_get_scalar(nir_def *def, unsigned channel)
+{
+ nir_scalar s = { def, channel };
+ return s;
+}
+
+/** Returns a nir_scalar where we've followed the bit-exact mov/vec use chain to the original definition */
+static inline nir_scalar
+nir_scalar_resolved(nir_def *def, unsigned channel)
+{
+ return nir_scalar_chase_movs(nir_get_scalar(def, channel));
+}
-/** Returns a nir_ssa_scalar where we've followed the bit-exact mov/vec use chain to the original definition */
-static inline nir_ssa_scalar
-nir_ssa_scalar_resolved(nir_ssa_def *def, unsigned channel)
+static inline bool
+nir_scalar_equal(nir_scalar s1, nir_scalar s2)
{
- nir_ssa_scalar s = { def, channel };
- return nir_ssa_scalar_chase_movs(s);
+ return s1.def == s2.def && s1.comp == s2.comp;
}
+static inline uint64_t
+nir_alu_src_as_uint(nir_alu_src src)
+{
+ nir_scalar scalar = nir_get_scalar(src.src.ssa, src.swizzle[0]);
+ return nir_scalar_as_uint(scalar);
+}
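+
+/* For illustration (hypothetical helper, not part of NIR): a common pattern
+ * is to resolve a channel through bit-exact movs/vecs and then test for a
+ * known constant.
+ */
+static inline bool
+nir_scalar_is_const_uint_sketch(nir_def *def, unsigned channel, uint64_t value)
+{
+   nir_scalar s = nir_scalar_resolved(def, channel);
+   return nir_scalar_is_const(s) && nir_scalar_as_uint(s) == value;
+}
+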
typedef struct {
bool success;
@@ -2833,7 +2901,6 @@ typedef struct {
nir_binding nir_chase_binding(nir_src rsrc);
nir_variable *nir_get_binding_variable(struct nir_shader *shader, nir_binding binding);
-
/*
* Control flow
*
@@ -2866,11 +2933,17 @@ typedef struct nir_cf_node {
typedef struct nir_block {
nir_cf_node cf_node;
- struct exec_list instr_list; /** < list of nir_instr */
+ /** list of nir_instr */
+ struct exec_list instr_list;
/** generic block index; generated by nir_index_blocks */
unsigned index;
+ /* This indicates whether the block or any parent block is executed
+ * conditionally and whether the condition uses a divergent value.
+ */
+ bool divergent;
+
/*
* Each block can only have up to 2 successors, so we put them in a simple
* array - no need for anything more complicated.
@@ -2882,7 +2955,7 @@ typedef struct nir_block {
/*
* this node's immediate dominator in the dominance tree - set to NULL for
- * the start block.
+ * the start block and any unreachable blocks.
*/
struct nir_block *imm_dom;
@@ -2970,7 +3043,7 @@ nir_block_ends_in_break(nir_block *block)
nir_instr *instr = nir_block_last_instr(block);
return instr->type == nir_instr_type_jump &&
- nir_instr_as_jump(instr)->type == nir_jump_break;
+ nir_instr_as_jump(instr)->type == nir_jump_break;
}
#define nir_foreach_instr(instr, block) \
@@ -2982,23 +3055,73 @@ nir_block_ends_in_break(nir_block *block)
#define nir_foreach_instr_reverse_safe(instr, block) \
foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
+/* Phis come first in the block */
static inline nir_phi_instr *
-nir_block_last_phi_instr(nir_block *block)
+nir_first_phi_in_block(nir_block *block)
{
- nir_phi_instr *last_phi = NULL;
nir_foreach_instr(instr, block) {
if (instr->type == nir_instr_type_phi)
- last_phi = nir_instr_as_phi(instr);
+ return nir_instr_as_phi(instr);
else
- return last_phi;
+ return NULL;
}
+
+ return NULL;
+}
+
+static inline nir_phi_instr *
+nir_next_phi(nir_phi_instr *phi)
+{
+ nir_instr *next = nir_instr_next(&phi->instr);
+
+ if (next && next->type == nir_instr_type_phi)
+ return nir_instr_as_phi(next);
+ else
+ return NULL;
+}
+
+#define nir_foreach_phi(instr, block) \
+ for (nir_phi_instr *instr = nir_first_phi_in_block(block); instr != NULL; \
+ instr = nir_next_phi(instr))
+
+#define nir_foreach_phi_safe(instr, block) \
+ for (nir_phi_instr *instr = nir_first_phi_in_block(block), \
+ *__next = instr ? nir_next_phi(instr) : NULL; \
+ instr != NULL; \
+ instr = __next, __next = instr ? nir_next_phi(instr) : NULL)
+
+static inline nir_phi_instr *
+nir_block_last_phi_instr(nir_block *block)
+{
+ nir_phi_instr *last_phi = NULL;
+ nir_foreach_phi(instr, block)
+ last_phi = instr;
+
return last_phi;
}
typedef enum {
nir_selection_control_none = 0x0,
+
+ /**
+ * Defined by SPIR-V spec 3.22 "Selection Control".
+ * The application prefers to remove control flow.
+ */
nir_selection_control_flatten = 0x1,
+
+ /**
+ * Defined by SPIR-V spec 3.22 "Selection Control".
+ * The application prefers to keep control flow.
+ */
nir_selection_control_dont_flatten = 0x2,
+
+ /**
+ * May be applied by the compiler stack when it knows
+ * that a branch is divergent, and:
+ * - either both the if and else are always taken
+ * - the if or else is empty and the other is always taken
+ */
+ nir_selection_control_divergent_always_taken = 0x3,
} nir_selection_control;
typedef struct nir_if {
@@ -3006,8 +3129,11 @@ typedef struct nir_if {
nir_src condition;
nir_selection_control control;
- struct exec_list then_list; /** < list of nir_cf_node */
- struct exec_list else_list; /** < list of nir_cf_node */
+ /** list of nir_cf_node */
+ struct exec_list then_list;
+
+ /** list of nir_cf_node */
+ struct exec_list else_list;
} nir_if;
typedef struct {
@@ -3042,7 +3168,7 @@ typedef struct {
typedef struct {
/* Induction variable. */
- nir_ssa_def *def;
+ nir_def *def;
/* Init statement with only uniform. */
nir_src *init_src;
@@ -3055,6 +3181,9 @@ typedef struct {
/* Estimated cost (in number of instructions) of the loop */
unsigned instr_cost;
+ /* Contains fp64 ops that will be lowered */
+ bool has_soft_fp64;
+
/* Guessed trip count based on array indexing */
unsigned guessed_trip_count;
@@ -3092,7 +3221,11 @@ typedef enum {
typedef struct {
nir_cf_node cf_node;
- struct exec_list body; /** < list of nir_cf_node */
+ /** list of nir_cf_node */
+ struct exec_list body;
+
+ /** (optional) list of nir_cf_node */
+ struct exec_list continue_list;
nir_loop_info *info;
nir_loop_control control;
@@ -3142,7 +3275,7 @@ typedef enum {
* SSA defs or uses of SSA defs (most passes shouldn't preserve this
* metadata type).
*/
- nir_metadata_live_ssa_defs = 0x4,
+ nir_metadata_live_defs = 0x4,
/** A dummy metadata value to track when a pass forgot to call
* nir_metadata_preserve.
@@ -3192,19 +3325,21 @@ typedef struct {
/** pointer to the function of which this is an implementation */
struct nir_function *function;
- struct exec_list body; /** < list of nir_cf_node */
+ /**
+ * For entrypoints, a pointer to a nir_function which runs before
+ * it, once per draw or dispatch, communicating via store_preamble and
+ * load_preamble intrinsics. If NULL then there is no preamble.
+ */
+ struct nir_function *preamble;
+
+ /** list of nir_cf_node */
+ struct exec_list body;
nir_block *end_block;
/** list for all local variables in the function */
struct exec_list locals;
- /** list of local registers in the function */
- struct exec_list registers;
-
- /** next available local register index */
- unsigned reg_alloc;
-
/** next available SSA value index */
unsigned ssa_alloc;
@@ -3229,13 +3364,13 @@ typedef struct {
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
- return (nir_block *) impl->body.head_sentinel.next;
+ return (nir_block *)impl->body.head_sentinel.next;
}
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
- return (nir_block *) impl->body.tail_sentinel.prev;
+ return (nir_block *)impl->body.tail_sentinel.prev;
}
static inline nir_cf_node *
@@ -3321,6 +3456,40 @@ nir_loop_last_block(nir_loop *loop)
return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
+static inline bool
+nir_loop_has_continue_construct(const nir_loop *loop)
+{
+ return !exec_list_is_empty(&loop->continue_list);
+}
+
+static inline nir_block *
+nir_loop_first_continue_block(nir_loop *loop)
+{
+ assert(nir_loop_has_continue_construct(loop));
+ struct exec_node *head = exec_list_get_head(&loop->continue_list);
+ return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
+}
+
+static inline nir_block *
+nir_loop_last_continue_block(nir_loop *loop)
+{
+ assert(nir_loop_has_continue_construct(loop));
+ struct exec_node *tail = exec_list_get_tail(&loop->continue_list);
+ return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
+}
+
+/**
+ * Return the target block of a nir_jump_continue statement
+ */
+static inline nir_block *
+nir_loop_continue_target(nir_loop *loop)
+{
+ if (nir_loop_has_continue_construct(loop))
+ return nir_loop_first_continue_block(loop);
+ else
+ return nir_loop_first_block(loop);
+}
+
/**
* Return true if this list of cf_nodes contains a single empty block.
*/
@@ -3339,14 +3508,13 @@ nir_cf_list_is_empty_block(struct exec_list *cf_list)
typedef struct {
uint8_t num_components;
uint8_t bit_size;
-} nir_parameter;
-typedef struct nir_printf_info {
- unsigned num_args;
- unsigned *arg_sizes;
- unsigned string_size;
- char *strings;
-} nir_printf_info;
+ /* True if this parameter is actually the function return variable */
+ bool is_return;
+
+ /* The type of the function param */
+ const struct glsl_type *type;
+} nir_parameter;
typedef struct nir_function {
struct exec_node node;
@@ -3360,10 +3528,36 @@ typedef struct nir_function {
/** The implementation of this function.
*
* If the function is only declared and not implemented, this is NULL.
+ *
+ * Unless setting to NULL or NIR_SERIALIZE_FUNC_HAS_IMPL, set with
+ * nir_function_set_impl to maintain IR invariants.
*/
nir_function_impl *impl;
bool is_entrypoint;
+ /* from SPIR-V linkage, only for libraries */
+ bool is_exported;
+ bool is_preamble;
+ /* from SPIR-V function control */
+ bool should_inline;
+ bool dont_inline; /* from SPIR-V */
+
+ /**
+ * Is this function a subroutine type declaration
+ * e.g. subroutine void type1(float arg1);
+ */
+ bool is_subroutine;
+
+ /**
+ * Is this function associated with a subroutine type,
+ * e.g. subroutine (type1, type2) function_name { function_body };
+ * would have num_subroutine_types 2,
+ * and pointers to the type1 and type2 types.
+ */
+ int num_subroutine_types;
+ const struct glsl_type **subroutine_types;
+
+ int subroutine_index;
} nir_function;
typedef enum {
@@ -3373,7 +3567,7 @@ typedef enum {
nir_lower_divmod64 = (1 << 2),
/** Lower all 64-bit umul_high and imul_high opcodes */
nir_lower_imul_high64 = (1 << 3),
- nir_lower_mov64 = (1 << 4),
+ nir_lower_bcsel64 = (1 << 4),
nir_lower_icmp64 = (1 << 5),
nir_lower_iadd64 = (1 << 6),
nir_lower_iabs64 = (1 << 7),
@@ -3389,6 +3583,10 @@ typedef enum {
nir_lower_scan_reduce_bitwise64 = (1 << 17),
nir_lower_scan_reduce_iadd64 = (1 << 18),
nir_lower_vote_ieq64 = (1 << 19),
+ nir_lower_usub_sat64 = (1 << 20),
+ nir_lower_iadd_sat64 = (1 << 21),
+ nir_lower_find_lsb64 = (1 << 22),
+ nir_lower_conv64 = (1 << 23),
} nir_lower_int64_options;
typedef enum {
@@ -3403,7 +3601,10 @@ typedef enum {
nir_lower_dmod = (1 << 8),
nir_lower_dsub = (1 << 9),
nir_lower_ddiv = (1 << 10),
- nir_lower_fp64_full_software = (1 << 11),
+ nir_lower_dsign = (1 << 11),
+ nir_lower_dminmax = (1 << 12),
+ nir_lower_dsat = (1 << 13),
+ nir_lower_fp64_full_software = (1 << 14),
} nir_lower_doubles_options;
typedef enum {
@@ -3413,17 +3614,53 @@ typedef enum {
nir_divergence_view_index_uniform = (1 << 3),
nir_divergence_single_frag_shading_rate_per_subgroup = (1 << 4),
nir_divergence_multiple_workgroup_per_compute_subgroup = (1 << 5),
+ nir_divergence_shader_record_ptr_uniform = (1 << 6),
+ nir_divergence_uniform_load_tears = (1 << 7),
} nir_divergence_options;
typedef enum {
- nir_pack_varying_interp_mode_none = (1 << 0),
- nir_pack_varying_interp_mode_smooth = (1 << 1),
- nir_pack_varying_interp_mode_flat = (1 << 2),
- nir_pack_varying_interp_mode_noperspective = (1 << 3),
- nir_pack_varying_interp_loc_sample = (1 << 16),
- nir_pack_varying_interp_loc_centroid = (1 << 17),
- nir_pack_varying_interp_loc_center = (1 << 18),
-} nir_pack_varying_options;
+ /**
+ * Whether a fragment shader can interpolate the same input multiple times
+ * with different modes (smooth, noperspective) and locations (pixel,
+ * centroid, sample, at_offset, at_sample), excluding the flat mode.
+ *
+ * This matches AMD GPU flexibility and limitations and is a superset of
+ * the GL4 requirement that each input can be interpolated at its specified
+ * location, and then also as centroid, at_offset, and at_sample.
+ */
+ nir_io_has_flexible_input_interpolation_except_flat = BITFIELD_BIT(0),
+
+ /**
+ * nir_opt_varyings compacts (relocates) components of varyings by
+ * rewriting their locations completely, effectively moving components of
+ * varyings between slots. This option forces nir_opt_varyings to make
+ * VARYING_SLOT_POS unused by moving its contents to VARn if the consumer
+ * is not FS. If this option is not set and POS is unused, it moves
+ * components of VARn to POS until it's fully used.
+ */
+ nir_io_dont_use_pos_for_non_fs_varyings = BITFIELD_BIT(1),
+
+ nir_io_16bit_input_output_support = BITFIELD_BIT(2),
+
+ /* Options affecting the GLSL compiler are below. */
+
+ /**
+ * Lower load_deref/store_deref to load_input/store_output/etc. intrinsics.
+ * This is only affects GLSL compilation.
+ */
+ nir_io_glsl_lower_derefs = BITFIELD_BIT(16),
+
+ /**
+ * Run nir_opt_varyings in the GLSL linker. If false, optimize varyings
+ * the old way and lower IO later.
+ *
+ * nir_io_glsl_lower_derefs must be set for this to take effect.
+ *
+ * TODO: remove this and default to enabled once we are sure that this
+ * codepath is solid.
+ */
+ nir_io_glsl_opt_varyings = BITFIELD_BIT(17),
+} nir_io_options;
/** An instruction filtering callback
*
@@ -3431,6 +3668,15 @@ typedef enum {
*/
typedef bool (*nir_instr_filter_cb)(const nir_instr *, const void *);
+/** A vectorization width callback
+ *
+ * Returns the maximum vectorization width per instruction, or 0 if the
+ * instruction must not be modified.
+ *
+ * The vectorization width must be a power of 2.
+ */
+typedef uint8_t (*nir_vectorize_cb)(const nir_instr *, const void *);
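+
+/* For illustration (hypothetical callback, not part of NIR): allow vec2 for
+ * 16-bit ALU instructions and keep everything else scalar.
+ */
+static inline uint8_t
+example_vectorize_width(const nir_instr *instr, const void *data)
+{
+   if (instr->type != nir_instr_type_alu)
+      return 0; /* leave non-ALU instructions untouched */
+   return nir_instr_as_alu(instr)->def.bit_size == 16 ? 2 : 1;
+}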
+
typedef struct nir_shader_compiler_options {
bool lower_fdiv;
bool lower_ffma16;
@@ -3448,24 +3694,18 @@ typedef struct nir_shader_compiler_options {
bool lower_fsqrt;
bool lower_sincos;
bool lower_fmod;
- /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
+ /** Lowers ibitfield_extract/ubitfield_extract. */
bool lower_bitfield_extract;
- /** Lowers ibitfield_extract/ubitfield_extract to compares, shifts. */
- bool lower_bitfield_extract_to_shifts;
- /** Lowers bitfield_insert to bfi/bfm */
+ /** Lowers bitfield_insert. */
bool lower_bitfield_insert;
- /** Lowers bitfield_insert to compares, and shifts. */
- bool lower_bitfield_insert_to_shifts;
- /** Lowers bitfield_insert to bfm/bitfield_select. */
- bool lower_bitfield_insert_to_bitfield_select;
/** Lowers bitfield_reverse to shifts. */
bool lower_bitfield_reverse;
/** Lowers bit_count to shifts. */
bool lower_bit_count;
- /** Lowers ifind_msb to compare and ufind_msb */
+ /** Lowers ifind_msb. */
bool lower_ifind_msb;
- /** Lowers ifind_msb and ufind_msb to reverse variants */
- bool lower_find_msb_to_reverse;
+ /** Lowers ufind_msb. */
+ bool lower_ufind_msb;
/** Lowers find_lsb to ufind_msb and logic ops */
bool lower_find_lsb;
bool lower_uadd_carry;
@@ -3526,6 +3766,14 @@ typedef struct nir_shader_compiler_options {
bool lower_ftrunc;
+ /** Lowers fround_even to ffract+feq+csel.
+ *
+ * Not correct in that it doesn't correctly handle the "_even" part of the
+ * rounding, but good enough for DX9 array indexing handling on DX9-class
+ * hardware.
+ */
+ bool lower_fround_even;
+
bool lower_ldexp;
bool lower_pack_half_2x16;
@@ -3587,8 +3835,14 @@ typedef struct nir_shader_compiler_options {
*/
bool optimize_sample_mask_in;
- bool lower_cs_local_index_from_id;
- bool lower_cs_local_id_from_index;
+ /**
+ * Optimize boolean reductions of quad broadcasts. This should only be enabled if
+ * nir_intrinsic_reduce supports INCLUDE_HELPERS.
+ */
+ bool optimize_quad_vote_to_reduce;
+
+ bool lower_cs_local_index_to_id;
+ bool lower_cs_local_id_to_index;
/* Prevents lowering global_invocation_id to be in terms of workgroup_id */
bool has_cs_global_id;
@@ -3608,7 +3862,7 @@ typedef struct nir_shader_compiler_options {
* If this flag is set, the lowering will be applied to all bit-sizes of
* these instructions.
*
- * \sa ::lower_hadd64
+ * :c:member:`lower_hadd64`
*/
bool lower_hadd;
@@ -3619,28 +3873,25 @@ typedef struct nir_shader_compiler_options {
* If this flag is set, the lowering will be applied to only 64-bit
* versions of these instructions.
*
- * \sa ::lower_hadd
+ * :c:member:`lower_hadd`
*/
bool lower_hadd64;
/**
- * Set if nir_op_uadd_sat and nir_op_usub_sat should be lowered to simple
- * arithmetic.
+ * Set if nir_op_uadd_sat should be lowered to simple arithmetic.
*
* If this flag is set, the lowering will be applied to all bit-sizes of
* these instructions.
- *
- * \sa ::lower_usub_sat64
*/
bool lower_uadd_sat;
/**
- * Set if only 64-bit nir_op_usub_sat should be lowered to simple
- * arithmetic.
+ * Set if nir_op_usub_sat should be lowered to simple arithmetic.
*
- * \sa ::lower_add_sat
+ * If this flag is set, the lowering will be applied to all bit-sizes of
+ * these instructions.
*/
- bool lower_usub_sat64;
+ bool lower_usub_sat;
/**
* Set if nir_op_iadd_sat and nir_op_isub_sat should be lowered to simple
@@ -3652,15 +3903,26 @@ typedef struct nir_shader_compiler_options {
bool lower_iadd_sat;
/**
+ * Set if imul_32x16 and umul_32x16 should be lowered to simple
+ * arithmetic.
+ */
+ bool lower_mul_32x16;
+
+ /**
* Should IO be re-vectorized? Some scalar ISAs still operate on vec4's
* for IO purposes and would prefer loads/stores be vectorized.
*/
bool vectorize_io;
+ bool vectorize_tess_levels;
bool lower_to_scalar;
nir_instr_filter_cb lower_to_scalar_filter;
/**
- * Whether nir_opt_vectorize should only create 16-bit 2D vectors.
+ * Disables potentially harmful algebraic transformations for architectures
+ * with SIMD-within-a-register semantics.
+ *
+ * Note: to actually vectorize 16-bit instructions, use nir_opt_vectorize()
+ * with a suitable callback function.
*/
bool vectorize_vec2_16bit;
@@ -3678,7 +3940,6 @@ typedef struct nir_shader_compiler_options {
*/
bool use_interpolated_input_intrinsics;
-
/**
* Whether nir_lower_io() will lower interpolateAt functions to
* load_interpolated_input intrinsics.
@@ -3691,8 +3952,13 @@ typedef struct nir_shader_compiler_options {
/* Lowers when 32x32->64 bit multiplication is not supported */
bool lower_mul_2x32_64;
- /* Lowers when rotate instruction is not supported */
- bool lower_rotate;
+ /* Indicates that urol and uror are supported */
+ bool has_rotate8;
+ bool has_rotate16;
+ bool has_rotate32;
+
+ /** Backend supports shfr */
+ bool has_shfr32;
/** Backend supports ternary addition */
bool has_iadd3;
@@ -3709,6 +3975,9 @@ typedef struct nir_shader_compiler_options {
* to imul with masked inputs */
bool has_umul24;
+ /** Backend supports 32-bit imad */
+ bool has_imad32;
+
/** Backend supports umad24, if not set umad24 will automatically be lowered
* to imul with masked inputs and iadd */
bool has_umad24;
@@ -3727,23 +3996,66 @@ typedef struct nir_shader_compiler_options {
/** Backend supports pack_32_4x8 or pack_32_4x8_split. */
bool has_pack_32_4x8;
- /** Backend supports txs, if not nir_lower_tex(..) uses txs-free variants
- * for rect texture lowering. */
- bool has_txs;
+ /** Backend supports nir_load_texture_scale and prefers it over txs for nir
+ * lowerings. */
+ bool has_texture_scaling;
- /** Backend supports sdot_4x8 and udot_4x8 opcodes. */
- bool has_dot_4x8;
+ /** Backend supports sdot_4x8_iadd. */
+ bool has_sdot_4x8;
- /** Backend supports sudot_4x8 opcodes. */
+ /** Backend supports udot_4x8_uadd. */
+ bool has_udot_4x8;
+
+ /** Backend supports sudot_4x8_iadd. */
bool has_sudot_4x8;
+ /** Backend supports sdot_4x8_iadd_sat. */
+ bool has_sdot_4x8_sat;
+
+ /** Backend supports udot_4x8_uadd_sat. */
+ bool has_udot_4x8_sat;
+
+ /** Backend supports sudot_4x8_iadd_sat. */
+ bool has_sudot_4x8_sat;
+
/** Backend supports sdot_2x16 and udot_2x16 opcodes. */
bool has_dot_2x16;
- /* Whether to generate only scoped_barrier intrinsics instead of the set of
- * memory and control barrier intrinsics based on GLSL.
+ /** Backend supports fmulz (and ffmaz if lower_ffma32=false) */
+ bool has_fmulz;
+
+ /**
+ * Backend supports fmulz (and ffmaz if lower_ffma32=false) but only if
+ * FLOAT_CONTROLS_DENORM_PRESERVE_FP32 is not set
*/
- bool use_scoped_barrier;
+ bool has_fmulz_no_denorms;
+
+ /** Backend supports 32bit ufind_msb_rev and ifind_msb_rev. */
+ bool has_find_msb_rev;
+
+ /** Backend supports pack_half_2x16_rtz_split. */
+ bool has_pack_half_2x16_rtz;
+
+ /** Backend supports bitz/bitnz. */
+ bool has_bit_test;
+
+ /** Backend supports ubfe/ibfe. */
+ bool has_bfe;
+
+ /** Backend supports bfm. */
+ bool has_bfm;
+
+ /** Backend supports bfi. */
+ bool has_bfi;
+
+ /** Backend supports bitfield_select. */
+ bool has_bitfield_select;
+
+ /** Backend supports uclz. */
+ bool has_uclz;
+
+ /** Backend supports msad_u4x8. */
+ bool has_msad;
/**
* Is this the Intel vec4 backend?
@@ -3769,6 +4081,7 @@ typedef struct nir_shader_compiler_options {
unsigned max_unroll_iterations;
unsigned max_unroll_iterations_aggressive;
+ unsigned max_unroll_iterations_fp64;
bool lower_uniforms_to_ubo;
@@ -3777,25 +4090,111 @@ typedef struct nir_shader_compiler_options {
* vectorized IO can pack more varyings when linking. */
bool linker_ignore_precision;
+ /* Specifies if indirect sampler array access will trigger forced loop
+ * unrolling.
+ */
+ bool force_indirect_unrolling_sampler;
+
+ /* Some older drivers don't support GLSL versions with the concept of flat
+ * varyings and also don't support integers. This setting helps us avoid
+ * marking varyings as flat and potentially having them changed to ints via
+ * varying packing.
+ */
+ bool no_integers;
+
/**
* Specifies which type of indirectly accessed variables should force
* loop unrolling.
*/
nir_variable_mode force_indirect_unrolling;
+ bool driver_functions;
+
nir_lower_int64_options lower_int64_options;
nir_lower_doubles_options lower_doubles_options;
nir_divergence_options divergence_analysis_options;
/**
- * Support pack varyings with different interpolation location
- * (center, centroid, sample) and mode (flat, noperspective, smooth)
- * into same slot.
+ * The masks of shader stages that support indirect indexing with
+ * load_input and store_output intrinsics. It's used by
+ * nir_lower_io_passes.
*/
- nir_pack_varying_options pack_varying_options;
+ uint8_t support_indirect_inputs;
+ uint8_t support_indirect_outputs;
+
+ /**
+ * Remove varyings loaded from uniforms and let the fragment shader load
+ * the uniform directly. GPUs that pass varyings through memory clearly
+ * benefit; GPUs that pass varyings through on-chip resources may not,
+ * since this saves on-chip resources but can increase memory pressure
+ * when there is far more fragment work than vertex work, so it is better
+ * left disabled there.
+ */
+ bool lower_varying_from_uniform;
+
+ /** Store the variable offset in the intrinsic's range_base instead
+ * of adding it to the image index.
+ */
+ bool lower_image_offset_to_range_base;
+
+ /** Store the variable offset in the intrinsic's range_base instead
+ * of adding it to the atomic source.
+ */
+ bool lower_atomic_offset_to_range_base;
+
+ /** Don't convert medium-precision casts (e.g. f2fmp) into concrete
+ * type casts (e.g. f2f16).
+ */
+ bool preserve_mediump;
+
+ /** lowers fquantize2f16 to alu ops. */
+ bool lower_fquantize2f16;
+
+ /** Lower f2f16 to f2f16_rtz when execution mode is not rtne. */
+ bool force_f2f16_rtz;
+
+ /** Lower VARYING_SLOT_LAYER in FS to SYSTEM_VALUE_LAYER_ID. */
+ bool lower_layer_fs_input_to_sysval;
+
+ /** clip/cull distance and tess level arrays use compact semantics */
+ bool compact_arrays;
+
+ /** Options determining lowering and behavior of inputs and outputs. */
+ nir_io_options io_options;
+
+ /** Driver callback where drivers can define how to lower mediump.
+ * Used by nir_lower_io_passes.
+ */
+ void (*lower_mediump_io)(struct nir_shader *nir);
+
+ /**
+ * Return the maximum cost of an expression that's written to a shader
+ * output that can be moved into the next shader to remove that output.
+ *
+ * Currently only uniform expressions are moved. A uniform expression is
+ * any ALU expression sourcing only constants, uniforms, and UBO loads.
+ *
+ * Set to NULL or return 0 if you only want to propagate constants from
+ * outputs to inputs.
+ *
+ * Drivers can set the maximum cost based on the types of consecutive
+ * shaders or shader SHA1s.
+ *
+ * Drivers should also set "varying_estimate_instr_cost".
+ */
+ unsigned (*varying_expression_max_cost)(struct nir_shader *consumer,
+ struct nir_shader *producer);
+
+ /**
+ * Return the cost of an instruction that could be moved into the next
+ * shader. If the cost of all instructions in an expression is <=
+ * varying_expression_max_cost(), the instruction is moved.
+ */
+ unsigned (*varying_estimate_instr_cost)(struct nir_instr *instr);
} nir_shader_compiler_options;
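
/* For illustration (hypothetical cost model, not part of NIR): a driver might
 * wire up the two varying-cost callbacks above roughly like this.
 */
static unsigned
example_varying_estimate_instr_cost(struct nir_instr *instr)
{
   /* Weigh every movable instruction equally; a real driver would charge
    * more for expensive ops such as transcendentals.
    */
   return 1;
}

static unsigned
example_varying_expression_max_cost(struct nir_shader *consumer,
                                    struct nir_shader *producer)
{
   /* Permit moving uniform expressions of up to four instructions. */
   return 4;
}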
typedef struct nir_shader {
+ gc_ctx *gctx;
+
/** list of uniforms (nir_variable) */
struct exec_list variables;
@@ -3809,9 +4208,8 @@ typedef struct nir_shader {
/** Various bits of compile-time information about a given shader */
struct shader_info info;
- struct exec_list functions; /** < list of nir_function */
-
- struct list_head gc_list; /** < list of all nir_instrs allocated on the shader but not yet freed. */
+ /** list of nir_function */
+ struct exec_list functions;
/**
* The size of the variable space for load_input_*, load_uniform_*, etc.
@@ -3820,6 +4218,9 @@ typedef struct nir_shader {
*/
unsigned num_inputs, num_uniforms, num_outputs;
+ /** Size in bytes of required implicitly bound global memory */
+ unsigned global_mem_size;
+
/** Size in bytes of required scratch space */
unsigned scratch_size;
@@ -3835,15 +4236,67 @@ typedef struct nir_shader {
/** Size of the constant data associated with the shader, in bytes */
unsigned constant_data_size;
+ struct nir_xfb_info *xfb_info;
+
unsigned printf_info_count;
- nir_printf_info *printf_info;
+ u_printf_info *printf_info;
} nir_shader;
#define nir_foreach_function(func, shader) \
foreach_list_typed(nir_function, func, node, &(shader)->functions)
+#define nir_foreach_function_safe(func, shader) \
+ foreach_list_typed_safe(nir_function, func, node, &(shader)->functions)
+
+static inline nir_function *
+nir_foreach_function_with_impl_first(const nir_shader *shader)
+{
+ foreach_list_typed(nir_function, func, node, &shader->functions) {
+ if (func->impl != NULL)
+ return func;
+ }
+
+ return NULL;
+}
+
+static inline nir_function_impl *
+nir_foreach_function_with_impl_next(nir_function **it)
+{
+ foreach_list_typed_from(nir_function, func, node, _, (*it)->node.next) {
+ if (func->impl != NULL) {
+ *it = func;
+ return func->impl;
+ }
+ }
+
+ return NULL;
+}
+
+#define nir_foreach_function_with_impl(it, impl_it, shader) \
+ for (nir_function *it = nir_foreach_function_with_impl_first(shader); \
+ it != NULL; \
+ it = NULL) \
+ \
+ for (nir_function_impl *impl_it = it->impl; \
+ impl_it != NULL; \
+ impl_it = nir_foreach_function_with_impl_next(&it))
+
+/* Equivalent to
+ *
+ * nir_foreach_function(func, shader) {
+ * if (func->impl != NULL) {
+ * ...
+ * }
+ * }
+ *
+ * Carefully written to ensure break/continue work in the user code.
+ */
+
+#define nir_foreach_function_impl(it, shader) \
+ nir_foreach_function_with_impl(_func_##it, it, shader)
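+
+/* For illustration: running a pass over every implemented function, e.g.
+ *
+ *    nir_foreach_function_impl(impl, shader) {
+ *       nir_index_blocks(impl);
+ *    }
+ */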
+
static inline nir_function_impl *
-nir_shader_get_entrypoint(nir_shader *shader)
+nir_shader_get_entrypoint(const nir_shader *shader)
{
nir_function *func = NULL;
@@ -3865,35 +4318,29 @@ nir_shader_get_entrypoint(nir_shader *shader)
return func->impl;
}
-typedef struct nir_liveness_bounds {
- uint32_t start;
- uint32_t end;
-} nir_liveness_bounds;
+static inline nir_function *
+nir_shader_get_function_for_name(const nir_shader *shader, const char *name)
+{
+ nir_foreach_function(func, shader) {
+ if (strcmp(func->name, name) == 0)
+ return func;
+ }
-typedef struct nir_instr_liveness {
- /**
- * nir_instr->index for the start and end of a single live interval for SSA
- * defs. ssa values last used by a nir_if condition will have an interval
- * ending at the first instruction after the last one before the if
- * condition.
- *
- * Indexed by def->index (impl->ssa_alloc elements).
- */
- struct nir_liveness_bounds *defs;
-} nir_instr_liveness;
+ return NULL;
+}
-nir_instr_liveness *
-nir_live_ssa_defs_per_instr(nir_function_impl *impl);
+/*
+ * After all functions are forcibly inlined, these passes remove redundant
+ * functions from a shader and library respectively.
+ */
+void nir_remove_non_entrypoints(nir_shader *shader);
+void nir_remove_non_exported(nir_shader *shader);
nir_shader *nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
const nir_shader_compiler_options *options,
shader_info *si);
-nir_register *nir_local_reg_create(nir_function_impl *impl);
-
-void nir_reg_remove(nir_register *reg);
-
/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
@@ -3914,6 +4361,26 @@ nir_variable *nir_local_variable_create(nir_function_impl *impl,
const struct glsl_type *type,
const char *name);
+/** Creates a uniform builtin state variable. */
+nir_variable *
+nir_state_variable_create(nir_shader *shader,
+ const struct glsl_type *type,
+ const char *name,
+ const gl_state_index16 tokens[STATE_LENGTH]);
+
+/* Gets the variable for the given mode and location, creating it (with the given
+ * type) if necessary.
+ */
+nir_variable *
+nir_get_variable_with_location(nir_shader *shader, nir_variable_mode mode, int location,
+ const struct glsl_type *type);
+
+/* Creates a variable for the given mode and location.
+ */
+nir_variable *
+nir_create_variable_with_location(nir_shader *shader, nir_variable_mode mode, int location,
+ const struct glsl_type *type);
+
nir_variable *nir_find_variable_with_location(nir_shader *shader,
nir_variable_mode mode,
unsigned location);
@@ -3922,6 +4389,12 @@ nir_variable *nir_find_variable_with_driver_location(nir_shader *shader,
nir_variable_mode mode,
unsigned location);
+nir_variable *nir_find_state_variable(nir_shader *s,
+ gl_state_index16 tokens[STATE_LENGTH]);
+
+nir_variable *nir_find_sampler_variable_with_tex_index(nir_shader *shader,
+ unsigned texture_index);
+
void nir_sort_variables_with_modes(nir_shader *shader,
int (*compar)(const nir_variable *,
const nir_variable *),
@@ -3930,6 +4403,13 @@ void nir_sort_variables_with_modes(nir_shader *shader,
/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);
+static inline void
+nir_function_set_impl(nir_function *func, nir_function_impl *impl)
+{
+ func->impl = impl;
+ impl->function = func;
+}
+
nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);
@@ -3969,13 +4449,14 @@ nir_call_instr *nir_call_instr_create(nir_shader *shader,
nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);
nir_phi_instr *nir_phi_instr_create(nir_shader *shader);
-nir_phi_src *nir_phi_instr_add_src(nir_phi_instr *instr, nir_block *pred, nir_src src);
+nir_phi_src *nir_phi_instr_add_src(nir_phi_instr *instr,
+ nir_block *pred, nir_def *src);
nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
-nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
- unsigned num_components,
- unsigned bit_size);
+nir_undef_instr *nir_undef_instr_create(nir_shader *shader,
+ unsigned num_components,
+ unsigned bit_size);
nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
@@ -4075,16 +4556,15 @@ nir_after_block_before_jump(nir_block *block)
}
static inline nir_cursor
-nir_before_src(nir_src *src, bool is_if_condition)
+nir_before_src(nir_src *src)
{
- if (is_if_condition) {
+ if (nir_src_is_if(src)) {
nir_block *prev_block =
- nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
- assert(!nir_block_ends_in_jump(prev_block));
+ nir_cf_node_as_block(nir_cf_node_prev(&nir_src_parent_if(src)->cf_node));
return nir_after_block(prev_block);
- } else if (src->parent_instr->type == nir_instr_type_phi) {
+ } else if (nir_src_parent_instr(src)->type == nir_instr_type_phi) {
#ifndef NDEBUG
- nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
+ nir_phi_instr *cond_phi = nir_instr_as_phi(nir_src_parent_instr(src));
bool found = false;
nir_foreach_phi_src(phi_src, cond_phi) {
if (phi_src->src.ssa == src->ssa) {
@@ -4094,13 +4574,13 @@ nir_before_src(nir_src *src, bool is_if_condition)
}
assert(found);
#endif
- /* The LIST_ENTRY macro is a generic container-of macro, it just happens
+   /* The list_entry() macro is a generic container-of macro; it just happens
* to have a more specific name.
*/
- nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
+ nir_phi_src *phi_src = list_entry(src, nir_phi_src, src);
return nir_after_block_before_jump(phi_src->pred);
} else {
- return nir_before_instr(src->parent_instr);
+ return nir_before_instr(nir_src_parent_instr(src));
}
}
@@ -4168,6 +4648,18 @@ nir_after_cf_list(struct exec_list *cf_list)
return nir_after_cf_node(last_node);
}
+static inline nir_cursor
+nir_before_impl(nir_function_impl *impl)
+{
+ return nir_before_cf_list(&impl->body);
+}
+
+static inline nir_cursor
+nir_after_impl(nir_function_impl *impl)
+{
+ return nir_after_cf_list(&impl->body);
+}
+
/**
* Insert a NIR instruction at the given cursor.
*
@@ -4247,14 +4739,10 @@ nir_cursor nir_instr_free_and_dce(nir_instr *instr);
/** @} */
-nir_ssa_def *nir_instr_ssa_def(nir_instr *instr);
+nir_def *nir_instr_def(nir_instr *instr);
-typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
-typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
+typedef bool (*nir_foreach_def_cb)(nir_def *def, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
-bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
- void *state);
-static inline bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
static inline bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
bool nir_foreach_phi_src_leaving_block(nir_block *instr,
nir_foreach_src_cb cb,
@@ -4262,96 +4750,116 @@ bool nir_foreach_phi_src_leaving_block(nir_block *instr,
nir_const_value *nir_src_as_const_value(nir_src src);
-#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
-static inline c_type * \
-nir_src_as_ ## name (nir_src src) \
-{ \
- return src.is_ssa && src.ssa->parent_instr->type == type_enum \
- ? cast_macro(src.ssa->parent_instr) : NULL; \
-}
+#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
+ static inline c_type * \
+ nir_src_as_##name(nir_src src) \
+ { \
+ return src.ssa->parent_instr->type == type_enum \
+ ? cast_macro(src.ssa->parent_instr) \
+ : NULL; \
+ }
NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
nir_instr_type_intrinsic, nir_instr_as_intrinsic)
NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
-bool nir_src_is_dynamically_uniform(nir_src src);
+bool nir_src_is_always_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
static inline void
-nir_instr_rewrite_src_ssa(ASSERTED nir_instr *instr,
- nir_src *src, nir_ssa_def *new_ssa)
+nir_src_rewrite(nir_src *src, nir_def *new_ssa)
{
- assert(src->parent_instr == instr);
- assert(src->is_ssa && src->ssa);
+ assert(src->ssa);
+ assert(nir_src_is_if(src) ? (nir_src_parent_if(src) != NULL) : (nir_src_parent_instr(src) != NULL));
list_del(&src->use_link);
src->ssa = new_ssa;
list_addtail(&src->use_link, &new_ssa->uses);
}
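
A minimal sketch of driving nir_src_rewrite() from the nir_foreach_src()
callback type declared earlier in this header; struct replace_state and
replace_src_cb are hypothetical names used only for illustration:

struct replace_state {
   nir_def *old_def;
   nir_def *new_def;
};

static bool
replace_src_cb(nir_src *src, void *state)
{
   struct replace_state *s = state;
   /* Redirect only the sources that point at the def being replaced. */
   if (src->ssa == s->old_def)
      nir_src_rewrite(src, s->new_def);
   return true; /* keep iterating over the instruction's sources */
}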
-void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
-void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
+/** Initialize a nir_src
+ *
+ * This is almost never the helper you want to use. This helper assumes that
+ * the source is uninitialized garbage and blasts over it without doing any
+ * tear-down of the existing source, not even removing it from its uses list.
+ * Using this helper on a source that currently exists in any uses list will
+ * result in linked list corruption. It also assumes that the instruction is
+ * currently live in the IR and adds the source to the uses list for the given
+ * nir_def as part of setup.
+ *
+ * This is pretty much only useful for adding sources to extant instructions
+ * or manipulating parallel copy instructions as part of out-of-SSA.
+ *
+ * When in doubt, use nir_src_rewrite() instead.
+ */
+void nir_instr_init_src(nir_instr *instr, nir_src *src, nir_def *def);
-static inline void
-nir_if_rewrite_condition_ssa(ASSERTED nir_if *if_stmt,
- nir_src *src, nir_ssa_def *new_ssa)
-{
- assert(src->parent_if == if_stmt);
- assert(src->is_ssa && src->ssa);
- list_del(&src->use_link);
- src->ssa = new_ssa;
- list_addtail(&src->use_link, &new_ssa->if_uses);
-}
+/** Clear a nir_src
+ *
+ * This helper clears a nir_src by removing it from any uses lists and
+ * resetting its contents to NIR_SRC_INIT. This is typically used as a
+ * precursor to removing the source from the instruction by adjusting a
+ * num_srcs parameter somewhere or overwriting it with nir_instr_move_src().
+ */
+void nir_instr_clear_src(nir_instr *instr, nir_src *src);
-void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
-void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
- nir_dest new_dest);
+void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
-void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
- unsigned num_components, unsigned bit_size,
- const char *name);
-void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
- unsigned num_components, unsigned bit_size);
+void nir_def_init(nir_instr *instr, nir_def *def,
+ unsigned num_components, unsigned bit_size);
static inline void
-nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
- const struct glsl_type *type,
- const char *name)
+nir_def_init_for_type(nir_instr *instr, nir_def *def,
+ const struct glsl_type *type)
{
assert(glsl_type_is_vector_or_scalar(type));
- nir_ssa_dest_init(instr, dest, glsl_get_components(type),
- glsl_get_bit_size(type), name);
+ nir_def_init(instr, def, glsl_get_components(type),
+ glsl_get_bit_size(type));
}
-void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_ssa_def *new_ssa);
-void nir_ssa_def_rewrite_uses_src(nir_ssa_def *def, nir_src new_src);
-void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_ssa_def *new_ssa,
- nir_instr *after_me);
+void nir_def_rewrite_uses(nir_def *def, nir_def *new_ssa);
+void nir_def_rewrite_uses_src(nir_def *def, nir_src new_src);
+void nir_def_rewrite_uses_after(nir_def *def, nir_def *new_ssa,
+ nir_instr *after_me);
-nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
+nir_component_mask_t nir_src_components_read(const nir_src *src);
+nir_component_mask_t nir_def_components_read(const nir_def *def);
+bool nir_def_all_uses_are_fsat(const nir_def *def);
static inline bool
-nir_ssa_def_is_unused(nir_ssa_def *ssa)
+nir_def_is_unused(nir_def *ssa)
{
- return list_is_empty(&ssa->uses) && list_is_empty(&ssa->if_uses);
+ return list_is_empty(&ssa->uses);
}
+/** Sorts unstructured blocks
+ *
+ * NIR requires that unstructured blocks be sorted in reverse post-DFS
+ * (depth-first search) order. This is the standard ordering used in the
+ * compiler literature, and it guarantees dominance. In particular, reverse
+ * post-DFS order guarantees that dominators occur in the list before the
+ * blocks they dominate.
+ *
+ * NOTE: This function also implicitly deletes any unreachable blocks.
+ */
+void nir_sort_unstructured_blocks(nir_function_impl *impl);
-/** Returns the next block, disregarding structure
+/** Returns the next block
*
- * The ordering is deterministic but has no guarantees beyond that. In
- * particular, it is not guaranteed to be dominance-preserving.
+ * For structured control-flow, this follows the same order as
+ * nir_block_cf_tree_next(). For unstructured control-flow the blocks are in
+ * reverse post-DFS order. (See nir_sort_unstructured_blocks() above.)
*/
nir_block *nir_block_unstructured_next(nir_block *block);
nir_block *nir_unstructured_start_block(nir_function_impl *impl);
-#define nir_foreach_block_unstructured(block, impl) \
+#define nir_foreach_block_unstructured(block, impl) \
for (nir_block *block = nir_unstructured_start_block(impl); block != NULL; \
block = nir_block_unstructured_next(block))
-#define nir_foreach_block_unstructured_safe(block, impl) \
+#define nir_foreach_block_unstructured_safe(block, impl) \
for (nir_block *block = nir_unstructured_start_block(impl), \
- *next = nir_block_unstructured_next(block); \
- block != NULL; \
+ *next = nir_block_unstructured_next(block); \
+ block != NULL; \
block = next, next = nir_block_unstructured_next(block))
/*
@@ -4377,33 +4885,42 @@ nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
+/* Gets the block before a CF node in source-code order */
+
+nir_block *nir_cf_node_cf_tree_prev(nir_cf_node *node);
+
/* Macros for loops that visit blocks in source-code order */
-#define nir_foreach_block(block, impl) \
+#define nir_foreach_block(block, impl) \
for (nir_block *block = nir_start_block(impl); block != NULL; \
block = nir_block_cf_tree_next(block))
-#define nir_foreach_block_safe(block, impl) \
- for (nir_block *block = nir_start_block(impl), \
- *next = nir_block_cf_tree_next(block); \
- block != NULL; \
+#define nir_foreach_block_safe(block, impl) \
+ for (nir_block *block = nir_start_block(impl), \
+ *next = nir_block_cf_tree_next(block); \
+ block != NULL; \
block = next, next = nir_block_cf_tree_next(block))
-#define nir_foreach_block_reverse(block, impl) \
+#define nir_foreach_block_reverse(block, impl) \
for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
block = nir_block_cf_tree_prev(block))
-#define nir_foreach_block_reverse_safe(block, impl) \
- for (nir_block *block = nir_impl_last_block(impl), \
- *prev = nir_block_cf_tree_prev(block); \
- block != NULL; \
+#define nir_foreach_block_reverse_safe(block, impl) \
+ for (nir_block *block = nir_impl_last_block(impl), \
+ *prev = nir_block_cf_tree_prev(block); \
+ block != NULL; \
block = prev, prev = nir_block_cf_tree_prev(block))
-#define nir_foreach_block_in_cf_node(block, node) \
+#define nir_foreach_block_in_cf_node(block, node) \
for (nir_block *block = nir_cf_node_cf_tree_first(node); \
- block != nir_cf_node_cf_tree_next(node); \
+ block != nir_cf_node_cf_tree_next(node); \
block = nir_block_cf_tree_next(block))
+#define nir_foreach_block_in_cf_node_reverse(block, node) \
+ for (nir_block *block = nir_cf_node_cf_tree_last(node); \
+ block != nir_cf_node_cf_tree_prev(node); \
+ block = nir_block_cf_tree_prev(block))
+
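For reference, the canonical traversal these macros enable, as a minimal
sketch (nir_foreach_instr is the per-block instruction iterator defined
earlier in this header):

nir_foreach_block(block, impl) {
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_alu) {
         /* inspect or transform each ALU instruction here */
      }
   }
}
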
/* If the following CF node is an if, this function returns that if.
* Otherwise, it returns NULL.
*/
@@ -4413,12 +4930,13 @@ nir_loop *nir_block_get_following_loop(nir_block *block);
nir_block **nir_block_get_predecessors_sorted(const nir_block *block, void *mem_ctx);
-void nir_index_local_regs(nir_function_impl *impl);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);
void nir_index_blocks(nir_function_impl *impl);
+void nir_shader_clear_pass_flags(nir_shader *shader);
+
unsigned nir_shader_index_vars(nir_shader *shader, nir_variable_mode modes);
unsigned nir_function_impl_index_vars(nir_function_impl *impl);
@@ -4427,21 +4945,27 @@ void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table
void nir_print_instr(const nir_instr *instr, FILE *fp);
void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
void nir_log_shader_annotated_tagged(enum mesa_log_level level, const char *tag, nir_shader *shader, struct hash_table *annotations);
-#define nir_log_shadere(s) nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), NULL)
-#define nir_log_shaderw(s) nir_log_shader_annotated_tagged(MESA_LOG_WARN, (MESA_LOG_TAG), (s), NULL)
-#define nir_log_shaderi(s) nir_log_shader_annotated_tagged(MESA_LOG_INFO, (MESA_LOG_TAG), (s), NULL)
+#define nir_log_shadere(s) nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), NULL)
+#define nir_log_shaderw(s) nir_log_shader_annotated_tagged(MESA_LOG_WARN, (MESA_LOG_TAG), (s), NULL)
+#define nir_log_shaderi(s) nir_log_shader_annotated_tagged(MESA_LOG_INFO, (MESA_LOG_TAG), (s), NULL)
#define nir_log_shader_annotated(s, annotations) nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), annotations)
char *nir_shader_as_str(nir_shader *nir, void *mem_ctx);
char *nir_shader_as_str_annotated(nir_shader *nir, struct hash_table *annotations, void *mem_ctx);
+char *nir_instr_as_str(const nir_instr *instr, void *mem_ctx);
/** Shallow clone of a single instruction. */
nir_instr *nir_instr_clone(nir_shader *s, const nir_instr *orig);
+/** Clone a single instruction, including a remap table to rewrite sources. */
+nir_instr *nir_instr_clone_deep(nir_shader *s, const nir_instr *orig,
+ struct hash_table *remap_table);
+
/** Shallow clone of a single ALU instruction. */
nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);
nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
+nir_function *nir_function_clone(nir_shader *ns, const nir_function *fxn);
nir_function_impl *nir_function_impl_clone(nir_shader *shader,
const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
@@ -4475,84 +4999,133 @@ should_skip_nir(const char *name)
}
static inline bool
-should_clone_nir(void)
+should_print_nir(nir_shader *shader)
{
- static int should_clone = -1;
- if (should_clone < 0)
- should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);
+ if ((shader->info.internal && !NIR_DEBUG(PRINT_INTERNAL)) ||
+ shader->info.stage < 0 ||
+ shader->info.stage > MESA_SHADER_KERNEL)
+ return false;
- return should_clone;
+ return unlikely(nir_debug_print_shader[shader->info.stage]);
+}
+#else
+static inline void
+nir_validate_shader(nir_shader *shader, const char *when)
+{
+ (void)shader;
+ (void)when;
+}
+static inline void
+nir_validate_ssa_dominance(nir_shader *shader, const char *when)
+{
+ (void)shader;
+ (void)when;
+}
+static inline void
+nir_metadata_set_validation_flag(nir_shader *shader)
+{
+ (void)shader;
+}
+static inline void
+nir_metadata_check_validation_flag(nir_shader *shader)
+{
+ (void)shader;
}
-
static inline bool
-should_serialize_deserialize_nir(void)
+should_skip_nir(UNUSED const char *pass_name)
{
- static int test_serialize = -1;
- if (test_serialize < 0)
- test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);
-
- return test_serialize;
+ return false;
}
-
static inline bool
-should_print_nir(nir_shader *shader)
+should_print_nir(UNUSED nir_shader *shader)
{
- static int should_print = -1;
- if (should_print < 0)
- should_print = env_var_as_unsigned("NIR_PRINT", 0);
-
- if (should_print == 1)
- return !shader->info.internal;
-
- return should_print;
+ return false;
}
-#else
-static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
-static inline void nir_validate_ssa_dominance(nir_shader *shader, const char *when) { (void) shader; (void)when; }
-static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
-static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
-static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
-static inline bool should_clone_nir(void) { return false; }
-static inline bool should_serialize_deserialize_nir(void) { return false; }
-static inline bool should_print_nir(nir_shader *shader) { return false; }
#endif /* NDEBUG */
-#define _PASS(pass, nir, do_pass) do { \
- if (should_skip_nir(#pass)) { \
- printf("skipping %s\n", #pass); \
- break; \
- } \
- do_pass \
- if (should_clone_nir()) { \
- nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
- nir_shader_replace(nir, clone); \
- } \
- if (should_serialize_deserialize_nir()) { \
- nir_shader_serialize_deserialize(nir); \
- } \
+#define _PASS(pass, nir, do_pass) \
+ do { \
+ if (should_skip_nir(#pass)) { \
+ printf("skipping %s\n", #pass); \
+ break; \
+ } \
+ do_pass if (NIR_DEBUG(CLONE)) \
+ { \
+ nir_shader *_clone = nir_shader_clone(ralloc_parent(nir), nir);\
+ nir_shader_replace(nir, _clone); \
+ } \
+ if (NIR_DEBUG(SERIALIZE)) { \
+ nir_shader_serialize_deserialize(nir); \
+ } \
+ } while (0)
+
+#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, { \
+ nir_metadata_set_validation_flag(nir); \
+ if (should_print_nir(nir)) \
+ printf("%s\n", #pass); \
+ if (pass(nir, ##__VA_ARGS__)) { \
+ nir_validate_shader(nir, "after " #pass " in " __FILE__); \
+ UNUSED bool _; \
+ progress = true; \
+ if (should_print_nir(nir)) \
+ nir_print_shader(nir, stdout); \
+ nir_metadata_check_validation_flag(nir); \
+ } \
+})
+
+#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, { \
+ if (should_print_nir(nir)) \
+ printf("%s\n", #pass); \
+ pass(nir, ##__VA_ARGS__); \
+ nir_validate_shader(nir, "after " #pass " in " __FILE__); \
+ if (should_print_nir(nir)) \
+ nir_print_shader(nir, stdout); \
+})
+
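For orientation, a typical fixed-point optimization loop built from these
macros; the passes named are real NIR passes, but the loop shape itself is
only an illustrative sketch:

bool progress;
do {
   progress = false;
   NIR_PASS(progress, nir, nir_opt_dce);
   NIR_PASS(progress, nir, nir_opt_cse);
   NIR_PASS(progress, nir, nir_opt_algebraic);
} while (progress);

/* NIR_PASS_V when the caller doesn't care about progress. */
NIR_PASS_V(nir, nir_lower_system_values);
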
+#define _NIR_LOOP_PASS(progress, idempotent, skip, nir, pass, ...) \
+do { \
+ bool nir_loop_pass_progress = false; \
+ if (!_mesa_set_search(skip, (void (*)())&pass)) \
+ NIR_PASS(nir_loop_pass_progress, nir, pass, ##__VA_ARGS__); \
+ if (nir_loop_pass_progress) \
+ _mesa_set_clear(skip, NULL); \
+ if (idempotent || !nir_loop_pass_progress) \
+ _mesa_set_add(skip, (void (*)())&pass); \
+ UNUSED bool _ = false; \
+ progress |= nir_loop_pass_progress; \
} while (0)
-#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, \
- nir_metadata_set_validation_flag(nir); \
- if (should_print_nir(nir)) \
- printf("%s\n", #pass); \
- if (pass(nir, ##__VA_ARGS__)) { \
- nir_validate_shader(nir, "after " #pass); \
- progress = true; \
- if (should_print_nir(nir)) \
- nir_print_shader(nir, stdout); \
- nir_metadata_check_validation_flag(nir); \
- } \
-)
-
-#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, \
- if (should_print_nir(nir)) \
- printf("%s\n", #pass); \
- pass(nir, ##__VA_ARGS__); \
- nir_validate_shader(nir, "after " #pass); \
- if (should_print_nir(nir)) \
- nir_print_shader(nir, stdout); \
-)
+/* Helper to skip a pass if no different passes have made progress since it was
+ * previously run. Note that two passes are considered the same if they have
+ * the same function pointer, even if they used different options.
+ *
+ * The usage of this is mostly identical to NIR_PASS. "skip" is a "struct set *"
+ * (created by _mesa_pointer_set_create) which the macro uses to keep track of
+ * already run passes.
+ *
+ * Example:
+ * bool progress = true;
+ * struct set *skip = _mesa_pointer_set_create(NULL);
+ * while (progress) {
+ * progress = false;
+ * NIR_LOOP_PASS(progress, skip, nir, pass1);
+ * NIR_LOOP_PASS_NOT_IDEMPOTENT(progress, skip, nir, nir_opt_algebraic);
+ * NIR_LOOP_PASS(progress, skip, nir, pass2);
+ * ...
+ * }
+ * _mesa_set_destroy(skip, NULL);
+ *
+ * You shouldn't mix usage of this with the NIR_PASS set of helpers, without
+ * using a new "skip" in-between.
+ */
+#define NIR_LOOP_PASS(progress, skip, nir, pass, ...) \
+ _NIR_LOOP_PASS(progress, true, skip, nir, pass, ##__VA_ARGS__)
+
+/* Like NIR_LOOP_PASS, but use this for passes which may make further progress
+ * when repeated.
+ */
+#define NIR_LOOP_PASS_NOT_IDEMPOTENT(progress, skip, nir, pass, ...) \
+ _NIR_LOOP_PASS(progress, false, skip, nir, pass, ##__VA_ARGS__)
#define NIR_SKIP(name) should_skip_nir(#name)
@@ -4573,17 +5146,17 @@ typedef bool (*nir_instr_writemask_filter_cb)(const nir_instr *,
* should either return NULL indicating that no lowering needs to be done or
* emit a sequence of instructions using the provided builder (whose cursor
* will already be placed after the instruction to be lowered) and return the
- * resulting nir_ssa_def.
+ * resulting nir_def.
*/
-typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
- nir_instr *, void *);
+typedef nir_def *(*nir_lower_instr_cb)(struct nir_builder *,
+ nir_instr *, void *);
/**
* Special return value for nir_lower_instr_cb when some progress occurred
* (like changing an input to the instr) that didn't result in a replacement
* SSA def being generated.
*/
-#define NIR_LOWER_INSTR_PROGRESS ((nir_ssa_def *)(uintptr_t)1)
+#define NIR_LOWER_INSTR_PROGRESS ((nir_def *)(uintptr_t)1)
/**
* Special return value for nir_lower_instr_cb when some progress occurred
@@ -4591,7 +5164,7 @@ typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
* (like a store)
*/
-#define NIR_LOWER_INSTR_PROGRESS_REPLACE ((nir_ssa_def *)(uintptr_t)2)
+#define NIR_LOWER_INSTR_PROGRESS_REPLACE ((nir_def *)(uintptr_t)2)
/** Iterate over all the instructions in a nir_function_impl and lower them
* using the provided callbacks
@@ -4641,8 +5214,17 @@ void nir_dump_cfg(nir_shader *shader, FILE *fp);
void nir_gs_count_vertices_and_primitives(const nir_shader *shader,
int *out_vtxcnt,
int *out_prmcnt,
+ int *out_decomposed_prmcnt,
unsigned num_streams);
+typedef enum {
+ nir_group_all,
+ nir_group_same_resource_only,
+} nir_load_grouping;
+
+void nir_group_loads(nir_shader *shader, nir_load_grouping grouping,
+ unsigned max_distance);
+
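A one-line usage sketch; the max_distance value of 32 is an arbitrary
illustrative limit, not a recommendation:

nir_group_loads(shader, nir_group_same_resource_only, 32);
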
bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
@@ -4654,14 +5236,23 @@ bool nir_lower_returns(nir_shader *shader);
void nir_inline_function_impl(struct nir_builder *b,
const nir_function_impl *impl,
- nir_ssa_def **params,
+ nir_def **params,
struct hash_table *shader_var_remap);
bool nir_inline_functions(nir_shader *shader);
+void nir_cleanup_functions(nir_shader *shader);
+bool nir_link_shader_functions(nir_shader *shader,
+ const nir_shader *link_shader);
void nir_find_inlinable_uniforms(nir_shader *shader);
void nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
const uint32_t *uniform_values,
const uint16_t *uniform_dw_offsets);
+bool nir_collect_src_uniforms(const nir_src *src, int component,
+ uint32_t *uni_offsets, uint8_t *num_offsets,
+ unsigned max_num_bo, unsigned max_offset);
+void nir_add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
+ uint32_t *uni_offsets, uint8_t *num_offsets,
+ unsigned max_num_bo, unsigned max_offset);
bool nir_propagate_invariant(nir_shader *shader, bool invariant_prim);
@@ -4674,14 +5265,15 @@ bool nir_opt_memcpy(nir_shader *shader);
bool nir_lower_memcpy(nir_shader *shader);
void nir_fixup_deref_modes(nir_shader *shader);
+void nir_fixup_deref_types(nir_shader *shader);
bool nir_lower_global_vars_to_local(nir_shader *shader);
typedef enum {
- nir_lower_direct_array_deref_of_vec_load = (1 << 0),
- nir_lower_indirect_array_deref_of_vec_load = (1 << 1),
- nir_lower_direct_array_deref_of_vec_store = (1 << 2),
- nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
+ nir_lower_direct_array_deref_of_vec_load = (1 << 0),
+ nir_lower_indirect_array_deref_of_vec_load = (1 << 1),
+ nir_lower_direct_array_deref_of_vec_store = (1 << 2),
+ nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
} nir_lower_array_deref_of_vec_options;
bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
@@ -4690,11 +5282,12 @@ bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes,
uint32_t max_lower_array_len);
-bool nir_lower_indirect_builtin_uniform_derefs(nir_shader *shader);
+bool nir_lower_indirect_var_derefs(nir_shader *shader,
+ const struct set *vars);
-bool nir_lower_locals_to_regs(nir_shader *shader);
+bool nir_lower_locals_to_regs(nir_shader *shader, uint8_t bool_bitsize);
-void nir_lower_io_to_temporaries(nir_shader *shader,
+bool nir_lower_io_to_temporaries(nir_shader *shader,
nir_function_impl *entrypoint,
bool outputs, bool inputs);
@@ -4707,9 +5300,9 @@ void nir_lower_clip_halfz(nir_shader *shader);
void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
-void nir_gather_ssa_types(nir_function_impl *impl,
- BITSET_WORD *float_types,
- BITSET_WORD *int_types);
+void nir_gather_types(nir_function_impl *impl,
+ BITSET_WORD *float_types,
+ BITSET_WORD *int_types);
void nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
unsigned *size,
@@ -4725,12 +5318,42 @@ void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
void nir_link_varying_precision(nir_shader *producer, nir_shader *consumer);
+nir_variable *nir_clone_uniform_variable(nir_shader *nir,
+ nir_variable *uniform, bool spirv);
+nir_deref_instr *nir_clone_deref_instr(struct nir_builder *b,
+ nir_variable *var,
+ nir_deref_instr *deref);
+
+/* Return status from nir_opt_varyings. */
+typedef enum {
+  /* Whether the IR changed in a way that makes re-running NIR optimizations
+   * worthwhile, e.g. due to the removal of loads and stores. IO semantic
+   * changes such as compaction don't count as IR changes because they don't
+   * affect NIR optimizations.
+ */
+ nir_progress_producer = BITFIELD_BIT(0),
+ nir_progress_consumer = BITFIELD_BIT(1),
+} nir_opt_varyings_progress;
+
+nir_opt_varyings_progress
+nir_opt_varyings(nir_shader *producer, nir_shader *consumer, bool spirv,
+ unsigned max_uniform_components, unsigned max_ubos_per_stage);
+
+bool nir_slot_is_sysval_output(gl_varying_slot slot,
+ gl_shader_stage next_shader);
+bool nir_slot_is_varying(gl_varying_slot slot);
+bool nir_slot_is_sysval_output_and_varying(gl_varying_slot slot,
+ gl_shader_stage next_shader);
+bool nir_remove_varying(nir_intrinsic_instr *intr, gl_shader_stage next_shader);
+bool nir_remove_sysval_output(nir_intrinsic_instr *intr);
bool nir_lower_amul(nir_shader *shader,
int (*type_size)(const struct glsl_type *, bool));
bool nir_lower_ubo_vec4(nir_shader *shader);
+void nir_sort_variables_by_location(nir_shader *shader, nir_variable_mode mode);
void nir_assign_io_var_locations(nir_shader *shader,
nir_variable_mode mode,
unsigned *size,
@@ -4749,14 +5372,31 @@ typedef enum {
/* If set, this causes all 64-bit IO operations to be lowered on-the-fly
* to 32-bit operations. This is only valid for nir_var_shader_in/out
* modes.
+ *
+    * Note that this destroys dual-slot information, i.e. whether an input
+    * occupies the low or high half of a dvec4. Instead, it adds an offset of 1
+ * to the load (which is ambiguous) and expects driver locations of inputs
+ * to be final, which prevents any further optimizations.
+ *
+ * TODO: remove this in favor of nir_lower_io_lower_64bit_to_32_new.
*/
nir_lower_io_lower_64bit_to_32 = (1 << 0),
- /* If set, this forces all non-flat fragment shader inputs to be
- * interpolated as if with the "sample" qualifier. This requires
- * nir_shader_compiler_options::use_interpolated_input_intrinsics.
+   /* If set, this causes the subset of 64-bit IO operations involving
+    * floats to be lowered on-the-fly to 32-bit operations. This is only
+    * valid for nir_var_shader_in/out modes.
+ */
+ nir_lower_io_lower_64bit_float_to_32 = (1 << 1),
+
+ /* This causes all 64-bit IO operations to be lowered to 32-bit operations.
+ * This is only valid for nir_var_shader_in/out modes.
+ *
+ * Only VS inputs: Dual slot information is preserved as nir_io_semantics::
+ * high_dvec2 and gathered into shader_info::dual_slot_inputs, so that
+ * the shader can be arbitrarily optimized and the low or high half of
+ * dvec4 can be DCE'd independently without affecting the other half.
*/
- nir_lower_io_force_sample_interpolation = (1 << 1),
+ nir_lower_io_lower_64bit_to_32_new = (1 << 2),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
nir_variable_mode modes,
@@ -4764,6 +5404,9 @@ bool nir_lower_io(nir_shader *shader,
nir_lower_io_options);
bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode modes);
+bool nir_lower_color_inputs(nir_shader *nir);
+void nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs);
+bool nir_io_add_intrinsic_xfb_info(nir_shader *nir);
bool
nir_lower_vars_to_explicit_types(nir_shader *shader,
@@ -4788,13 +5431,19 @@ typedef enum {
nir_address_format_64bit_global,
/**
+ * An address format which is a 64-bit global GPU address encoded as a
+ * 2x32-bit vector.
+ */
+ nir_address_format_2x32bit_global,
+
+ /**
* An address format which is a 64-bit global base address and a 32-bit
* offset.
*
- * The address is comprised as a 32-bit vec4 where .xy are a uint64_t base
- * address stored with the low bits in .x and high bits in .y, .z is
- * undefined, and .w is an offset. This is intended to match
- * 64bit_bounded_global but without the bounds checking.
+ * This is identical to 64bit_bounded_global except that bounds checking
+ * is not applied when lowering to global access. Even though the size is
+ * never used for an actual bounds check, it needs to be valid so we can
+ * lower deref_buffer_array_length properly.
*/
nir_address_format_64bit_global_32bit_offset,
@@ -4818,7 +5467,7 @@ typedef enum {
* An address format which is a 64-bit value, where the high 32 bits
* are a buffer index, and the low 32 bits are an offset.
*/
- nir_address_format_32bit_index_offset_pack64,
+ nir_address_format_32bit_index_offset_pack64,
/**
* An address format which is comprised of a vec3 where the first two
@@ -4854,7 +5503,7 @@ typedef enum {
/**
* An address format which is a simple 32-bit offset cast to 64-bit.
*/
- nir_address_format_32bit_offset_as_64bit,
+ nir_address_format_32bit_offset_as_64bit,
/**
* An address format representing a purely logical addressing model. In
@@ -4866,43 +5515,11 @@ typedef enum {
nir_address_format_logical,
} nir_address_format;
-static inline unsigned
-nir_address_format_bit_size(nir_address_format addr_format)
-{
- switch (addr_format) {
- case nir_address_format_32bit_global: return 32;
- case nir_address_format_64bit_global: return 64;
- case nir_address_format_64bit_global_32bit_offset: return 32;
- case nir_address_format_64bit_bounded_global: return 32;
- case nir_address_format_32bit_index_offset: return 32;
- case nir_address_format_32bit_index_offset_pack64: return 64;
- case nir_address_format_vec2_index_32bit_offset: return 32;
- case nir_address_format_62bit_generic: return 64;
- case nir_address_format_32bit_offset: return 32;
- case nir_address_format_32bit_offset_as_64bit: return 64;
- case nir_address_format_logical: return 32;
- }
- unreachable("Invalid address format");
-}
+unsigned
+nir_address_format_bit_size(nir_address_format addr_format);
-static inline unsigned
-nir_address_format_num_components(nir_address_format addr_format)
-{
- switch (addr_format) {
- case nir_address_format_32bit_global: return 1;
- case nir_address_format_64bit_global: return 1;
- case nir_address_format_64bit_global_32bit_offset: return 4;
- case nir_address_format_64bit_bounded_global: return 4;
- case nir_address_format_32bit_index_offset: return 2;
- case nir_address_format_32bit_index_offset_pack64: return 1;
- case nir_address_format_vec2_index_32bit_offset: return 3;
- case nir_address_format_62bit_generic: return 1;
- case nir_address_format_32bit_offset: return 1;
- case nir_address_format_32bit_offset_as_64bit: return 1;
- case nir_address_format_logical: return 1;
- }
- unreachable("Invalid address format");
-}
+unsigned
+nir_address_format_num_components(nir_address_format addr_format);
static inline const struct glsl_type *
nir_address_format_to_glsl_type(nir_address_format addr_format)
@@ -4915,16 +5532,26 @@ nir_address_format_to_glsl_type(nir_address_format addr_format)
const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);
-nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
- nir_address_format addr_format);
+nir_def *nir_build_addr_iadd(struct nir_builder *b, nir_def *addr,
+ nir_address_format addr_format,
+ nir_variable_mode modes,
+ nir_def *offset);
-nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
- nir_address_format addr_format);
+nir_def *nir_build_addr_iadd_imm(struct nir_builder *b, nir_def *addr,
+ nir_address_format addr_format,
+ nir_variable_mode modes,
+ int64_t offset);
+
+nir_def *nir_build_addr_ieq(struct nir_builder *b, nir_def *addr0, nir_def *addr1,
+ nir_address_format addr_format);
-nir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b,
- nir_deref_instr *deref,
- nir_ssa_def *base_addr,
- nir_address_format addr_format);
+nir_def *nir_build_addr_isub(struct nir_builder *b, nir_def *addr0, nir_def *addr1,
+ nir_address_format addr_format);
+
+nir_def *nir_explicit_io_address_from_deref(struct nir_builder *b,
+ nir_deref_instr *deref,
+ nir_def *base_addr,
+ nir_address_format addr_format);
bool nir_get_explicit_deref_align(nir_deref_instr *deref,
bool default_to_type_align,
@@ -4933,29 +5560,150 @@ bool nir_get_explicit_deref_align(nir_deref_instr *deref,
void nir_lower_explicit_io_instr(struct nir_builder *b,
nir_intrinsic_instr *io_instr,
- nir_ssa_def *addr,
+ nir_def *addr,
nir_address_format addr_format);
bool nir_lower_explicit_io(nir_shader *shader,
nir_variable_mode modes,
nir_address_format);
+typedef struct {
+ uint8_t num_components;
+ uint8_t bit_size;
+ uint16_t align;
+} nir_mem_access_size_align;
+
+/* clang-format off */
+typedef nir_mem_access_size_align
+ (*nir_lower_mem_access_bit_sizes_cb)(nir_intrinsic_op intrin,
+ uint8_t bytes,
+ uint8_t bit_size,
+ uint32_t align_mul,
+ uint32_t align_offset,
+ bool offset_is_const,
+ const void *cb_data);
+/* clang-format on */
+
+typedef struct {
+ nir_lower_mem_access_bit_sizes_cb callback;
+ nir_variable_mode modes;
+ bool may_lower_unaligned_stores_to_atomics;
+ void *cb_data;
+} nir_lower_mem_access_bit_sizes_options;
+
+bool nir_lower_mem_access_bit_sizes(nir_shader *shader,
+ const nir_lower_mem_access_bit_sizes_options *options);
+
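A sketch of a driver callback for nir_lower_mem_access_bit_sizes(); the
policy shown (cap every access at a vec4 of 32-bit channels) is invented for
illustration, and a real callback must also handle sub-dword and misaligned
accesses:

static nir_mem_access_size_align
limit_to_vec4_dwords(nir_intrinsic_op intrin, uint8_t bytes,
                     uint8_t bit_size, uint32_t align_mul,
                     uint32_t align_offset, bool offset_is_const,
                     const void *cb_data)
{
   const nir_mem_access_size_align r = {
      .num_components = (uint8_t)MIN2(4, MAX2(1, bytes / 4)),
      .bit_size = 32,
      .align = 4, /* byte alignment of the replacement access */
   };
   return r;
}

const nir_lower_mem_access_bit_sizes_options mem_opts = {
   .callback = limit_to_vec4_dwords,
   .modes = nir_var_mem_ssbo | nir_var_mem_global,
};
nir_lower_mem_access_bit_sizes(shader, &mem_opts);
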
+typedef struct {
+ /* Lower load_ubo to be robust. Out-of-bounds loads will return UNDEFINED
+ * values (not necessarily zero).
+ */
+ bool lower_ubo;
+
+ /* Lower load_ssbo/store_ssbo/ssbo_atomic(_swap) to be robust. Out-of-bounds
+ * loads and atomics will return UNDEFINED values (not necessarily zero).
+ * Out-of-bounds stores and atomics CORRUPT the contents of the SSBO.
+ *
+ * This suffices for robustBufferAccess but not robustBufferAccess2.
+ */
+ bool lower_ssbo;
+
+ /* Lower all image_load/image_store/image_atomic(_swap) instructions to be
+ * robust. Out-of-bounds loads will return ZERO.
+ *
+ * This suffices for robustImageAccess but not robustImageAccess2.
+ */
+ bool lower_image;
+
+ /* Lower all buffer image instructions as above. Implied by lower_image. */
+ bool lower_buffer_image;
+
+ /* Lower image_atomic(_swap) for all dimensions. Implied by lower_image. */
+ bool lower_image_atomic;
+
+ /* Vulkan's robustBufferAccess feature is only concerned with buffers that
+ * are bound through descriptor sets, so shared memory is not included, but
+ * it may be useful to enable this for debugging.
+ */
+ bool lower_shared;
+} nir_lower_robust_access_options;
+
+bool nir_lower_robust_access(nir_shader *s,
+ const nir_lower_robust_access_options *opts);
+
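A usage sketch for Vulkan-style robustness; the option mix is illustrative,
and the flags above define the exact semantics:

const nir_lower_robust_access_options robust_opts = {
   .lower_ubo = true,
   .lower_ssbo = true,
   .lower_image = true,
};
NIR_PASS(progress, shader, nir_lower_robust_access, &robust_opts);
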
+/* clang-format off */
+typedef bool (*nir_should_vectorize_mem_func)(unsigned align_mul,
+ unsigned align_offset,
+ unsigned bit_size,
+ unsigned num_components,
+ nir_intrinsic_instr *low,
+ nir_intrinsic_instr *high,
+ void *data);
+/* clang-format on */
+
+typedef struct {
+ nir_should_vectorize_mem_func callback;
+ nir_variable_mode modes;
+ nir_variable_mode robust_modes;
+ void *cb_data;
+ bool has_shared2_amd;
+} nir_load_store_vectorize_options;
+
+bool nir_opt_load_store_vectorize(nir_shader *shader, const nir_load_store_vectorize_options *options);
+
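A sketch of a vectorization callback; the acceptance policy (merge up to 128
naturally aligned bits) is purely illustrative:

static bool
merge_within_vec4(unsigned align_mul, unsigned align_offset,
                  unsigned bit_size, unsigned num_components,
                  nir_intrinsic_instr *low, nir_intrinsic_instr *high,
                  void *data)
{
   /* Accept the merged access only if it fits in 128 bits and the known
    * alignment is a multiple of the element size. */
   return num_components * bit_size <= 128 &&
          align_mul % (bit_size / 8) == 0;
}

const nir_load_store_vectorize_options vec_opts = {
   .callback = merge_within_vec4,
   .modes = nir_var_mem_ssbo,
};
nir_opt_load_store_vectorize(shader, &vec_opts);
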
+typedef bool (*nir_lower_shader_calls_should_remat_func)(nir_instr *instr, void *data);
+
+typedef struct nir_lower_shader_calls_options {
+ /* Address format used for load/store operations on the call stack. */
+ nir_address_format address_format;
+
+ /* Stack alignment */
+ unsigned stack_alignment;
+
+   /* Put loads from the stack as close as possible to where they're needed.
+    * You might want to disable combined_loads for best effect.
+ */
+ bool localized_loads;
+
+ /* If this function pointer is not NULL, lower_shader_calls will run
+ * nir_opt_load_store_vectorize for stack load/store operations. Otherwise
+    * the optimization is not run.
+ */
+ nir_should_vectorize_mem_func vectorizer_callback;
+
+ /* Data passed to vectorizer_callback */
+ void *vectorizer_data;
+
+ /* If this function pointer is not NULL, lower_shader_calls will call this
+ * function on instructions that require spill/fill/rematerialization of
+ * their value. If this function returns true, lower_shader_calls will
+ * ensure that the instruction is rematerialized, adding the sources of the
+ * instruction to be spilled/filled.
+ */
+ nir_lower_shader_calls_should_remat_func should_remat_callback;
+
+ /* Data passed to should_remat_callback */
+ void *should_remat_data;
+} nir_lower_shader_calls_options;
+
bool
nir_lower_shader_calls(nir_shader *shader,
- nir_address_format address_format,
- unsigned stack_alignment,
+ const nir_lower_shader_calls_options *options,
nir_shader ***resume_shaders_out,
uint32_t *num_resume_shaders_out,
void *mem_ctx);
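
A minimal call sketch, assuming a caller-provided ralloc context mem_ctx and
a ray-generation shader; only address_format and stack_alignment are filled
in, and the remaining options default to zero/NULL:

nir_lower_shader_calls_options call_opts = {
   .address_format = nir_address_format_64bit_global,
   .stack_alignment = 16,
};
nir_shader **resume_shaders = NULL;
uint32_t num_resume_shaders = 0;
nir_lower_shader_calls(shader, &call_opts,
                       &resume_shaders, &num_resume_shaders, mem_ctx);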
+int nir_get_io_offset_src_number(const nir_intrinsic_instr *instr);
+int nir_get_io_arrayed_index_src_number(const nir_intrinsic_instr *instr);
+
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
-nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
+nir_src *nir_get_io_arrayed_index_src(nir_intrinsic_instr *instr);
nir_src *nir_get_shader_call_payload_src(nir_intrinsic_instr *call);
bool nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage);
-bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
-bool nir_lower_regs_to_ssa(nir_shader *shader);
+bool nir_lower_reg_intrinsics_to_ssa_impl(nir_function_impl *impl);
+bool nir_lower_reg_intrinsics_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);
bool nir_remove_dead_derefs(nir_shader *shader);
@@ -4974,11 +5722,14 @@ bool nir_lower_variable_initializers(nir_shader *shader,
bool nir_zero_initialize_shared_memory(nir_shader *shader,
const unsigned shared_size,
const unsigned chunk_size);
+bool nir_clear_shared_memory(nir_shader *shader,
+ const unsigned shared_size,
+ const unsigned chunk_size);
-bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
-bool nir_lower_vec_to_movs(nir_shader *shader, nir_instr_writemask_filter_cb cb,
+bool nir_move_vec_src_uses_to_dest(nir_shader *shader, bool skip_const_srcs);
+bool nir_lower_vec_to_regs(nir_shader *shader, nir_instr_writemask_filter_cb cb,
const void *_data);
-void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
+bool nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one,
const gl_state_index16 *alpha_ref_state_tokens);
bool nir_lower_alu(nir_shader *shader);
@@ -4986,11 +5737,17 @@ bool nir_lower_alu(nir_shader *shader);
bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
bool always_precise);
+bool nir_scale_fdiv(nir_shader *shader);
+
bool nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *data);
+bool nir_lower_alu_width(nir_shader *shader, nir_vectorize_cb cb, const void *data);
+bool nir_lower_alu_vec8_16_srcs(nir_shader *shader);
bool nir_lower_bool_to_bitsize(nir_shader *shader);
-bool nir_lower_bool_to_float(nir_shader *shader);
+bool nir_lower_bool_to_float(nir_shader *shader, bool has_fcsel_ne);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_opt_simplify_convert_alu_types(nir_shader *shader);
+bool nir_lower_const_arrays_to_uniforms(nir_shader *shader,
+ unsigned max_uniform_components);
bool nir_lower_convert_alu_types(nir_shader *shader,
bool (*should_lower)(nir_intrinsic_instr *));
bool nir_lower_constant_convert_alu_types(nir_shader *shader);
@@ -5000,36 +5757,59 @@ bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all);
void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
-void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
+bool nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
bool outputs_only);
-void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
+bool nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask,
+                            nir_instr_filter_cb filter, void *filter_data);
bool nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
bool nir_vectorize_tess_levels(nir_shader *shader);
+nir_shader *nir_create_passthrough_tcs_impl(const nir_shader_compiler_options *options,
+ unsigned *locations, unsigned num_locations,
+ uint8_t patch_vertices);
+nir_shader *nir_create_passthrough_tcs(const nir_shader_compiler_options *options,
+ const nir_shader *vs, uint8_t patch_vertices);
+nir_shader *nir_create_passthrough_gs(const nir_shader_compiler_options *options,
+ const nir_shader *prev_stage,
+ enum mesa_prim primitive_type,
+ enum mesa_prim output_primitive_type,
+ bool emulate_edgeflags,
+ bool force_line_strip_out);
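A usage sketch for the passthrough TCS helper, assuming vs_nir is the linked
vertex shader; the patch size of 3 selects triangle patches:

nir_shader *tcs =
   nir_create_passthrough_tcs(options, vs_nir, 3 /* patch_vertices */);
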
bool nir_lower_fragcolor(nir_shader *shader, unsigned max_cbufs);
bool nir_lower_fragcoord_wtrans(nir_shader *shader);
-void nir_lower_viewport_transform(nir_shader *shader);
+bool nir_lower_frag_coord_to_pixel_coord(nir_shader *shader);
+bool nir_lower_viewport_transform(nir_shader *shader);
bool nir_lower_uniforms_to_ubo(nir_shader *shader, bool dword_packed, bool load_vec4);
bool nir_lower_is_helper_invocation(nir_shader *shader);
+bool nir_lower_single_sampled(nir_shader *shader);
+
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
uint8_t ballot_bit_size;
uint8_t ballot_components;
- bool lower_to_scalar:1;
- bool lower_vote_trivial:1;
- bool lower_vote_eq:1;
- bool lower_subgroup_masks:1;
- bool lower_shuffle:1;
- bool lower_shuffle_to_32bit:1;
- bool lower_shuffle_to_swizzle_amd:1;
- bool lower_quad:1;
- bool lower_quad_broadcast_dynamic:1;
- bool lower_quad_broadcast_dynamic_to_const:1;
- bool lower_elect:1;
- bool lower_read_invocation_to_cond:1;
+ bool lower_to_scalar : 1;
+ bool lower_vote_trivial : 1;
+ bool lower_vote_eq : 1;
+ bool lower_vote_bool_eq : 1;
+ bool lower_first_invocation_to_ballot : 1;
+ bool lower_read_first_invocation : 1;
+ bool lower_subgroup_masks : 1;
+ bool lower_relative_shuffle : 1;
+ bool lower_shuffle_to_32bit : 1;
+ bool lower_shuffle_to_swizzle_amd : 1;
+ bool lower_shuffle : 1;
+ bool lower_quad : 1;
+ bool lower_quad_broadcast_dynamic : 1;
+ bool lower_quad_broadcast_dynamic_to_const : 1;
+ bool lower_elect : 1;
+ bool lower_read_invocation_to_cond : 1;
+ bool lower_rotate_to_shuffle : 1;
+ bool lower_ballot_bit_count_to_mbcnt_amd : 1;
+ bool lower_inverse_ballot : 1;
+ bool lower_boolean_reduce : 1;
+ bool lower_boolean_shuffle : 1;
} nir_lower_subgroups_options;
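
An options sketch for a wave32 GPU; the flag selection is illustrative, not
a recommendation:

const nir_lower_subgroups_options subgroup_opts = {
   .subgroup_size = 32,
   .ballot_bit_size = 32,
   .ballot_components = 1,
   .lower_to_scalar = true,
   .lower_quad = true,
};
nir_lower_subgroups(shader, &subgroup_opts);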
bool nir_lower_subgroups(nir_shader *shader,
@@ -5037,17 +5817,38 @@ bool nir_lower_subgroups(nir_shader *shader,
bool nir_lower_system_values(nir_shader *shader);
+nir_def *
+nir_build_lowered_load_helper_invocation(struct nir_builder *b);
+
typedef struct nir_lower_compute_system_values_options {
- bool has_base_global_invocation_id:1;
- bool has_base_workgroup_id:1;
- bool shuffle_local_ids_for_quad_derivatives:1;
- bool lower_local_invocation_index:1;
+ bool has_base_global_invocation_id : 1;
+ bool has_base_workgroup_id : 1;
+ bool shuffle_local_ids_for_quad_derivatives : 1;
+ bool lower_local_invocation_index : 1;
+ bool lower_cs_local_id_to_index : 1;
+ bool lower_workgroup_id_to_index : 1;
+ /* At shader execution time, check if WorkGroupId should be 1D
+ * and compute it quickly. Fall back to slow computation if not.
+ */
+ bool shortcut_1d_workgroup_id : 1;
+ uint32_t num_workgroups[3]; /* Compile-time-known dispatch sizes, or 0 if unknown. */
} nir_lower_compute_system_values_options;
bool nir_lower_compute_system_values(nir_shader *shader,
const nir_lower_compute_system_values_options *options);
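
An options sketch for hardware that provides a base workgroup id but no flat
local invocation index; the combination is illustrative:

const nir_lower_compute_system_values_options csv_opts = {
   .has_base_workgroup_id = true,
   .lower_local_invocation_index = true,
};
nir_lower_compute_system_values(shader, &csv_opts);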
-enum PACKED nir_lower_tex_packing {
+struct nir_lower_sysvals_to_varyings_options {
+ bool frag_coord : 1;
+ bool front_face : 1;
+ bool point_coord : 1;
+};
+
+bool
+nir_lower_sysvals_to_varyings(nir_shader *shader,
+ const struct nir_lower_sysvals_to_varyings_options *options);
+
+/***/
+enum ENUM_PACKED nir_lower_tex_packing {
/** No packing */
nir_lower_tex_packing_none = 0,
/**
@@ -5059,6 +5860,7 @@ enum PACKED nir_lower_tex_packing {
nir_lower_tex_packing_8,
};
+/***/
typedef struct nir_lower_tex_options {
/**
* bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
@@ -5067,6 +5869,11 @@ typedef struct nir_lower_tex_options {
unsigned lower_txp;
/**
+ * If true, lower texture projector for any array sampler dims
+ */
+ bool lower_txp_array;
+
+ /**
* If true, lower away nir_tex_src_offset for all texelfetch instructions.
*/
bool lower_txf_offset;
@@ -5077,6 +5884,12 @@ typedef struct nir_lower_tex_options {
bool lower_rect_offset;
/**
+ * If not NULL, this filter will return true for tex instructions that
+ * should lower away nir_tex_src_offset.
+ */
+ nir_instr_filter_cb lower_offset_filter;
+
+ /**
* If true, lower rect textures to 2D, using txs to fetch the
* texture dimensions and dividing the texture coords by the
* texture dims to normalize.
@@ -5084,19 +5897,33 @@ typedef struct nir_lower_tex_options {
bool lower_rect;
/**
+ * If true, lower 1D textures to 2D. This requires the GL/VK driver to map 1D
+ * textures to 2D textures with height=1.
+ *
+ * lower_1d_shadow does this lowering for shadow textures only.
+ */
+ bool lower_1d;
+ bool lower_1d_shadow;
+
+ /**
* If true, convert yuv to rgb.
*/
unsigned lower_y_uv_external;
+ unsigned lower_y_vu_external;
unsigned lower_y_u_v_external;
unsigned lower_yx_xuxv_external;
+ unsigned lower_yx_xvxu_external;
unsigned lower_xy_uxvx_external;
+ unsigned lower_xy_vxux_external;
unsigned lower_ayuv_external;
unsigned lower_xyuv_external;
unsigned lower_yuv_external;
unsigned lower_yu_yv_external;
+ unsigned lower_yv_yu_external;
unsigned lower_y41x_external;
unsigned bt709_external;
unsigned bt2020_external;
+ unsigned yuv_full_range_external;
/**
* To emulate certain texture wrap modes, this can be used
@@ -5156,6 +5983,11 @@ typedef struct nir_lower_tex_options {
bool lower_txd_3d;
/**
+    * If true, lower nir_texop_txd on any array surfaces with nir_texop_txl.
+ */
+ bool lower_txd_array;
+
+ /**
* If true, lower nir_texop_txd on shadow samplers (except cube maps)
* with nir_texop_txl. Notice that cube map shadow samplers are lowered
* with lower_txd_cube_map.
@@ -5169,6 +6001,11 @@ typedef struct nir_lower_tex_options {
bool lower_txd;
/**
+ * If true, lower nir_texop_txd when it uses min_lod.
+ */
+ bool lower_txd_clamp;
+
+ /**
* If true, lower nir_texop_txb that try to use shadow compare and min_lod
* at the same time to a nir_texop_lod, some math, and nir_texop_tex.
*/
@@ -5222,34 +6059,91 @@ typedef struct nir_lower_tex_options {
bool lower_tg4_offsets;
/**
- * To lower packed sampler return formats.
- *
- * Indexed by sampler-id.
+    * Lower txf_ms to fragment_mask_fetch and fragment_fetch, and lower
+    * samples_identical to fragment_mask_fetch.
+ */
+ bool lower_to_fragment_fetch_amd;
+
+ /**
+ * To lower packed sampler return formats. This will be called for all
+ * tex instructions.
+ */
+ enum nir_lower_tex_packing (*lower_tex_packing_cb)(const nir_tex_instr *tex, const void *data);
+ const void *lower_tex_packing_data;
+
+ /**
+ * If true, lower nir_texop_lod to return -FLT_MAX if the sum of the
+ * absolute values of derivatives is 0 for all coordinates.
*/
- enum nir_lower_tex_packing lower_tex_packing[32];
+ bool lower_lod_zero_width;
+
+   /* Turns nir_texop_tex and other ops with an implicit derivative, in stages
+    * without implicit derivatives (like the vertex shader), into ops with an
+    * explicit LOD with a value of 0.
+ */
+ bool lower_invalid_implicit_lod;
+
+ /* If true, texture_index (sampler_index) will be zero if a texture_offset
+ * (sampler_offset) source is present. This is convenient for backends that
+    * support indirect indexing of textures (samplers) but not offsetting them.
+ */
+ bool lower_index_to_offset;
+
+ /**
+ * Payload data to be sent to callback / filter functions.
+ */
+ void *callback_data;
} nir_lower_tex_options;
/** Lowers complex texture instructions to simpler ones */
bool nir_lower_tex(nir_shader *shader,
const nir_lower_tex_options *options);
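
An options sketch for nir_lower_tex(); lowering projectors for every sampler
dim plus rectangle textures is a common but purely illustrative combination:

const nir_lower_tex_options tex_opts = {
   .lower_txp = ~0u, /* bitmask over (1 << GLSL_SAMPLER_DIM_x) */
   .lower_rect = true,
   .lower_invalid_implicit_lod = true,
};
NIR_PASS(progress, shader, nir_lower_tex, &tex_opts);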
+typedef struct nir_lower_tex_shadow_swizzle {
+ unsigned swizzle_r : 3;
+ unsigned swizzle_g : 3;
+ unsigned swizzle_b : 3;
+ unsigned swizzle_a : 3;
+} nir_lower_tex_shadow_swizzle;
+
+bool
+nir_lower_tex_shadow(nir_shader *s,
+ unsigned n_states,
+ enum compare_func *compare_func,
+ nir_lower_tex_shadow_swizzle *tex_swizzles);
+
typedef struct nir_lower_image_options {
/**
* If true, lower cube size operations.
*/
bool lower_cube_size;
+
+ /**
+    * Lower multi-sample image load and samples_identical to use
+    * fragment_mask_load.
+ */
+ bool lower_to_fragment_mask_load_amd;
+
+ /**
+    * Lower image_samples to a constant in case the driver doesn't support
+    * multisampled images.
+ */
+ bool lower_image_samples_to_one;
} nir_lower_image_options;
bool nir_lower_image(nir_shader *nir,
const nir_lower_image_options *options);
+bool
+nir_lower_image_atomics_to_global(nir_shader *s);
+
bool nir_lower_readonly_images_to_tex(nir_shader *shader, bool per_variable);
enum nir_lower_non_uniform_access_type {
- nir_lower_non_uniform_ubo_access = (1 << 0),
- nir_lower_non_uniform_ssbo_access = (1 << 1),
+ nir_lower_non_uniform_ubo_access = (1 << 0),
+ nir_lower_non_uniform_ssbo_access = (1 << 1),
nir_lower_non_uniform_texture_access = (1 << 2),
- nir_lower_non_uniform_image_access = (1 << 3),
+ nir_lower_non_uniform_image_access = (1 << 3),
+ nir_lower_non_uniform_get_ssbo_size = (1 << 4),
};
/* Given the nir_src used for the resource, return the channels which might be non-uniform. */
@@ -5261,22 +6155,12 @@ typedef struct nir_lower_non_uniform_access_options {
void *callback_data;
} nir_lower_non_uniform_access_options;
+bool nir_has_non_uniform_access(nir_shader *shader, enum nir_lower_non_uniform_access_type types);
+bool nir_opt_non_uniform_access(nir_shader *shader);
bool nir_lower_non_uniform_access(nir_shader *shader,
const nir_lower_non_uniform_access_options *options);
typedef struct {
- /* If true, a 32-bit division lowering based on NV50LegalizeSSA::handleDIV()
- * is used. It is the faster of the two but it is not exact in some cases
- * (for example, 1091317713u / 1034u gives 5209173 instead of 1055432).
- *
- * If false, a lowering based on AMDGPUTargetLowering::LowerUDIVREM() and
- * AMDGPUTargetLowering::LowerSDIVREM() is used. It requires more
- * instructions than the nv50 path and many of them are integer
- * multiplications, so it is probably slower. It should always return the
- * correct result, though.
- */
- bool imprecise_32bit_lowering;
-
/* Whether 16-bit floating point arithmetic should be allowed in 8-bit
* division lowering
*/
@@ -5289,6 +6173,7 @@ typedef struct nir_input_attachment_options {
bool use_fragcoord_sysval;
bool use_layer_id_sysval;
bool use_view_id_for_layer;
+ uint32_t unscaled_input_attachment_ir3;
} nir_input_attachment_options;
bool nir_lower_input_attachments(nir_shader *shader,
@@ -5303,35 +6188,37 @@ bool nir_lower_clip_gs(nir_shader *shader, unsigned ucp_enables,
const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables,
bool use_clipdist_array);
+
+bool nir_lower_clip_cull_distance_to_vec4s(nir_shader *shader);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
bool nir_lower_clip_disable(nir_shader *shader, unsigned clip_plane_enable);
-void nir_lower_point_size_mov(nir_shader *shader,
+bool nir_lower_point_size_mov(nir_shader *shader,
const gl_state_index16 *pointsize_state_tokens);
bool nir_lower_frexp(nir_shader *nir);
-void nir_lower_two_sided_color(nir_shader *shader, bool face_sysval);
+bool nir_lower_two_sided_color(nir_shader *shader, bool face_sysval);
bool nir_lower_clamp_color_outputs(nir_shader *shader);
bool nir_lower_flatshade(nir_shader *shader);
-void nir_lower_passthrough_edgeflags(nir_shader *shader);
+bool nir_lower_passthrough_edgeflags(nir_shader *shader);
bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
const gl_state_index16 *uniform_state_tokens);
typedef struct nir_lower_wpos_ytransform_options {
gl_state_index16 state_tokens[STATE_LENGTH];
- bool fs_coord_origin_upper_left :1;
- bool fs_coord_origin_lower_left :1;
- bool fs_coord_pixel_center_integer :1;
- bool fs_coord_pixel_center_half_integer :1;
+ bool fs_coord_origin_upper_left : 1;
+ bool fs_coord_origin_lower_left : 1;
+ bool fs_coord_pixel_center_integer : 1;
+ bool fs_coord_pixel_center_half_integer : 1;
} nir_lower_wpos_ytransform_options;
bool nir_lower_wpos_ytransform(nir_shader *shader,
const nir_lower_wpos_ytransform_options *options);
-bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
+bool nir_lower_wpos_center(nir_shader *shader);
bool nir_lower_pntc_ytransform(nir_shader *shader,
const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);
@@ -5346,11 +6233,11 @@ typedef struct nir_lower_drawpixels_options {
gl_state_index16 bias_state_tokens[STATE_LENGTH];
unsigned drawpix_sampler;
unsigned pixelmap_sampler;
- bool pixel_maps :1;
- bool scale_and_bias :1;
+ bool pixel_maps : 1;
+ bool scale_and_bias : 1;
} nir_lower_drawpixels_options;
-void nir_lower_drawpixels(nir_shader *shader,
+bool nir_lower_drawpixels(nir_shader *shader,
const nir_lower_drawpixels_options *options);
typedef struct nir_lower_bitmap_options {
@@ -5358,30 +6245,31 @@ typedef struct nir_lower_bitmap_options {
bool swizzle_xxxx;
} nir_lower_bitmap_options;
-void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
-
-bool nir_lower_atomics_to_ssbo(nir_shader *shader);
+bool nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
-typedef enum {
- nir_lower_int_source_mods = 1 << 0,
- nir_lower_float_source_mods = 1 << 1,
- nir_lower_64bit_source_mods = 1 << 2,
- nir_lower_triop_abs = 1 << 3,
- nir_lower_all_source_mods = (1 << 4) - 1
-} nir_lower_to_source_mods_flags;
-
-
-bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);
+bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned offset_align_state);
typedef enum {
nir_lower_gs_intrinsics_per_stream = 1 << 0,
nir_lower_gs_intrinsics_count_primitives = 1 << 1,
nir_lower_gs_intrinsics_count_vertices_per_primitive = 1 << 2,
nir_lower_gs_intrinsics_overwrite_incomplete = 1 << 3,
+ nir_lower_gs_intrinsics_always_end_primitive = 1 << 4,
+ nir_lower_gs_intrinsics_count_decomposed_primitives = 1 << 5,
} nir_lower_gs_intrinsics_flags;
bool nir_lower_gs_intrinsics(nir_shader *shader, nir_lower_gs_intrinsics_flags options);
+bool nir_lower_tess_coord_z(nir_shader *shader, bool triangles);
+
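+/* Controls how nir_lower_task_shader handles the task payload, e.g. whether
+ * payload accesses are redirected to shared memory for atomics or for small
+ * (sub-dword) types, and at which byte offset the payload lives.
+ */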
+typedef struct {
+ bool payload_to_shared_for_atomics : 1;
+ bool payload_to_shared_for_small_types : 1;
+ uint32_t payload_offset_in_bytes;
+} nir_lower_task_shader_options;
+
+bool nir_lower_task_shader(nir_shader *shader, nir_lower_task_shader_options options);
+
typedef unsigned (*nir_lower_bit_size_callback)(const nir_instr *, void *);
bool nir_lower_bit_size(nir_shader *shader,
@@ -5389,22 +6277,42 @@ bool nir_lower_bit_size(nir_shader *shader,
void *callback_data);
bool nir_lower_64bit_phis(nir_shader *shader);
+bool nir_split_64bit_vec3_and_vec4(nir_shader *shader);
+
nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader);
+bool nir_lower_int64_float_conversions(nir_shader *shader);
nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
-bool nir_recompute_io_bases(nir_function_impl *impl, nir_variable_mode modes);
+bool nir_recompute_io_bases(nir_shader *nir, nir_variable_mode modes);
+bool nir_lower_mediump_vars(nir_shader *nir, nir_variable_mode modes);
bool nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
uint64_t varying_mask, bool use_16bit_slots);
bool nir_force_mediump_io(nir_shader *nir, nir_variable_mode modes,
nir_alu_type types);
bool nir_unpack_16bit_varying_slots(nir_shader *nir, nir_variable_mode modes);
-bool nir_fold_16bit_sampler_conversions(nir_shader *nir,
- unsigned tex_src_types);
+
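+/* Options for folding 16-bit conversions into texture and image operations:
+ * which destination types, image-store data, and sources may be shrunk to
+ * 16 bits, and the rounding mode to assume when doing so.
+ */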
+struct nir_opt_tex_srcs_options {
+ unsigned sampler_dims;
+ unsigned src_types;
+};
+
+struct nir_opt_16bit_tex_image_options {
+ nir_rounding_mode rounding_mode;
+ nir_alu_type opt_tex_dest_types;
+ nir_alu_type opt_image_dest_types;
+ bool opt_image_store_data;
+ bool opt_image_srcs;
+ unsigned opt_srcs_options_count;
+ struct nir_opt_tex_srcs_options *opt_srcs_options;
+};
+
+bool nir_opt_16bit_tex_image(nir_shader *nir,
+ struct nir_opt_16bit_tex_image_options *options);
typedef struct {
bool legalize_type; /* whether this src should be legalized */
@@ -5420,39 +6328,64 @@ bool nir_lower_point_size(nir_shader *shader, float min, float max);
void nir_lower_texcoord_replace(nir_shader *s, unsigned coord_replace,
bool point_coord_is_sysval, bool yinvert);
+void nir_lower_texcoord_replace_late(nir_shader *s, unsigned coord_replace,
+ bool point_coord_is_sysval);
+
typedef enum {
nir_lower_interpolation_at_sample = (1 << 1),
nir_lower_interpolation_at_offset = (1 << 2),
- nir_lower_interpolation_centroid = (1 << 3),
- nir_lower_interpolation_pixel = (1 << 4),
- nir_lower_interpolation_sample = (1 << 5),
+ nir_lower_interpolation_centroid = (1 << 3),
+ nir_lower_interpolation_pixel = (1 << 4),
+ nir_lower_interpolation_sample = (1 << 5),
} nir_lower_interpolation_options;
bool nir_lower_interpolation(nir_shader *shader,
nir_lower_interpolation_options options);
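+/* Lowers conditional discard/demote/terminate intrinsics into an explicit
+ * if-block around the corresponding unconditional intrinsic.
+ */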
+typedef enum {
+ nir_lower_discard_if_to_cf = (1 << 0),
+ nir_lower_demote_if_to_cf = (1 << 1),
+ nir_lower_terminate_if_to_cf = (1 << 2),
+} nir_lower_discard_if_options;
+
+bool nir_lower_discard_if(nir_shader *shader, nir_lower_discard_if_options options);
+
bool nir_lower_discard_or_demote(nir_shader *shader,
bool force_correct_quad_ops_after_discard);
+bool nir_lower_terminate_to_demote(nir_shader *nir);
+
bool nir_lower_memory_model(nir_shader *shader);
bool nir_lower_goto_ifs(nir_shader *shader);
+bool nir_lower_continue_constructs(nir_shader *shader);
bool nir_shader_uses_view_index(nir_shader *shader);
bool nir_can_lower_multiview(nir_shader *shader);
bool nir_lower_multiview(nir_shader *shader, uint32_t view_mask);
-bool nir_lower_fp16_casts(nir_shader *shader);
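+/* Selects which float-to-fp16 rounding modes get lowered to open-coded
+ * conversion sequences; nir_lower_fp16_split_fp64 additionally splits
+ * f64-to-f16 casts into two steps through f32.
+ */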
+typedef enum {
+ nir_lower_fp16_rtz = (1 << 0),
+ nir_lower_fp16_rtne = (1 << 1),
+ nir_lower_fp16_ru = (1 << 2),
+ nir_lower_fp16_rd = (1 << 3),
+ nir_lower_fp16_all = 0xf,
+ nir_lower_fp16_split_fp64 = (1 << 4),
+} nir_lower_fp16_cast_options;
+bool nir_lower_fp16_casts(nir_shader *shader, nir_lower_fp16_cast_options options);
bool nir_normalize_cubemap_coords(nir_shader *shader);
-void nir_live_ssa_defs_impl(nir_function_impl *impl);
+bool nir_shader_supports_implicit_lod(nir_shader *shader);
+
+void nir_live_defs_impl(nir_function_impl *impl);
-const BITSET_WORD *nir_get_live_ssa_defs(nir_cursor cursor, void *mem_ctx);
+const BITSET_WORD *nir_get_live_defs(nir_cursor cursor, void *mem_ctx);
void nir_loop_analyze_impl(nir_function_impl *impl,
- nir_variable_mode indirect_mask);
+ nir_variable_mode indirect_mask,
+ bool force_unroll_sampler_indirect);
-bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
+bool nir_defs_interfere(nir_def *a, nir_def *b);
bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);
@@ -5460,23 +6393,34 @@ bool nir_repair_ssa(nir_shader *shader);
void nir_convert_loop_to_lcssa(nir_loop *loop);
bool nir_convert_to_lcssa(nir_shader *shader, bool skip_invariants, bool skip_bool_invariants);
void nir_divergence_analysis(nir_shader *shader);
+void nir_vertex_divergence_analysis(nir_shader *shader);
bool nir_update_instr_divergence(nir_shader *shader, nir_instr *instr);
+bool nir_has_divergent_loop(nir_shader *shader);
+
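+/* Rewrites all uses of old so they read from reg instead, emitting load_reg
+ * intrinsics at the use sites with the given builder.
+ */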
+void
+nir_rewrite_uses_to_load_reg(struct nir_builder *b, nir_def *old,
+ nir_def *reg);
/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
* registers. If false, convert all values (even those not involved in a phi
* node) to registers.
*/
-bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
+bool nir_convert_from_ssa(nir_shader *shader,
+ bool phi_webs_only);
bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
+
+bool nir_rematerialize_deref_in_use_blocks(nir_deref_instr *instr);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
bool nir_lower_samplers(nir_shader *shader);
+bool nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_sampler_derefs);
+bool nir_dedup_inline_samplers(nir_shader *shader);
bool nir_lower_ssbo(nir_shader *shader);
+bool nir_lower_helper_writes(nir_shader *shader, bool lower_plain_stores);
typedef struct nir_lower_printf_options {
- bool treat_doubles_as_floats : 1;
unsigned max_buffer_size;
} nir_lower_printf_options;
@@ -5489,7 +6433,6 @@ bool nir_opt_comparison_pre(nir_shader *shader);
typedef struct nir_opt_access_options {
bool is_vulkan;
- bool infer_non_readable;
} nir_opt_access_options;
bool nir_opt_access(nir_shader *shader, const nir_opt_access_options *options);
@@ -5503,15 +6446,17 @@ bool nir_opt_constant_folding(nir_shader *shader);
* which will result in b being removed by the pass. Return false if
* combination wasn't possible.
*/
-typedef bool (*nir_combine_memory_barrier_cb)(
+typedef bool (*nir_combine_barrier_cb)(
nir_intrinsic_instr *a, nir_intrinsic_instr *b, void *data);
-bool nir_opt_combine_memory_barriers(nir_shader *shader,
- nir_combine_memory_barrier_cb combine_cb,
- void *data);
+bool nir_opt_combine_barriers(nir_shader *shader,
+ nir_combine_barrier_cb combine_cb,
+ void *data);
+bool nir_opt_barrier_modes(nir_shader *shader);
bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
+bool nir_copy_prop_impl(nir_function_impl *impl);
bool nir_copy_prop(nir_shader *shader);
bool nir_opt_copy_prop_vars(nir_shader *shader);
@@ -5535,7 +6480,14 @@ bool nir_opt_gcm(nir_shader *shader, bool value_number);
bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);
-bool nir_opt_if(nir_shader *shader, bool aggressive_last_continue);
+bool nir_opt_mqsad(nir_shader *shader);
+
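+/* nir_opt_if_optimize_phi_true_false folds phis whose sources are the
+ * literals true/false into uses of the branch condition itself;
+ * nir_opt_if_avoid_64bit_phis stops the pass from creating 64-bit phis.
+ */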
+typedef enum {
+ nir_opt_if_optimize_phi_true_false = (1 << 0),
+ nir_opt_if_avoid_64bit_phis = (1 << 1),
+} nir_opt_if_options;
+
+bool nir_opt_if(nir_shader *shader, nir_opt_if_options options);
bool nir_opt_intrinsics(nir_shader *shader);
@@ -5543,15 +6495,19 @@ bool nir_opt_large_constants(nir_shader *shader,
glsl_type_size_align_func size_align,
unsigned threshold);
+bool nir_opt_loop(nir_shader *shader);
+
bool nir_opt_loop_unroll(nir_shader *shader);
typedef enum {
- nir_move_const_undef = (1 << 0),
- nir_move_load_ubo = (1 << 1),
- nir_move_load_input = (1 << 2),
- nir_move_comparisons = (1 << 3),
- nir_move_copies = (1 << 4),
- nir_move_load_ssbo = (1 << 5),
+ nir_move_const_undef = (1 << 0),
+ nir_move_load_ubo = (1 << 1),
+ nir_move_load_input = (1 << 2),
+ nir_move_comparisons = (1 << 3),
+ nir_move_copies = (1 << 4),
+ nir_move_load_ssbo = (1 << 5),
+ nir_move_load_uniform = (1 << 6),
+ nir_move_alu = (1 << 7),
} nir_move_options;
bool nir_can_move_instr(nir_instr *instr, nir_move_options options);
@@ -5560,11 +6516,27 @@ bool nir_opt_sink(nir_shader *shader, nir_move_options options);
bool nir_opt_move(nir_shader *shader, nir_move_options options);
-bool nir_opt_offsets(nir_shader *shader);
+typedef struct {
+ /** nir_load_uniform max base offset */
+ uint32_t uniform_max;
+
+ /** nir_load_ubo_vec4 max base offset */
+ uint32_t ubo_vec4_max;
+
+ /** nir_var_mem_shared max base offset */
+ uint32_t shared_max;
+
+ /** nir_load/store_buffer_amd max base offset */
+ uint32_t buffer_max;
+} nir_opt_offsets_options;
+
+bool nir_opt_offsets(nir_shader *shader, const nir_opt_offsets_options *options);
bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
bool indirect_load_ok, bool expensive_alu_ok);
+bool nir_opt_reassociate_bfi(nir_shader *shader);
+
bool nir_opt_rematerialize_compares(nir_shader *shader);
bool nir_opt_remove_phis(nir_shader *shader);
@@ -5572,9 +6544,9 @@ bool nir_opt_remove_phis_block(nir_block *block);
bool nir_opt_phi_precision(nir_shader *shader);
-bool nir_opt_shrink_vectors(nir_shader *shader, bool shrink_image_store);
+bool nir_opt_shrink_stores(nir_shader *shader, bool shrink_image_store);
-bool nir_opt_trivial_continues(nir_shader *shader);
+bool nir_opt_shrink_vectors(nir_shader *shader, bool shrink_start);
bool nir_opt_undef(nir_shader *shader);
@@ -5582,29 +6554,20 @@ bool nir_lower_undef_to_zero(nir_shader *shader);
bool nir_opt_uniform_atomics(nir_shader *shader);
-typedef bool (*nir_opt_vectorize_cb)(const nir_instr *instr, void *data);
+bool nir_opt_uniform_subgroup(nir_shader *shader,
+ const nir_lower_subgroups_options *);
-bool nir_opt_vectorize(nir_shader *shader, nir_opt_vectorize_cb filter,
+bool nir_opt_vectorize(nir_shader *shader, nir_vectorize_cb filter,
void *data);
bool nir_opt_conditional_discard(nir_shader *shader);
bool nir_opt_move_discards_to_top(nir_shader *shader);
-typedef bool (*nir_should_vectorize_mem_func)(unsigned align_mul,
- unsigned align_offset,
- unsigned bit_size,
- unsigned num_components,
- nir_intrinsic_instr *low, nir_intrinsic_instr *high,
- void *data);
+bool nir_opt_ray_queries(nir_shader *shader);
-typedef struct {
- nir_should_vectorize_mem_func callback;
- nir_variable_mode modes;
- nir_variable_mode robust_modes;
- void *cb_data;
-} nir_load_store_vectorize_options;
+bool nir_opt_ray_query_ranges(nir_shader *shader);
-bool nir_opt_load_store_vectorize(nir_shader *shader, const nir_load_store_vectorize_options *options);
+bool nir_opt_reuse_constants(nir_shader *shader);
void nir_sweep(nir_shader *shader);
@@ -5635,6 +6598,29 @@ nir_variable_is_in_block(const nir_variable *var)
return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
}
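+/* Compact variables (scalar arrays such as the clip/cull distances) pack
+ * four components per slot; everything else uses the standard
+ * attribute-slot count for its type.
+ */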
+static inline unsigned
+nir_variable_count_slots(const nir_variable *var, const struct glsl_type *type)
+{
+   return var->data.compact
+             ? DIV_ROUND_UP(var->data.location_frac + glsl_get_length(type), 4)
+             : glsl_count_attribute_slots(type, false);
+}
+
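+/* Like nir_variable_count_slots(), but for the type reached by a deref:
+ * indexing into a compact array lands on a single component, hence one slot.
+ */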
+static inline unsigned
+nir_deref_count_slots(nir_deref_instr *deref, nir_variable *var)
+{
+ if (var->data.compact) {
+ switch (deref->deref_type) {
+ case nir_deref_type_array:
+ return 1;
+ case nir_deref_type_var:
+ return nir_variable_count_slots(var, deref->type);
+ default:
+ unreachable("illegal deref type");
+ }
+ }
+ return glsl_count_attribute_slots(deref->type, false);
+}
+
+/* See default_ub_config in nir_range_analysis.c for documentation. */
typedef struct nir_unsigned_upper_bound_config {
unsigned min_subgroup_size;
unsigned max_subgroup_size;
@@ -5647,14 +6633,227 @@ typedef struct nir_unsigned_upper_bound_config {
uint32_t
nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
- nir_ssa_scalar scalar,
+ nir_scalar scalar,
const nir_unsigned_upper_bound_config *config);
bool
nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
- nir_ssa_scalar ssa, unsigned const_val,
+ nir_scalar ssa, unsigned const_val,
const nir_unsigned_upper_bound_config *config);
+typedef struct {
+ /* True if gl_DrawID is considered uniform, i.e. if the preamble is run
+ * at least once per "internal" draw rather than per user-visible draw.
+ */
+ bool drawid_uniform;
+
+ /* True if the subgroup size is uniform. */
+ bool subgroup_size_uniform;
+
+ /* True if load_workgroup_size is supported in the preamble. */
+ bool load_workgroup_size_allowed;
+
+ /* size/align for load/store_preamble. */
+ void (*def_size)(nir_def *def, unsigned *size, unsigned *align);
+
+ /* Total available size for load/store_preamble storage, in units
+ * determined by def_size.
+ */
+ unsigned preamble_storage_size;
+
+ /* Give the cost for an instruction. nir_opt_preamble will prioritize
+ * instructions with higher costs. Instructions with cost 0 may still be
+    * lifted, but only when required to lift other instructions with nonzero
+ * cost (e.g. a load_const source of an expression).
+ */
+ float (*instr_cost_cb)(nir_instr *instr, const void *data);
+
+   /* Give the cost of rewriting the instruction to use load_preamble. Extra
+    * cost may come from inserted move instructions, etc. If the benefit
+    * doesn't exceed the cost here then we won't rewrite it.
+ */
+ float (*rewrite_cost_cb)(nir_def *def, const void *data);
+
+ /* Instructions whose definitions should not be rewritten. These could
+ * still be moved to the preamble, but they shouldn't be the root of a
+ * replacement expression. Instructions with cost 0 and derefs are
+ * automatically included by the pass.
+ */
+ nir_instr_filter_cb avoid_instr_cb;
+
+ const void *cb_data;
+} nir_opt_preamble_options;
+
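+/* Hoists shader-uniform computations into a preamble that runs once before
+ * the rest of the shader; *size is set to the amount of preamble storage
+ * actually used, in the units defined by def_size.
+ */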
+bool
+nir_opt_preamble(nir_shader *shader,
+ const nir_opt_preamble_options *options,
+ unsigned *size);
+
+nir_function_impl *nir_shader_get_preamble(nir_shader *shader);
+
+bool nir_lower_point_smooth(nir_shader *shader);
+bool nir_lower_poly_line_smooth(nir_shader *shader, unsigned num_smooth_aa_sample);
+
+bool nir_mod_analysis(nir_scalar val, nir_alu_type val_type, unsigned div, unsigned *mod);
+
+bool
+nir_remove_tex_shadow(nir_shader *shader, unsigned textures_bitmask);
+
+void
+nir_trivialize_registers(nir_shader *s);
+
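+/* Product of the compile-time workgroup dimensions; only meaningful when
+ * the workgroup size is not chosen at dispatch time.
+ */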
+unsigned
+nir_static_workgroup_size(const nir_shader *s);
+
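+/* Returns the decl_reg intrinsic that produced the given register handle. */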
+static inline nir_intrinsic_instr *
+nir_reg_get_decl(nir_def *reg)
+{
+ assert(reg->parent_instr->type == nir_instr_type_intrinsic);
+ nir_intrinsic_instr *decl = nir_instr_as_intrinsic(reg->parent_instr);
+ assert(decl->intrinsic == nir_intrinsic_decl_reg);
+
+ return decl;
+}
+
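+/* Returns the next decl_reg after prev, or the first one in impl's start
+ * block when prev is NULL; returns NULL when there are no more.
+ */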
+static inline nir_intrinsic_instr *
+nir_next_decl_reg(nir_intrinsic_instr *prev, nir_function_impl *impl)
+{
+ nir_instr *start;
+ if (prev != NULL)
+ start = nir_instr_next(&prev->instr);
+ else if (impl != NULL)
+ start = nir_block_first_instr(nir_start_block(impl));
+ else
+ return NULL;
+
+ for (nir_instr *instr = start; instr; instr = nir_instr_next(instr)) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ if (intrin->intrinsic == nir_intrinsic_decl_reg)
+ return intrin;
+ }
+
+ return NULL;
+}
+
+#define nir_foreach_reg_decl(reg, impl) \
+ for (nir_intrinsic_instr *reg = nir_next_decl_reg(NULL, impl); \
+ reg; reg = nir_next_decl_reg(reg, NULL))
+
+#define nir_foreach_reg_decl_safe(reg, impl) \
+ for (nir_intrinsic_instr *reg = nir_next_decl_reg(NULL, impl), \
+ *next_ = nir_next_decl_reg(reg, NULL); \
+ reg; reg = next_, next_ = nir_next_decl_reg(next_, NULL))
+
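+/* Returns a cursor placed right after the last decl_reg in impl, or at the
+ * top of the impl when there are none.
+ */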
+static inline nir_cursor
+nir_after_reg_decls(nir_function_impl *impl)
+{
+ nir_intrinsic_instr *last_reg_decl = NULL;
+ nir_foreach_reg_decl(reg_decl, impl)
+ last_reg_decl = reg_decl;
+
+ if (last_reg_decl != NULL)
+ return nir_after_instr(&last_reg_decl->instr);
+ return nir_before_impl(impl);
+}
+
+static inline bool
+nir_is_load_reg(nir_intrinsic_instr *intr)
+{
+ return intr->intrinsic == nir_intrinsic_load_reg ||
+ intr->intrinsic == nir_intrinsic_load_reg_indirect;
+}
+
+static inline bool
+nir_is_store_reg(nir_intrinsic_instr *intr)
+{
+ return intr->intrinsic == nir_intrinsic_store_reg ||
+ intr->intrinsic == nir_intrinsic_store_reg_indirect;
+}
+
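+/* Iterate the load_reg/store_reg intrinsics that use a given decl_reg. Note
+ * that the leading assert runs once, before the loop itself.
+ */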
+#define nir_foreach_reg_load(load, reg) \
+ assert(reg->intrinsic == nir_intrinsic_decl_reg); \
+ \
+ nir_foreach_use(load, &reg->def) \
+ if (nir_is_load_reg(nir_instr_as_intrinsic(nir_src_parent_instr(load))))
+
+#define nir_foreach_reg_load_safe(load, reg) \
+ assert(reg->intrinsic == nir_intrinsic_decl_reg); \
+ \
+ nir_foreach_use_safe(load, &reg->def) \
+ if (nir_is_load_reg(nir_instr_as_intrinsic(nir_src_parent_instr(load))))
+
+#define nir_foreach_reg_store(store, reg) \
+ assert(reg->intrinsic == nir_intrinsic_decl_reg); \
+ \
+ nir_foreach_use(store, &reg->def) \
+ if (nir_is_store_reg(nir_instr_as_intrinsic(nir_src_parent_instr(store))))
+
+#define nir_foreach_reg_store_safe(store, reg) \
+ assert(reg->intrinsic == nir_intrinsic_decl_reg); \
+ \
+ nir_foreach_use_safe(store, &reg->def) \
+ if (nir_is_store_reg(nir_instr_as_intrinsic(nir_src_parent_instr(store))))
+
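+/* If def comes from a (possibly indirect) load_reg, returns that intrinsic;
+ * otherwise returns NULL.
+ */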
+static inline nir_intrinsic_instr *
+nir_load_reg_for_def(const nir_def *def)
+{
+ if (def->parent_instr->type != nir_instr_type_intrinsic)
+ return NULL;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(def->parent_instr);
+ if (!nir_is_load_reg(intr))
+ return NULL;
+
+ return intr;
+}
+
+static inline nir_intrinsic_instr *
+nir_store_reg_for_def(const nir_def *def)
+{
+   /* Look for the trivial store: a single use of our destination by a
+    * store_reg intrinsic.
+ */
+ if (!list_is_singular(&def->uses))
+ return NULL;
+
+ nir_src *src = list_first_entry(&def->uses, nir_src, use_link);
+ if (nir_src_is_if(src))
+ return NULL;
+
+ nir_instr *parent = nir_src_parent_instr(src);
+ if (parent->type != nir_instr_type_intrinsic)
+ return NULL;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(parent);
+ if (!nir_is_store_reg(intr))
+ return NULL;
+
+   /* The first source is the data; the third is the indirect index, so ignore it. */
+ if (&intr->src[0] != src)
+ return NULL;
+
+ return intr;
+}
+
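+/* Dominance queries over the SSA use graph rather than the CFG; pass
+ * post_dominance to build the post-dominator form instead.
+ */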
+struct nir_use_dominance_state;
+
+struct nir_use_dominance_state *
+nir_calc_use_dominance_impl(nir_function_impl *impl, bool post_dominance);
+
+nir_instr *
+nir_get_immediate_use_dominator(struct nir_use_dominance_state *state,
+ nir_instr *instr);
+nir_instr *nir_use_dominance_lca(struct nir_use_dominance_state *state,
+ nir_instr *i1, nir_instr *i2);
+bool nir_instr_dominates_use(struct nir_use_dominance_state *state,
+ nir_instr *parent, nir_instr *child);
+void nir_print_use_dominators(struct nir_use_dominance_state *state,
+ nir_instr **instructions,
+ unsigned num_instructions);
+
#include "nir_inline_helpers.h"
#ifdef __cplusplus