Diffstat (limited to 'src/broadcom/compiler/v3d_compiler.h')
-rw-r--r--  src/broadcom/compiler/v3d_compiler.h  309
1 file changed, 195 insertions(+), 114 deletions(-)
diff --git a/src/broadcom/compiler/v3d_compiler.h b/src/broadcom/compiler/v3d_compiler.h
index 0c1419661d3..12aaacdc14a 100644
--- a/src/broadcom/compiler/v3d_compiler.h
+++ b/src/broadcom/compiler/v3d_compiler.h
@@ -31,6 +31,7 @@
#include <stdint.h>
#include <string.h>
+#include "util/blend.h"
#include "util/macros.h"
#include "common/v3d_debug.h"
#include "common/v3d_device_info.h"
@@ -40,7 +41,6 @@
#include "util/u_math.h"
#include "qpu/qpu_instr.h"
-#include "pipe/p_state.h"
/**
* Maximum number of outstanding TMU operations we can queue for execution.
@@ -87,7 +87,7 @@ enum qfile {
/** A physical register, such as the W coordinate payload. */
QFILE_REG,
- /** One of the regsiters for fixed function interactions. */
+ /** One of the registers for fixed function interactions. */
QFILE_MAGIC,
/**
@@ -97,12 +97,6 @@ enum qfile {
QFILE_TEMP,
/**
- * VPM reads use this with an index value to say what part of the VPM
- * is being read.
- */
- QFILE_VPM,
-
- /**
* Stores an immediate value in the index field that will be used
* directly by qpu_load_imm().
*/
@@ -169,6 +163,19 @@ struct qinst {
* otherwise.
*/
int uniform;
+
+ /* If this is a TLB Z write */
+ bool is_tlb_z_write;
+
+ /* If this is a retiring TMU instruction (the last in a lookup sequence),
+ * how many ldtmu instructions are required to read the results.
+ */
+ uint32_t ldtmu_count;
+
+ /* Position of this instruction in the program. Filled in during
+ * register allocation.
+ */
+ int32_t ip;
};
enum quniform_contents {
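The new ip field is documented as being filled in during register allocation; a minimal sketch of what that numbering pass could look like (hypothetical helper name, assuming vir_for_each_inst_inorder visits instructions in program order):

    static void
    vir_number_instructions(struct v3d_compile *c)
    {
            /* Assign each instruction its position so RA heuristics can
             * compare instruction order through inst->ip.
             */
            int32_t ip = 0;
            vir_for_each_inst_inorder(inst, c)
                    inst->ip = ip++;
    }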
@@ -330,6 +337,19 @@ enum quniform_contents {
* Current value of gl_ViewIndex for Multiview rendering.
*/
QUNIFORM_VIEW_INDEX,
+
+ /**
+ * Inline uniform buffers
+ */
+ QUNIFORM_INLINE_UBO_0,
+ QUNIFORM_INLINE_UBO_1,
+ QUNIFORM_INLINE_UBO_2,
+ QUNIFORM_INLINE_UBO_3,
+
+ /**
+ * Current value of DrawIndex for Multidraw.
+ */
+ QUNIFORM_DRAW_ID,
};
static inline uint32_t v3d_unit_data_create(uint32_t unit, uint32_t value)
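The four QUNIFORM_INLINE_UBO_* entries are consecutive, so a driver can address an inline uniform buffer by offsetting from the first one; a hedged sketch (hypothetical helper, assuming at most four inline UBOs are exposed):

    static inline enum quniform_contents
    quniform_inline_ubo(uint32_t index)
    {
            /* Assumption: the driver caps inline uniform buffers at 4,
             * matching the number of enum entries added here.
             */
            assert(index < 4);
            return (enum quniform_contents)(QUNIFORM_INLINE_UBO_0 + index);
    }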
@@ -369,13 +389,7 @@ static inline uint8_t v3d_slot_get_component(struct v3d_varying_slot slot)
return slot.slot_and_component & 3;
}
-enum v3d_execution_environment {
- V3D_ENVIRONMENT_OPENGL = 0,
- V3D_ENVIRONMENT_VULKAN,
-};
-
struct v3d_key {
- void *shader_state;
struct {
uint8_t swizzle[4];
} tex[V3D_MAX_TEXTURE_SAMPLERS];
@@ -388,9 +402,9 @@ struct v3d_key {
uint8_t num_samplers_used;
uint8_t ucp_enables;
bool is_last_geometry_stage;
- bool robust_buffer_access;
-
- enum v3d_execution_environment environment;
+ bool robust_uniform_access;
+ bool robust_storage_access;
+ bool robust_image_access;
};
struct v3d_fs_key {
@@ -400,7 +414,6 @@ struct v3d_fs_key {
bool line_smoothing;
bool point_coord_upper_left;
bool msaa;
- bool sample_coverage;
bool sample_alpha_to_coverage;
bool sample_alpha_to_one;
/* Mask of which color render targets are present. */
@@ -419,14 +432,12 @@ struct v3d_fs_key {
*/
struct {
enum pipe_format format;
- const uint8_t *swizzle;
+ uint8_t swizzle[4];
} color_fmt[V3D_MAX_DRAW_BUFFERS];
- uint8_t logicop_func;
+ enum pipe_logicop logicop_func;
uint32_t point_sprite_mask;
- struct pipe_rt_blend_state blend;
-
/* If the fragment shader reads gl_PrimitiveID then we have 2 scenarios:
*
* - If there is a geometry shader, then gl_PrimitiveID must be written
@@ -468,7 +479,7 @@ struct v3d_vs_key {
bool clamp_color;
};
-/** A basic block of VIR intructions. */
+/** A basic block of VIR instructions. */
struct qblock {
struct list_head link;
@@ -566,6 +577,7 @@ enum v3d_compilation_result {
*/
struct v3d_compiler {
const struct v3d_device_info *devinfo;
+ uint32_t max_inline_uniform_buffers;
struct ra_regs *regs;
struct ra_class *reg_class_any[3];
struct ra_class *reg_class_r5[3];
@@ -584,6 +596,19 @@ struct v3d_interp_input {
unsigned mode; /* interpolation mode */
};
+struct v3d_ra_node_info {
+ struct {
+ uint32_t priority;
+ uint8_t class_bits;
+ bool is_program_end;
+ bool unused;
+
+ /* V3D 7.x */
+ bool is_ldunif_dst;
+ } *info;
+ uint32_t alloc_count;
+};
+
struct v3d_compile {
const struct v3d_device_info *devinfo;
nir_shader *s;
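v3d_ra_node_info keeps per-node metadata for the register allocator; a hedged sketch of how it might be sized when RA starts (hypothetical code, assuming the compiler's usual ralloc allocation and one slot per temp):

    /* Allocate one metadata slot per temp; priority, class_bits and the
     * 7.x-only is_ldunif_dst flag are filled in as nodes are created.
     */
    c->nodes.alloc_count = c->num_temps;
    c->nodes.info = rzalloc_size(c, c->nodes.alloc_count *
                                    sizeof(*c->nodes.info));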
@@ -596,7 +621,7 @@ struct v3d_compile {
void *debug_output_data;
/**
- * Mapping from nir_register * or nir_ssa_def * to array of struct
+ * Mapping from nir_register * or nir_def * to array of struct
* qreg for the values.
*/
struct hash_table *def_ht;
@@ -615,11 +640,12 @@ struct v3d_compile {
uint32_t output_fifo_size;
struct {
- nir_dest *dest;
+ nir_def *def;
uint8_t num_components;
uint8_t component_mask;
} flush[MAX_TMU_QUEUE_SIZE];
uint32_t flush_count;
+ uint32_t total_count;
} tmu;
/**
@@ -652,16 +678,13 @@ struct v3d_compile {
bool uses_center_w;
bool writes_z;
+ bool writes_z_from_fep;
+ bool reads_z;
bool uses_implicit_point_line_varyings;
/* True if a fragment shader reads gl_PrimitiveID */
bool fs_uses_primitive_id;
- /* If the fragment shader does anything that requires to force
- * per-sample MSAA, such as reading gl_SampleID.
- */
- bool force_per_sample_msaa;
-
/* Whether we are using the fallback scheduler. This will be set after
* register allocation has failed once.
*/
@@ -681,6 +704,11 @@ struct v3d_compile {
bool disable_constant_ubo_load_sorting;
bool sorted_any_ubo_loads;
+ /* Moves UBO/SSBO loads right before their first user (nir_opt_move).
+ * This can reduce register pressure.
+ */
+ bool move_buffer_loads;
+
/* Emits ldunif for each new uniform, even if the uniform was already
* emitted in the same block. Useful to compile shaders with high
* register pressure or to disable the optimization during uniform
@@ -692,6 +720,19 @@ struct v3d_compile {
bool disable_loop_unrolling;
bool unrolled_any_loops;
+ /* Disables nir_opt_gcm to reduce register pressure. */
+ bool disable_gcm;
+
+ /* If calling nir_opt_gcm made any progress. Used to skip new rebuilds
+ * if possible.
+ */
+ bool gcm_progress;
+
+ /* Disables scheduling of general TMU loads (and unfiltered image load).
+ */
+ bool disable_general_tmu_sched;
+ bool has_general_tmu_load;
+
/* Minimum number of threads we are willing to use to register allocate
* a shader with the current compilation strategy. This only prevents
* us from lowering the thread count to register allocate successfully,
@@ -705,7 +746,9 @@ struct v3d_compile {
* strategies that can reduce register pressure and hopefully reduce or
* eliminate TMU spills in the shader.
*/
- bool tmu_spilling_allowed;
+ uint32_t max_tmu_spills;
+
+ uint32_t compile_strategy_idx;
/* The UBO index and block used with the last unifa load, as well as the
* current unifa offset *after* emitting that load. This is used to skip
@@ -715,6 +758,7 @@ struct v3d_compile {
struct qblock *current_unifa_block;
int32_t current_unifa_index;
uint32_t current_unifa_offset;
+ bool current_unifa_is_ubo;
/* State for whether we're executing on each channel currently. 0 if
* yes, otherwise a block number + 1 that the channel jumped to.
@@ -749,6 +793,11 @@ struct v3d_compile {
struct qreg cs_shared_offset;
int local_invocation_index_bits;
+ /* Starting value of the sample mask in a fragment shader. We use
+ * this to identify lanes that have been terminated/discarded.
+ */
+ struct qreg start_msf;
+
/* If the shader uses subgroup functionality */
bool has_subgroups;
@@ -761,14 +810,27 @@ struct v3d_compile {
uint32_t spill_size;
/* Shader-db stats */
uint32_t spills, fills, loops;
+
+ /* Whether we are in the process of spilling registers for
+ * register allocation
+ */
+ bool spilling;
+
/**
* Register spilling's per-thread base address, shared between each
- * spill/fill's addressing calculations.
+ * spill/fill's addressing calculations (also used for scratch
+ * access).
*/
struct qreg spill_base;
+
/* Bit vector of which temps may be spilled */
BITSET_WORD *spillable;
+ /* Used during register allocation */
+ int thread_index;
+ struct v3d_ra_node_info nodes;
+ struct ra_graph *g;
+
/**
* Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
*
@@ -799,11 +861,16 @@ struct v3d_compile {
uint32_t uniform_array_size;
uint32_t num_uniforms;
uint32_t output_position_index;
- nir_variable *output_color_var[4];
+ nir_variable *output_color_var[V3D_MAX_DRAW_BUFFERS];
uint32_t output_sample_mask_index;
struct qreg undef;
uint32_t num_temps;
+ /* Number of temps in the program right before we spill a new temp. We
+ * use this to know which temps existed before a spill and which were
+ * added with the spill itself.
+ */
+ uint32_t spill_start_num_temps;
struct vir_cursor cursor;
struct list_head blocks;
@@ -848,12 +915,16 @@ struct v3d_compile {
bool emitted_tlb_load;
bool lock_scoreboard_on_first_thrsw;
- /* Total number of spilled registers in the program */
- uint32_t spill_count;
-
enum v3d_compilation_result compilation_result;
bool tmu_dirty_rcl;
+ bool has_global_address;
+
+ /* If we have processed a discard/terminate instruction. This may
+ * cause some lanes to be inactive even during uniform control
+ * flow.
+ */
+ bool emitted_discard;
};
struct v3d_uniform_list {
@@ -866,6 +937,13 @@ struct v3d_prog_data {
struct v3d_uniform_list uniforms;
uint32_t spill_size;
+ uint32_t tmu_spills;
+ uint32_t tmu_fills;
+ uint32_t tmu_count;
+
+ uint32_t qpu_read_stalls;
+
+ uint8_t compile_strategy_idx;
uint8_t threads;
@@ -877,6 +955,8 @@ struct v3d_prog_data {
bool tmu_dirty_rcl;
bool has_control_barrier;
+
+ bool has_global_address;
};
struct v3d_vs_prog_data {
@@ -964,10 +1044,15 @@ struct v3d_fs_prog_data {
uint8_t num_inputs;
bool writes_z;
+ bool writes_z_from_fep;
bool disable_ez;
bool uses_center_w;
bool uses_implicit_point_line_varyings;
bool lock_scoreboard_on_first_thrsw;
+
+ /* If the fragment shader does anything that requires forcing
+ * per-sample MSAA, such as reading gl_SampleID.
+ */
bool force_per_sample_msaa;
};
@@ -998,6 +1083,10 @@ v3d_compute_vpm_config(struct v3d_device_info *devinfo,
struct v3d_gs_prog_data *gs,
struct vpm_config *vpm_cfg_bin,
struct vpm_config *vpm_cfg);
+void
+v3d_pack_unnormalized_coordinates(struct v3d_device_info *devinfo,
+ uint32_t *p1_packed,
+ bool unnormalized_coordinates);
static inline bool
vir_has_uniform(struct qinst *inst)
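v3d_pack_unnormalized_coordinates patches an already-packed TMU P1 word in place; a hedged usage sketch (the sampler_state fields are assumptions, not part of this header):

    uint32_t p1_packed = sampler_state->p1_packed;        /* assumed field */
    if (sampler_state->unnormalized_coordinates)          /* assumed field */
            v3d_pack_unnormalized_coordinates(devinfo, &p1_packed, true);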
@@ -1005,7 +1094,8 @@ vir_has_uniform(struct qinst *inst)
return inst->uniform != ~0;
}
-const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo);
+const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo,
+ uint32_t max_inline_uniform_buffers);
void v3d_compiler_free(const struct v3d_compiler *compiler);
void v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s);
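v3d_compiler_init now takes the maximum number of inline uniform buffers the driver will expose, which pairs with the new QUNIFORM_INLINE_UBO_* entries; a hedged sketch of the expected call sites (the limit macro is an assumption):

    /* Vulkan driver: reserve room for inline uniform blocks. */
    const struct v3d_compiler *compiler =
            v3d_compiler_init(&devinfo, MAX_INLINE_UNIFORM_BUFFERS); /* assumed */

    /* GL driver: inline uniform blocks are not used, so pass 0. */
    const struct v3d_compiler *gl_compiler = v3d_compiler_init(&devinfo, 0);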
@@ -1066,15 +1156,14 @@ bool vir_is_raw_mov(struct qinst *inst);
bool vir_is_tex(const struct v3d_device_info *devinfo, struct qinst *inst);
bool vir_is_add(struct qinst *inst);
bool vir_is_mul(struct qinst *inst);
-bool vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst);
-bool vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst);
+bool vir_writes_r4_implicitly(const struct v3d_device_info *devinfo, struct qinst *inst);
struct qreg vir_follow_movs(struct v3d_compile *c, struct qreg reg);
uint8_t vir_channels_written(struct qinst *inst);
struct qreg ntq_get_src(struct v3d_compile *c, nir_src src, int i);
-void ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
- struct qreg result);
+void ntq_store_def(struct v3d_compile *c, nir_def *def, int chan,
+ struct qreg result);
bool ntq_tmu_fifo_overflow(struct v3d_compile *c, uint32_t components);
-void ntq_add_pending_tmu_flush(struct v3d_compile *c, nir_dest *dest,
+void ntq_add_pending_tmu_flush(struct v3d_compile *c, nir_def *def,
uint32_t component_mask);
void ntq_flush_tmu(struct v3d_compile *c);
void vir_emit_thrsw(struct v3d_compile *c);
@@ -1095,32 +1184,27 @@ bool vir_opt_redundant_flags(struct v3d_compile *c);
bool vir_opt_small_immediates(struct v3d_compile *c);
bool vir_opt_vpm(struct v3d_compile *c);
bool vir_opt_constant_alu(struct v3d_compile *c);
-void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
-void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
-void v3d_nir_lower_line_smooth(nir_shader *shader);
-void v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
-void v3d_nir_lower_robust_buffer_access(nir_shader *shader, struct v3d_compile *c);
-void v3d_nir_lower_scratch(nir_shader *s);
-void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
-void v3d_nir_lower_image_load_store(nir_shader *s);
-void vir_lower_uniforms(struct v3d_compile *c);
-
-void v3d33_vir_vpm_read_setup(struct v3d_compile *c, int num_components);
-void v3d33_vir_vpm_write_setup(struct v3d_compile *c);
-void v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
-void v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
-void v3d40_vir_emit_image_load_store(struct v3d_compile *c,
- nir_intrinsic_instr *instr);
+bool v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
+bool v3d_nir_lower_line_smooth(nir_shader *shader);
+bool v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
+bool v3d_nir_lower_scratch(nir_shader *s);
+bool v3d_nir_lower_txf_ms(nir_shader *s);
+bool v3d_nir_lower_image_load_store(nir_shader *s, struct v3d_compile *c);
+bool v3d_nir_lower_load_store_bitsize(nir_shader *s);
+
+void v3d_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
+void v3d_vir_emit_image_load_store(struct v3d_compile *c,
+ nir_intrinsic_instr *instr);
void v3d_vir_to_qpu(struct v3d_compile *c, struct qpu_reg *temp_registers);
uint32_t v3d_qpu_schedule_instructions(struct v3d_compile *c);
void qpu_validate(struct v3d_compile *c);
-struct qpu_reg *v3d_register_allocate(struct v3d_compile *c, bool *spilled);
+struct qpu_reg *v3d_register_allocate(struct v3d_compile *c);
bool vir_init_reg_sets(struct v3d_compiler *compiler);
int v3d_shaderdb_dump(struct v3d_compile *c, char **shaderdb_str);
-bool v3d_gl_format_is_return_32(GLenum format);
+bool v3d_gl_format_is_return_32(enum pipe_format format);
uint32_t
v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src);
@@ -1220,28 +1304,35 @@ vir_##name(struct v3d_compile *c, struct qreg a, struct qreg b) \
#define VIR_SFU(name) \
static inline struct qreg \
vir_##name(struct v3d_compile *c, struct qreg a) \
-{ \
- if (c->devinfo->ver >= 41) { \
- return vir_emit_def(c, vir_add_inst(V3D_QPU_A_##name, \
- c->undef, \
- a, c->undef)); \
- } else { \
- vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_##name), a); \
- return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4)); \
- } \
+{ \
+ return vir_emit_def(c, vir_add_inst(V3D_QPU_A_##name, \
+ c->undef, \
+ a, c->undef)); \
} \
static inline struct qinst * \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest, \
struct qreg a) \
{ \
- if (c->devinfo->ver >= 41) { \
- return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_##name, \
- dest, \
- a, c->undef)); \
- } else { \
- vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_##name), a); \
- return vir_FMOV_dest(c, dest, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4)); \
- } \
+ return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_##name, \
+ dest, \
+ a, c->undef)); \
+}
+
+#define VIR_SFU2(name) \
+static inline struct qreg \
+vir_##name(struct v3d_compile *c, struct qreg a, struct qreg b) \
+{ \
+ return vir_emit_def(c, vir_add_inst(V3D_QPU_A_##name, \
+ c->undef, \
+ a, b)); \
+} \
+static inline struct qinst * \
+vir_##name##_dest(struct v3d_compile *c, struct qreg dest, \
+ struct qreg a, struct qreg b) \
+{ \
+ return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_##name, \
+ dest, \
+ a, b)); \
}
#define VIR_A_ALU2(name) VIR_ALU2(name, vir_add_inst, V3D_QPU_A_##name)
@@ -1343,6 +1434,28 @@ VIR_SFU(LOG)
VIR_SFU(SIN)
VIR_SFU(RSQRT2)
+VIR_SFU(BALLOT)
+VIR_SFU(BCASTF)
+VIR_SFU(ALLEQ)
+VIR_SFU(ALLFEQ)
+VIR_SFU2(ROTQ)
+VIR_SFU2(ROT)
+VIR_SFU2(SHUFFLE)
+
+VIR_A_ALU2(VPACK)
+VIR_A_ALU2(V8PACK)
+VIR_A_ALU2(V10PACK)
+VIR_A_ALU2(V11FPACK)
+
+VIR_M_ALU1(FTOUNORM16)
+VIR_M_ALU1(FTOSNORM16)
+
+VIR_M_ALU1(VFTOUNORM8)
+VIR_M_ALU1(VFTOSNORM8)
+
+VIR_M_ALU1(VFTOUNORM10LO)
+VIR_M_ALU1(VFTOUNORM10HI)
+
static inline struct qinst *
vir_MOV_cond(struct v3d_compile *c, enum v3d_qpu_cond cond,
struct qreg dest, struct qreg src)
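VIR_SFU2 generates two-operand emit helpers following the same pattern as VIR_SFU, so the new subgroup operations get vir_ROT, vir_SHUFFLE, and so on; a hedged usage sketch (operand order and meaning are assumptions based on the names):

    /* Rotate a value across lanes by a dynamic offset, and check whether
     * all active lanes hold the same value.
     */
    struct qreg rotated = vir_ROT(c, value, offset);   /* assumed operands */
    struct qreg all_eq  = vir_ALLEQ(c, value);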
@@ -1372,16 +1485,11 @@ vir_NOP(struct v3d_compile *c)
static inline struct qreg
vir_LDTMU(struct v3d_compile *c)
{
- if (c->devinfo->ver >= 41) {
- struct qinst *ldtmu = vir_add_inst(V3D_QPU_A_NOP, c->undef,
- c->undef, c->undef);
- ldtmu->qpu.sig.ldtmu = true;
-
- return vir_emit_def(c, ldtmu);
- } else {
- vir_NOP(c)->qpu.sig.ldtmu = true;
- return vir_MOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
- }
+ struct qinst *ldtmu = vir_add_inst(V3D_QPU_A_NOP, c->undef,
+ c->undef, c->undef);
+ ldtmu->qpu.sig.ldtmu = true;
+
+ return vir_emit_def(c, ldtmu);
}
static inline struct qreg
@@ -1394,7 +1502,6 @@ vir_UMUL(struct v3d_compile *c, struct qreg src0, struct qreg src1)
static inline struct qreg
vir_TLBU_COLOR_READ(struct v3d_compile *c, uint32_t config)
{
- assert(c->devinfo->ver >= 41); /* XXX */
assert((config & 0xffffff00) == 0xffffff00);
struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
@@ -1407,38 +1514,12 @@ vir_TLBU_COLOR_READ(struct v3d_compile *c, uint32_t config)
static inline struct qreg
vir_TLB_COLOR_READ(struct v3d_compile *c)
{
- assert(c->devinfo->ver >= 41); /* XXX */
-
struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
c->undef, c->undef);
ldtlb->qpu.sig.ldtlb = true;
return vir_emit_def(c, ldtlb);
}
-/*
-static inline struct qreg
-vir_LOAD_IMM(struct v3d_compile *c, uint32_t val)
-{
- return vir_emit_def(c, vir_inst(QOP_LOAD_IMM, c->undef,
- vir_reg(QFILE_LOAD_IMM, val), c->undef));
-}
-
-static inline struct qreg
-vir_LOAD_IMM_U2(struct v3d_compile *c, uint32_t val)
-{
- return vir_emit_def(c, vir_inst(QOP_LOAD_IMM_U2, c->undef,
- vir_reg(QFILE_LOAD_IMM, val),
- c->undef));
-}
-static inline struct qreg
-vir_LOAD_IMM_I2(struct v3d_compile *c, uint32_t val)
-{
- return vir_emit_def(c, vir_inst(QOP_LOAD_IMM_I2, c->undef,
- vir_reg(QFILE_LOAD_IMM, val),
- c->undef));
-}
-*/
-
static inline struct qinst *
vir_BRANCH(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
{