Diffstat (limited to 'src/mesa/state_tracker/st_glsl_to_nir.cpp')
-rw-r--r--  src/mesa/state_tracker/st_glsl_to_nir.cpp | 783
1 file changed, 352 insertions(+), 431 deletions(-)
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index b3594154aef..a341b3a7723 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
@@ -30,7 +30,6 @@
#include "program/program.h"
#include "program/prog_statevars.h"
#include "program/prog_parameter.h"
-#include "program/ir_to_mesa.h"
#include "main/context.h"
#include "main/mtypes.h"
#include "main/errors.h"
@@ -44,6 +43,7 @@
#include "st_shader_cache.h"
#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_builder.h"
#include "compiler/glsl_types.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/glsl/gl_nir.h"
@@ -51,12 +51,14 @@
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/linker_util.h"
+#include "compiler/glsl/program.h"
+#include "compiler/glsl/shader_cache.h"
#include "compiler/glsl/string_to_uint_map.h"
static int
type_size(const struct glsl_type *type)
{
- return type->count_attribute_slots(false);
+ return glsl_count_attribute_slots(type, false);
}
/* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
@@ -74,7 +76,7 @@ st_nir_fixup_varying_slots(struct st_context *st, nir_shader *shader,
assert(!st->allow_st_finalize_nir_twice);
nir_foreach_variable_with_modes(var, shader, mode) {
- if (var->data.location >= VARYING_SLOT_VAR0) {
+ if (var->data.location >= VARYING_SLOT_VAR0 && var->data.location < VARYING_SLOT_PATCH0) {
var->data.location += 9;
} else if (var->data.location == VARYING_SLOT_PNTC) {
var->data.location = VARYING_SLOT_VAR8;
@@ -85,19 +87,6 @@ st_nir_fixup_varying_slots(struct st_context *st, nir_shader *shader,
}
}
-static void
-st_shader_gather_info(nir_shader *nir, struct gl_program *prog)
-{
- nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
-
- /* Copy the info we just generated back into the gl_program */
- const char *prog_name = prog->info.name;
- const char *prog_label = prog->info.label;
- prog->info = nir->info;
- prog->info.name = prog_name;
- prog->info.label = prog_label;
-}
-
/* input location assignment for VS inputs must be handled specially, so
* that it is aligned w/ st's vbo state.
* (This isn't the case with, for ex, FS inputs, which only need to agree
@@ -134,7 +123,7 @@ st_nir_assign_vs_in_locations(struct nir_shader *nir)
/* Re-lower global vars, to deal with any dead VS inputs. */
if (removed_inputs)
- NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+ NIR_PASS(_, nir, nir_lower_global_vars_to_local);
}
static int
@@ -172,15 +161,6 @@ st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
* fails. In this case just find the first matching "color.*"..
*
* Note for arrays you could end up w/ color[n].f, for example.
- *
- * glsl_to_tgsi works slightly differently in this regard. It is
- * emitting something more low level, so it just translates the
- * params list 1:1 to CONST[] regs. Going from GLSL IR to TGSI,
- * it just calculates the additional offset of struct field members
- * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
- * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir). It never
- * needs to work backwards to get base var loc from the param-list
- * which already has them separated out.
*/
if (!prog->sh.data->spirv) {
int namelen = strlen(var->name);
@@ -204,12 +184,13 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
int shaderidx = 0;
int imageidx = 0;
- nir_foreach_uniform_variable(uniform, nir) {
+ nir_foreach_variable_with_modes(uniform, nir, nir_var_uniform |
+ nir_var_image) {
int loc;
const struct glsl_type *type = glsl_without_array(uniform->type);
- if (!uniform->data.bindless && (type->is_sampler() || type->is_image())) {
- if (type->is_sampler()) {
+ if (!uniform->data.bindless && (glsl_type_is_sampler(type) || glsl_type_is_image(type))) {
+ if (glsl_type_is_sampler(type)) {
loc = shaderidx;
shaderidx += type_size(uniform->type);
} else {
@@ -218,9 +199,6 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
}
} else if (uniform->state_slots) {
const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
- /* This state reference has already been setup by ir_to_mesa, but we'll
- * get the same index back here.
- */
unsigned comps;
if (glsl_type_is_struct_or_ifc(type)) {
@@ -252,188 +230,11 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
}
}
-void
-st_nir_opts(nir_shader *nir)
-{
- bool progress;
-
- do {
- progress = false;
-
- NIR_PASS_V(nir, nir_lower_vars_to_ssa);
-
- /* Linking deals with unused inputs/outputs, but here we can remove
- * things local to the shader in the hopes that we can cleanup other
- * things. This pass will also remove variables with only stores, so we
- * might be able to make progress after it.
- */
- NIR_PASS(progress, nir, nir_remove_dead_variables,
- nir_var_function_temp | nir_var_shader_temp |
- nir_var_mem_shared,
- NULL);
-
- NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
- NIR_PASS(progress, nir, nir_opt_dead_write_vars);
-
- if (nir->options->lower_to_scalar) {
- NIR_PASS_V(nir, nir_lower_alu_to_scalar,
- nir->options->lower_to_scalar_filter, NULL);
- NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);
- }
-
- NIR_PASS_V(nir, nir_lower_alu);
- NIR_PASS_V(nir, nir_lower_pack);
- NIR_PASS(progress, nir, nir_copy_prop);
- NIR_PASS(progress, nir, nir_opt_remove_phis);
- NIR_PASS(progress, nir, nir_opt_dce);
- if (nir_opt_trivial_continues(nir)) {
- progress = true;
- NIR_PASS(progress, nir, nir_copy_prop);
- NIR_PASS(progress, nir, nir_opt_dce);
- }
- NIR_PASS(progress, nir, nir_opt_if, false);
- NIR_PASS(progress, nir, nir_opt_dead_cf);
- NIR_PASS(progress, nir, nir_opt_cse);
- NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
-
- NIR_PASS(progress, nir, nir_opt_phi_precision);
- NIR_PASS(progress, nir, nir_opt_algebraic);
- NIR_PASS(progress, nir, nir_opt_constant_folding);
-
- if (!nir->info.flrp_lowered) {
- unsigned lower_flrp =
- (nir->options->lower_flrp16 ? 16 : 0) |
- (nir->options->lower_flrp32 ? 32 : 0) |
- (nir->options->lower_flrp64 ? 64 : 0);
-
- if (lower_flrp) {
- bool lower_flrp_progress = false;
-
- NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
- lower_flrp,
- false /* always_precise */);
- if (lower_flrp_progress) {
- NIR_PASS(progress, nir,
- nir_opt_constant_folding);
- progress = true;
- }
- }
-
- /* Nothing should rematerialize any flrps, so we only need to do this
- * lowering once.
- */
- nir->info.flrp_lowered = true;
- }
-
- NIR_PASS(progress, nir, nir_opt_undef);
- NIR_PASS(progress, nir, nir_opt_conditional_discard);
- if (nir->options->max_unroll_iterations) {
- NIR_PASS(progress, nir, nir_opt_loop_unroll);
- }
- } while (progress);
-}
-
-static void
-shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
-{
- assert(glsl_type_is_vector_or_scalar(type));
-
- uint32_t comp_size = glsl_type_is_boolean(type)
- ? 4 : glsl_get_bit_size(type) / 8;
- unsigned length = glsl_get_vector_elements(type);
- *size = comp_size * length,
- *align = comp_size * (length == 3 ? 4 : length);
-}
-
-/* First third of converting glsl_to_nir.. this leaves things in a pre-
- * nir_lower_io state, so that shader variants can more easily insert/
- * replace variables, etc.
- */
-static void
-st_nir_preprocess(struct st_context *st, struct gl_program *prog,
- struct gl_shader_program *shader_program,
- gl_shader_stage stage)
-{
- struct pipe_screen *screen = st->screen;
- const nir_shader_compiler_options *options =
- st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
- assert(options);
- nir_shader *nir = prog->nir;
-
- /* Set the next shader stage hint for VS and TES. */
- if (!nir->info.separate_shader &&
- (nir->info.stage == MESA_SHADER_VERTEX ||
- nir->info.stage == MESA_SHADER_TESS_EVAL)) {
-
- unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
- unsigned stages_mask =
- ~prev_stages & shader_program->data->linked_stages;
-
- nir->info.next_stage = stages_mask ?
- (gl_shader_stage) u_bit_scan(&stages_mask) : MESA_SHADER_FRAGMENT;
- } else {
- nir->info.next_stage = MESA_SHADER_FRAGMENT;
- }
-
- nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- if (!st->ctx->SoftFP64 && ((nir->info.bit_sizes_int | nir->info.bit_sizes_float) & 64) &&
- (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
- st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
- }
-
- /* ES has strict SSO validation rules for shader IO matching so we can't
- * remove dead IO until the resource list has been built. Here we skip
- * removing them until later. This will potentially make the IO lowering
- * calls below do a little extra work but should otherwise have no impact.
- */
- if (!_mesa_is_gles(st->ctx) || !nir->info.separate_shader) {
- nir_variable_mode mask = nir_var_shader_in | nir_var_shader_out;
- nir_remove_dead_variables(nir, mask, NULL);
- }
-
- if (options->lower_all_io_to_temps ||
- nir->info.stage == MESA_SHADER_VERTEX ||
- nir->info.stage == MESA_SHADER_GEOMETRY) {
- NIR_PASS_V(nir, nir_lower_io_to_temporaries,
- nir_shader_get_entrypoint(nir),
- true, true);
- } else if (nir->info.stage == MESA_SHADER_FRAGMENT ||
- !screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS)) {
- NIR_PASS_V(nir, nir_lower_io_to_temporaries,
- nir_shader_get_entrypoint(nir),
- true, false);
- }
-
- NIR_PASS_V(nir, nir_lower_global_vars_to_local);
- NIR_PASS_V(nir, nir_split_var_copies);
- NIR_PASS_V(nir, nir_lower_var_copies);
-
- if (options->lower_to_scalar) {
- NIR_PASS_V(nir, nir_lower_alu_to_scalar,
- options->lower_to_scalar_filter, NULL);
- }
-
- /* before buffers and vars_to_ssa */
- NIR_PASS_V(nir, gl_nir_lower_images, true);
-
- /* TODO: Change GLSL to not lower shared memory. */
- if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
- shader_program->data->spirv) {
- NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
- nir_var_mem_shared, shared_type_info);
- NIR_PASS_V(prog->nir, nir_lower_explicit_io,
- nir_var_mem_shared, nir_address_format_32bit_offset);
- }
-
- /* Do a round of constant folding to clean up address calculations */
- NIR_PASS_V(nir, nir_opt_constant_folding);
-}
-
static bool
-dest_is_64bit(nir_dest *dest, void *state)
+def_is_64bit(nir_def *def, void *state)
{
bool *lower = (bool *)state;
- if (dest && (nir_dest_bit_size(*dest) == 64)) {
+ if (def && (def->bit_size == 64)) {
*lower = true;
return false;
}
@@ -459,7 +260,7 @@ filter_64_bit_instr(const nir_instr *const_instr, UNUSED const void *data)
* doesn't have const variants, so do the ugly const_cast here. */
nir_instr *instr = const_cast<nir_instr *>(const_instr);
- nir_foreach_dest(instr, dest_is_64bit, &lower);
+ nir_foreach_def(instr, def_is_64bit, &lower);
if (lower)
return true;
nir_foreach_src(instr, src_is_64bit, &lower);
@@ -511,30 +312,21 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
* storage is only associated with the original parameter list.
* This should be enough for Bitmap and DrawPixels constants.
*/
- _mesa_ensure_and_associate_uniform_storage(st->ctx, shader_program, prog, 16);
-
- st_set_prog_affected_state_flags(prog);
+ _mesa_ensure_and_associate_uniform_storage(st->ctx, shader_program, prog, 28);
/* None of the builtins being lowered here can be produced by SPIR-V. See
* _mesa_builtin_uniform_desc. Also drivers that support packed uniform
* storage don't need to lower builtins.
*/
if (!shader_program->data->spirv &&
- !st->ctx->Const.PackedDriverUniformStorage) {
- /* at this point, array uniforms have been split into separate
- * nir_variable structs where possible. this codepath can't handle dynamic
- * array indexing, however, so all indirect uniform derefs
- * must be eliminated beforehand to avoid trying to lower one of those builtins
- */
- NIR_PASS_V(nir, nir_lower_indirect_builtin_uniform_derefs);
- NIR_PASS_V(nir, st_nir_lower_builtin);
- }
+ !st->ctx->Const.PackedDriverUniformStorage)
+ NIR_PASS(_, nir, st_nir_lower_builtin);
if (!screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF))
- NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
+ NIR_PASS(_, nir, gl_nir_lower_atomics, shader_program, true);
- NIR_PASS_V(nir, nir_opt_intrinsics);
- NIR_PASS_V(nir, nir_opt_fragdepth);
+ NIR_PASS(_, nir, nir_opt_intrinsics);
+ NIR_PASS(_, nir, nir_opt_fragdepth);
/* Lower 64-bit ops. */
if (nir->options->lower_int64_options ||
@@ -542,40 +334,61 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
bool lowered_64bit_ops = false;
bool revectorize = false;
- /* nir_lower_doubles is not prepared for vector ops, so if the backend doesn't
- * request lower_alu_to_scalar until now, lower all 64 bit ops, and try to
- * vectorize them afterwards again */
- if (!nir->options->lower_to_scalar) {
- NIR_PASS(revectorize, nir, nir_lower_alu_to_scalar, filter_64_bit_instr, nullptr);
- NIR_PASS(revectorize, nir, nir_lower_phis_to_scalar, false);
- }
-
if (nir->options->lower_doubles_options) {
+ /* nir_lower_doubles is not prepared for vector ops, so if the backend doesn't
+ * request lower_alu_to_scalar until now, lower all 64 bit ops, and try to
+ * vectorize them afterwards again */
+ if (!nir->options->lower_to_scalar) {
+ NIR_PASS(revectorize, nir, nir_lower_alu_to_scalar, filter_64_bit_instr, nullptr);
+ NIR_PASS(revectorize, nir, nir_lower_phis_to_scalar, false);
+ }
+ /* doubles lowering requires frexp to be lowered first if it will be,
+ * since the pass generates other 64-bit ops. Most backends lower
+ * frexp, and using doubles is rare, and using frexp is even more rare
+ * (no instances in shader-db), so we're not too worried about
+ * accidentally lowering a 32-bit frexp here.
+ */
+ NIR_PASS(lowered_64bit_ops, nir, nir_lower_frexp);
+
NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
st->ctx->SoftFP64, nir->options->lower_doubles_options);
}
if (nir->options->lower_int64_options)
NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64);
- if (revectorize)
- NIR_PASS_V(nir, nir_opt_vectorize, nullptr, nullptr);
+ if (revectorize && !nir->options->vectorize_vec2_16bit)
+ NIR_PASS(_, nir, nir_opt_vectorize, nullptr, nullptr);
if (revectorize || lowered_64bit_ops)
- st_nir_opts(nir);
+ gl_nir_opts(nir);
}
nir_variable_mode mask =
nir_var_shader_in | nir_var_shader_out | nir_var_function_temp;
nir_remove_dead_variables(nir, mask, NULL);
- if (!st->has_hw_atomics && !screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF))
- NIR_PASS_V(nir, nir_lower_atomics_to_ssbo);
+ if (!st->has_hw_atomics && !screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF)) {
+ unsigned align_offset_state = 0;
+ if (st->ctx->Const.ShaderStorageBufferOffsetAlignment > 4) {
+ struct gl_program_parameter_list *params = prog->Parameters;
+ for (unsigned i = 0; i < shader_program->data->NumAtomicBuffers; i++) {
+ gl_state_index16 state[STATE_LENGTH] = { STATE_ATOMIC_COUNTER_OFFSET, (short)shader_program->data->AtomicBuffers[i].Binding };
+ _mesa_add_state_reference(params, state);
+ }
+ align_offset_state = STATE_ATOMIC_COUNTER_OFFSET;
+ }
+ NIR_PASS(_, nir, nir_lower_atomics_to_ssbo, align_offset_state);
+ }
+
+ st_set_prog_affected_state_flags(prog);
st_finalize_nir_before_variants(nir);
char *msg = NULL;
- if (st->allow_st_finalize_nir_twice)
- msg = st_finalize_nir(st, prog, shader_program, nir, true, true);
+ if (st->allow_st_finalize_nir_twice) {
+ st_serialize_base_nir(prog, nir);
+ msg = st_finalize_nir(st, prog, shader_program, nir, true, true, false);
+ }
if (st->ctx->_Shader->Flags & GLSL_DUMP) {
_mesa_log("\n");
@@ -592,9 +405,19 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
static void
st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
{
- NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
- NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
- NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
+ if (consumer)
+ NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
+
+ if (!producer)
+ return;
+
+ NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
+
+ if (producer->info.stage == MESA_SHADER_TESS_CTRL &&
+ producer->options->vectorize_tess_levels)
+ NIR_PASS(_, producer, nir_vectorize_tess_levels);
+
+ NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
if ((producer)->info.stage != MESA_SHADER_TESS_CTRL) {
/* Calling lower_io_to_vector creates output variable writes with
@@ -603,90 +426,34 @@ st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
* them. This, in turn, creates temporary variables and extra
* copy_deref intrinsics that we need to clean up.
*/
- NIR_PASS_V(producer, nir_lower_io_to_temporaries,
+ NIR_PASS(_, producer, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(producer), true, false);
- NIR_PASS_V(producer, nir_lower_global_vars_to_local);
- NIR_PASS_V(producer, nir_split_var_copies);
- NIR_PASS_V(producer, nir_lower_var_copies);
+ NIR_PASS(_, producer, nir_lower_global_vars_to_local);
+ NIR_PASS(_, producer, nir_split_var_copies);
+ NIR_PASS(_, producer, nir_lower_var_copies);
}
/* Undef scalar store_deref intrinsics are not ignored by nir_lower_io,
* so they must be removed before that. These passes remove them.
*/
- NIR_PASS_V(producer, nir_lower_vars_to_ssa);
- NIR_PASS_V(producer, nir_opt_undef);
- NIR_PASS_V(producer, nir_opt_dce);
-}
-
-static void
-st_nir_link_shaders(nir_shader *producer, nir_shader *consumer)
-{
- if (producer->options->lower_to_scalar) {
- NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
- NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
- }
-
- nir_lower_io_arrays_to_elements(producer, consumer);
-
- st_nir_opts(producer);
- st_nir_opts(consumer);
-
- if (nir_link_opt_varyings(producer, consumer))
- st_nir_opts(consumer);
-
- NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
- NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
-
- if (nir_remove_unused_varyings(producer, consumer)) {
- NIR_PASS_V(producer, nir_lower_global_vars_to_local);
- NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
-
- st_nir_opts(producer);
- st_nir_opts(consumer);
-
- /* Optimizations can cause varyings to become unused.
- * nir_compact_varyings() depends on all dead varyings being removed so
- * we need to call nir_remove_dead_variables() again here.
- */
- NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out,
- NULL);
- NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in,
- NULL);
- }
-
- nir_link_varying_precision(producer, consumer);
-}
-
-static void
-st_lower_patch_vertices_in(struct gl_shader_program *shader_prog)
-{
- struct gl_linked_shader *linked_tcs =
- shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
- struct gl_linked_shader *linked_tes =
- shader_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
-
- /* If we have a TCS and TES linked together, lower TES patch vertices. */
- if (linked_tcs && linked_tes) {
- nir_shader *tcs_nir = linked_tcs->Program->nir;
- nir_shader *tes_nir = linked_tes->Program->nir;
-
- /* The TES input vertex count is the TCS output vertex count,
- * lower TES gl_PatchVerticesIn to a constant.
- */
- uint32_t tes_patch_verts = tcs_nir->info.tess.tcs_vertices_out;
- NIR_PASS_V(tes_nir, nir_lower_patch_vertices, tes_patch_verts, NULL);
- }
+ NIR_PASS(_, producer, nir_lower_vars_to_ssa);
+ NIR_PASS(_, producer, nir_opt_undef);
+ NIR_PASS(_, producer, nir_opt_dce);
}
extern "C" {
-void
+bool
st_nir_lower_wpos_ytransform(struct nir_shader *nir,
struct gl_program *prog,
struct pipe_screen *pscreen)
{
- if (nir->info.stage != MESA_SHADER_FRAGMENT)
- return;
+ bool progress = false;
+
+ if (nir->info.stage != MESA_SHADER_FRAGMENT) {
+ nir_shader_preserve_all_metadata(nir);
+ return progress;
+ }
static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
STATE_FB_WPOS_Y_TRANSFORM
@@ -697,20 +464,20 @@ st_nir_lower_wpos_ytransform(struct nir_shader *nir,
sizeof(wpos_options.state_tokens));
wpos_options.fs_coord_origin_upper_left =
pscreen->get_param(pscreen,
- PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
+ PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT);
wpos_options.fs_coord_origin_lower_left =
pscreen->get_param(pscreen,
- PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
+ PIPE_CAP_FS_COORD_ORIGIN_LOWER_LEFT);
wpos_options.fs_coord_pixel_center_integer =
pscreen->get_param(pscreen,
- PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
+ PIPE_CAP_FS_COORD_PIXEL_CENTER_INTEGER);
wpos_options.fs_coord_pixel_center_half_integer =
pscreen->get_param(pscreen,
- PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);
+ PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER);
if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
- nir_validate_shader(nir, "after nir_lower_wpos_ytransform");
_mesa_add_state_reference(prog->Parameters, wposTransformState);
+ progress = true;
}
static const gl_state_index16 pntcTransformState[STATE_LENGTH] = {
@@ -719,17 +486,29 @@ st_nir_lower_wpos_ytransform(struct nir_shader *nir,
if (nir_lower_pntc_ytransform(nir, &pntcTransformState)) {
_mesa_add_state_reference(prog->Parameters, pntcTransformState);
+ progress = true;
}
+
+ return progress;
}
-bool
-st_link_nir(struct gl_context *ctx,
- struct gl_shader_program *shader_program)
+static bool
+st_link_glsl_to_nir(struct gl_context *ctx,
+ struct gl_shader_program *shader_program)
{
struct st_context *st = st_context(ctx);
struct gl_linked_shader *linked_shader[MESA_SHADER_STAGES];
unsigned num_shaders = 0;
+ /* Return early if we are loading the shader from on-disk cache */
+ if (st_load_nir_from_disk_cache(ctx, shader_program)) {
+ return GL_TRUE;
+ }
+
+ MESA_TRACE_FUNC();
+
+ assert(shader_program->data->LinkStatus);
+
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
if (shader_program->_LinkedShaders[i])
linked_shader[num_shaders++] = shader_program->_LinkedShaders[i];
@@ -740,13 +519,12 @@ st_link_nir(struct gl_context *ctx,
const nir_shader_compiler_options *options =
st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
struct gl_program *prog = shader->Program;
- struct st_program *stp = (struct st_program *)prog;
- _mesa_copy_linked_program_data(shader_program, shader);
+ shader->Program->info.separate_shader = shader_program->SeparateShader;
assert(!prog->nir);
- stp->shader_program = shader_program;
- stp->state.type = PIPE_SHADER_IR_NIR;
+ prog->shader_program = shader_program;
+ prog->state.type = PIPE_SHADER_IR_NIR;
/* Parameters will be filled during NIR linking. */
prog->Parameters = _mesa_new_parameter_list();
@@ -754,8 +532,6 @@ st_link_nir(struct gl_context *ctx,
if (shader_program->data->spirv) {
prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
} else {
- validate_ir_tree(shader->ir);
-
if (ctx->_Shader->Flags & GLSL_DUMP) {
_mesa_log("\n");
_mesa_log("GLSL IR for linked %s program %d:\n",
@@ -765,84 +541,97 @@ st_link_nir(struct gl_context *ctx,
_mesa_log("\n\n");
}
- prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
- st_nir_preprocess(st, prog, shader_program, shader->Stage);
+ prog->nir = glsl_to_nir(&st->ctx->Const, shader_program, shader->Stage, options);
}
- if (options->lower_to_scalar) {
- NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
+ memcpy(prog->nir->info.source_sha1, shader->linked_source_sha1,
+ SHA1_DIGEST_LENGTH);
+
+ nir_shader_gather_info(prog->nir, nir_shader_get_entrypoint(prog->nir));
+ if (!st->ctx->SoftFP64 && ((prog->nir->info.bit_sizes_int | prog->nir->info.bit_sizes_float) & 64) &&
+ (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
+
+ /* It's not possible to use float64 on GLSL ES, so don't bother trying to
+ * build the support code. The support code depends on higher versions of
+ * desktop GLSL, so it will fail to compile (below) anyway.
+ */
+ if (_mesa_is_desktop_gl(st->ctx) && st->ctx->Const.GLSLVersion >= 400)
+ st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
}
}
- st_lower_patch_vertices_in(shader_program);
-
- /* For SPIR-V, we have to perform the NIR linking before applying
- * st_nir_preprocess.
- */
if (shader_program->data->spirv) {
static const gl_nir_linker_options opts = {
true /*fill_parameters */
};
- if (!gl_nir_link_spirv(ctx, shader_program, &opts))
+ if (!gl_nir_link_spirv(&ctx->Const, &ctx->Extensions, shader_program,
+ &opts))
return GL_FALSE;
-
- nir_build_program_resource_list(ctx, shader_program, true);
-
- for (unsigned i = 0; i < num_shaders; i++) {
- struct gl_linked_shader *shader = linked_shader[i];
- struct gl_program *prog = shader->Program;
-
- prog->ExternalSamplersUsed = gl_external_samplers(prog);
- _mesa_update_shader_textures_used(shader_program, prog);
- st_nir_preprocess(st, prog, shader_program, shader->Stage);
- }
- }
-
- /* Linking the stages in the opposite order (from fragment to vertex)
- * ensures that inter-shader outputs written to in an earlier stage
- * are eliminated if they are (transitively) not used in a later
- * stage.
- */
- for (int i = num_shaders - 2; i >= 0; i--) {
- st_nir_link_shaders(linked_shader[i]->Program->nir,
- linked_shader[i + 1]->Program->nir);
- }
- /* Linking shaders also optimizes them. Separate shaders, compute shaders
- * and shaders with a fixed-func VS or FS that don't need linking are
- * optimized here.
- */
- if (num_shaders == 1)
- st_nir_opts(linked_shader[0]->Program->nir);
-
- if (!shader_program->data->spirv) {
+ } else {
if (!gl_nir_link_glsl(ctx, shader_program))
return GL_FALSE;
+ }
- for (unsigned i = 0; i < num_shaders; i++) {
- struct gl_program *prog = linked_shader[i]->Program;
- prog->ExternalSamplersUsed = gl_external_samplers(prog);
- _mesa_update_shader_textures_used(shader_program, prog);
- }
-
- nir_build_program_resource_list(ctx, shader_program, false);
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_program *prog = linked_shader[i]->Program;
+ prog->ExternalSamplersUsed = gl_external_samplers(prog);
+ _mesa_update_shader_textures_used(shader_program, prog);
}
+ nir_build_program_resource_list(&ctx->Const, shader_program,
+ shader_program->data->spirv);
+
for (unsigned i = 0; i < num_shaders; i++) {
struct gl_linked_shader *shader = linked_shader[i];
nir_shader *nir = shader->Program->nir;
+ gl_shader_stage stage = shader->Stage;
+ const struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[stage];
- /* don't infer ACCESS_NON_READABLE so that Program->sh.ImageAccess is
- * correct: https://gitlab.freedesktop.org/mesa/mesa/-/issues/3278
+ if (nir->info.io_lowered) {
+ /* Since IO is lowered, we won't need the IO variables from now on.
+ * nir_build_program_resource_list was the last pass that needed them.
+ */
+ NIR_PASS_V(nir, nir_remove_dead_variables,
+ nir_var_shader_in | nir_var_shader_out, NULL);
+ }
+
+ /* If there are forms of indirect addressing that the driver
+ * cannot handle, perform the lowering pass.
*/
- nir_opt_access_options opt_access_options;
- opt_access_options.is_vulkan = false;
- opt_access_options.infer_non_readable = false;
- NIR_PASS_V(nir, nir_opt_access, &opt_access_options);
+ if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput ||
+ options->EmitNoIndirectTemp || options->EmitNoIndirectUniform) {
+ nir_variable_mode mode = (nir_variable_mode)0;
+
+ if (!nir->info.io_lowered) {
+ mode |= options->EmitNoIndirectInput ?
+ nir_var_shader_in : (nir_variable_mode)0;
+ mode |= options->EmitNoIndirectOutput ?
+ nir_var_shader_out : (nir_variable_mode)0;
+ }
+ mode |= options->EmitNoIndirectTemp ?
+ nir_var_function_temp : (nir_variable_mode)0;
+ mode |= options->EmitNoIndirectUniform ?
+ nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo :
+ (nir_variable_mode)0;
+
+ if (mode)
+ nir_lower_indirect_derefs(nir, mode, UINT32_MAX);
+ }
/* This needs to run after the initial pass of nir_lower_vars_to_ssa, so
* that the buffer indices are constants in nir where they were
* constants in GLSL. */
- NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
+ NIR_PASS(_, nir, gl_nir_lower_buffers, shader_program);
+
+ NIR_PASS(_, nir, st_nir_lower_wpos_ytransform, shader->Program,
+ st->screen);
+
+ NIR_PASS(_, nir, nir_lower_system_values);
+ NIR_PASS(_, nir, nir_lower_compute_system_values, NULL);
+
+ if (nir->info.io_lowered)
+ continue; /* the rest is for non-lowered IO only */
/* Remap the locations to slots so those requiring two slots will occupy
* two locations. For instance, if we have in the IR code a dvec3 attr0 in
@@ -852,25 +641,6 @@ st_link_nir(struct gl_context *ctx,
if (nir->info.stage == MESA_SHADER_VERTEX && !shader_program->data->spirv)
nir_remap_dual_slot_attributes(nir, &shader->Program->DualSlotInputs);
- NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
- st->screen);
-
- NIR_PASS_V(nir, nir_lower_system_values);
- NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
-
- NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
-
- st_shader_gather_info(nir, shader->Program);
- if (shader->Stage == MESA_SHADER_VERTEX) {
- /* NIR expands dual-slot inputs out to two locations. We need to
- * compact things back down GL-style single-slot inputs to avoid
- * confusing the state tracker.
- */
- shader->Program->info.inputs_read =
- nir_get_single_slot_attribs_mask(nir->info.inputs_read,
- shader->Program->DualSlotInputs);
- }
-
if (i >= 1) {
struct gl_program *prev_shader = linked_shader[i - 1]->Program;
@@ -888,6 +658,23 @@ st_link_nir(struct gl_context *ctx,
}
}
+ /* If the program is a separate shader program check if we need to vectorise
+ * the first and last program interfaces too.
+ */
+ if (shader_program->SeparateShader && num_shaders > 0) {
+ struct gl_linked_shader *first_shader = linked_shader[0];
+ struct gl_linked_shader *last_shader = linked_shader[num_shaders - 1];
+ if (first_shader->Stage != MESA_SHADER_COMPUTE) {
+ if (ctx->Const.ShaderCompilerOptions[first_shader->Stage].NirOptions->vectorize_io &&
+ first_shader->Stage > MESA_SHADER_VERTEX)
+ st_nir_vectorize_io(NULL, first_shader->Program->nir);
+
+ if (ctx->Const.ShaderCompilerOptions[last_shader->Stage].NirOptions->vectorize_io &&
+ last_shader->Stage < MESA_SHADER_FRAGMENT)
+ st_nir_vectorize_io(last_shader->Program->nir, NULL);
+ }
+ }
+
struct shader_info *prev_info = NULL;
for (unsigned i = 0; i < num_shaders; i++) {
@@ -897,7 +684,7 @@ st_link_nir(struct gl_context *ctx,
char *msg = st_glsl_to_nir_post_opts(st, shader->Program, shader_program);
if (msg) {
linker_error(shader_program, msg);
- break;
+ return false;
}
if (prev_info &&
@@ -916,7 +703,6 @@ st_link_nir(struct gl_context *ctx,
for (unsigned i = 0; i < num_shaders; i++) {
struct gl_linked_shader *shader = linked_shader[i];
struct gl_program *prog = shader->Program;
- struct st_program *stp = st_program(prog);
/* Make sure that prog->info is in sync with nir->info, but st/mesa
* expects some of the values to be from before lowering.
@@ -928,12 +714,25 @@ st_link_nir(struct gl_context *ctx,
prog->info.num_ssbos = old_info.num_ssbos;
prog->info.num_ubos = old_info.num_ubos;
prog->info.num_abos = old_info.num_abos;
- if (prog->info.stage == MESA_SHADER_VERTEX)
- prog->info.inputs_read = old_info.inputs_read;
- /* Initialize st_vertex_program members. */
- if (shader->Stage == MESA_SHADER_VERTEX)
- st_prepare_vertex_program(stp, NULL);
+ if (prog->info.stage == MESA_SHADER_VERTEX) {
+ if (prog->nir->info.io_lowered &&
+ prog->nir->options->io_options & nir_io_glsl_opt_varyings) {
+ prog->info.inputs_read = prog->nir->info.inputs_read;
+ prog->DualSlotInputs = prog->nir->info.dual_slot_inputs;
+ } else {
+ /* NIR expands dual-slot inputs out to two locations. We need to
+ * compact things back down GL-style single-slot inputs to avoid
+ * confusing the state tracker.
+ */
+ prog->info.inputs_read =
+ nir_get_single_slot_attribs_mask(prog->nir->info.inputs_read,
+ prog->DualSlotInputs);
+ }
+
+ /* Initialize st_vertex_program members. */
+ st_prepare_vertex_program(prog);
+ }
/* Get pipe_stream_output_info. */
if (shader->Stage == MESA_SHADER_VERTEX ||
@@ -941,14 +740,29 @@ st_link_nir(struct gl_context *ctx,
shader->Stage == MESA_SHADER_GEOMETRY)
st_translate_stream_output_info(prog);
- st_store_ir_in_disk_cache(st, prog, true);
+ st_store_nir_in_disk_cache(st, prog);
- st_release_variants(st, stp);
+ st_release_variants(st, prog);
st_finalize_program(st, prog);
+ }
+
+ struct pipe_context *pctx = st_context(ctx)->pipe;
+ if (pctx->link_shader) {
+ void *driver_handles[PIPE_SHADER_TYPES];
+ memset(driver_handles, 0, sizeof(driver_handles));
- /* The GLSL IR won't be needed anymore. */
- ralloc_free(shader->ir);
- shader->ir = NULL;
+ for (uint32_t i = 0; i < MESA_SHADER_STAGES; ++i) {
+ struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+ if (shader) {
+ struct gl_program *p = shader->Program;
+ if (p && p->variants) {
+ enum pipe_shader_type type = pipe_shader_type_from_mesa(shader->Stage);
+ driver_handles[type] = p->variants->driver_shader;
+ }
+ }
+ }
+
+ pctx->link_shader(pctx, driver_handles);
}
return true;
@@ -957,6 +771,10 @@ st_link_nir(struct gl_context *ctx,
void
st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
{
+ /* Lowered IO don't have variables, so exit. */
+ if (nir->info.io_lowered)
+ return;
+
if (nir->info.stage == MESA_SHADER_VERTEX) {
nir_assign_io_var_locations(nir, nir_var_shader_out,
&nir->num_outputs,
@@ -995,14 +813,17 @@ st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
struct gl_program *prog)
{
if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
- NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
+ NIR_PASS(_, nir, gl_nir_lower_samplers_as_deref, shader_program);
else
- NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
+ NIR_PASS(_, nir, gl_nir_lower_samplers, shader_program);
if (prog) {
BITSET_COPY(prog->info.textures_used, nir->info.textures_used);
BITSET_COPY(prog->info.textures_used_by_txf, nir->info.textures_used_by_txf);
- prog->info.images_used = nir->info.images_used;
+ BITSET_COPY(prog->info.samplers_used, nir->info.samplers_used);
+ BITSET_COPY(prog->info.images_used, nir->info.images_used);
+ BITSET_COPY(prog->info.image_buffers, nir->info.image_buffers);
+ BITSET_COPY(prog->info.msaa_images, nir->info.msaa_images);
}
}
@@ -1022,17 +843,17 @@ void
st_nir_lower_uniforms(struct st_context *st, nir_shader *nir)
{
if (st->ctx->Const.PackedDriverUniformStorage) {
- NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
+ NIR_PASS(_, nir, nir_lower_io, nir_var_uniform,
st_packed_uniforms_type_size,
(nir_lower_io_options)0);
} else {
- NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
+ NIR_PASS(_, nir, nir_lower_io, nir_var_uniform,
st_unpacked_uniforms_type_size,
(nir_lower_io_options)0);
}
if (nir->options->lower_uniforms_to_ubo)
- NIR_PASS_V(nir, nir_lower_uniforms_to_ubo,
+ NIR_PASS(_, nir, nir_lower_uniforms_to_ubo,
st->ctx->Const.PackedDriverUniformStorage,
!st->ctx->Const.NativeIntegers);
}
@@ -1044,30 +865,48 @@ char *
st_finalize_nir(struct st_context *st, struct gl_program *prog,
struct gl_shader_program *shader_program,
nir_shader *nir, bool finalize_by_driver,
- bool is_before_variants)
+ bool is_before_variants,
+ bool is_draw_shader)
{
struct pipe_screen *screen = st->screen;
- NIR_PASS_V(nir, nir_split_var_copies);
- NIR_PASS_V(nir, nir_lower_var_copies);
+ MESA_TRACE_FUNC();
- if (st->lower_rect_tex) {
- struct nir_lower_tex_options opts = { 0 };
+ NIR_PASS(_, nir, nir_split_var_copies);
+ NIR_PASS(_, nir, nir_lower_var_copies);
- opts.lower_rect = true;
+ const bool lower_tg4_offsets =
+ !is_draw_shader && !st->screen->get_param(screen, PIPE_CAP_TEXTURE_GATHER_OFFSETS);
- NIR_PASS_V(nir, nir_lower_tex, &opts);
+ if (!is_draw_shader && (st->lower_rect_tex || lower_tg4_offsets)) {
+ struct nir_lower_tex_options opts = {0};
+ opts.lower_rect = !!st->lower_rect_tex;
+ opts.lower_tg4_offsets = lower_tg4_offsets;
+
+ NIR_PASS(_, nir, nir_lower_tex, &opts);
}
st_nir_assign_varying_locations(st, nir);
st_nir_assign_uniform_locations(st->ctx, prog, nir);
+ /* Lower load_deref/store_deref of inputs and outputs.
+ * This depends on st_nir_assign_varying_locations.
+ *
+ * TODO: remove this once nir_io_glsl_opt_varyings is enabled by default.
+ */
+ if (!is_draw_shader && nir->options->io_options & nir_io_glsl_lower_derefs &&
+ !(nir->options->io_options & nir_io_glsl_opt_varyings)) {
+ nir_lower_io_passes(nir, false);
+ NIR_PASS(_, nir, nir_remove_dead_variables,
+ nir_var_shader_in | nir_var_shader_out, NULL);
+ }
+
/* Set num_uniforms in number of attribute slots (vec4s) */
nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);
st_nir_lower_uniforms(st, nir);
- if (is_before_variants && nir->options->lower_uniforms_to_ubo) {
+ if (!is_draw_shader && is_before_variants && nir->options->lower_uniforms_to_ubo) {
/* This must be done after uniforms are lowered to UBO and all
* nir_var_uniform variables are removed from NIR to prevent conflicts
* between state parameter merging and shader variant generation.
@@ -1076,14 +915,96 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog,
}
st_nir_lower_samplers(screen, nir, shader_program, prog);
- if (!screen->get_param(screen, PIPE_CAP_NIR_IMAGES_AS_DEREF))
- NIR_PASS_V(nir, gl_nir_lower_images, false);
+ if (!is_draw_shader && !screen->get_param(screen, PIPE_CAP_NIR_IMAGES_AS_DEREF))
+ NIR_PASS(_, nir, gl_nir_lower_images, false);
char *msg = NULL;
- if (finalize_by_driver && screen->finalize_nir)
+ if (!is_draw_shader && finalize_by_driver && screen->finalize_nir)
msg = screen->finalize_nir(screen, nir);
return msg;
}
+/**
+ * Link a GLSL shader program. Called via glLinkProgram().
+ */
+void
+st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ unsigned int i;
+ bool spirv = false;
+
+ MESA_TRACE_FUNC();
+
+ _mesa_clear_shader_program_data(ctx, prog);
+
+ prog->data = _mesa_create_shader_program_data();
+
+ prog->data->LinkStatus = LINKING_SUCCESS;
+
+ for (i = 0; i < prog->NumShaders; i++) {
+ if (!prog->Shaders[i]->CompileStatus) {
+ linker_error(prog, "linking with uncompiled/unspecialized shader");
+ }
+
+ if (!i) {
+ spirv = (prog->Shaders[i]->spirv_data != NULL);
+ } else if (spirv && !prog->Shaders[i]->spirv_data) {
+ /* The GL_ARB_gl_spirv spec adds a new bullet point to the list of
+ * reasons LinkProgram can fail:
+ *
+ * "All the shader objects attached to <program> do not have the
+ * same value for the SPIR_V_BINARY_ARB state."
+ */
+ linker_error(prog,
+ "not all attached shaders have the same "
+ "SPIR_V_BINARY_ARB state");
+ }
+ }
+ prog->data->spirv = spirv;
+
+ if (prog->data->LinkStatus) {
+ if (!spirv)
+ link_shaders(ctx, prog);
+ else
+ _mesa_spirv_link_shaders(ctx, prog);
+ }
+
+ /* If LinkStatus is LINKING_SUCCESS, then reset sampler validated to true.
+ * Validation happens via the LinkShader call below. If LinkStatus is
+ * LINKING_SKIPPED, then SamplersValidated will have been restored from the
+ * shader cache.
+ */
+ if (prog->data->LinkStatus == LINKING_SUCCESS) {
+ prog->SamplersValidated = GL_TRUE;
+ }
+
+ if (prog->data->LinkStatus && !st_link_glsl_to_nir(ctx, prog)) {
+ prog->data->LinkStatus = LINKING_FAILURE;
+ }
+
+ if (prog->data->LinkStatus != LINKING_FAILURE)
+ _mesa_create_program_resource_hash(prog);
+
+ /* Return early if we are loading the shader from on-disk cache */
+ if (prog->data->LinkStatus == LINKING_SKIPPED)
+ return;
+
+ if (ctx->_Shader->Flags & GLSL_DUMP) {
+ if (!prog->data->LinkStatus) {
+ fprintf(stderr, "GLSL shader program %d failed to link\n", prog->Name);
+ }
+
+ if (prog->data->InfoLog && prog->data->InfoLog[0] != 0) {
+ fprintf(stderr, "GLSL shader program %d info log:\n", prog->Name);
+ fprintf(stderr, "%s\n", prog->data->InfoLog);
+ }
+ }
+
+#ifdef ENABLE_SHADER_CACHE
+ if (prog->data->LinkStatus)
+ shader_cache_write_program_metadata(ctx, prog);
+#endif
+}
+
} /* extern "C" */