Diffstat (limited to 'src/compiler/nir')
-rw-r--r--  src/compiler/nir/nir_lower_system_values.c         | 18 +++++++++---------
-rw-r--r--  src/compiler/nir/nir_lower_variable_initializers.c |  8 ++++----
-rw-r--r--  src/compiler/nir/nir_opt_uniform_atomics.c         |  9 +++++----
-rw-r--r--  src/compiler/nir/nir_print.c                       | 10 +++++-----
-rw-r--r--  src/compiler/nir/nir_range_analysis.c              | 26 +++++++++++++-------------
5 files changed, 36 insertions, 35 deletions
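
Every hunk below is the same mechanical substitution, so it helps to see the data-structure move driving it: the workgroup-size fields leave the compute-only info.cs sub-struct and become stage-generic, so any stage for which gl_shader_stage_uses_workgroup() holds (compute, and eventually task/mesh) can share them. A minimal sketch of the assumed layout; field types and surrounding fields are illustrative, not copied from shader_info.h:

    /* Assumed shape of the shader_info change; illustrative only. */
    typedef struct shader_info {
       /* ...stage-generic fields... */

       /* Moved up from the cs sub-struct so every workgroup-using
        * stage can read them: */
       uint16_t workgroup_size[3];
       bool workgroup_size_variable;

       union {
          struct {
             /* workgroup_size[] and workgroup_size_variable
              * formerly lived here */
             /* ...remaining compute-only fields... */
          } cs;
          /* ...other per-stage sub-structs... */
       };
    } shader_info;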
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index 732b8e80a6b..60443050407 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -320,10 +320,10 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ssa_def *x = nir_channel(b, ids, 0);
nir_ssa_def *y = nir_channel(b, ids, 1);
nir_ssa_def *z = nir_channel(b, ids, 2);
- unsigned size_x = b->shader->info.cs.workgroup_size[0];
+ unsigned size_x = b->shader->info.workgroup_size[0];
nir_ssa_def *size_x_imm;
- if (b->shader->info.cs.workgroup_size_variable)
+ if (b->shader->info.workgroup_size_variable)
size_x_imm = nir_channel(b, nir_load_workgroup_size(b), 0);
else
size_x_imm = nir_imm_int(b, size_x);
@@ -371,7 +371,7 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ishl(b, x_bits_1n, one));
nir_ssa_def *i;
- if (!b->shader->info.cs.workgroup_size_variable &&
+ if (!b->shader->info.workgroup_size_variable &&
util_is_power_of_two_nonzero(size_x)) {
nir_ssa_def *log2_size_x = nir_imm_int(b, util_logbase2(size_x));
i = nir_ior(b, bits_01x, nir_ishl(b, y_bits_1n, log2_size_x));
@@ -405,9 +405,9 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
nir_ssa_def *size_x =
- nir_imm_int(b, b->shader->info.cs.workgroup_size[0]);
+ nir_imm_int(b, b->shader->info.workgroup_size[0]);
nir_ssa_def *size_y =
- nir_imm_int(b, b->shader->info.cs.workgroup_size[1]);
+ nir_imm_int(b, b->shader->info.workgroup_size[1]);
/* Because no hardware supports a local workgroup size greater than
* about 1K, this calculation can be done in 32-bit and can save some
@@ -425,7 +425,7 @@ lower_compute_system_value_instr(nir_builder *b,
}
case nir_intrinsic_load_workgroup_size:
- if (b->shader->info.cs.workgroup_size_variable) {
+ if (b->shader->info.workgroup_size_variable) {
/* If the local work group size is variable it can't be lowered at
* this point. We do, however, have to make sure that the intrinsic
* is only 32-bit.
@@ -436,9 +436,9 @@ lower_compute_system_value_instr(nir_builder *b,
* than 32 bits for the local size */
nir_const_value workgroup_size_const[3];
memset(workgroup_size_const, 0, sizeof(workgroup_size_const));
- workgroup_size_const[0].u32 = b->shader->info.cs.workgroup_size[0];
- workgroup_size_const[1].u32 = b->shader->info.cs.workgroup_size[1];
- workgroup_size_const[2].u32 = b->shader->info.cs.workgroup_size[2];
+ workgroup_size_const[0].u32 = b->shader->info.workgroup_size[0];
+ workgroup_size_const[1].u32 = b->shader->info.workgroup_size[1];
+ workgroup_size_const[2].u32 = b->shader->info.workgroup_size[2];
return nir_u2u(b, nir_build_imm(b, 3, 32, workgroup_size_const), bit_size);
}
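
The power-of-two branch above is a classic strength reduction: when size_x is a compile-time power of two, y * size_x + x collapses to a shift and an OR, because x < size_x guarantees the low bits never collide. A scalar sketch of the idea; flatten_local_id is a hypothetical helper, not the NIR builder code, which applies the same trick inside its quad-swizzle computation:

    #include <stdint.h>

    static uint32_t
    flatten_local_id(uint32_t x, uint32_t y, uint32_t size_x)
    {
       /* Requires x < size_x, which holds for local invocation IDs. */
       if (size_x != 0 && (size_x & (size_x - 1)) == 0) {
          /* GCC/Clang builtin; plays the role of util_logbase2(). */
          unsigned log2_size_x = __builtin_ctz(size_x);
          return x | (y << log2_size_x);  /* multiply-add becomes shift-or */
       }
       return x + y * size_x;             /* generic path */
    }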
diff --git a/src/compiler/nir/nir_lower_variable_initializers.c b/src/compiler/nir/nir_lower_variable_initializers.c
index b26624f8cf6..ba0e7ba7be6 100644
--- a/src/compiler/nir/nir_lower_variable_initializers.c
+++ b/src/compiler/nir/nir_lower_variable_initializers.c
@@ -154,10 +154,10 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
nir_builder_init(&b, nir_shader_get_entrypoint(shader));
b.cursor = nir_before_cf_list(&b.impl->body);
- assert(!shader->info.cs.workgroup_size_variable);
- const unsigned local_count = shader->info.cs.workgroup_size[0] *
- shader->info.cs.workgroup_size[1] *
- shader->info.cs.workgroup_size[2];
+ assert(!shader->info.workgroup_size_variable);
+ const unsigned local_count = shader->info.workgroup_size[0] *
+ shader->info.workgroup_size[1] *
+ shader->info.workgroup_size[2];
/* The initialization logic is simplified if we can always split the memory
* in full chunk_size units.
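
The assert-then-multiply pattern above exists because the pass divides the shared-memory clearing work across the whole workgroup, which only works when the invocation count is known at compile time. A minimal sketch of that count under the same assumption; the helper name is invented:

    #include <assert.h>
    #include <stdbool.h>

    static unsigned
    workgroup_invocation_count(const unsigned workgroup_size[3],
                               bool workgroup_size_variable)
    {
       /* A variable workgroup size has no compile-time count to
        * divide the zero-initialization work by. */
       assert(!workgroup_size_variable);
       return workgroup_size[0] * workgroup_size[1] * workgroup_size[2];
    }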
diff --git a/src/compiler/nir/nir_opt_uniform_atomics.c b/src/compiler/nir/nir_opt_uniform_atomics.c
index 433803cb35e..2c64e3198a4 100644
--- a/src/compiler/nir/nir_opt_uniform_atomics.c
+++ b/src/compiler/nir/nir_opt_uniform_atomics.c
@@ -169,7 +169,7 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
unsigned dims_needed = 0;
for (unsigned i = 0; i < 3; i++)
- dims_needed |= (shader->info.cs.workgroup_size[i] > 1) << i;
+ dims_needed |= (shader->info.workgroup_size[i] > 1) << i;
return (dims & dims_needed) == dims_needed || dims & 0x8;
}
@@ -306,9 +306,10 @@ nir_opt_uniform_atomics(nir_shader *shader)
/* A 1x1x1 workgroup only ever has one active lane, so there's no point in
* optimizing any atomics.
*/
- if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.workgroup_size_variable &&
- shader->info.cs.workgroup_size[0] == 1 && shader->info.cs.workgroup_size[1] == 1 &&
- shader->info.cs.workgroup_size[2] == 1)
+ if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+ !shader->info.workgroup_size_variable &&
+ shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
+ shader->info.workgroup_size[2] == 1)
return false;
nir_foreach_function(function, shader) {
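
The dims_needed loop above builds a bitmask of the axes along which the workgroup actually extends; an earlier optimization only counts as covering the workgroup if it handled every varying axis (or set the catch-all 0x8 bit). The same mask logic explains the 1x1x1 early-out in the second hunk: no varying axis means a single active lane and nothing to optimize. A standalone sketch with an invented helper name:

    static unsigned
    varying_dims_mask(const unsigned workgroup_size[3])
    {
       unsigned dims_needed = 0;
       for (unsigned i = 0; i < 3; i++)
          dims_needed |= (workgroup_size[i] > 1) << i;  /* bit i: axis i varies */
       return dims_needed;  /* 8x8x1 -> 0x3, 64x1x1 -> 0x1, 1x1x1 -> 0x0 */
    }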
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index 363a8e4aeb1..649d1de224d 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -1605,12 +1605,12 @@ nir_print_shader_annotated(nir_shader *shader, FILE *fp,
if (shader->info.label)
fprintf(fp, "label: %s\n", shader->info.label);
- if (gl_shader_stage_is_compute(shader->info.stage)) {
+ if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
fprintf(fp, "workgroup-size: %u, %u, %u%s\n",
- shader->info.cs.workgroup_size[0],
- shader->info.cs.workgroup_size[1],
- shader->info.cs.workgroup_size[2],
- shader->info.cs.workgroup_size_variable ? " (variable)" : "");
+ shader->info.workgroup_size[0],
+ shader->info.workgroup_size[1],
+ shader->info.workgroup_size[2],
+ shader->info.workgroup_size_variable ? " (variable)" : "");
fprintf(fp, "shared-size: %u\n", shader->info.shared_size);
}
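
With the guard widened from gl_shader_stage_is_compute() to gl_shader_stage_uses_workgroup(), the same header now also prints for task/mesh-style stages, not just compute. For a fixed 8x8x1 workgroup the annotated dump would contain something like the following (values invented for illustration):

    workgroup-size: 8, 8, 1
    shared-size: 4096

A shader with a variable workgroup size prints the placeholder sizes with a " (variable)" suffix instead.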
diff --git a/src/compiler/nir/nir_range_analysis.c b/src/compiler/nir/nir_range_analysis.c
index 18ac161d5e0..501084f14f5 100644
--- a/src/compiler/nir/nir_range_analysis.c
+++ b/src/compiler/nir/nir_range_analysis.c
@@ -1292,20 +1292,19 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
switch (intrin->intrinsic) {
case nir_intrinsic_load_local_invocation_index:
- if (shader->info.stage != MESA_SHADER_COMPUTE ||
- shader->info.cs.workgroup_size_variable) {
+ if (shader->info.workgroup_size_variable) {
res = config->max_workgroup_invocations - 1;
} else {
- res = (shader->info.cs.workgroup_size[0] *
- shader->info.cs.workgroup_size[1] *
- shader->info.cs.workgroup_size[2]) - 1u;
+ res = (shader->info.workgroup_size[0] *
+ shader->info.workgroup_size[1] *
+ shader->info.workgroup_size[2]) - 1u;
}
break;
case nir_intrinsic_load_local_invocation_id:
- if (shader->info.cs.workgroup_size_variable)
+ if (shader->info.workgroup_size_variable)
res = config->max_workgroup_size[scalar.comp] - 1u;
else
- res = shader->info.cs.workgroup_size[scalar.comp] - 1u;
+ res = shader->info.workgroup_size[scalar.comp] - 1u;
break;
case nir_intrinsic_load_workgroup_id:
res = config->max_workgroup_count[scalar.comp] - 1u;
@@ -1314,11 +1313,11 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
res = config->max_workgroup_count[scalar.comp];
break;
case nir_intrinsic_load_global_invocation_id:
- if (shader->info.cs.workgroup_size_variable) {
+ if (shader->info.workgroup_size_variable) {
res = mul_clamp(config->max_workgroup_size[scalar.comp],
config->max_workgroup_count[scalar.comp]) - 1u;
} else {
- res = (shader->info.cs.workgroup_size[scalar.comp] *
+ res = (shader->info.workgroup_size[scalar.comp] *
config->max_workgroup_count[scalar.comp]) - 1u;
}
break;
@@ -1339,10 +1338,11 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
case nir_intrinsic_load_subgroup_id:
case nir_intrinsic_load_num_subgroups: {
uint32_t workgroup_size = config->max_workgroup_invocations;
- if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.workgroup_size_variable) {
- workgroup_size = shader->info.cs.workgroup_size[0] *
- shader->info.cs.workgroup_size[1] *
- shader->info.cs.workgroup_size[2];
+ if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+ !shader->info.workgroup_size_variable) {
+ workgroup_size = shader->info.workgroup_size[0] *
+ shader->info.workgroup_size[1] *
+ shader->info.workgroup_size[2];
}
res = DIV_ROUND_UP(workgroup_size, config->min_subgroup_size);
if (intrin->intrinsic == nir_intrinsic_load_subgroup_id)
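
The final hunk bounds load_num_subgroups by dividing the largest possible workgroup by the smallest possible subgroup, falling back to the driver cap max_workgroup_invocations when the size is variable or the stage has no workgroup; load_subgroup_id is then capped one below that count. A scalar sketch of the bound, with an invented helper name; DIV_ROUND_UP is the usual (a + b - 1) / b:

    #include <stdint.h>

    static uint32_t
    max_num_subgroups(uint32_t max_workgroup_invocations,
                      uint32_t min_subgroup_size)
    {
       return (max_workgroup_invocations + min_subgroup_size - 1) /
              min_subgroup_size;
    }

    /* e.g. a fixed 8x8x1 workgroup with min_subgroup_size == 32:
     *   max_num_subgroups(64, 32) == 2   (bound on num_subgroups)
     *   so load_subgroup_id is at most 1.                         */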