author     Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>    2021-05-26 23:53:32 -0700
committer  Marge Bot <eric+marge@anholt.net>                           2021-06-07 22:34:42 +0000
commit     430d2206daef6ae16403b6b0ed7d5b28dd9e68bd (patch)
tree       c6d266ccada5aa1c49ae91c30d3bdea427873251 /src/compiler
parent     4b9e52e81820a4237902f7db2d24251b2419ae81 (diff)
compiler: Rename local_size to workgroup_size
Acked-by: Emma Anholt <emma@anholt.net>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Timur Kristóf <timur.kristof@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11190>
Diffstat (limited to 'src/compiler')
-rw-r--r--  src/compiler/glsl/linker.cpp                        28
-rw-r--r--  src/compiler/glsl/lower_cs_derived.cpp               4
-rw-r--r--  src/compiler/nir/nir_lower_system_values.c          24
-rw-r--r--  src/compiler/nir/nir_lower_variable_initializers.c   8
-rw-r--r--  src/compiler/nir/nir_opt_uniform_atomics.c           8
-rw-r--r--  src/compiler/nir/nir_print.c                        10
-rw-r--r--  src/compiler/nir/nir_range_analysis.c               24
-rw-r--r--  src/compiler/shader_info.h                           6
-rw-r--r--  src/compiler/spirv/spirv_to_nir.c                   30
9 files changed, 71 insertions, 71 deletions
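
For reference, a minimal sketch (not part of this commit) of how consumers read the renamed fields after this change. The struct below is a simplified stand-in for the cs sub-struct of shader_info (see the src/compiler/shader_info.h hunk), and the helper name total_workgroup_invocations() is hypothetical; the product it computes mirrors the pattern used in nir_zero_initialize_shared_memory() and nir_unsigned_upper_bound() in the diff below.

/* Sketch only: simplified stand-in for the cs sub-struct of shader_info. */
#include <stdbool.h>
#include <stdint.h>

struct cs_info {
   uint16_t workgroup_size[3];       /* was local_size[3] */
   uint16_t workgroup_size_hint[3];  /* was local_size_hint[3] */
   bool workgroup_size_variable;     /* was local_size_variable */
};

/* Hypothetical helper: total invocations in a fixed-size workgroup. */
static unsigned
total_workgroup_invocations(const struct cs_info *cs)
{
   if (cs->workgroup_size_variable)
      return 0; /* size only known at dispatch time (variable group size) */
   return (unsigned)cs->workgroup_size[0] *
          cs->workgroup_size[1] *
          cs->workgroup_size[2];
}
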
diff --git a/src/compiler/glsl/linker.cpp b/src/compiler/glsl/linker.cpp
index 4cc1da0bf27..c67f4b0ff1e 100644
--- a/src/compiler/glsl/linker.cpp
+++ b/src/compiler/glsl/linker.cpp
@@ -2230,9 +2230,9 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
return;
for (int i = 0; i < 3; i++)
- gl_prog->info.cs.local_size[i] = 0;
+ gl_prog->info.cs.workgroup_size[i] = 0;
- gl_prog->info.cs.local_size_variable = false;
+ gl_prog->info.cs.workgroup_size_variable = false;
gl_prog->info.cs.derivative_group = DERIVATIVE_GROUP_NONE;
@@ -2250,9 +2250,9 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
struct gl_shader *shader = shader_list[sh];
if (shader->info.Comp.LocalSize[0] != 0) {
- if (gl_prog->info.cs.local_size[0] != 0) {
+ if (gl_prog->info.cs.workgroup_size[0] != 0) {
for (int i = 0; i < 3; i++) {
- if (gl_prog->info.cs.local_size[i] !=
+ if (gl_prog->info.cs.workgroup_size[i] !=
shader->info.Comp.LocalSize[i]) {
linker_error(prog, "compute shader defined with conflicting "
"local sizes\n");
@@ -2261,11 +2261,11 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
}
}
for (int i = 0; i < 3; i++) {
- gl_prog->info.cs.local_size[i] =
+ gl_prog->info.cs.workgroup_size[i] =
shader->info.Comp.LocalSize[i];
}
} else if (shader->info.Comp.LocalSizeVariable) {
- if (gl_prog->info.cs.local_size[0] != 0) {
+ if (gl_prog->info.cs.workgroup_size[0] != 0) {
/* The ARB_compute_variable_group_size spec says:
*
* If one compute shader attached to a program declares a
@@ -2277,7 +2277,7 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
"variable local group size\n");
return;
}
- gl_prog->info.cs.local_size_variable = true;
+ gl_prog->info.cs.workgroup_size_variable = true;
}
enum gl_derivative_group group = shader->info.Comp.DerivativeGroup;
@@ -2296,30 +2296,30 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
* since we already know we're in the right type of shader program
* for doing it.
*/
- if (gl_prog->info.cs.local_size[0] == 0 &&
- !gl_prog->info.cs.local_size_variable) {
+ if (gl_prog->info.cs.workgroup_size[0] == 0 &&
+ !gl_prog->info.cs.workgroup_size_variable) {
linker_error(prog, "compute shader must contain a fixed or a variable "
"local group size\n");
return;
}
if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
- if (gl_prog->info.cs.local_size[0] % 2 != 0) {
+ if (gl_prog->info.cs.workgroup_size[0] % 2 != 0) {
linker_error(prog, "derivative_group_quadsNV must be used with a "
"local group size whose first dimension "
"is a multiple of 2\n");
return;
}
- if (gl_prog->info.cs.local_size[1] % 2 != 0) {
+ if (gl_prog->info.cs.workgroup_size[1] % 2 != 0) {
linker_error(prog, "derivative_group_quadsNV must be used with a local"
"group size whose second dimension "
"is a multiple of 2\n");
return;
}
} else if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
- if ((gl_prog->info.cs.local_size[0] *
- gl_prog->info.cs.local_size[1] *
- gl_prog->info.cs.local_size[2]) % 4 != 0) {
+ if ((gl_prog->info.cs.workgroup_size[0] *
+ gl_prog->info.cs.workgroup_size[1] *
+ gl_prog->info.cs.workgroup_size[2]) % 4 != 0) {
linker_error(prog, "derivative_group_linearNV must be used with a "
"local group size whose total number of invocations "
"is a multiple of 4\n");
diff --git a/src/compiler/glsl/lower_cs_derived.cpp b/src/compiler/glsl/lower_cs_derived.cpp
index 15534b0ac6b..69ee3990bb7 100644
--- a/src/compiler/glsl/lower_cs_derived.cpp
+++ b/src/compiler/glsl/lower_cs_derived.cpp
@@ -54,7 +54,7 @@ public:
explicit lower_cs_derived_visitor(gl_linked_shader *shader)
: progress(false),
shader(shader),
- local_size_variable(shader->Program->info.cs.local_size_variable),
+ local_size_variable(shader->Program->info.cs.workgroup_size_variable),
gl_WorkGroupSize(NULL),
gl_WorkGroupID(NULL),
gl_LocalInvocationID(NULL),
@@ -144,7 +144,7 @@ lower_cs_derived_visitor::find_sysvals()
ir_constant_data data;
memset(&data, 0, sizeof(data));
for (int i = 0; i < 3; i++)
- data.u[i] = shader->Program->info.cs.local_size[i];
+ data.u[i] = shader->Program->info.cs.workgroup_size[i];
gl_WorkGroupSize = new(shader) ir_constant(glsl_type::uvec3_type, &data);
}
}
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index 6d5d9d59617..20e9603cccd 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -320,10 +320,10 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ssa_def *x = nir_channel(b, ids, 0);
nir_ssa_def *y = nir_channel(b, ids, 1);
nir_ssa_def *z = nir_channel(b, ids, 2);
- unsigned size_x = b->shader->info.cs.local_size[0];
+ unsigned size_x = b->shader->info.cs.workgroup_size[0];
nir_ssa_def *size_x_imm;
- if (b->shader->info.cs.local_size_variable)
+ if (b->shader->info.cs.workgroup_size_variable)
size_x_imm = nir_channel(b, nir_load_local_group_size(b), 0);
else
size_x_imm = nir_imm_int(b, size_x);
@@ -371,7 +371,7 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ishl(b, x_bits_1n, one));
nir_ssa_def *i;
- if (!b->shader->info.cs.local_size_variable &&
+ if (!b->shader->info.cs.workgroup_size_variable &&
util_is_power_of_two_nonzero(size_x)) {
nir_ssa_def *log2_size_x = nir_imm_int(b, util_logbase2(size_x));
i = nir_ior(b, bits_01x, nir_ishl(b, y_bits_1n, log2_size_x));
@@ -405,9 +405,9 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
nir_ssa_def *size_x =
- nir_imm_int(b, b->shader->info.cs.local_size[0]);
+ nir_imm_int(b, b->shader->info.cs.workgroup_size[0]);
nir_ssa_def *size_y =
- nir_imm_int(b, b->shader->info.cs.local_size[1]);
+ nir_imm_int(b, b->shader->info.cs.workgroup_size[1]);
/* Because no hardware supports a local workgroup size greater than
* about 1K, this calculation can be done in 32-bit and can save some
@@ -425,7 +425,7 @@ lower_compute_system_value_instr(nir_builder *b,
}
case nir_intrinsic_load_local_group_size:
- if (b->shader->info.cs.local_size_variable) {
+ if (b->shader->info.cs.workgroup_size_variable) {
/* If the local work group size is variable it can't be lowered at
* this point. We do, however, have to make sure that the intrinsic
* is only 32-bit.
@@ -434,12 +434,12 @@ lower_compute_system_value_instr(nir_builder *b,
} else {
/* using a 32 bit constant is safe here as no device/driver needs more
* than 32 bits for the local size */
- nir_const_value local_size_const[3];
- memset(local_size_const, 0, sizeof(local_size_const));
- local_size_const[0].u32 = b->shader->info.cs.local_size[0];
- local_size_const[1].u32 = b->shader->info.cs.local_size[1];
- local_size_const[2].u32 = b->shader->info.cs.local_size[2];
- return nir_u2u(b, nir_build_imm(b, 3, 32, local_size_const), bit_size);
+ nir_const_value workgroup_size_const[3];
+ memset(workgroup_size_const, 0, sizeof(workgroup_size_const));
+ workgroup_size_const[0].u32 = b->shader->info.cs.workgroup_size[0];
+ workgroup_size_const[1].u32 = b->shader->info.cs.workgroup_size[1];
+ workgroup_size_const[2].u32 = b->shader->info.cs.workgroup_size[2];
+ return nir_u2u(b, nir_build_imm(b, 3, 32, workgroup_size_const), bit_size);
}
case nir_intrinsic_load_global_invocation_id_zero_base: {
diff --git a/src/compiler/nir/nir_lower_variable_initializers.c b/src/compiler/nir/nir_lower_variable_initializers.c
index b089cc15738..b26624f8cf6 100644
--- a/src/compiler/nir/nir_lower_variable_initializers.c
+++ b/src/compiler/nir/nir_lower_variable_initializers.c
@@ -154,10 +154,10 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
nir_builder_init(&b, nir_shader_get_entrypoint(shader));
b.cursor = nir_before_cf_list(&b.impl->body);
- assert(!shader->info.cs.local_size_variable);
- const unsigned local_count = shader->info.cs.local_size[0] *
- shader->info.cs.local_size[1] *
- shader->info.cs.local_size[2];
+ assert(!shader->info.cs.workgroup_size_variable);
+ const unsigned local_count = shader->info.cs.workgroup_size[0] *
+ shader->info.cs.workgroup_size[1] *
+ shader->info.cs.workgroup_size[2];
/* The initialization logic is simplified if we can always split the memory
* in full chunk_size units.
diff --git a/src/compiler/nir/nir_opt_uniform_atomics.c b/src/compiler/nir/nir_opt_uniform_atomics.c
index 83113284cc2..433803cb35e 100644
--- a/src/compiler/nir/nir_opt_uniform_atomics.c
+++ b/src/compiler/nir/nir_opt_uniform_atomics.c
@@ -169,7 +169,7 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
unsigned dims_needed = 0;
for (unsigned i = 0; i < 3; i++)
- dims_needed |= (shader->info.cs.local_size[i] > 1) << i;
+ dims_needed |= (shader->info.cs.workgroup_size[i] > 1) << i;
return (dims & dims_needed) == dims_needed || dims & 0x8;
}
@@ -306,9 +306,9 @@ nir_opt_uniform_atomics(nir_shader *shader)
/* A 1x1x1 workgroup only ever has one active lane, so there's no point in
* optimizing any atomics.
*/
- if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.local_size_variable &&
- shader->info.cs.local_size[0] == 1 && shader->info.cs.local_size[1] == 1 &&
- shader->info.cs.local_size[2] == 1)
+ if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.workgroup_size_variable &&
+ shader->info.cs.workgroup_size[0] == 1 && shader->info.cs.workgroup_size[1] == 1 &&
+ shader->info.cs.workgroup_size[2] == 1)
return false;
nir_foreach_function(function, shader) {
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index 4ceac077da1..363a8e4aeb1 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -1606,11 +1606,11 @@ nir_print_shader_annotated(nir_shader *shader, FILE *fp,
fprintf(fp, "label: %s\n", shader->info.label);
if (gl_shader_stage_is_compute(shader->info.stage)) {
- fprintf(fp, "local-size: %u, %u, %u%s\n",
- shader->info.cs.local_size[0],
- shader->info.cs.local_size[1],
- shader->info.cs.local_size[2],
- shader->info.cs.local_size_variable ? " (variable)" : "");
+ fprintf(fp, "workgroup-size: %u, %u, %u%s\n",
+ shader->info.cs.workgroup_size[0],
+ shader->info.cs.workgroup_size[1],
+ shader->info.cs.workgroup_size[2],
+ shader->info.cs.workgroup_size_variable ? " (variable)" : "");
fprintf(fp, "shared-size: %u\n", shader->info.shared_size);
}
diff --git a/src/compiler/nir/nir_range_analysis.c b/src/compiler/nir/nir_range_analysis.c
index 6936b49c9de..608e1c0dd28 100644
--- a/src/compiler/nir/nir_range_analysis.c
+++ b/src/compiler/nir/nir_range_analysis.c
@@ -1293,19 +1293,19 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
switch (intrin->intrinsic) {
case nir_intrinsic_load_local_invocation_index:
if (shader->info.stage != MESA_SHADER_COMPUTE ||
- shader->info.cs.local_size_variable) {
+ shader->info.cs.workgroup_size_variable) {
res = config->max_work_group_invocations - 1;
} else {
- res = (shader->info.cs.local_size[0] *
- shader->info.cs.local_size[1] *
- shader->info.cs.local_size[2]) - 1u;
+ res = (shader->info.cs.workgroup_size[0] *
+ shader->info.cs.workgroup_size[1] *
+ shader->info.cs.workgroup_size[2]) - 1u;
}
break;
case nir_intrinsic_load_local_invocation_id:
- if (shader->info.cs.local_size_variable)
+ if (shader->info.cs.workgroup_size_variable)
res = config->max_work_group_size[scalar.comp] - 1u;
else
- res = shader->info.cs.local_size[scalar.comp] - 1u;
+ res = shader->info.cs.workgroup_size[scalar.comp] - 1u;
break;
case nir_intrinsic_load_work_group_id:
res = config->max_work_group_count[scalar.comp] - 1u;
@@ -1314,11 +1314,11 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
res = config->max_work_group_count[scalar.comp];
break;
case nir_intrinsic_load_global_invocation_id:
- if (shader->info.cs.local_size_variable) {
+ if (shader->info.cs.workgroup_size_variable) {
res = mul_clamp(config->max_work_group_size[scalar.comp],
config->max_work_group_count[scalar.comp]) - 1u;
} else {
- res = (shader->info.cs.local_size[scalar.comp] *
+ res = (shader->info.cs.workgroup_size[scalar.comp] *
config->max_work_group_count[scalar.comp]) - 1u;
}
break;
@@ -1339,10 +1339,10 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
case nir_intrinsic_load_subgroup_id:
case nir_intrinsic_load_num_subgroups: {
uint32_t work_group_size = config->max_work_group_invocations;
- if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.local_size_variable) {
- work_group_size = shader->info.cs.local_size[0] *
- shader->info.cs.local_size[1] *
- shader->info.cs.local_size[2];
+ if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.workgroup_size_variable) {
+ work_group_size = shader->info.cs.workgroup_size[0] *
+ shader->info.cs.workgroup_size[1] *
+ shader->info.cs.workgroup_size[2];
}
res = DIV_ROUND_UP(work_group_size, config->min_subgroup_size);
if (intrin->intrinsic == nir_intrinsic_load_subgroup_id)
diff --git a/src/compiler/shader_info.h b/src/compiler/shader_info.h
index 3dfb2cc0b2b..b1800226c23 100644
--- a/src/compiler/shader_info.h
+++ b/src/compiler/shader_info.h
@@ -384,10 +384,10 @@ typedef struct shader_info {
} fs;
struct {
- uint16_t local_size[3];
- uint16_t local_size_hint[3];
+ uint16_t workgroup_size[3];
+ uint16_t workgroup_size_hint[3];
- bool local_size_variable:1;
+ bool workgroup_size_variable:1;
uint8_t user_data_components_amd:3;
/*
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index aa2fdf529dd..d94577d23c8 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -4796,16 +4796,16 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
case SpvExecutionModeLocalSizeHint:
vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);
- b->shader->info.cs.local_size_hint[0] = mode->operands[0];
- b->shader->info.cs.local_size_hint[1] = mode->operands[1];
- b->shader->info.cs.local_size_hint[2] = mode->operands[2];
+ b->shader->info.cs.workgroup_size_hint[0] = mode->operands[0];
+ b->shader->info.cs.workgroup_size_hint[1] = mode->operands[1];
+ b->shader->info.cs.workgroup_size_hint[2] = mode->operands[2];
break;
case SpvExecutionModeLocalSize:
vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
- b->shader->info.cs.local_size[0] = mode->operands[0];
- b->shader->info.cs.local_size[1] = mode->operands[1];
- b->shader->info.cs.local_size[2] = mode->operands[2];
+ b->shader->info.cs.workgroup_size[0] = mode->operands[0];
+ b->shader->info.cs.workgroup_size[1] = mode->operands[1];
+ b->shader->info.cs.workgroup_size[2] = mode->operands[2];
break;
case SpvExecutionModeOutputVertices:
@@ -5016,16 +5016,16 @@ vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_poin
switch (mode->exec_mode) {
case SpvExecutionModeLocalSizeId:
- b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
- b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
- b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
+ b->shader->info.cs.workgroup_size[0] = vtn_constant_uint(b, mode->operands[0]);
+ b->shader->info.cs.workgroup_size[1] = vtn_constant_uint(b, mode->operands[1]);
+ b->shader->info.cs.workgroup_size[2] = vtn_constant_uint(b, mode->operands[2]);
break;
case SpvExecutionModeLocalSizeHintId:
vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);
- b->shader->info.cs.local_size_hint[0] = vtn_constant_uint(b, mode->operands[0]);
- b->shader->info.cs.local_size_hint[1] = vtn_constant_uint(b, mode->operands[1]);
- b->shader->info.cs.local_size_hint[2] = vtn_constant_uint(b, mode->operands[2]);
+ b->shader->info.cs.workgroup_size_hint[0] = vtn_constant_uint(b, mode->operands[0]);
+ b->shader->info.cs.workgroup_size_hint[1] = vtn_constant_uint(b, mode->operands[1]);
+ b->shader->info.cs.workgroup_size_hint[2] = vtn_constant_uint(b, mode->operands[2]);
break;
default:
@@ -5993,9 +5993,9 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
nir_const_value *const_size =
b->workgroup_size_builtin->constant->values;
- b->shader->info.cs.local_size[0] = const_size[0].u32;
- b->shader->info.cs.local_size[1] = const_size[1].u32;
- b->shader->info.cs.local_size[2] = const_size[2].u32;
+ b->shader->info.cs.workgroup_size[0] = const_size[0].u32;
+ b->shader->info.cs.workgroup_size[1] = const_size[1].u32;
+ b->shader->info.cs.workgroup_size[2] = const_size[2].u32;
}
/* Set types on all vtn_values */