Diffstat (limited to 'src')
-rw-r--r--  src/amd/compiler/aco_instruction_selection_setup.cpp | 6
-rw-r--r--  src/amd/vulkan/radv_meta_buffer.c | 20
-rw-r--r--  src/amd/vulkan/radv_meta_bufimage.c | 70
-rw-r--r--  src/amd/vulkan/radv_meta_clear.c | 10
-rw-r--r--  src/amd/vulkan/radv_meta_copy_vrs_htile.c | 10
-rw-r--r--  src/amd/vulkan/radv_meta_dcc_retile.c | 10
-rw-r--r--  src/amd/vulkan/radv_meta_fast_clear.c | 10
-rw-r--r--  src/amd/vulkan/radv_meta_fmask_expand.c | 10
-rw-r--r--  src/amd/vulkan/radv_meta_resolve_cs.c | 20
-rw-r--r--  src/amd/vulkan/radv_nir_to_llvm.c | 2
-rw-r--r--  src/amd/vulkan/radv_pipeline.c | 6
-rw-r--r--  src/amd/vulkan/radv_query.c | 40
-rw-r--r--  src/amd/vulkan/radv_shader_info.c | 2
-rw-r--r--  src/broadcom/compiler/nir_to_vir.c | 12
-rw-r--r--  src/broadcom/compiler/vir.c | 6
-rw-r--r--  src/compiler/glsl/linker.cpp | 28
-rw-r--r--  src/compiler/glsl/lower_cs_derived.cpp | 4
-rw-r--r--  src/compiler/nir/nir_lower_system_values.c | 18
-rw-r--r--  src/compiler/nir/nir_lower_variable_initializers.c | 8
-rw-r--r--  src/compiler/nir/nir_opt_uniform_atomics.c | 9
-rw-r--r--  src/compiler/nir/nir_print.c | 10
-rw-r--r--  src/compiler/nir/nir_range_analysis.c | 26
-rw-r--r--  src/compiler/shader_enums.h | 9
-rw-r--r--  src/compiler/shader_info.h | 12
-rw-r--r--  src/compiler/spirv/spirv_to_nir.c | 20
-rw-r--r--  src/freedreno/ir3/ir3_compiler_nir.c | 8
-rw-r--r--  src/gallium/auxiliary/nir/nir_to_tgsi_info.c | 6
-rw-r--r--  src/gallium/auxiliary/nir/tgsi_to_nir.c | 14
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_ureg.c | 6
-rw-r--r--  src/gallium/drivers/freedreno/a5xx/fd5_compute.c | 2
-rw-r--r--  src/gallium/drivers/freedreno/a6xx/fd6_compute.c | 2
-rw-r--r--  src/gallium/drivers/iris/iris_program.c | 2
-rw-r--r--  src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp | 6
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader.c | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm.c | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_shaderlib_nir.c | 12
-rw-r--r--  src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c | 6
-rw-r--r--  src/gallium/drivers/zink/zink_program.c | 6
-rw-r--r--  src/gallium/frontends/clover/nir/invocation.cpp | 8
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_execute.c | 6
-rw-r--r--  src/intel/compiler/brw_fs.cpp | 16
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp | 8
-rw-r--r--  src/intel/compiler/brw_nir_lower_cs_intrinsics.c | 30
-rw-r--r--  src/intel/compiler/brw_nir_rt.c | 2
-rw-r--r--  src/mesa/main/compute.c | 6
-rw-r--r--  src/mesa/main/shaderapi.c | 2
-rw-r--r--  src/mesa/state_tracker/st_cb_compute.c | 2
-rw-r--r--  src/microsoft/clc/clc_compiler.c | 22
-rw-r--r--  src/microsoft/clc/clc_nir.c | 6
-rw-r--r--  src/microsoft/compiler/nir_to_dxil.c | 6
50 files changed, 293 insertions, 275 deletions
diff --git a/src/amd/compiler/aco_instruction_selection_setup.cpp b/src/amd/compiler/aco_instruction_selection_setup.cpp
index f60937fc9c0..15f9ce33b84 100644
--- a/src/amd/compiler/aco_instruction_selection_setup.cpp
+++ b/src/amd/compiler/aco_instruction_selection_setup.cpp
@@ -1078,9 +1078,9 @@ setup_isel_context(Program* program,
program->workgroup_size = program->wave_size;
} else if (program->stage == compute_cs) {
/* CS sets the workgroup size explicitly */
- program->workgroup_size = shaders[0]->info.cs.workgroup_size[0] *
- shaders[0]->info.cs.workgroup_size[1] *
- shaders[0]->info.cs.workgroup_size[2];
+ program->workgroup_size = shaders[0]->info.workgroup_size[0] *
+ shaders[0]->info.workgroup_size[1] *
+ shaders[0]->info.workgroup_size[2];
} else if (program->stage.hw == HWStage::ES || program->stage == geometry_gs) {
/* Unmerged ESGS operate in workgroups if on-chip GS (LDS rings) are enabled on GFX7-8 (not implemented in Mesa) */
program->workgroup_size = program->wave_size;
diff --git a/src/amd/vulkan/radv_meta_buffer.c b/src/amd/vulkan/radv_meta_buffer.c
index 5f6e0afb829..b7a1e239bbc 100644
--- a/src/amd/vulkan/radv_meta_buffer.c
+++ b/src/amd/vulkan/radv_meta_buffer.c
@@ -8,15 +8,15 @@ static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_buffer_fill");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -38,15 +38,15 @@ static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_buffer_copy");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_bufimage.c b/src/amd/vulkan/radv_meta_bufimage.c
index d472930932a..36d7637a82a 100644
--- a/src/amd/vulkan/radv_meta_bufimage.c
+++ b/src/amd/vulkan/radv_meta_bufimage.c
@@ -40,9 +40,9 @@ build_nir_itob_compute_shader(struct radv_device *dev, bool is_3d)
const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_BUF, false, GLSL_TYPE_FLOAT);
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL,
is_3d ? "meta_itob_cs_3d" : "meta_itob_cs");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, sampler_type, "s_tex");
input_img->data.descriptor_set = 0;
input_img->data.binding = 0;
@@ -54,8 +54,8 @@ build_nir_itob_compute_shader(struct radv_device *dev, bool is_3d)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -227,9 +227,9 @@ build_nir_btoi_compute_shader(struct radv_device *dev, bool is_3d)
const struct glsl_type *img_type = glsl_image_type(dim, false, GLSL_TYPE_FLOAT);
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL,
is_3d ? "meta_btoi_cs_3d" : "meta_btoi_cs");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, buf_type, "s_tex");
input_img->data.descriptor_set = 0;
input_img->data.binding = 0;
@@ -241,8 +241,8 @@ build_nir_btoi_compute_shader(struct radv_device *dev, bool is_3d)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -409,9 +409,9 @@ build_nir_btoi_r32g32b32_compute_shader(struct radv_device *dev)
const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_BUF, false, GLSL_TYPE_FLOAT);
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_btoi_r32g32b32_cs");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, buf_type, "s_tex");
input_img->data.descriptor_set = 0;
input_img->data.binding = 0;
@@ -423,8 +423,8 @@ build_nir_btoi_r32g32b32_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -571,9 +571,9 @@ build_nir_itoi_compute_shader(struct radv_device *dev, bool is_3d, int samples)
const struct glsl_type *img_type = glsl_image_type(dim, false, GLSL_TYPE_FLOAT);
nir_builder b = nir_builder_init_simple_shader(
MESA_SHADER_COMPUTE, NULL, is_3d ? "meta_itoi_cs_3d-%d" : "meta_itoi_cs-%d", samples);
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, buf_type, "s_tex");
input_img->data.descriptor_set = 0;
input_img->data.binding = 0;
@@ -585,8 +585,8 @@ build_nir_itoi_compute_shader(struct radv_device *dev, bool is_3d, int samples)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -772,9 +772,9 @@ build_nir_itoi_r32g32b32_compute_shader(struct radv_device *dev)
const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_BUF, false, GLSL_TYPE_FLOAT);
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_itoi_r32g32b32_cs");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, type, "input_img");
input_img->data.descriptor_set = 0;
input_img->data.binding = 0;
@@ -787,8 +787,8 @@ build_nir_itoi_r32g32b32_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -942,9 +942,9 @@ build_nir_cleari_compute_shader(struct radv_device *dev, bool is_3d, int samples
const struct glsl_type *img_type = glsl_image_type(dim, false, GLSL_TYPE_FLOAT);
nir_builder b = nir_builder_init_simple_shader(
MESA_SHADER_COMPUTE, NULL, is_3d ? "meta_cleari_cs_3d-%d" : "meta_cleari_cs-%d", samples);
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform, img_type, "out_img");
output_img->data.descriptor_set = 0;
@@ -953,8 +953,8 @@ build_nir_cleari_compute_shader(struct radv_device *dev, bool is_3d, int samples
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -1108,9 +1108,9 @@ build_nir_cleari_r32g32b32_compute_shader(struct radv_device *dev)
const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_BUF, false, GLSL_TYPE_FLOAT);
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_cleari_r32g32b32_cs");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform, img_type, "out_img");
output_img->data.descriptor_set = 0;
@@ -1119,8 +1119,8 @@ build_nir_cleari_r32g32b32_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_clear.c b/src/amd/vulkan/radv_meta_clear.c
index 44016c99e17..2298bc0a4ad 100644
--- a/src/amd/vulkan/radv_meta_clear.c
+++ b/src/amd/vulkan/radv_meta_clear.c
@@ -1002,15 +1002,15 @@ build_clear_htile_mask_shader()
{
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_clear_htile_mask");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_copy_vrs_htile.c b/src/amd/vulkan/radv_meta_copy_vrs_htile.c
index f8f15f89bf1..adf98c71355 100644
--- a/src/amd/vulkan/radv_meta_copy_vrs_htile.c
+++ b/src/amd/vulkan/radv_meta_copy_vrs_htile.c
@@ -45,15 +45,15 @@ static nir_shader *
build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf)
{
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_copy_vrs_htile");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
/* Get coordinates. */
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_dcc_retile.c b/src/amd/vulkan/radv_meta_dcc_retile.c
index 8dba826e13f..6f598176a2a 100644
--- a/src/amd/vulkan/radv_meta_dcc_retile.c
+++ b/src/amd/vulkan/radv_meta_dcc_retile.c
@@ -36,8 +36,8 @@ get_global_ids(nir_builder *b, unsigned num_components)
nir_ssa_def *block_ids = nir_channels(b, nir_load_workgroup_id(b, 32), mask);
nir_ssa_def *block_size = nir_channels(
b,
- nir_imm_ivec4(b, b->shader->info.cs.workgroup_size[0], b->shader->info.cs.workgroup_size[1],
- b->shader->info.cs.workgroup_size[2], 0),
+ nir_imm_ivec4(b, b->shader->info.workgroup_size[0], b->shader->info.workgroup_size[1],
+ b->shader->info.workgroup_size[2], 0),
mask);
return nir_iadd(b, nir_imul(b, block_ids, block_size), local_ids);
@@ -49,9 +49,9 @@ build_dcc_retile_compute_shader(struct radv_device *dev, struct radeon_surf *sur
const struct glsl_type *buf_type = glsl_image_type(GLSL_SAMPLER_DIM_BUF, false, GLSL_TYPE_UINT);
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "dcc_retile_compute");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_ssa_def *src_dcc_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
nir_ssa_def *src_dcc_pitch = nir_channels(&b, src_dcc_size, 1);
diff --git a/src/amd/vulkan/radv_meta_fast_clear.c b/src/amd/vulkan/radv_meta_fast_clear.c
index f7486ae6c67..64b2910458b 100644
--- a/src/amd/vulkan/radv_meta_fast_clear.c
+++ b/src/amd/vulkan/radv_meta_fast_clear.c
@@ -37,9 +37,9 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "dcc_decompress_compute");
/* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
- b.shader->info.cs.workgroup_size[0] = 16;
- b.shader->info.cs.workgroup_size[1] = 16;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 16;
+ b.shader->info.workgroup_size[1] = 16;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, img_type, "in_img");
input_img->data.descriptor_set = 0;
input_img->data.binding = 0;
@@ -51,8 +51,8 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
diff --git a/src/amd/vulkan/radv_meta_fmask_expand.c b/src/amd/vulkan/radv_meta_fmask_expand.c
index 5bf7f26e659..d0c3e149ffc 100644
--- a/src/amd/vulkan/radv_meta_fmask_expand.c
+++ b/src/amd/vulkan/radv_meta_fmask_expand.c
@@ -35,9 +35,9 @@ build_fmask_expand_compute_shader(struct radv_device *device, int samples)
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_fmask_expand_cs-%d", samples);
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, type, "s_tex");
input_img->data.descriptor_set = 0;
@@ -51,8 +51,8 @@ build_fmask_expand_compute_shader(struct radv_device *device, int samples)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
nir_ssa_def *layer_id = nir_channel(&b, wg_id, 2);
diff --git a/src/amd/vulkan/radv_meta_resolve_cs.c b/src/amd/vulkan/radv_meta_resolve_cs.c
index 01df68a58a8..95e8f6b23e9 100644
--- a/src/amd/vulkan/radv_meta_resolve_cs.c
+++ b/src/amd/vulkan/radv_meta_resolve_cs.c
@@ -67,9 +67,9 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_resolve_cs-%d-%s", samples,
is_integer ? "int" : (is_srgb ? "srgb" : "float"));
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, sampler_type, "s_tex");
input_img->data.descriptor_set = 0;
@@ -81,8 +81,8 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
@@ -137,9 +137,9 @@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples,
nir_builder b = nir_builder_init_simple_shader(
MESA_SHADER_COMPUTE, NULL, "meta_resolve_cs_%s-%s-%d",
index == DEPTH_RESOLVE ? "depth" : "stencil", get_resolve_mode_str(resolve_mode), samples);
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, sampler_type, "s_tex");
input_img->data.descriptor_set = 0;
@@ -151,8 +151,8 @@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples,
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
nir_ssa_def *layer_id = nir_channel(&b, wg_id, 2);
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index 93e4fee8f2f..b2c22a37b1e 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -2935,7 +2935,7 @@ radv_nir_get_max_workgroup_size(enum chip_class chip_class, gl_shader_stage stag
const unsigned backup_sizes[] = {chip_class >= GFX9 ? 128 : 64, 1, 1};
unsigned sizes[3];
for (unsigned i = 0; i < 3; i++)
- sizes[i] = nir ? nir->info.cs.workgroup_size[i] : backup_sizes[i];
+ sizes[i] = nir ? nir->info.workgroup_size[i] : backup_sizes[i];
return radv_get_max_workgroup_size(chip_class, stage, sizes);
}
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 28afe212046..d5ee79bc627 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -2791,9 +2791,9 @@ radv_fill_shader_keys(struct radv_device *device, struct radv_shader_variant_key
if (!subgroup_size)
subgroup_size = device->physical_device->cs_wave_size;
- unsigned local_size = nir[MESA_SHADER_COMPUTE]->info.cs.workgroup_size[0] *
- nir[MESA_SHADER_COMPUTE]->info.cs.workgroup_size[1] *
- nir[MESA_SHADER_COMPUTE]->info.cs.workgroup_size[2];
+ unsigned local_size = nir[MESA_SHADER_COMPUTE]->info.workgroup_size[0] *
+ nir[MESA_SHADER_COMPUTE]->info.workgroup_size[1] *
+ nir[MESA_SHADER_COMPUTE]->info.workgroup_size[2];
/* Games don't always request full subgroups when they should,
* which can cause bugs if cswave32 is enabled.
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 5efb7dfb739..583d534d1b8 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -130,9 +130,9 @@ build_occlusion_query_shader(struct radv_device *device)
* }
*/
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "occlusion_query");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
nir_variable *outer_counter =
@@ -151,8 +151,8 @@ build_occlusion_query_shader(struct radv_device *device)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
global_id = nir_channel(&b, global_id, 0); // We only care about x here.
@@ -275,9 +275,9 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
*/
nir_builder b =
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "pipeline_statistics_query");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
nir_variable *output_offset =
nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
@@ -292,8 +292,8 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
global_id = nir_channel(&b, global_id, 0); // We only care about x here.
@@ -421,9 +421,9 @@ build_tfb_query_shader(struct radv_device *device)
* }
*/
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "tfb_query");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
/* Create and initialize local variables. */
nir_variable *result =
@@ -443,8 +443,8 @@ build_tfb_query_shader(struct radv_device *device)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
global_id = nir_channel(&b, global_id, 0); // We only care about x here.
@@ -552,9 +552,9 @@ build_timestamp_query_shader(struct radv_device *device)
* }
*/
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "timestamp_query");
- b.shader->info.cs.workgroup_size[0] = 64;
- b.shader->info.cs.workgroup_size[1] = 1;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 64;
+ b.shader->info.workgroup_size[1] = 1;
+ b.shader->info.workgroup_size[2] = 1;
/* Create and initialize local variables. */
nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
@@ -573,8 +573,8 @@ build_timestamp_query_shader(struct radv_device *device)
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
nir_ssa_def *block_size =
- nir_imm_ivec4(&b, b.shader->info.cs.workgroup_size[0], b.shader->info.cs.workgroup_size[1],
- b.shader->info.cs.workgroup_size[2], 0);
+ nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+ b.shader->info.workgroup_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
global_id = nir_channel(&b, global_id, 0); // We only care about x here.
diff --git a/src/amd/vulkan/radv_shader_info.c b/src/amd/vulkan/radv_shader_info.c
index 6c77e02e383..ce260b4648f 100644
--- a/src/amd/vulkan/radv_shader_info.c
+++ b/src/amd/vulkan/radv_shader_info.c
@@ -649,7 +649,7 @@ radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *n
switch (nir->info.stage) {
case MESA_SHADER_COMPUTE:
for (int i = 0; i < 3; ++i)
- info->cs.block_size[i] = nir->info.cs.workgroup_size[i];
+ info->cs.block_size[i] = nir->info.workgroup_size[i];
break;
case MESA_SHADER_FRAGMENT:
info->ps.can_discard = nir->info.fs.uses_discard;
diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index 0cf66f5d4cd..e75dd1b01fa 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -3744,9 +3744,9 @@ nir_to_vir(struct v3d_compile *c)
/* Set up the division between gl_LocalInvocationIndex and
* wg_in_mem in the payload reg.
*/
- int wg_size = (c->s->info.cs.workgroup_size[0] *
- c->s->info.cs.workgroup_size[1] *
- c->s->info.cs.workgroup_size[2]);
+ int wg_size = (c->s->info.workgroup_size[0] *
+ c->s->info.workgroup_size[1] *
+ c->s->info.workgroup_size[2]);
c->local_invocation_index_bits =
ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
assert(c->local_invocation_index_bits <= 8);
@@ -3754,9 +3754,9 @@ nir_to_vir(struct v3d_compile *c)
if (c->s->info.shared_size) {
struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
vir_uniform_ui(c, 16));
- if (c->s->info.cs.workgroup_size[0] != 1 ||
- c->s->info.cs.workgroup_size[1] != 1 ||
- c->s->info.cs.workgroup_size[2] != 1) {
+ if (c->s->info.workgroup_size[0] != 1 ||
+ c->s->info.workgroup_size[1] != 1 ||
+ c->s->info.workgroup_size[2] != 1) {
int wg_bits = (16 -
c->local_invocation_index_bits);
int wg_mask = (1 << wg_bits) - 1;
diff --git a/src/broadcom/compiler/vir.c b/src/broadcom/compiler/vir.c
index d0634901c63..812ec76c04b 100644
--- a/src/broadcom/compiler/vir.c
+++ b/src/broadcom/compiler/vir.c
@@ -804,9 +804,9 @@ v3d_cs_set_prog_data(struct v3d_compile *c,
{
prog_data->shared_size = c->s->info.shared_size;
- prog_data->local_size[0] = c->s->info.cs.workgroup_size[0];
- prog_data->local_size[1] = c->s->info.cs.workgroup_size[1];
- prog_data->local_size[2] = c->s->info.cs.workgroup_size[2];
+ prog_data->local_size[0] = c->s->info.workgroup_size[0];
+ prog_data->local_size[1] = c->s->info.workgroup_size[1];
+ prog_data->local_size[2] = c->s->info.workgroup_size[2];
}
static void
diff --git a/src/compiler/glsl/linker.cpp b/src/compiler/glsl/linker.cpp
index c67f4b0ff1e..df1bf920a76 100644
--- a/src/compiler/glsl/linker.cpp
+++ b/src/compiler/glsl/linker.cpp
@@ -2230,9 +2230,9 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
return;
for (int i = 0; i < 3; i++)
- gl_prog->info.cs.workgroup_size[i] = 0;
+ gl_prog->info.workgroup_size[i] = 0;
- gl_prog->info.cs.workgroup_size_variable = false;
+ gl_prog->info.workgroup_size_variable = false;
gl_prog->info.cs.derivative_group = DERIVATIVE_GROUP_NONE;
@@ -2250,9 +2250,9 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
struct gl_shader *shader = shader_list[sh];
if (shader->info.Comp.LocalSize[0] != 0) {
- if (gl_prog->info.cs.workgroup_size[0] != 0) {
+ if (gl_prog->info.workgroup_size[0] != 0) {
for (int i = 0; i < 3; i++) {
- if (gl_prog->info.cs.workgroup_size[i] !=
+ if (gl_prog->info.workgroup_size[i] !=
shader->info.Comp.LocalSize[i]) {
linker_error(prog, "compute shader defined with conflicting "
"local sizes\n");
@@ -2261,11 +2261,11 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
}
}
for (int i = 0; i < 3; i++) {
- gl_prog->info.cs.workgroup_size[i] =
+ gl_prog->info.workgroup_size[i] =
shader->info.Comp.LocalSize[i];
}
} else if (shader->info.Comp.LocalSizeVariable) {
- if (gl_prog->info.cs.workgroup_size[0] != 0) {
+ if (gl_prog->info.workgroup_size[0] != 0) {
/* The ARB_compute_variable_group_size spec says:
*
* If one compute shader attached to a program declares a
@@ -2277,7 +2277,7 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
"variable local group size\n");
return;
}
- gl_prog->info.cs.workgroup_size_variable = true;
+ gl_prog->info.workgroup_size_variable = true;
}
enum gl_derivative_group group = shader->info.Comp.DerivativeGroup;
@@ -2296,30 +2296,30 @@ link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
* since we already know we're in the right type of shader program
* for doing it.
*/
- if (gl_prog->info.cs.workgroup_size[0] == 0 &&
- !gl_prog->info.cs.workgroup_size_variable) {
+ if (gl_prog->info.workgroup_size[0] == 0 &&
+ !gl_prog->info.workgroup_size_variable) {
linker_error(prog, "compute shader must contain a fixed or a variable "
"local group size\n");
return;
}
if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
- if (gl_prog->info.cs.workgroup_size[0] % 2 != 0) {
+ if (gl_prog->info.workgroup_size[0] % 2 != 0) {
linker_error(prog, "derivative_group_quadsNV must be used with a "
"local group size whose first dimension "
"is a multiple of 2\n");
return;
}
- if (gl_prog->info.cs.workgroup_size[1] % 2 != 0) {
+ if (gl_prog->info.workgroup_size[1] % 2 != 0) {
linker_error(prog, "derivative_group_quadsNV must be used with a local"
"group size whose second dimension "
"is a multiple of 2\n");
return;
}
} else if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
- if ((gl_prog->info.cs.workgroup_size[0] *
- gl_prog->info.cs.workgroup_size[1] *
- gl_prog->info.cs.workgroup_size[2]) % 4 != 0) {
+ if ((gl_prog->info.workgroup_size[0] *
+ gl_prog->info.workgroup_size[1] *
+ gl_prog->info.workgroup_size[2]) % 4 != 0) {
linker_error(prog, "derivative_group_linearNV must be used with a "
"local group size whose total number of invocations "
"is a multiple of 4\n");
diff --git a/src/compiler/glsl/lower_cs_derived.cpp b/src/compiler/glsl/lower_cs_derived.cpp
index 99a0028fb6a..6faa99fad16 100644
--- a/src/compiler/glsl/lower_cs_derived.cpp
+++ b/src/compiler/glsl/lower_cs_derived.cpp
@@ -54,7 +54,7 @@ public:
explicit lower_cs_derived_visitor(gl_linked_shader *shader)
: progress(false),
shader(shader),
- local_size_variable(shader->Program->info.cs.workgroup_size_variable),
+ local_size_variable(shader->Program->info.workgroup_size_variable),
gl_WorkGroupSize(NULL),
gl_WorkGroupID(NULL),
gl_LocalInvocationID(NULL),
@@ -144,7 +144,7 @@ lower_cs_derived_visitor::find_sysvals()
ir_constant_data data;
memset(&data, 0, sizeof(data));
for (int i = 0; i < 3; i++)
- data.u[i] = shader->Program->info.cs.workgroup_size[i];
+ data.u[i] = shader->Program->info.workgroup_size[i];
gl_WorkGroupSize = new(shader) ir_constant(glsl_type::uvec3_type, &data);
}
}
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index 732b8e80a6b..60443050407 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -320,10 +320,10 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ssa_def *x = nir_channel(b, ids, 0);
nir_ssa_def *y = nir_channel(b, ids, 1);
nir_ssa_def *z = nir_channel(b, ids, 2);
- unsigned size_x = b->shader->info.cs.workgroup_size[0];
+ unsigned size_x = b->shader->info.workgroup_size[0];
nir_ssa_def *size_x_imm;
- if (b->shader->info.cs.workgroup_size_variable)
+ if (b->shader->info.workgroup_size_variable)
size_x_imm = nir_channel(b, nir_load_workgroup_size(b), 0);
else
size_x_imm = nir_imm_int(b, size_x);
@@ -371,7 +371,7 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ishl(b, x_bits_1n, one));
nir_ssa_def *i;
- if (!b->shader->info.cs.workgroup_size_variable &&
+ if (!b->shader->info.workgroup_size_variable &&
util_is_power_of_two_nonzero(size_x)) {
nir_ssa_def *log2_size_x = nir_imm_int(b, util_logbase2(size_x));
i = nir_ior(b, bits_01x, nir_ishl(b, y_bits_1n, log2_size_x));
@@ -405,9 +405,9 @@ lower_compute_system_value_instr(nir_builder *b,
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
nir_ssa_def *size_x =
- nir_imm_int(b, b->shader->info.cs.workgroup_size[0]);
+ nir_imm_int(b, b->shader->info.workgroup_size[0]);
nir_ssa_def *size_y =
- nir_imm_int(b, b->shader->info.cs.workgroup_size[1]);
+ nir_imm_int(b, b->shader->info.workgroup_size[1]);
/* Because no hardware supports a local workgroup size greater than
* about 1K, this calculation can be done in 32-bit and can save some
@@ -425,7 +425,7 @@ lower_compute_system_value_instr(nir_builder *b,
}
case nir_intrinsic_load_workgroup_size:
- if (b->shader->info.cs.workgroup_size_variable) {
+ if (b->shader->info.workgroup_size_variable) {
/* If the local work group size is variable it can't be lowered at
* this point. We do, however, have to make sure that the intrinsic
* is only 32-bit.
@@ -436,9 +436,9 @@ lower_compute_system_value_instr(nir_builder *b,
* than 32 bits for the local size */
nir_const_value workgroup_size_const[3];
memset(workgroup_size_const, 0, sizeof(workgroup_size_const));
- workgroup_size_const[0].u32 = b->shader->info.cs.workgroup_size[0];
- workgroup_size_const[1].u32 = b->shader->info.cs.workgroup_size[1];
- workgroup_size_const[2].u32 = b->shader->info.cs.workgroup_size[2];
+ workgroup_size_const[0].u32 = b->shader->info.workgroup_size[0];
+ workgroup_size_const[1].u32 = b->shader->info.workgroup_size[1];
+ workgroup_size_const[2].u32 = b->shader->info.workgroup_size[2];
return nir_u2u(b, nir_build_imm(b, 3, 32, workgroup_size_const), bit_size);
}
diff --git a/src/compiler/nir/nir_lower_variable_initializers.c b/src/compiler/nir/nir_lower_variable_initializers.c
index b26624f8cf6..ba0e7ba7be6 100644
--- a/src/compiler/nir/nir_lower_variable_initializers.c
+++ b/src/compiler/nir/nir_lower_variable_initializers.c
@@ -154,10 +154,10 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
nir_builder_init(&b, nir_shader_get_entrypoint(shader));
b.cursor = nir_before_cf_list(&b.impl->body);
- assert(!shader->info.cs.workgroup_size_variable);
- const unsigned local_count = shader->info.cs.workgroup_size[0] *
- shader->info.cs.workgroup_size[1] *
- shader->info.cs.workgroup_size[2];
+ assert(!shader->info.workgroup_size_variable);
+ const unsigned local_count = shader->info.workgroup_size[0] *
+ shader->info.workgroup_size[1] *
+ shader->info.workgroup_size[2];
/* The initialization logic is simplified if we can always split the memory
* in full chunk_size units.
diff --git a/src/compiler/nir/nir_opt_uniform_atomics.c b/src/compiler/nir/nir_opt_uniform_atomics.c
index 433803cb35e..2c64e3198a4 100644
--- a/src/compiler/nir/nir_opt_uniform_atomics.c
+++ b/src/compiler/nir/nir_opt_uniform_atomics.c
@@ -169,7 +169,7 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
unsigned dims_needed = 0;
for (unsigned i = 0; i < 3; i++)
- dims_needed |= (shader->info.cs.workgroup_size[i] > 1) << i;
+ dims_needed |= (shader->info.workgroup_size[i] > 1) << i;
return (dims & dims_needed) == dims_needed || dims & 0x8;
}
@@ -306,9 +306,10 @@ nir_opt_uniform_atomics(nir_shader *shader)
/* A 1x1x1 workgroup only ever has one active lane, so there's no point in
* optimizing any atomics.
*/
- if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.workgroup_size_variable &&
- shader->info.cs.workgroup_size[0] == 1 && shader->info.cs.workgroup_size[1] == 1 &&
- shader->info.cs.workgroup_size[2] == 1)
+ if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+ !shader->info.workgroup_size_variable &&
+ shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
+ shader->info.workgroup_size[2] == 1)
return false;
nir_foreach_function(function, shader) {
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index 363a8e4aeb1..649d1de224d 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -1605,12 +1605,12 @@ nir_print_shader_annotated(nir_shader *shader, FILE *fp,
if (shader->info.label)
fprintf(fp, "label: %s\n", shader->info.label);
- if (gl_shader_stage_is_compute(shader->info.stage)) {
+ if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
fprintf(fp, "workgroup-size: %u, %u, %u%s\n",
- shader->info.cs.workgroup_size[0],
- shader->info.cs.workgroup_size[1],
- shader->info.cs.workgroup_size[2],
- shader->info.cs.workgroup_size_variable ? " (variable)" : "");
+ shader->info.workgroup_size[0],
+ shader->info.workgroup_size[1],
+ shader->info.workgroup_size[2],
+ shader->info.workgroup_size_variable ? " (variable)" : "");
fprintf(fp, "shared-size: %u\n", shader->info.shared_size);
}
diff --git a/src/compiler/nir/nir_range_analysis.c b/src/compiler/nir/nir_range_analysis.c
index 18ac161d5e0..501084f14f5 100644
--- a/src/compiler/nir/nir_range_analysis.c
+++ b/src/compiler/nir/nir_range_analysis.c
@@ -1292,20 +1292,19 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
switch (intrin->intrinsic) {
case nir_intrinsic_load_local_invocation_index:
- if (shader->info.stage != MESA_SHADER_COMPUTE ||
- shader->info.cs.workgroup_size_variable) {
+ if (shader->info.workgroup_size_variable) {
res = config->max_workgroup_invocations - 1;
} else {
- res = (shader->info.cs.workgroup_size[0] *
- shader->info.cs.workgroup_size[1] *
- shader->info.cs.workgroup_size[2]) - 1u;
+ res = (shader->info.workgroup_size[0] *
+ shader->info.workgroup_size[1] *
+ shader->info.workgroup_size[2]) - 1u;
}
break;
case nir_intrinsic_load_local_invocation_id:
- if (shader->info.cs.workgroup_size_variable)
+ if (shader->info.workgroup_size_variable)
res = config->max_workgroup_size[scalar.comp] - 1u;
else
- res = shader->info.cs.workgroup_size[scalar.comp] - 1u;
+ res = shader->info.workgroup_size[scalar.comp] - 1u;
break;
case nir_intrinsic_load_workgroup_id:
res = config->max_workgroup_count[scalar.comp] - 1u;
@@ -1314,11 +1313,11 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
res = config->max_workgroup_count[scalar.comp];
break;
case nir_intrinsic_load_global_invocation_id:
- if (shader->info.cs.workgroup_size_variable) {
+ if (shader->info.workgroup_size_variable) {
res = mul_clamp(config->max_workgroup_size[scalar.comp],
config->max_workgroup_count[scalar.comp]) - 1u;
} else {
- res = (shader->info.cs.workgroup_size[scalar.comp] *
+ res = (shader->info.workgroup_size[scalar.comp] *
config->max_workgroup_count[scalar.comp]) - 1u;
}
break;
@@ -1339,10 +1338,11 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
case nir_intrinsic_load_subgroup_id:
case nir_intrinsic_load_num_subgroups: {
uint32_t workgroup_size = config->max_workgroup_invocations;
- if (shader->info.stage == MESA_SHADER_COMPUTE && !shader->info.cs.workgroup_size_variable) {
- workgroup_size = shader->info.cs.workgroup_size[0] *
- shader->info.cs.workgroup_size[1] *
- shader->info.cs.workgroup_size[2];
+ if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+ !shader->info.workgroup_size_variable) {
+ workgroup_size = shader->info.workgroup_size[0] *
+ shader->info.workgroup_size[1] *
+ shader->info.workgroup_size[2];
}
res = DIV_ROUND_UP(workgroup_size, config->min_subgroup_size);
if (intrin->intrinsic == nir_intrinsic_load_subgroup_id)
diff --git a/src/compiler/shader_enums.h b/src/compiler/shader_enums.h
index 9350459a100..2728c4417ed 100644
--- a/src/compiler/shader_enums.h
+++ b/src/compiler/shader_enums.h
@@ -73,6 +73,15 @@ gl_shader_stage_is_compute(gl_shader_stage stage)
}
static inline bool
+gl_shader_stage_uses_workgroup(gl_shader_stage stage)
+{
+ return stage == MESA_SHADER_COMPUTE ||
+ stage == MESA_SHADER_KERNEL ||
+ stage == MESA_SHADER_TASK ||
+ stage == MESA_SHADER_MESH;
+}
+
+static inline bool
gl_shader_stage_is_callable(gl_shader_stage stage)
{
return stage == MESA_SHADER_ANY_HIT ||
diff --git a/src/compiler/shader_info.h b/src/compiler/shader_info.h
index 7c18e565433..1bb2f8b966d 100644
--- a/src/compiler/shader_info.h
+++ b/src/compiler/shader_info.h
@@ -198,6 +198,11 @@ typedef struct shader_info {
*/
unsigned shared_size;
+ /**
+ * Local workgroup size used by compute/task/mesh shaders.
+ */
+ uint16_t workgroup_size[3];
+
uint16_t inlinable_uniform_dw_offsets[MAX_INLINABLE_UNIFORMS];
uint8_t num_inlinable_uniforms:4;
@@ -259,6 +264,11 @@ typedef struct shader_info {
*/
bool zero_initialize_shared_memory:1;
+ /**
+ * Used for ARB_compute_variable_group_size.
+ */
+ bool workgroup_size_variable:1;
+
union {
struct {
/* Which inputs are doubles */
@@ -389,10 +399,8 @@ typedef struct shader_info {
} fs;
struct {
- uint16_t workgroup_size[3];
uint16_t workgroup_size_hint[3];
- bool workgroup_size_variable:1;
uint8_t user_data_components_amd:3;
/*
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index d94577d23c8..2abf1144b01 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -4803,9 +4803,9 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
case SpvExecutionModeLocalSize:
vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
- b->shader->info.cs.workgroup_size[0] = mode->operands[0];
- b->shader->info.cs.workgroup_size[1] = mode->operands[1];
- b->shader->info.cs.workgroup_size[2] = mode->operands[2];
+ b->shader->info.workgroup_size[0] = mode->operands[0];
+ b->shader->info.workgroup_size[1] = mode->operands[1];
+ b->shader->info.workgroup_size[2] = mode->operands[2];
break;
case SpvExecutionModeOutputVertices:
@@ -5016,9 +5016,9 @@ vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_poin
switch (mode->exec_mode) {
case SpvExecutionModeLocalSizeId:
- b->shader->info.cs.workgroup_size[0] = vtn_constant_uint(b, mode->operands[0]);
- b->shader->info.cs.workgroup_size[1] = vtn_constant_uint(b, mode->operands[1]);
- b->shader->info.cs.workgroup_size[2] = vtn_constant_uint(b, mode->operands[2]);
+ b->shader->info.workgroup_size[0] = vtn_constant_uint(b, mode->operands[0]);
+ b->shader->info.workgroup_size[1] = vtn_constant_uint(b, mode->operands[1]);
+ b->shader->info.workgroup_size[2] = vtn_constant_uint(b, mode->operands[2]);
break;
case SpvExecutionModeLocalSizeHintId:
@@ -5986,16 +5986,16 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
vtn_handle_execution_mode_id, NULL);
if (b->workgroup_size_builtin) {
- vtn_assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
+ vtn_assert(gl_shader_stage_uses_workgroup(stage));
vtn_assert(b->workgroup_size_builtin->type->type ==
glsl_vector_type(GLSL_TYPE_UINT, 3));
nir_const_value *const_size =
b->workgroup_size_builtin->constant->values;
- b->shader->info.cs.workgroup_size[0] = const_size[0].u32;
- b->shader->info.cs.workgroup_size[1] = const_size[1].u32;
- b->shader->info.cs.workgroup_size[2] = const_size[2].u32;
+ b->shader->info.workgroup_size[0] = const_size[0].u32;
+ b->shader->info.workgroup_size[1] = const_size[1].u32;
+ b->shader->info.workgroup_size[2] = const_size[2].u32;
}
/* Set types on all vtn_values */
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index c88c30543d2..497761b80b0 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
@@ -4102,10 +4102,10 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
so->need_pixlod = true;
if (so->type == MESA_SHADER_COMPUTE) {
- so->local_size[0] = ctx->s->info.cs.workgroup_size[0];
- so->local_size[1] = ctx->s->info.cs.workgroup_size[1];
- so->local_size[2] = ctx->s->info.cs.workgroup_size[2];
- so->local_size_variable = ctx->s->info.cs.workgroup_size_variable;
+ so->local_size[0] = ctx->s->info.workgroup_size[0];
+ so->local_size[1] = ctx->s->info.workgroup_size[1];
+ so->local_size[2] = ctx->s->info.workgroup_size[2];
+ so->local_size_variable = ctx->s->info.workgroup_size_variable;
}
out:
diff --git a/src/gallium/auxiliary/nir/nir_to_tgsi_info.c b/src/gallium/auxiliary/nir/nir_to_tgsi_info.c
index aa9ce92dbae..24a18ec904f 100644
--- a/src/gallium/auxiliary/nir/nir_to_tgsi_info.c
+++ b/src/gallium/auxiliary/nir/nir_to_tgsi_info.c
@@ -487,9 +487,9 @@ void nir_tgsi_scan_shader(const struct nir_shader *nir,
}
if (gl_shader_stage_is_compute(nir->info.stage)) {
- info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.workgroup_size[0];
- info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.workgroup_size[1];
- info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.workgroup_size[2];
+ info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.workgroup_size[0];
+ info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.workgroup_size[1];
+ info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.workgroup_size[2];
}
i = 0;
diff --git a/src/gallium/auxiliary/nir/tgsi_to_nir.c b/src/gallium/auxiliary/nir/tgsi_to_nir.c
index 5b625ef66a1..e60b92e867d 100644
--- a/src/gallium/auxiliary/nir/tgsi_to_nir.c
+++ b/src/gallium/auxiliary/nir/tgsi_to_nir.c
@@ -2354,15 +2354,15 @@ ttn_compile_init(const void *tgsi_tokens,
break;
case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
if (s->info.stage == MESA_SHADER_COMPUTE)
- s->info.cs.workgroup_size[0] = value;
+ s->info.workgroup_size[0] = value;
break;
case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
if (s->info.stage == MESA_SHADER_COMPUTE)
- s->info.cs.workgroup_size[1] = value;
+ s->info.workgroup_size[1] = value;
break;
case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
if (s->info.stage == MESA_SHADER_COMPUTE)
- s->info.cs.workgroup_size[2] = value;
+ s->info.workgroup_size[2] = value;
break;
case TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD:
if (s->info.stage == MESA_SHADER_COMPUTE)
@@ -2381,10 +2381,10 @@ ttn_compile_init(const void *tgsi_tokens,
}
if (s->info.stage == MESA_SHADER_COMPUTE &&
- (!s->info.cs.workgroup_size[0] ||
- !s->info.cs.workgroup_size[1] ||
- !s->info.cs.workgroup_size[2]))
- s->info.cs.workgroup_size_variable = true;
+ (!s->info.workgroup_size[0] ||
+ !s->info.workgroup_size[1] ||
+ !s->info.workgroup_size[2]))
+ s->info.workgroup_size_variable = true;
c->inputs = rzalloc_array(c, struct nir_variable *, s->num_inputs);
c->outputs = rzalloc_array(c, struct nir_variable *, s->num_outputs);
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.c b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
index 5b18dfe8b75..950a448f959 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_ureg.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
@@ -2354,11 +2354,11 @@ ureg_setup_compute_shader(struct ureg_program *ureg,
const struct shader_info *info)
{
ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
- info->cs.workgroup_size[0]);
+ info->workgroup_size[0]);
ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
- info->cs.workgroup_size[1]);
+ info->workgroup_size[1]);
ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
- info->cs.workgroup_size[2]);
+ info->workgroup_size[2]);
if (info->shared_size)
ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);
diff --git a/src/gallium/drivers/freedreno/a5xx/fd5_compute.c b/src/gallium/drivers/freedreno/a5xx/fd5_compute.c
index 734221433e8..6f236c1f48c 100644
--- a/src/gallium/drivers/freedreno/a5xx/fd5_compute.c
+++ b/src/gallium/drivers/freedreno/a5xx/fd5_compute.c
@@ -142,7 +142,7 @@ fd5_launch_grid(struct fd_context *ctx,
}
const unsigned *local_size =
- info->block; // v->shader->nir->info->cs.workgroup_size;
+ info->block; // v->shader->nir->info->workgroup_size;
const unsigned *num_groups = info->grid;
/* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
const unsigned work_dim = info->work_dim ? info->work_dim : 3;
diff --git a/src/gallium/drivers/freedreno/a6xx/fd6_compute.c b/src/gallium/drivers/freedreno/a6xx/fd6_compute.c
index bcb7433c754..9e8d68a6aae 100644
--- a/src/gallium/drivers/freedreno/a6xx/fd6_compute.c
+++ b/src/gallium/drivers/freedreno/a6xx/fd6_compute.c
@@ -134,7 +134,7 @@ fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info) in_dt
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
const unsigned *local_size =
- info->block; // v->shader->nir->info->cs.workgroup_size;
+ info->block; // v->shader->nir->info->workgroup_size;
const unsigned *num_groups = info->grid;
/* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
const unsigned work_dim = info->work_dim ? info->work_dim : 3;
diff --git a/src/gallium/drivers/iris/iris_program.c b/src/gallium/drivers/iris/iris_program.c
index 2170e67267d..ec3c493eb0b 100644
--- a/src/gallium/drivers/iris/iris_program.c
+++ b/src/gallium/drivers/iris/iris_program.c
@@ -528,7 +528,7 @@ iris_setup_uniforms(const struct brw_compiler *compiler,
break;
}
case nir_intrinsic_load_workgroup_size: {
- assert(nir->info.cs.workgroup_size_variable);
+ assert(nir->info.workgroup_size_variable);
if (variable_group_size_idx == -1) {
variable_group_size_idx = num_system_values;
num_system_values += 3;
diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
index bb2e8c7d062..e12fee25529 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
@@ -1289,9 +1289,9 @@ Converter::parseNIR()
switch(prog->getType()) {
case Program::TYPE_COMPUTE:
- info->prop.cp.numThreads[0] = nir->info.cs.workgroup_size[0];
- info->prop.cp.numThreads[1] = nir->info.cs.workgroup_size[1];
- info->prop.cp.numThreads[2] = nir->info.cs.workgroup_size[2];
+ info->prop.cp.numThreads[0] = nir->info.workgroup_size[0];
+ info->prop.cp.numThreads[1] = nir->info.workgroup_size[1];
+ info->prop.cp.numThreads[2] = nir->info.workgroup_size[2];
info_out->bin.smemSize += nir->info.shared_size;
break;
case Program::TYPE_FRAGMENT:
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index cedfc4c1025..94dd486f525 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -218,10 +218,10 @@ unsigned si_get_max_workgroup_size(const struct si_shader *shader)
}
/* Compile a variable block size using the maximum variable size. */
- if (shader->selector->info.base.cs.workgroup_size_variable)
+ if (shader->selector->info.base.workgroup_size_variable)
return SI_MAX_VARIABLE_THREADS_PER_BLOCK;
- uint16_t *local_size = shader->selector->info.base.cs.workgroup_size;
+ uint16_t *local_size = shader->selector->info.base.workgroup_size;
unsigned max_work_group_size = (uint32_t)local_size[0] *
(uint32_t)local_size[1] *
(uint32_t)local_size[2];
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm.c b/src/gallium/drivers/radeonsi/si_shader_llvm.c
index 265ac084819..2babb7e54a7 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm.c
@@ -411,7 +411,7 @@ static LLVMValueRef si_llvm_get_block_size(struct ac_shader_abi *abi)
{
struct si_shader_context *ctx = si_shader_context_from_abi(abi);
- assert(ctx->shader->selector->info.base.cs.workgroup_size_variable &&
+ assert(ctx->shader->selector->info.base.workgroup_size_variable &&
ctx->shader->selector->info.uses_variable_block_size);
LLVMValueRef chan[3] = {
diff --git a/src/gallium/drivers/radeonsi/si_shaderlib_nir.c b/src/gallium/drivers/radeonsi/si_shaderlib_nir.c
index c55985468b4..27f765769e7 100644
--- a/src/gallium/drivers/radeonsi/si_shaderlib_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shaderlib_nir.c
@@ -59,9 +59,9 @@ void *si_create_dcc_retile_cs(struct si_context *sctx, struct radeon_surf *surf)
sctx->b.screen->get_compiler_options(sctx->b.screen, PIPE_SHADER_IR_NIR, PIPE_SHADER_COMPUTE);
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options, "dcc_retile");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
b.shader->info.cs.user_data_components_amd = 3;
b.shader->info.num_ssbos = 1;
@@ -107,9 +107,9 @@ void *gfx9_create_clear_dcc_msaa_cs(struct si_context *sctx, struct si_texture *
sctx->b.screen->get_compiler_options(sctx->b.screen, PIPE_SHADER_IR_NIR, PIPE_SHADER_COMPUTE);
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options, "clear_dcc_msaa");
- b.shader->info.cs.workgroup_size[0] = 8;
- b.shader->info.cs.workgroup_size[1] = 8;
- b.shader->info.cs.workgroup_size[2] = 1;
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+ b.shader->info.workgroup_size[2] = 1;
b.shader->info.cs.user_data_components_amd = 2;
b.shader->info.num_ssbos = 1;
diff --git a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
index 7e906ce3253..a1feb2ba7bd 100644
--- a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
+++ b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
@@ -3809,10 +3809,10 @@ nir_to_spirv(struct nir_shader *s, const struct zink_so_info *so_info, uint32_t
if (s->info.shared_size)
create_shared_block(&ctx, s->info.shared_size);
- if (s->info.cs.workgroup_size[0] || s->info.cs.workgroup_size[1] || s->info.cs.workgroup_size[2])
+ if (s->info.workgroup_size[0] || s->info.workgroup_size[1] || s->info.workgroup_size[2])
spirv_builder_emit_exec_mode_literal3(&ctx.builder, entry_point, SpvExecutionModeLocalSize,
- (uint32_t[3]){(uint32_t)s->info.cs.workgroup_size[0], (uint32_t)s->info.cs.workgroup_size[1],
- (uint32_t)s->info.cs.workgroup_size[2]});
+ (uint32_t[3]){(uint32_t)s->info.workgroup_size[0], (uint32_t)s->info.workgroup_size[1],
+ (uint32_t)s->info.workgroup_size[2]});
else {
SpvId sizes[3];
uint32_t ids[] = {ZINK_WORKGROUP_SIZE_X, ZINK_WORKGROUP_SIZE_Y, ZINK_WORKGROUP_SIZE_Z};
diff --git a/src/gallium/drivers/zink/zink_program.c b/src/gallium/drivers/zink/zink_program.c
index a2cd48f54ad..0419c7b1d47 100644
--- a/src/gallium/drivers/zink/zink_program.c
+++ b/src/gallium/drivers/zink/zink_program.c
@@ -589,9 +589,9 @@ void
zink_program_update_compute_pipeline_state(struct zink_context *ctx, struct zink_compute_program *comp, const uint block[3])
{
struct zink_shader *zs = comp->shader;
- bool use_local_size = !(zs->nir->info.cs.workgroup_size[0] ||
- zs->nir->info.cs.workgroup_size[1] ||
- zs->nir->info.cs.workgroup_size[2]);
+ bool use_local_size = !(zs->nir->info.workgroup_size[0] ||
+ zs->nir->info.workgroup_size[1] ||
+ zs->nir->info.workgroup_size[2]);
if (ctx->compute_pipeline_state.use_local_size != use_local_size)
ctx->compute_pipeline_state.dirty = true;
ctx->compute_pipeline_state.use_local_size = use_local_size;
diff --git a/src/gallium/frontends/clover/nir/invocation.cpp b/src/gallium/frontends/clover/nir/invocation.cpp
index a5499f854ca..d807e270ce1 100644
--- a/src/gallium/frontends/clover/nir/invocation.cpp
+++ b/src/gallium/frontends/clover/nir/invocation.cpp
@@ -428,10 +428,10 @@ module clover::nir::spirv_to_nir(const module &mod, const device &dev,
throw build_error();
}
- nir->info.cs.workgroup_size_variable = sym.reqd_work_group_size[0] == 0;
- nir->info.cs.workgroup_size[0] = sym.reqd_work_group_size[0];
- nir->info.cs.workgroup_size[1] = sym.reqd_work_group_size[1];
- nir->info.cs.workgroup_size[2] = sym.reqd_work_group_size[2];
+ nir->info.workgroup_size_variable = sym.reqd_work_group_size[0] == 0;
+ nir->info.workgroup_size[0] = sym.reqd_work_group_size[0];
+ nir->info.workgroup_size[1] = sym.reqd_work_group_size[1];
+ nir->info.workgroup_size[2] = sym.reqd_work_group_size[2];
nir_validate_shader(nir, "clover");
// Inline all functions first.
diff --git a/src/gallium/frontends/lavapipe/lvp_execute.c b/src/gallium/frontends/lavapipe/lvp_execute.c
index a5d3e392bc2..5fd213ce19a 100644
--- a/src/gallium/frontends/lavapipe/lvp_execute.c
+++ b/src/gallium/frontends/lavapipe/lvp_execute.c
@@ -348,9 +348,9 @@ static void handle_compute_pipeline(struct lvp_cmd_buffer_entry *cmd,
{
struct lvp_pipeline *pipeline = cmd->u.pipeline.pipeline;
- state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.workgroup_size[0];
- state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.workgroup_size[1];
- state->dispatch_info.block[2] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.workgroup_size[2];
+ state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[0];
+ state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[1];
+ state->dispatch_info.block[2] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[2];
state->pctx->bind_compute_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
}
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 051dd37f8f5..d83224b2b9b 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -9534,10 +9534,10 @@ lower_simd(nir_builder *b, nir_instr *instr, void *options)
/* If the whole workgroup fits in one thread, we can lower subgroup_id
* to a constant zero.
*/
- if (!b->shader->info.cs.workgroup_size_variable) {
- unsigned local_workgroup_size = b->shader->info.cs.workgroup_size[0] *
- b->shader->info.cs.workgroup_size[1] *
- b->shader->info.cs.workgroup_size[2];
+ if (!b->shader->info.workgroup_size_variable) {
+ unsigned local_workgroup_size = b->shader->info.workgroup_size[0] *
+ b->shader->info.workgroup_size[1] *
+ b->shader->info.workgroup_size[2];
if (local_workgroup_size <= simd_width)
return nir_imm_int(b, 0);
}
@@ -9599,15 +9599,15 @@ brw_compile_cs(const struct brw_compiler *compiler,
unsigned min_dispatch_width;
unsigned max_dispatch_width;
- if (nir->info.cs.workgroup_size_variable) {
+ if (nir->info.workgroup_size_variable) {
generate_all = true;
min_dispatch_width = 8;
max_dispatch_width = 32;
} else {
generate_all = false;
- prog_data->local_size[0] = nir->info.cs.workgroup_size[0];
- prog_data->local_size[1] = nir->info.cs.workgroup_size[1];
- prog_data->local_size[2] = nir->info.cs.workgroup_size[2];
+ prog_data->local_size[0] = nir->info.workgroup_size[0];
+ prog_data->local_size[1] = nir->info.workgroup_size[1];
+ prog_data->local_size[2] = nir->info.workgroup_size[2];
unsigned local_workgroup_size = prog_data->local_size[0] *
prog_data->local_size[1] *
prog_data->local_size[2];
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 4110cf0b621..fde08300f69 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -113,7 +113,7 @@ fs_visitor::nir_setup_uniforms()
assert(uniforms == prog_data->nr_params);
uint32_t *param;
- if (nir->info.cs.workgroup_size_variable &&
+ if (nir->info.workgroup_size_variable &&
compiler->lower_variable_group_size) {
param = brw_stage_prog_data_add_params(prog_data, 3);
for (unsigned i = 0; i < 3; i++) {
@@ -3671,7 +3671,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
* invocations are already executed lock-step. Instead of an actual
* barrier just emit a scheduling fence, that will generate no code.
*/
- if (!nir->info.cs.workgroup_size_variable &&
+ if (!nir->info.workgroup_size_variable &&
workgroup_size() <= dispatch_width) {
bld.exec_all().group(1, 0).emit(FS_OPCODE_SCHEDULING_FENCE);
break;
@@ -3816,7 +3816,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
case nir_intrinsic_load_workgroup_size: {
assert(compiler->lower_variable_group_size);
- assert(nir->info.cs.workgroup_size_variable);
+ assert(nir->info.workgroup_size_variable);
for (unsigned i = 0; i < 3; i++) {
bld.MOV(retype(offset(dest, bld, i), BRW_REGISTER_TYPE_UD),
group_size[i]);
@@ -4324,7 +4324,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
*
* TODO: Check if applies for many HW threads sharing same Data Port.
*/
- if (!nir->info.cs.workgroup_size_variable &&
+ if (!nir->info.workgroup_size_variable &&
slm_fence && workgroup_size() <= dispatch_width)
slm_fence = false;
diff --git a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
index da5913e42e8..b8144bb7b58 100644
--- a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
+++ b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
@@ -81,13 +81,13 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
nir_ssa_def *size_x;
nir_ssa_def *size_y;
- if (state->nir->info.cs.workgroup_size_variable) {
+ if (state->nir->info.workgroup_size_variable) {
nir_ssa_def *size_xyz = nir_load_workgroup_size(b);
size_x = nir_channel(b, size_xyz, 0);
size_y = nir_channel(b, size_xyz, 1);
} else {
- size_x = nir_imm_int(b, nir->info.cs.workgroup_size[0]);
- size_y = nir_imm_int(b, nir->info.cs.workgroup_size[1]);
+ size_x = nir_imm_int(b, nir->info.workgroup_size[0]);
+ size_y = nir_imm_int(b, nir->info.workgroup_size[1]);
}
nir_ssa_def *size_xy = nir_imul(b, size_x, size_y);
@@ -120,8 +120,8 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
id_x = nir_umod(b, linear, size_x);
id_y = nir_umod(b, nir_udiv(b, linear, size_x), size_y);
local_index = linear;
- } else if (!nir->info.cs.workgroup_size_variable &&
- nir->info.cs.workgroup_size[1] % 4 == 0) {
+ } else if (!nir->info.workgroup_size_variable &&
+ nir->info.workgroup_size[1] % 4 == 0) {
/* 1x4 block X-major lid order. Same as X-major except increments in
* blocks of width=1 height=4. Always optimal for tileY and usually
* optimal for linear accesses.
@@ -213,16 +213,16 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
case nir_intrinsic_load_num_subgroups: {
nir_ssa_def *size;
- if (state->nir->info.cs.workgroup_size_variable) {
+ if (state->nir->info.workgroup_size_variable) {
nir_ssa_def *size_xyz = nir_load_workgroup_size(b);
nir_ssa_def *size_x = nir_channel(b, size_xyz, 0);
nir_ssa_def *size_y = nir_channel(b, size_xyz, 1);
nir_ssa_def *size_z = nir_channel(b, size_xyz, 2);
size = nir_imul(b, nir_imul(b, size_x, size_y), size_z);
} else {
- size = nir_imm_int(b, nir->info.cs.workgroup_size[0] *
- nir->info.cs.workgroup_size[1] *
- nir->info.cs.workgroup_size[2]);
+ size = nir_imm_int(b, nir->info.workgroup_size[0] *
+ nir->info.workgroup_size[1] *
+ nir->info.workgroup_size[2]);
}
/* Calculate the equivalent of DIV_ROUND_UP. */
@@ -273,15 +273,15 @@ brw_nir_lower_cs_intrinsics(nir_shader *nir)
};
/* Constraints from NV_compute_shader_derivatives. */
- if (!nir->info.cs.workgroup_size_variable) {
+ if (!nir->info.workgroup_size_variable) {
if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
- assert(nir->info.cs.workgroup_size[0] % 2 == 0);
- assert(nir->info.cs.workgroup_size[1] % 2 == 0);
+ assert(nir->info.workgroup_size[0] % 2 == 0);
+ assert(nir->info.workgroup_size[1] % 2 == 0);
} else if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
ASSERTED unsigned workgroup_size =
- nir->info.cs.workgroup_size[0] *
- nir->info.cs.workgroup_size[1] *
- nir->info.cs.workgroup_size[2];
+ nir->info.workgroup_size[0] *
+ nir->info.workgroup_size[1] *
+ nir->info.workgroup_size[2];
assert(workgroup_size % 4 == 0);
}
}
diff --git a/src/intel/compiler/brw_nir_rt.c b/src/intel/compiler/brw_nir_rt.c
index 93f0a8fc541..5943c5283a0 100644
--- a/src/intel/compiler/brw_nir_rt.c
+++ b/src/intel/compiler/brw_nir_rt.c
@@ -426,7 +426,7 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
"RT Ray-Gen Trampoline");
ralloc_steal(mem_ctx, b.shader);
- b.shader->info.cs.workgroup_size_variable = true;
+ b.shader->info.workgroup_size_variable = true;
/* The RT global data and raygen BINDLESS_SHADER_RECORD addresses are
* passed in as push constants in the first register. We deal with the
diff --git a/src/mesa/main/compute.c b/src/mesa/main/compute.c
index 17aef1a433a..bcf2aaa78f5 100644
--- a/src/mesa/main/compute.c
+++ b/src/mesa/main/compute.c
@@ -89,7 +89,7 @@ validate_DispatchCompute(struct gl_context *ctx, const GLuint *num_groups)
* program for the compute shader stage has a variable work group size."
*/
struct gl_program *prog = ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
- if (prog->info.cs.workgroup_size_variable) {
+ if (prog->info.workgroup_size_variable) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glDispatchCompute(variable work group size forbidden)");
return GL_FALSE;
@@ -113,7 +113,7 @@ validate_DispatchComputeGroupSizeARB(struct gl_context *ctx,
* shader stage has a fixed work group size."
*/
struct gl_program *prog = ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
- if (!prog->info.cs.workgroup_size_variable) {
+ if (!prog->info.workgroup_size_variable) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glDispatchComputeGroupSizeARB(fixed work group size "
"forbidden)");
@@ -269,7 +269,7 @@ valid_dispatch_indirect(struct gl_context *ctx, GLintptr indirect)
* compute shader stage has a variable work group size."
*/
struct gl_program *prog = ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
- if (prog->info.cs.workgroup_size_variable) {
+ if (prog->info.workgroup_size_variable) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"%s(variable work group size forbidden)", name);
return GL_FALSE;
diff --git a/src/mesa/main/shaderapi.c b/src/mesa/main/shaderapi.c
index 11f164dc93e..22f6c0cf708 100644
--- a/src/mesa/main/shaderapi.c
+++ b/src/mesa/main/shaderapi.c
@@ -935,7 +935,7 @@ get_programiv(struct gl_context *ctx, GLuint program, GLenum pname,
}
for (i = 0; i < 3; i++)
params[i] = shProg->_LinkedShaders[MESA_SHADER_COMPUTE]->
- Program->info.cs.workgroup_size[i];
+ Program->info.workgroup_size[i];
return;
}
case GL_PROGRAM_SEPARABLE:
diff --git a/src/mesa/state_tracker/st_cb_compute.c b/src/mesa/state_tracker/st_cb_compute.c
index b69cc7a58c8..3ae017b22a4 100644
--- a/src/mesa/state_tracker/st_cb_compute.c
+++ b/src/mesa/state_tracker/st_cb_compute.c
@@ -59,7 +59,7 @@ static void st_dispatch_compute_common(struct gl_context *ctx,
st_validate_state(st, ST_PIPELINE_COMPUTE);
for (unsigned i = 0; i < 3; i++) {
- info.block[i] = group_size ? group_size[i] : prog->info.cs.workgroup_size[i];
+ info.block[i] = group_size ? group_size[i] : prog->info.workgroup_size[i];
info.grid[i] = num_groups ? num_groups[i] : 0;
}
diff --git a/src/microsoft/clc/clc_compiler.c b/src/microsoft/clc/clc_compiler.c
index 6eff749f3cc..b82db218278 100644
--- a/src/microsoft/clc/clc_compiler.c
+++ b/src/microsoft/clc/clc_compiler.c
@@ -1080,7 +1080,7 @@ clc_to_dxil(struct clc_context *ctx,
clc_error(logger, "spirv_to_nir() failed");
goto err_free_dxil;
}
- nir->info.cs.workgroup_size_variable = true;
+ nir->info.workgroup_size_variable = true;
NIR_PASS_V(nir, nir_lower_goto_ifs);
NIR_PASS_V(nir, nir_opt_dead_cf);
@@ -1338,33 +1338,33 @@ clc_to_dxil(struct clc_context *ctx,
nir_variable *work_properties_var =
add_work_properties_var(dxil, nir, &cbv_id);
- memcpy(metadata->local_size, nir->info.cs.workgroup_size,
+ memcpy(metadata->local_size, nir->info.workgroup_size,
sizeof(metadata->local_size));
memcpy(metadata->local_size_hint, nir->info.cs.workgroup_size_hint,
sizeof(metadata->local_size));
// Patch the localsize before calling clc_nir_lower_system_values().
if (conf) {
- for (unsigned i = 0; i < ARRAY_SIZE(nir->info.cs.workgroup_size); i++) {
+ for (unsigned i = 0; i < ARRAY_SIZE(nir->info.workgroup_size); i++) {
if (!conf->local_size[i] ||
- conf->local_size[i] == nir->info.cs.workgroup_size[i])
+ conf->local_size[i] == nir->info.workgroup_size[i])
continue;
- if (nir->info.cs.workgroup_size[i] &&
- nir->info.cs.workgroup_size[i] != conf->local_size[i]) {
+ if (nir->info.workgroup_size[i] &&
+ nir->info.workgroup_size[i] != conf->local_size[i]) {
debug_printf("D3D12: runtime local size does not match reqd_work_group_size() values\n");
goto err_free_dxil;
}
- nir->info.cs.workgroup_size[i] = conf->local_size[i];
+ nir->info.workgroup_size[i] = conf->local_size[i];
}
- memcpy(metadata->local_size, nir->info.cs.workgroup_size,
+ memcpy(metadata->local_size, nir->info.workgroup_size,
sizeof(metadata->local_size));
} else {
/* Make sure there's at least one thread that's set to run */
- for (unsigned i = 0; i < ARRAY_SIZE(nir->info.cs.workgroup_size); i++) {
- if (nir->info.cs.workgroup_size[i] == 0)
- nir->info.cs.workgroup_size[i] = 1;
+ for (unsigned i = 0; i < ARRAY_SIZE(nir->info.workgroup_size); i++) {
+ if (nir->info.workgroup_size[i] == 0)
+ nir->info.workgroup_size[i] = 1;
}
}
diff --git a/src/microsoft/clc/clc_nir.c b/src/microsoft/clc/clc_nir.c
index fddff035c71..7d37e2a72c7 100644
--- a/src/microsoft/clc/clc_nir.c
+++ b/src/microsoft/clc/clc_nir.c
@@ -73,9 +73,9 @@ lower_load_local_group_size(nir_builder *b, nir_intrinsic_instr *intr)
b->cursor = nir_after_instr(&intr->instr);
nir_const_value v[3] = {
- nir_const_value_for_int(b->shader->info.cs.workgroup_size[0], 32),
- nir_const_value_for_int(b->shader->info.cs.workgroup_size[1], 32),
- nir_const_value_for_int(b->shader->info.cs.workgroup_size[2], 32)
+ nir_const_value_for_int(b->shader->info.workgroup_size[0], 32),
+ nir_const_value_for_int(b->shader->info.workgroup_size[1], 32),
+ nir_const_value_for_int(b->shader->info.workgroup_size[2], 32)
};
nir_ssa_def *size = nir_build_imm(b, 3, 32, v);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, size);
diff --git a/src/microsoft/compiler/nir_to_dxil.c b/src/microsoft/compiler/nir_to_dxil.c
index c25d3fc9ad2..1d6f64b63ac 100644
--- a/src/microsoft/compiler/nir_to_dxil.c
+++ b/src/microsoft/compiler/nir_to_dxil.c
@@ -1162,9 +1162,9 @@ static const struct dxil_mdnode *
emit_threads(struct ntd_context *ctx)
{
const nir_shader *s = ctx->shader;
- const struct dxil_mdnode *threads_x = dxil_get_metadata_int32(&ctx->mod, MAX2(s->info.cs.workgroup_size[0], 1));
- const struct dxil_mdnode *threads_y = dxil_get_metadata_int32(&ctx->mod, MAX2(s->info.cs.workgroup_size[1], 1));
- const struct dxil_mdnode *threads_z = dxil_get_metadata_int32(&ctx->mod, MAX2(s->info.cs.workgroup_size[2], 1));
+ const struct dxil_mdnode *threads_x = dxil_get_metadata_int32(&ctx->mod, MAX2(s->info.workgroup_size[0], 1));
+ const struct dxil_mdnode *threads_y = dxil_get_metadata_int32(&ctx->mod, MAX2(s->info.workgroup_size[1], 1));
+ const struct dxil_mdnode *threads_z = dxil_get_metadata_int32(&ctx->mod, MAX2(s->info.workgroup_size[2], 1));
if (!threads_x || !threads_y || !threads_z)
return false;
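
Editor's note (not part of the patch): for readers following the rename, here is a minimal sketch of how a driver-internal compute shader sets the relocated fields after this change, modeled on the si_shaderlib_nir.c hunks above. The 8x8x1 block size, the "example_cs" name, and the helper function are illustrative assumptions, not taken from the commit; the snippet only builds inside the Mesa tree.

/* Sketch only: fixed workgroup size now lives directly in shader_info,
 * not in the cs-specific sub-struct. Mirrors the nir_builder pattern
 * used by si_create_dcc_retile_cs() in the diff above. */
#include "nir_builder.h"

static nir_shader *
build_example_cs(const nir_shader_compiler_options *options)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
                                                  "example_cs");

   /* Fixed (compile-time) workgroup size: info.workgroup_size[], formerly
    * info.cs.workgroup_size[]. */
   b.shader->info.workgroup_size[0] = 8;
   b.shader->info.workgroup_size[1] = 8;
   b.shader->info.workgroup_size[2] = 1;

   /* A kernel with a runtime-chosen size (e.g. CL without
    * reqd_work_group_size) would instead set the flag, formerly
    * info.cs.workgroup_size_variable:
    *
    *    b.shader->info.workgroup_size_variable = true;
    */

   return b.shader;
}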