author     Jason Ekstrand <jason.ekstrand@intel.com>   2019-01-10 01:47:14 -0600
committer  Jason Ekstrand <jason@jlekstrand.net>       2019-01-10 20:34:00 +0000
commit     8ea8727a87b7fe0ee89aa8fcb583b126b57ed3f9 (patch)
tree       3260965c6a05fa2c68e476a41cb5c65ef548c457
parent     031e94dc72bda818e440bb66a8caf52e3d669748 (diff)
anv/pipeline: Constant fold after apply_pipeline_layout
Thanks to the new NIR load_descriptor intrinsic added by the UBO/SSBO lowering series, we weren't getting UBO pushing because the UBO range detection pass couldn't see the constants it needed. This fixes that problem with a quick round of constant folding.

Because we're folding, we no longer need to go out of our way to generate constants when we lower the vulkan_resource_index intrinsic, which makes that lowering a bit simpler.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
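As a rough illustration of why the folding round helps, here is a hedged, self-contained C sketch (not the driver's code; the umin() helper and the concrete values are made up for the example). With a constant array index, the lowered chain is just a umin followed by an add of immediates, and folding it leaves a single literal that the UBO range detection pass can see:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-in for NIR's unsigned-minimum ALU op. */
static uint32_t
umin(uint32_t a, uint32_t b)
{
   return a < b ? a : b;
}

int
main(void)
{
   /* Illustrative values standing in for shader and layout state. */
   const uint32_t array_index   = 3;  /* constant index in the shader   */
   const uint32_t array_size    = 8;  /* from the descriptor set layout */
   const uint32_t surface_index = 4;  /* first surface of the binding   */

   /* The lowering emits umin(array_index, array_size - 1) followed by an
    * iadd_imm of surface_index.  With every operand constant, a
    * constant-folding pass can evaluate the chain once and replace it
    * with the resulting literal. */
   uint32_t folded = umin(array_index, array_size - 1) + surface_index;

   printf("block index folds to the literal %u\n", (unsigned)folded);
   assert(folded == 7);
   return 0;
}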
-rw-r--r--  src/intel/vulkan/anv_nir_apply_pipeline_layout.c  16
-rw-r--r--  src/intel/vulkan/anv_pipeline.c                     1
2 files changed, 5 insertions(+), 12 deletions(-)
diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
index a0fd226b0a0..b3daf702bc0 100644
--- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
@@ -144,19 +144,11 @@ lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
    uint32_t array_size =
       state->layout->set[set].layout->binding[binding].array_size;
 
-   nir_ssa_def *block_index;
-   if (nir_src_is_const(intrin->src[0])) {
-      unsigned array_index = nir_src_as_uint(intrin->src[0]);
-      array_index = MIN2(array_index, array_size - 1);
-      block_index = nir_imm_int(b, surface_index + array_index);
-   } else {
-      block_index = nir_ssa_for_src(b, intrin->src[0], 1);
-
-      if (state->add_bounds_checks)
-         block_index = nir_umin(b, block_index, nir_imm_int(b, array_size - 1));
+   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
+   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
+      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
 
-      block_index = nir_iadd(b, nir_imm_int(b, surface_index), block_index);
-   }
+   nir_ssa_def *block_index = nir_iadd_imm(b, array_index, surface_index);
 
    assert(intrin->dest.is_ssa);
    nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
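For reference, a small hedged C model of this hunk's before/after behaviour (plain C rather than NIR builder calls; MIN2 is redefined locally and the old_lowering/new_lowering helpers are invented for the comparison). It checks that, for every combination of constant/non-constant index and bounds checking, the clamp-then-add form computes the same block index as the old two-branch code, which is why the rewrite only needs the later constant-folding pass to recover an immediate:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

/* Model of the removed code: constant indices are clamped with MIN2 and
 * baked into an immediate up front; non-constant indices are clamped
 * only when bounds checks are enabled, then added to surface_index. */
static uint32_t
old_lowering(uint32_t index, bool is_const, bool add_bounds_checks,
             uint32_t array_size, uint32_t surface_index)
{
   if (is_const) {
      uint32_t array_index = MIN2(index, array_size - 1);
      return surface_index + array_index;
   } else {
      uint32_t block_index = index;
      if (add_bounds_checks)
         block_index = MIN2(block_index, array_size - 1);
      return surface_index + block_index;
   }
}

/* Model of the added code: clamp whenever the index is constant or
 * bounds checks are on, then add surface_index.  For a constant index
 * the whole chain is left for nir_opt_constant_folding to collapse. */
static uint32_t
new_lowering(uint32_t index, bool is_const, bool add_bounds_checks,
             uint32_t array_size, uint32_t surface_index)
{
   uint32_t array_index = index;
   if (is_const || add_bounds_checks)
      array_index = MIN2(array_index, array_size - 1);
   return array_index + surface_index;
}

int
main(void)
{
   /* Illustrative sizes; the driver reads these from the pipeline layout. */
   const uint32_t array_size = 8, surface_index = 4;

   for (int is_const = 0; is_const <= 1; is_const++)
      for (int bounds = 0; bounds <= 1; bounds++)
         for (uint32_t idx = 0; idx < 2 * array_size; idx++)
            assert(old_lowering(idx, is_const, bounds,
                                array_size, surface_index) ==
                   new_lowering(idx, is_const, bounds,
                                array_size, surface_index));
   return 0;
}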
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index d1efaaf060b..b99981d7a5c 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -544,6 +544,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                                    pipeline->device->robust_buffer_access,
                                    layout, nir, prog_data,
                                    &stage->bind_map);
+      NIR_PASS_V(nir, nir_opt_constant_folding);
    }
 
    if (nir->info.stage != MESA_SHADER_COMPUTE)