author     Jason Ekstrand <jason.ekstrand@intel.com>   2018-10-12 17:54:41 -0500
committer  Jason Ekstrand <jason.ekstrand@intel.com>   2019-01-10 19:15:27 -0600
commit     eb4b1477dcd37270291bfae94495192e1a760a69 (patch)
tree       59a1afa68d70dcf4f25c94498dd27a8b1fec87be
parent     f6aa9f71851614b06c11221f01e78493551ed68b (diff)
anv/pipeline: Cache the pre-lowered NIR
This adds a second level of caching for the pre-lowered NIR, keyed only on the
shader module, entrypoint, and specialization constants. That is enough for
spirv_to_nir as well as our first round of lowering and optimization, so
caching at this level should allow for faster shader recompiles caused by
state changes.

The NIR cache does not get serialized to disk via either the VkPipelineCache
serialization mechanism or the transparent on-disk cache. We could do so, but
falling back to SPIR-V for the odd cache miss is usually not that expensive,
especially since it only has to happen once for several misses, and skipping
serialization keeps the cache simpler.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
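
The key described above only needs to cover the inputs that spirv_to_nir and
the first round of lowering actually see. As a rough illustration (not the
driver's actual helper), a per-stage shader_sha1 could be built as in the
sketch below, assuming Mesa's _mesa_sha1_* utilities and an anv_shader_module
that already carries a SHA-1 of its SPIR-V; the function name and field layout
here are assumptions:

/* Hypothetical sketch: derive the NIR-level cache key from only the module,
 * entrypoint, stage, and specialization constants.  hash_stage_shader() and
 * the anv_shader_module layout are illustrative assumptions. */
static void
hash_stage_shader(const struct anv_shader_module *module,
                  const char *entrypoint,
                  gl_shader_stage stage,
                  const VkSpecializationInfo *spec_info,
                  unsigned char sha1_out[20])
{
   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);

   /* The module's own SHA-1 stands in for the full SPIR-V binary. */
   _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
   _mesa_sha1_update(&ctx, &stage, sizeof(stage));

   if (spec_info) {
      _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount *
                        sizeof(*spec_info->pMapEntries));
      _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
   }

   _mesa_sha1_final(&ctx, sha1_out);
}

Because state-dependent inputs are deliberately left out of such a hash, the
same cached NIR can serve every recompile triggered by state changes.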
-rw-r--r--   src/intel/vulkan/anv_pipeline.c   49

1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 5dfab4fa716..899160746d4 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -491,6 +491,41 @@ anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
    _mesa_sha1_final(&ctx, sha1_out);
 }
+static nir_shader *
+anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
+                           struct anv_pipeline_cache *cache,
+                           void *mem_ctx,
+                           struct anv_pipeline_stage *stage)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   const nir_shader_compiler_options *nir_options =
+      compiler->glsl_compiler_options[stage->stage].NirOptions;
+   nir_shader *nir;
+
+   nir = anv_device_search_for_nir(pipeline->device, cache,
+                                   nir_options,
+                                   stage->shader_sha1,
+                                   mem_ctx);
+   if (nir) {
+      assert(nir->info.stage == stage->stage);
+      return nir;
+   }
+
+   nir = anv_shader_compile_to_nir(pipeline->device,
+                                   mem_ctx,
+                                   stage->module,
+                                   stage->entrypoint,
+                                   stage->stage,
+                                   stage->spec_info);
+   if (nir) {
+      anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
+      return nir;
+   }
+
+   return NULL;
+}
+
 static void
 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                        void *mem_ctx,
@@ -1001,11 +1036,9 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
          .sampler_to_descriptor = stages[s].sampler_to_descriptor
       };
 
-      stages[s].nir = anv_shader_compile_to_nir(pipeline->device, pipeline_ctx,
-                                                stages[s].module,
-                                                stages[s].entrypoint,
-                                                stages[s].stage,
-                                                stages[s].spec_info);
+      stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
+                                                 pipeline_ctx,
+                                                 &stages[s]);
       if (stages[s].nir == NULL) {
          result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
          goto fail;
@@ -1174,11 +1207,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
    void *mem_ctx = ralloc_context(NULL);
 
-   stage.nir = anv_shader_compile_to_nir(pipeline->device, mem_ctx,
-                                         stage.module,
-                                         stage.entrypoint,
-                                         stage.stage,
-                                         stage.spec_info);
+   stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
    if (stage.nir == NULL) {
       ralloc_free(mem_ctx);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
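
For context, the two cache helpers called by the new function could be backed
by a plain in-memory hash table hanging off the pipeline cache, which also
matches the note above about never serializing NIR to disk. The sketch below
is an assumption-laden illustration: the serialized_nir struct, the
cache->nir_cache and cache->mutex fields, and the two-argument nir_serialize()
call are guesses about the surrounding code, not the driver's actual
implementation.

/* Illustrative sketch of an in-memory NIR cache keyed by the 20-byte SHA-1.
 * Struct and field names are assumptions, not the real anv code. */
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

static nir_shader *
sketch_search_for_nir(struct anv_pipeline_cache *cache,
                      const nir_shader_compiler_options *nir_options,
                      const unsigned char sha1_key[20],
                      void *mem_ctx)
{
   nir_shader *nir = NULL;

   pthread_mutex_lock(&cache->mutex);
   struct hash_entry *entry =
      _mesa_hash_table_search(cache->nir_cache, sha1_key);
   if (entry) {
      /* The entry holds nir_serialize() output; deserialize a fresh copy
       * into the caller's ralloc context. */
      const struct serialized_nir *snir = entry->data;
      struct blob_reader blob;
      blob_reader_init(&blob, snir->data, snir->size);
      nir = nir_deserialize(mem_ctx, nir_options, &blob);
   }
   pthread_mutex_unlock(&cache->mutex);

   return nir;
}

static void
sketch_upload_nir(struct anv_pipeline_cache *cache,
                  const nir_shader *nir,
                  const unsigned char sha1_key[20])
{
   struct blob blob;
   blob_init(&blob);
   nir_serialize(&blob, nir);   /* assuming the older two-argument form */

   pthread_mutex_lock(&cache->mutex);
   if (!_mesa_hash_table_search(cache->nir_cache, sha1_key)) {
      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, sizeof(snir->sha1_key));
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);
      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);
   }
   pthread_mutex_unlock(&cache->mutex);

   blob_finish(&blob);
}

Keeping only serialized bytes in the table means a hit always hands the caller
a private nir_shader in its own mem_ctx, so cached entries are never mutated
by later lowering passes.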