author  Alejandro Piñeiro <apinheiro@igalia.com>  2021-04-16 01:06:34 +0200
committer  Marge Bot <eric+marge@anholt.net>  2021-04-19 23:10:35 +0000
commit  f5133f6bce1b0fb2ad967c4d9e28a8378971859d (patch)
tree  c3d55308679dec1187155d59152fee923224623a
parent  48d31a6280c4de07279435606a5c0524c1787cad (diff)
v3dv/pipeline: track descriptor maps per stage, not per pipeline
One of the conclusions of our recent clean-up of the limits was that the pipeline limits need to be the per-stage limits multiplied by the number of stages. But until now we only had one set of descriptor maps for the full pipeline. That would work if we could set the same limit per pipeline as per stage, but that is not the case. If, for example, the fragment shader uses V3D_MAX_TEXTURE_SAMPLERS textures, and the vertex shader, with a different descriptor set, uses one more texture, we would get an index greater than V3D_MAX_TEXTURE_SAMPLERS. That index is asserted as an error on the Vulkan backend and, fwiw, it would also be asserted on the compiler. With this commit we track and allocate a descriptor map per stage, although we reuse the vertex shader descriptor maps for the vertex bin.

Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10272>
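To illustrate the change, here is a minimal C sketch of the per-stage arrangement, assuming a simplified stage enum and map type (the real struct definitions are in the v3dv_private.h hunk below; the lookup helper mirrors the new pipeline_get_descriptor_map in the v3dv_pipeline.c hunk):

#include <assert.h>

/* Illustrative stage enum; the real one lives in the broadcom backend. */
enum broadcom_shader_stage {
   BROADCOM_SHADER_VERTEX,
   BROADCOM_SHADER_VERTEX_BIN,
   BROADCOM_SHADER_FRAGMENT,
   BROADCOM_SHADER_COMPUTE,
   BROADCOM_SHADER_STAGES,
};

struct v3dv_descriptor_map { int num_desc; /* ... */ };

/* One set of maps per stage (the new v3dv_descriptor_maps struct). */
struct v3dv_descriptor_maps {
   struct v3dv_descriptor_map ubo_map;
   struct v3dv_descriptor_map ssbo_map;
   struct v3dv_descriptor_map sampler_map;
   struct v3dv_descriptor_map texture_map;
};

struct v3dv_pipeline_shared_data {
   /* Before this patch: a single ubo/ssbo/sampler/texture map for the
    * whole pipeline. After: one pointer per stage, with the vertex bin
    * entry aliasing the vertex entry.
    */
   struct v3dv_descriptor_maps *maps[BROADCOM_SHADER_STAGES];
};

/* Each stage now indexes only its own texture map, so a fragment shader
 * using V3D_MAX_TEXTURE_SAMPLERS textures can no longer push another
 * stage's index past the per-stage limit.
 */
static struct v3dv_descriptor_map *
texture_map_for_stage(struct v3dv_pipeline_shared_data *sd,
                      enum broadcom_shader_stage stage)
{
   assert(sd->maps[stage]);
   return &sd->maps[stage]->texture_map;
}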
-rw-r--r--  src/broadcom/vulkan/v3dv_pipeline.c        140
-rw-r--r--  src/broadcom/vulkan/v3dv_pipeline_cache.c   92
-rw-r--r--  src/broadcom/vulkan/v3dv_private.h          13
-rw-r--r--  src/broadcom/vulkan/v3dv_uniforms.c         27
4 files changed, 204 insertions, 68 deletions
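The pipeline cache serialization changes accordingly: instead of four fixed per-pipeline maps, the blob now stores a count followed by (stage, maps) pairs, skipping the vertex bin alias. Below is a sketch of the resulting blob layout, reconstructed from the v3dv_pipeline_cache.c hunks further down; it is a reading aid under that assumption, not an authoritative format description:

/* Cache entry blob layout after this patch (see
 * v3dv_pipeline_shared_data_write_to_blob):
 *
 *   byte[20]  sha1_key
 *   u8        descriptor_maps_count   (vertex bin alias not written)
 *   count x {
 *     u8      stage
 *     bytes   struct v3dv_descriptor_maps (raw copy)
 *   }
 *   u8        variant_count
 *   ...       per-variant data and the total assembly
 */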
diff --git a/src/broadcom/vulkan/v3dv_pipeline.c b/src/broadcom/vulkan/v3dv_pipeline.c
index e7b42e5425d..97bbb0a9afa 100644
--- a/src/broadcom/vulkan/v3dv_pipeline.c
+++ b/src/broadcom/vulkan/v3dv_pipeline.c
@@ -568,11 +568,46 @@ lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
instr->intrinsic = nir_intrinsic_load_uniform;
}
+static struct v3dv_descriptor_map*
+pipeline_get_descriptor_map(struct v3dv_pipeline *pipeline,
+ VkDescriptorType desc_type,
+ gl_shader_stage gl_stage,
+ bool is_sampler)
+{
+ broadcom_shader_stage broadcom_stage =
+ gl_shader_stage_to_broadcom(gl_stage);
+
+ assert(pipeline->shared_data &&
+ pipeline->shared_data->maps[broadcom_stage]);
+
+ switch(desc_type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ return &pipeline->shared_data->maps[broadcom_stage]->sampler_map;
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ return &pipeline->shared_data->maps[broadcom_stage]->texture_map;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return is_sampler ?
+ &pipeline->shared_data->maps[broadcom_stage]->sampler_map :
+ &pipeline->shared_data->maps[broadcom_stage]->texture_map;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ return &pipeline->shared_data->maps[broadcom_stage]->ubo_map;
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ return &pipeline->shared_data->maps[broadcom_stage]->ssbo_map;
+ default:
+ unreachable("Descriptor type unknown or not having a descriptor map");
+ }
+}
+
/* Gathers info from the intrinsic (set and binding) and then lowers it so
* it can be used by the v3d_compiler */
static void
lower_vulkan_resource_index(nir_builder *b,
nir_intrinsic_instr *instr,
+ nir_shader *shader,
struct v3dv_pipeline *pipeline,
const struct v3dv_pipeline_layout *layout)
{
@@ -586,13 +621,13 @@ lower_vulkan_resource_index(nir_builder *b,
struct v3dv_descriptor_set_binding_layout *binding_layout =
&set_layout->binding[binding];
unsigned index = 0;
+ const VkDescriptorType desc_type = nir_intrinsic_desc_type(instr);
- switch (nir_intrinsic_desc_type(instr)) {
+ switch (desc_type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
struct v3dv_descriptor_map *descriptor_map =
- nir_intrinsic_desc_type(instr) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ?
- &pipeline->shared_data->ubo_map : &pipeline->shared_data->ssbo_map;
+ pipeline_get_descriptor_map(pipeline, desc_type, shader->info.stage, false);
if (!const_val)
unreachable("non-constant vulkan_resource_index array index");
@@ -602,7 +637,7 @@ lower_vulkan_resource_index(nir_builder *b,
binding_layout->array_size,
32 /* return_size: doesn't really apply for this case */);
- if (nir_intrinsic_desc_type(instr) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+ if (desc_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
/* skip index 0 which is used for push constants */
index++;
}
@@ -630,6 +665,7 @@ lower_vulkan_resource_index(nir_builder *b,
*/
static uint8_t
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
+ nir_shader *shader,
struct v3dv_pipeline *pipeline,
const struct v3dv_pipeline_layout *layout)
{
@@ -707,9 +743,9 @@ lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
uint8_t return_size = relaxed_precision || instr->is_shadow ? 16 : 32;
- struct v3dv_descriptor_map *map = is_sampler ?
- &pipeline->shared_data->sampler_map :
- &pipeline->shared_data->texture_map;
+ struct v3dv_descriptor_map *map =
+ pipeline_get_descriptor_map(pipeline, binding_layout->type,
+ shader->info.stage, is_sampler);
int desc_index =
descriptor_map_add(map,
deref->var->data.descriptor_set,
@@ -728,6 +764,7 @@ lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr,
+ nir_shader *shader,
struct v3dv_pipeline *pipeline,
const struct v3dv_pipeline_layout *layout)
{
@@ -737,13 +774,14 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
if (texture_idx >= 0)
- return_size = lower_tex_src_to_offset(b, instr, texture_idx, pipeline, layout);
+ return_size = lower_tex_src_to_offset(b, instr, texture_idx, shader,
+ pipeline, layout);
int sampler_idx =
nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
if (sampler_idx >= 0)
- lower_tex_src_to_offset(b, instr, sampler_idx, pipeline, layout);
+ lower_tex_src_to_offset(b, instr, sampler_idx, shader, pipeline, layout);
if (texture_idx < 0 && sampler_idx < 0)
return false;
@@ -763,6 +801,7 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
static void
lower_image_deref(nir_builder *b,
nir_intrinsic_instr *instr,
+ nir_shader *shader,
struct v3dv_pipeline *pipeline,
const struct v3dv_pipeline_layout *layout)
{
@@ -812,8 +851,12 @@ lower_image_deref(nir_builder *b,
assert(binding_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
binding_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
+ struct v3dv_descriptor_map *map =
+ pipeline_get_descriptor_map(pipeline, binding_layout->type,
+ shader->info.stage, false);
+
int desc_index =
- descriptor_map_add(&pipeline->shared_data->texture_map,
+ descriptor_map_add(map,
deref->var->data.descriptor_set,
deref->var->data.binding,
array_index,
@@ -833,6 +876,7 @@ lower_image_deref(nir_builder *b,
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
+ nir_shader *shader,
struct v3dv_pipeline *pipeline,
const struct v3dv_pipeline_layout *layout)
{
@@ -851,7 +895,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
return true;
case nir_intrinsic_vulkan_resource_index:
- lower_vulkan_resource_index(b, instr, pipeline, layout);
+ lower_vulkan_resource_index(b, instr, shader, pipeline, layout);
return true;
case nir_intrinsic_load_vulkan_descriptor: {
@@ -879,7 +923,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
case nir_intrinsic_image_deref_atomic_comp_swap:
case nir_intrinsic_image_deref_size:
case nir_intrinsic_image_deref_samples:
- lower_image_deref(b, instr, pipeline, layout);
+ lower_image_deref(b, instr, shader, pipeline, layout);
return true;
default:
@@ -889,6 +933,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
static bool
lower_impl(nir_function_impl *impl,
+ nir_shader *shader,
struct v3dv_pipeline *pipeline,
const struct v3dv_pipeline_layout *layout)
{
@@ -902,11 +947,12 @@ lower_impl(nir_function_impl *impl,
switch (instr->type) {
case nir_instr_type_tex:
progress |=
- lower_sampler(&b, nir_instr_as_tex(instr), pipeline, layout);
+ lower_sampler(&b, nir_instr_as_tex(instr), shader, pipeline, layout);
break;
case nir_instr_type_intrinsic:
progress |=
- lower_intrinsic(&b, nir_instr_as_intrinsic(instr), pipeline, layout);
+ lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader,
+ pipeline, layout);
break;
default:
break;
@@ -926,7 +972,7 @@ lower_pipeline_layout_info(nir_shader *shader,
nir_foreach_function(function, shader) {
if (function->impl)
- progress |= lower_impl(function->impl, pipeline, layout);
+ progress |= lower_impl(function->impl, shader, pipeline, layout);
}
return progress;
@@ -983,13 +1029,16 @@ pipeline_populate_v3d_key(struct v3d_key *key,
uint32_t ucp_enables,
bool robust_buffer_access)
{
+ assert(p_stage->pipeline->shared_data &&
+ p_stage->pipeline->shared_data->maps[p_stage->stage]);
+
/* The following values are default values used at pipeline create time.
* We use 32 bit there as the default return size.
*/
struct v3dv_descriptor_map *sampler_map =
- &p_stage->pipeline->shared_data->sampler_map;
+ &p_stage->pipeline->shared_data->maps[p_stage->stage]->sampler_map;
struct v3dv_descriptor_map *texture_map =
- &p_stage->pipeline->shared_data->texture_map;
+ &p_stage->pipeline->shared_data->maps[p_stage->stage]->texture_map;
key->num_tex_used = texture_map->num_desc;
assert(key->num_tex_used <= V3D_MAX_TEXTURE_SAMPLERS);
@@ -1596,6 +1645,9 @@ pipeline_lower_nir(struct v3dv_pipeline *pipeline,
struct v3dv_pipeline_stage *p_stage,
struct v3dv_pipeline_layout *layout)
{
+ assert(pipeline->shared_data &&
+ pipeline->shared_data->maps[p_stage->stage]);
+
nir_shader_gather_info(p_stage->nir, nir_shader_get_entrypoint(p_stage->nir));
/* We add this because we need a valid sampler for nir_lower_tex to do
@@ -1606,12 +1658,12 @@ pipeline_lower_nir(struct v3dv_pipeline *pipeline,
* another for the case we need a 32bit return size.
*/
UNUSED unsigned index =
- descriptor_map_add(&pipeline->shared_data->sampler_map,
+ descriptor_map_add(&pipeline->shared_data->maps[p_stage->stage]->sampler_map,
-1, -1, -1, 0, 16);
assert(index == V3DV_NO_SAMPLER_16BIT_IDX);
index =
- descriptor_map_add(&pipeline->shared_data->sampler_map,
+ descriptor_map_add(&pipeline->shared_data->maps[p_stage->stage]->sampler_map,
-2, -2, -2, 0, 32);
assert(index == V3DV_NO_SAMPLER_32BIT_IDX);
@@ -1860,25 +1912,64 @@ pipeline_populate_compute_key(struct v3dv_pipeline *pipeline,
static struct v3dv_pipeline_shared_data *
v3dv_pipeline_shared_data_new_empty(const unsigned char sha1_key[20],
- struct v3dv_device *device)
+ struct v3dv_device *device,
+ bool is_graphics_pipeline)
{
- size_t size = sizeof(struct v3dv_pipeline_shared_data);
/* We create new_entry using the device alloc. Right now shared_data is
* ref'd and unref'd by both the pipeline and the pipeline cache, so we
* can't ensure that the cache or pipeline alloc will be available on the
* last unref.
*/
struct v3dv_pipeline_shared_data *new_entry =
- vk_zalloc2(&device->vk.alloc, NULL, size, 8,
+ vk_zalloc2(&device->vk.alloc, NULL,
+ sizeof(struct v3dv_pipeline_shared_data), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_entry == NULL)
return NULL;
+ for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
+ /* We don't need a specific descriptor map for vertex_bin; we can share
+ * the vertex shader's.
+ */
+ if (stage == BROADCOM_SHADER_VERTEX_BIN)
+ continue;
+
+ if ((is_graphics_pipeline && stage == BROADCOM_SHADER_COMPUTE) ||
+ (!is_graphics_pipeline && stage != BROADCOM_SHADER_COMPUTE)) {
+ continue;
+ }
+
+ struct v3dv_descriptor_maps *new_maps =
+ vk_zalloc2(&device->vk.alloc, NULL,
+ sizeof(struct v3dv_descriptor_maps), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (new_maps == NULL)
+ goto fail;
+
+ new_entry->maps[stage] = new_maps;
+ }
+
+ new_entry->maps[BROADCOM_SHADER_VERTEX_BIN] =
+ new_entry->maps[BROADCOM_SHADER_VERTEX];
+
new_entry->ref_cnt = 1;
memcpy(new_entry->sha1_key, sha1_key, 20);
return new_entry;
+
+fail:
+ if (new_entry != NULL) {
+ for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
+ if (new_entry->maps[stage] != NULL)
+ vk_free(&device->vk.alloc, new_entry->maps[stage]);
+ }
+ }
+
+ vk_free(&device->vk.alloc, new_entry);
+
+ return NULL;
}
/*
@@ -2004,7 +2095,7 @@ pipeline_compile_graphics(struct v3dv_pipeline *pipeline,
}
pipeline->shared_data =
- v3dv_pipeline_shared_data_new_empty(pipeline_sha1, pipeline->device);
+ v3dv_pipeline_shared_data_new_empty(pipeline_sha1, pipeline->device, true);
/* If not, we try to get the nir shaders (from the SPIR-V shader, or from
* the pipeline cache again) and compile.
*/
@@ -3158,7 +3249,8 @@ pipeline_compile_compute(struct v3dv_pipeline *pipeline,
}
pipeline->shared_data = v3dv_pipeline_shared_data_new_empty(pipeline_sha1,
- pipeline->device);
+ pipeline->device,
+ false);
/* If not found on cache, compile it */
p_stage->nir = pipeline_stage_get_nir(p_stage, pipeline, cache);
diff --git a/src/broadcom/vulkan/v3dv_pipeline_cache.c b/src/broadcom/vulkan/v3dv_pipeline_cache.c
index 7d1d114850e..16de4a2dbfc 100644
--- a/src/broadcom/vulkan/v3dv_pipeline_cache.c
+++ b/src/broadcom/vulkan/v3dv_pipeline_cache.c
@@ -324,6 +324,14 @@ v3dv_pipeline_shared_data_destroy(struct v3dv_device *device,
for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
if (shared_data->variants[stage] != NULL)
v3dv_shader_variant_destroy(device, shared_data->variants[stage]);
+
+ /* We don't free the vertex_bin descriptor maps as we are sharing them
+ * with the vertex shader.
+ */
+ if (shared_data->maps[stage] != NULL &&
+ stage != BROADCOM_SHADER_VERTEX_BIN) {
+ vk_free(&device->vk.alloc, shared_data->maps[stage]);
+ }
}
if (shared_data->assembly_bo)
@@ -335,11 +343,8 @@ v3dv_pipeline_shared_data_destroy(struct v3dv_device *device,
static struct v3dv_pipeline_shared_data *
v3dv_pipeline_shared_data_new(struct v3dv_pipeline_cache *cache,
const unsigned char sha1_key[20],
+ struct v3dv_descriptor_maps **maps,
struct v3dv_shader_variant **variants,
- const struct v3dv_descriptor_map *ubo_map,
- const struct v3dv_descriptor_map *ssbo_map,
- const struct v3dv_descriptor_map *sampler_map,
- const struct v3dv_descriptor_map *texture_map,
const uint64_t *total_assembly,
const uint32_t total_assembly_size)
{
@@ -359,13 +364,10 @@ v3dv_pipeline_shared_data_new(struct v3dv_pipeline_cache *cache,
new_entry->ref_cnt = 1;
memcpy(new_entry->sha1_key, sha1_key, 20);
- memcpy(&new_entry->ubo_map, ubo_map, sizeof(struct v3dv_descriptor_map));
- memcpy(&new_entry->ssbo_map, ssbo_map, sizeof(struct v3dv_descriptor_map));
- memcpy(&new_entry->sampler_map, sampler_map, sizeof(struct v3dv_descriptor_map));
- memcpy(&new_entry->texture_map, texture_map, sizeof(struct v3dv_descriptor_map));
-
- for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++)
+ for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
+ new_entry->maps[stage] = maps[stage];
new_entry->variants[stage] = variants[stage];
+ }
struct v3dv_bo *bo = v3dv_bo_alloc(cache->device, total_assembly_size,
"pipeline shader assembly", true);
@@ -541,17 +543,29 @@ v3dv_pipeline_shared_data_create_from_blob(struct v3dv_pipeline_cache *cache,
{
const unsigned char *sha1_key = blob_read_bytes(blob, 20);
- const struct v3dv_descriptor_map *ubo_map =
- blob_read_bytes(blob, sizeof(struct v3dv_descriptor_map));
- const struct v3dv_descriptor_map *ssbo_map =
- blob_read_bytes(blob, sizeof(struct v3dv_descriptor_map));
- const struct v3dv_descriptor_map *sampler_map =
- blob_read_bytes(blob, sizeof(struct v3dv_descriptor_map));
- const struct v3dv_descriptor_map *texture_map =
- blob_read_bytes(blob, sizeof(struct v3dv_descriptor_map));
+ struct v3dv_descriptor_maps *maps[BROADCOM_SHADER_STAGES] = { 0 };
- if (blob->overrun)
- return NULL;
+ uint8_t descriptor_maps_count = blob_read_uint8(blob);
+ for (uint8_t count = 0; count < descriptor_maps_count; count++) {
+ uint8_t stage = blob_read_uint8(blob);
+
+ const struct v3dv_descriptor_maps *current_maps =
+ blob_read_bytes(blob, sizeof(struct v3dv_descriptor_maps));
+
+ if (blob->overrun)
+ return NULL;
+
+ maps[stage] = vk_zalloc2(&cache->device->vk.alloc, NULL,
+ sizeof(struct v3dv_descriptor_maps), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (maps[stage] == NULL)
+ return NULL;
+
+ memcpy(maps[stage], current_maps, sizeof(struct v3dv_descriptor_maps));
+ if (stage == BROADCOM_SHADER_VERTEX)
+ maps[BROADCOM_SHADER_VERTEX_BIN] = maps[stage];
+ }
uint8_t variant_count = blob_read_uint8(blob);
@@ -571,8 +585,7 @@ v3dv_pipeline_shared_data_create_from_blob(struct v3dv_pipeline_cache *cache,
if (blob->overrun)
return NULL;
- return v3dv_pipeline_shared_data_new(cache, sha1_key, variants,
- ubo_map, ssbo_map, sampler_map, texture_map,
+ return v3dv_pipeline_shared_data_new(cache, sha1_key, maps, variants,
total_assembly, total_assembly_size);
}
@@ -820,14 +833,33 @@ v3dv_pipeline_shared_data_write_to_blob(const struct v3dv_pipeline_shared_data *
{
blob_write_bytes(blob, cache_entry->sha1_key, 20);
- blob_write_bytes(blob, &cache_entry->ubo_map,
- sizeof(struct v3dv_descriptor_map));
- blob_write_bytes(blob, &cache_entry->ssbo_map,
- sizeof(struct v3dv_descriptor_map));
- blob_write_bytes(blob, &cache_entry->sampler_map,
- sizeof(struct v3dv_descriptor_map));
- blob_write_bytes(blob, &cache_entry->texture_map,
- sizeof(struct v3dv_descriptor_map));
+ uint8_t descriptor_maps_count = 0;
+ for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
+ if (stage == BROADCOM_SHADER_VERTEX_BIN)
+ continue;
+ if (cache_entry->maps[stage] == NULL)
+ continue;
+ descriptor_maps_count++;
+ }
+
+ /* Right now we only support a compute pipeline, or a graphics pipeline
+ * with vertex, vertex bin, and fragment shaders, where the vertex and
+ * vertex bin descriptor maps are shared.
+ */
+ assert(descriptor_maps_count == 2 ||
+ (descriptor_maps_count == 1 && cache_entry->variants[BROADCOM_SHADER_COMPUTE]));
+ blob_write_uint8(blob, descriptor_maps_count);
+
+ for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
+ if (cache_entry->maps[stage] == NULL)
+ continue;
+ if (stage == BROADCOM_SHADER_VERTEX_BIN)
+ continue;
+
+ blob_write_uint8(blob, stage);
+ blob_write_bytes(blob, cache_entry->maps[stage],
+ sizeof(struct v3dv_descriptor_maps));
+ }
uint8_t variant_count = 0;
for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
diff --git a/src/broadcom/vulkan/v3dv_private.h b/src/broadcom/vulkan/v3dv_private.h
index 78dafc92923..67bde1d774d 100644
--- a/src/broadcom/vulkan/v3dv_private.h
+++ b/src/broadcom/vulkan/v3dv_private.h
@@ -1661,6 +1661,13 @@ v3dv_pipeline_combined_index_key_unpack(uint32_t combined_index_key,
*sampler_index = sampler;
}
+struct v3dv_descriptor_maps {
+ struct v3dv_descriptor_map ubo_map;
+ struct v3dv_descriptor_map ssbo_map;
+ struct v3dv_descriptor_map sampler_map;
+ struct v3dv_descriptor_map texture_map;
+};
+
/* The structure represents data shared between different objects, like the
* pipeline and the pipeline cache, so we ref count it to know when it should
* be freed.
@@ -1670,11 +1677,7 @@ struct v3dv_pipeline_shared_data {
unsigned char sha1_key[20];
- struct v3dv_descriptor_map ubo_map;
- struct v3dv_descriptor_map ssbo_map;
- struct v3dv_descriptor_map sampler_map;
- struct v3dv_descriptor_map texture_map;
-
+ struct v3dv_descriptor_maps *maps[BROADCOM_SHADER_STAGES];
struct v3dv_shader_variant *variants[BROADCOM_SHADER_STAGES];
struct v3dv_bo *assembly_bo;
diff --git a/src/broadcom/vulkan/v3dv_uniforms.c b/src/broadcom/vulkan/v3dv_uniforms.c
index adbaaa7861d..3fc6e1bfb7f 100644
--- a/src/broadcom/vulkan/v3dv_uniforms.c
+++ b/src/broadcom/vulkan/v3dv_uniforms.c
@@ -125,6 +125,7 @@ check_push_constants_ubo(struct v3dv_cmd_buffer *cmd_buffer,
static void
write_tmu_p0(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline,
+ broadcom_shader_stage stage,
struct v3dv_cl_out **uniforms,
uint32_t data,
struct texture_bo_list *tex_bos,
@@ -138,7 +139,7 @@ write_tmu_p0(struct v3dv_cmd_buffer *cmd_buffer,
/* We need to ensure that the texture bo is added to the job */
struct v3dv_bo *texture_bo =
v3dv_descriptor_map_get_texture_bo(descriptor_state,
- &pipeline->shared_data->texture_map,
+ &pipeline->shared_data->maps[stage]->texture_map,
pipeline->layout, texture_idx);
assert(texture_bo);
assert(texture_idx < V3D_MAX_TEXTURE_SAMPLERS);
@@ -146,7 +147,7 @@ write_tmu_p0(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_cl_reloc state_reloc =
v3dv_descriptor_map_get_texture_shader_state(descriptor_state,
- &pipeline->shared_data->texture_map,
+ &pipeline->shared_data->maps[stage]->texture_map,
pipeline->layout,
texture_idx);
@@ -168,6 +169,7 @@ write_tmu_p0(struct v3dv_cmd_buffer *cmd_buffer,
static void
write_tmu_p1(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline,
+ broadcom_shader_stage stage,
struct v3dv_cl_out **uniforms,
uint32_t data,
struct state_bo_list *state_bos)
@@ -181,12 +183,12 @@ write_tmu_p1(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_cl_reloc sampler_state_reloc =
v3dv_descriptor_map_get_sampler_state(descriptor_state,
- &pipeline->shared_data->sampler_map,
+ &pipeline->shared_data->maps[stage]->sampler_map,
pipeline->layout, sampler_idx);
const struct v3dv_sampler *sampler =
v3dv_descriptor_map_get_sampler(descriptor_state,
- &pipeline->shared_data->sampler_map,
+ &pipeline->shared_data->maps[stage]->sampler_map,
pipeline->layout, sampler_idx);
assert(sampler);
@@ -217,6 +219,7 @@ write_tmu_p1(struct v3dv_cmd_buffer *cmd_buffer,
static void
write_ubo_ssbo_uniforms(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline,
+ broadcom_shader_stage stage,
struct v3dv_cl_out **uniforms,
enum quniform_contents content,
uint32_t data,
@@ -227,7 +230,8 @@ write_ubo_ssbo_uniforms(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_descriptor_map *map =
content == QUNIFORM_UBO_ADDR || content == QUNIFORM_GET_UBO_SIZE ?
- &pipeline->shared_data->ubo_map : &pipeline->shared_data->ssbo_map;
+ &pipeline->shared_data->maps[stage]->ubo_map :
+ &pipeline->shared_data->maps[stage]->ssbo_map;
uint32_t offset =
content == QUNIFORM_UBO_ADDR ?
@@ -344,6 +348,7 @@ get_texture_size_from_buffer_view(struct v3dv_buffer_view *buffer_view,
static uint32_t
get_texture_size(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline,
+ broadcom_shader_stage stage,
enum quniform_contents contents,
uint32_t data)
{
@@ -353,7 +358,7 @@ get_texture_size(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_descriptor *descriptor =
v3dv_descriptor_map_get_descriptor(descriptor_state,
- &pipeline->shared_data->texture_map,
+ &pipeline->shared_data->maps[stage]->texture_map,
pipeline->layout,
texture_idx, NULL);
@@ -438,17 +443,20 @@ v3dv_write_uniforms_wg_offsets(struct v3dv_cmd_buffer *cmd_buffer,
case QUNIFORM_UBO_ADDR:
case QUNIFORM_GET_SSBO_SIZE:
case QUNIFORM_GET_UBO_SIZE:
- write_ubo_ssbo_uniforms(cmd_buffer, pipeline, &uniforms,
+ write_ubo_ssbo_uniforms(cmd_buffer, pipeline, variant->stage, &uniforms,
uinfo->contents[i], data, &buffer_bos);
+
break;
case QUNIFORM_IMAGE_TMU_CONFIG_P0:
case QUNIFORM_TMU_CONFIG_P0:
- write_tmu_p0(cmd_buffer, pipeline, &uniforms, data, &tex_bos, &state_bos);
+ write_tmu_p0(cmd_buffer, pipeline, variant->stage,
+ &uniforms, data, &tex_bos, &state_bos);
break;
case QUNIFORM_TMU_CONFIG_P1:
- write_tmu_p1(cmd_buffer, pipeline, &uniforms, data, &state_bos);
+ write_tmu_p1(cmd_buffer, pipeline, variant->stage,
+ &uniforms, data, &state_bos);
break;
case QUNIFORM_IMAGE_WIDTH:
@@ -464,6 +472,7 @@ v3dv_write_uniforms_wg_offsets(struct v3dv_cmd_buffer *cmd_buffer,
cl_aligned_u32(&uniforms,
get_texture_size(cmd_buffer,
pipeline,
+ variant->stage,
uinfo->contents[i],
data));
break;