Diffstat (limited to 'src/virtio/vulkan/vn_descriptor_set.c')
 src/virtio/vulkan/vn_descriptor_set.c | 892 ++++++++++++++++----------------
 1 file changed, 536 insertions(+), 356 deletions(-)
diff --git a/src/virtio/vulkan/vn_descriptor_set.c b/src/virtio/vulkan/vn_descriptor_set.c
index a88e071acf2..bdc8745f95c 100644
--- a/src/virtio/vulkan/vn_descriptor_set.c
+++ b/src/virtio/vulkan/vn_descriptor_set.c
@@ -16,6 +16,74 @@
#include "venus-protocol/vn_protocol_driver_descriptor_update_template.h"
#include "vn_device.h"
+#include "vn_pipeline.h"
+
+void
+vn_descriptor_set_layout_destroy(struct vn_device *dev,
+ struct vn_descriptor_set_layout *layout)
+{
+ VkDevice dev_handle = vn_device_to_handle(dev);
+ VkDescriptorSetLayout layout_handle =
+ vn_descriptor_set_layout_to_handle(layout);
+ const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
+
+ vn_async_vkDestroyDescriptorSetLayout(dev->primary_ring, dev_handle,
+ layout_handle, NULL);
+
+ vn_object_base_fini(&layout->base);
+ vk_free(alloc, layout);
+}
+
+static void
+vn_descriptor_set_destroy(struct vn_device *dev,
+ struct vn_descriptor_set *set,
+ const VkAllocationCallbacks *alloc)
+{
+ list_del(&set->head);
+
+ vn_descriptor_set_layout_unref(dev, set->layout);
+
+ vn_object_base_fini(&set->base);
+ vk_free(alloc, set);
+}
+
+/* Map VkDescriptorType to contiguous enum vn_descriptor_type */
+static enum vn_descriptor_type
+vn_descriptor_type(VkDescriptorType type)
+{
+ switch (type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ return VN_DESCRIPTOR_TYPE_SAMPLER;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return VN_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ return VN_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ return VN_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ return VN_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ return VN_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ return VN_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ return VN_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ return VN_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ return VN_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ return VN_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
+ return VN_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK;
+ case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
+ return VN_DESCRIPTOR_TYPE_MUTABLE_EXT;
+ default:
+ break;
+ }
+
+ unreachable("bad VkDescriptorType");
+}
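
The contiguous enum exists so descriptor types can index fixed-size count arrays and BITSET words on the driver side. A minimal sketch of that use, with mesa's util/bitset.h macros as used later in this patch:

   BITSET_DECLARE(types, VN_NUM_DESCRIPTOR_TYPES);
   BITSET_ZERO(types);
   BITSET_SET(types, vn_descriptor_type(VK_DESCRIPTOR_TYPE_SAMPLER));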
/* descriptor set layout commands */
@@ -28,8 +96,8 @@ vn_GetDescriptorSetLayoutSupport(
struct vn_device *dev = vn_device_from_handle(device);
/* TODO per-device cache */
- vn_call_vkGetDescriptorSetLayoutSupport(dev->instance, device, pCreateInfo,
- pSupport);
+ vn_call_vkGetDescriptorSetLayoutSupport(dev->primary_ring, device,
+ pCreateInfo, pSupport);
}
static void
@@ -46,6 +114,10 @@ vn_descriptor_set_layout_init(
vk_find_struct_const(create_info->pNext,
DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
+ const VkMutableDescriptorTypeCreateInfoEXT *mutable_descriptor_info =
+ vk_find_struct_const(create_info->pNext,
+ MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);
+
/* 14.2.1. Descriptor Set Layout
*
* If bindingCount is zero or if this structure is not included in
@@ -55,11 +127,18 @@ vn_descriptor_set_layout_init(
if (binding_flags && !binding_flags->bindingCount)
binding_flags = NULL;
+ layout->is_push_descriptor =
+ create_info->flags &
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
+
+ layout->refcount = VN_REFCOUNT_INIT(1);
layout->last_binding = last_binding;
for (uint32_t i = 0; i < create_info->bindingCount; i++) {
const VkDescriptorSetLayoutBinding *binding_info =
&create_info->pBindings[i];
+ const enum vn_descriptor_type type =
+ vn_descriptor_type(binding_info->descriptorType);
struct vn_descriptor_set_layout_binding *binding =
&layout->bindings[binding_info->binding];
@@ -84,20 +163,31 @@ vn_descriptor_set_layout_init(
VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT);
}
- binding->type = binding_info->descriptorType;
+ binding->type = type;
binding->count = binding_info->descriptorCount;
- switch (binding_info->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ switch (type) {
+ case VN_DESCRIPTOR_TYPE_SAMPLER:
+ case VN_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
binding->has_immutable_samplers = binding_info->pImmutableSamplers;
break;
+ case VN_DESCRIPTOR_TYPE_MUTABLE_EXT:
+ assert(mutable_descriptor_info->mutableDescriptorTypeListCount &&
+ mutable_descriptor_info->pMutableDescriptorTypeLists[i]
+ .descriptorTypeCount);
+ const VkMutableDescriptorTypeListEXT *list =
+ &mutable_descriptor_info->pMutableDescriptorTypeLists[i];
+ for (uint32_t j = 0; j < list->descriptorTypeCount; j++) {
+ BITSET_SET(binding->mutable_descriptor_types,
+ vn_descriptor_type(list->pDescriptorTypes[j]));
+ }
+ break;
default:
break;
}
}
- vn_async_vkCreateDescriptorSetLayout(dev->instance, dev_handle,
+ vn_async_vkCreateDescriptorSetLayout(dev->primary_ring, dev_handle,
create_info, NULL, &layout_handle);
}
@@ -109,26 +199,20 @@ vn_CreateDescriptorSetLayout(
VkDescriptorSetLayout *pSetLayout)
{
struct vn_device *dev = vn_device_from_handle(device);
- const VkAllocationCallbacks *alloc =
- pAllocator ? pAllocator : &dev->base.base.alloc;
+ /* ignore pAllocator as the layout is reference-counted */
+ const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
+
+ STACK_ARRAY(VkDescriptorSetLayoutBinding, bindings,
+ pCreateInfo->bindingCount);
uint32_t last_binding = 0;
- VkDescriptorSetLayoutBinding *local_bindings = NULL;
VkDescriptorSetLayoutCreateInfo local_create_info;
if (pCreateInfo->bindingCount) {
- /* the encoder does not ignore
- * VkDescriptorSetLayoutBinding::pImmutableSamplers when it should
- */
- const size_t binding_size =
- sizeof(*pCreateInfo->pBindings) * pCreateInfo->bindingCount;
- local_bindings = vk_alloc(alloc, binding_size, VN_DEFAULT_ALIGN,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (!local_bindings)
- return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-
- memcpy(local_bindings, pCreateInfo->pBindings, binding_size);
+ typed_memcpy(bindings, pCreateInfo->pBindings,
+ pCreateInfo->bindingCount);
+
for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
- VkDescriptorSetLayoutBinding *binding = &local_bindings[i];
+ VkDescriptorSetLayoutBinding *binding = &bindings[i];
if (last_binding < binding->binding)
last_binding = binding->binding;
@@ -144,17 +228,18 @@ vn_CreateDescriptorSetLayout(
}
local_create_info = *pCreateInfo;
- local_create_info.pBindings = local_bindings;
+ local_create_info.pBindings = bindings;
pCreateInfo = &local_create_info;
}
const size_t layout_size =
offsetof(struct vn_descriptor_set_layout, bindings[last_binding + 1]);
+ /* allocated with the device scope */
struct vn_descriptor_set_layout *layout =
vk_zalloc(alloc, layout_size, VN_DEFAULT_ALIGN,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!layout) {
- vk_free(alloc, local_bindings);
+ STACK_ARRAY_FINISH(bindings);
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
@@ -163,7 +248,7 @@ vn_CreateDescriptorSetLayout(
vn_descriptor_set_layout_init(dev, pCreateInfo, last_binding, layout);
- vk_free(alloc, local_bindings);
+ STACK_ARRAY_FINISH(bindings);
*pSetLayout = vn_descriptor_set_layout_to_handle(layout);
@@ -178,17 +263,11 @@ vn_DestroyDescriptorSetLayout(VkDevice device,
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_set_layout *layout =
vn_descriptor_set_layout_from_handle(descriptorSetLayout);
- const VkAllocationCallbacks *alloc =
- pAllocator ? pAllocator : &dev->base.base.alloc;
if (!layout)
return;
- vn_async_vkDestroyDescriptorSetLayout(dev->instance, device,
- descriptorSetLayout, NULL);
-
- vn_object_base_fini(&layout->base);
- vk_free(alloc, layout);
+ vn_descriptor_set_layout_unref(dev, layout);
}
/* descriptor pool commands */
@@ -199,44 +278,107 @@ vn_CreateDescriptorPool(VkDevice device,
const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool)
{
+ VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
- struct vn_descriptor_pool *pool =
- vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!pool)
+ const VkDescriptorPoolInlineUniformBlockCreateInfo *iub_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO);
+
+ uint32_t mutable_states_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
+ const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];
+ if (pool_size->type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
+ mutable_states_count++;
+ }
+ struct vn_descriptor_pool *pool;
+ struct vn_descriptor_pool_state_mutable *mutable_states;
+
+ VK_MULTIALLOC(ma);
+ vk_multialloc_add(&ma, &pool, __typeof__(*pool), 1);
+ vk_multialloc_add(&ma, &mutable_states, __typeof__(*mutable_states),
+ mutable_states_count);
+
+ if (!vk_multialloc_zalloc(&ma, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vn_object_base_init(&pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL,
&dev->base);
pool->allocator = *alloc;
+ pool->mutable_states = mutable_states;
+
+ const VkMutableDescriptorTypeCreateInfoEXT *mutable_descriptor_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);
/* Without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, the set
* allocation must not fail due to a fragmented pool per spec. In this
* case, set allocation can be asynchronous with pool resource tracking.
*/
- pool->async_set_allocation = !(
- pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
+ pool->async_set_allocation =
+ !VN_PERF(NO_ASYNC_SET_ALLOC) &&
+ !(pCreateInfo->flags &
+ VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
pool->max.set_count = pCreateInfo->maxSets;
+ if (iub_info)
+ pool->max.iub_binding_count = iub_info->maxInlineUniformBlockBindings;
+
+ uint32_t next_mutable_state = 0;
for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];
+ const enum vn_descriptor_type type =
+ vn_descriptor_type(pool_size->type);
- assert(pool_size->type < VN_NUM_DESCRIPTOR_TYPES);
+ if (type != VN_DESCRIPTOR_TYPE_MUTABLE_EXT) {
+ pool->max.descriptor_counts[type] += pool_size->descriptorCount;
+ continue;
+ }
- pool->max.descriptor_counts[pool_size->type] +=
- pool_size->descriptorCount;
+ struct vn_descriptor_pool_state_mutable *mutable_state = NULL;
+ BITSET_DECLARE(mutable_types, VN_NUM_DESCRIPTOR_TYPES);
+ if (!mutable_descriptor_info ||
+ i >= mutable_descriptor_info->mutableDescriptorTypeListCount) {
+ BITSET_ONES(mutable_types);
+ } else {
+ const VkMutableDescriptorTypeListEXT *list =
+ &mutable_descriptor_info->pMutableDescriptorTypeLists[i];
+
+ for (uint32_t j = 0; j < list->descriptorTypeCount; j++) {
+ BITSET_SET(mutable_types,
+ vn_descriptor_type(list->pDescriptorTypes[j]));
+ }
+ }
+ for (uint32_t j = 0; j < next_mutable_state; j++) {
+ if (BITSET_EQUAL(mutable_types, pool->mutable_states[j].types)) {
+ mutable_state = &pool->mutable_states[j];
+ break;
+ }
+ }
+
+ if (!mutable_state) {
+ /* The application must ensure that partial overlap does not exist in
+ * pPoolSizes, so this entry must have a disjoint set of types.
+ */
+ mutable_state = &pool->mutable_states[next_mutable_state++];
+ BITSET_COPY(mutable_state->types, mutable_types);
+ }
+
+ mutable_state->max += pool_size->descriptorCount;
}
+ pool->mutable_states_count = next_mutable_state;
list_inithead(&pool->descriptor_sets);
VkDescriptorPool pool_handle = vn_descriptor_pool_to_handle(pool);
- vn_async_vkCreateDescriptorPool(dev->instance, device, pCreateInfo, NULL,
- &pool_handle);
+ vn_async_vkCreateDescriptorPool(dev->primary_ring, device, pCreateInfo,
+ NULL, &pool_handle);
+
+ vn_tls_set_async_pipeline_create();
*pDescriptorPool = pool_handle;
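
For context, the mutable-type accounting above consumes an application-side setup along these lines. This is a hedged sketch with hypothetical counts, built only from standard VK_EXT_mutable_descriptor_type structs, not taken from this patch:

   /* one MUTABLE_EXT pool size; its allowed types come from the type list
    * at the same index in pPoolSizes */
   static const VkDescriptorType types[] = {
      VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
      VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
   };
   const VkMutableDescriptorTypeListEXT list = {
      .descriptorTypeCount = 2,
      .pDescriptorTypes = types,
   };
   const VkMutableDescriptorTypeCreateInfoEXT mutable_info = {
      .sType = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT,
      .mutableDescriptorTypeListCount = 1,
      .pMutableDescriptorTypeLists = &list,
   };
   const VkDescriptorPoolSize pool_size = {
      .type = VK_DESCRIPTOR_TYPE_MUTABLE_EXT,
      .descriptorCount = 16,
   };
   const VkDescriptorPoolCreateInfo pool_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
      .pNext = &mutable_info,
      .maxSets = 8,
      .poolSizeCount = 1,
      .pPoolSizes = &pool_size,
   };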
@@ -248,6 +390,7 @@ vn_DestroyDescriptorPool(VkDevice device,
VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator)
{
+ VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(descriptorPool);
@@ -258,61 +401,119 @@ vn_DestroyDescriptorPool(VkDevice device,
alloc = pAllocator ? pAllocator : &pool->allocator;
- /* We must emit vkDestroyDescriptorPool before freeing the sets in
- * pool->descriptor_sets. Otherwise, another thread might reuse their
- * object ids while they still refer to the sets in the renderer.
- */
- vn_async_vkDestroyDescriptorPool(dev->instance, device, descriptorPool,
+ vn_async_vkDestroyDescriptorPool(dev->primary_ring, device, descriptorPool,
NULL);
list_for_each_entry_safe(struct vn_descriptor_set, set,
- &pool->descriptor_sets, head) {
- list_del(&set->head);
-
- vn_object_base_fini(&set->base);
- vk_free(alloc, set);
- }
+ &pool->descriptor_sets, head)
+ vn_descriptor_set_destroy(dev, set, alloc);
vn_object_base_fini(&pool->base);
vk_free(alloc, pool);
}
+static struct vn_descriptor_pool_state_mutable *
+vn_get_mutable_state(const struct vn_descriptor_pool *pool,
+ const struct vn_descriptor_set_layout_binding *binding)
+{
+ for (uint32_t i = 0; i < pool->mutable_states_count; i++) {
+ struct vn_descriptor_pool_state_mutable *mutable_state =
+ &pool->mutable_states[i];
+ BITSET_DECLARE(shared_types, VN_NUM_DESCRIPTOR_TYPES);
+ BITSET_AND(shared_types, mutable_state->types,
+ binding->mutable_descriptor_types);
+
+ /* The application must ensure that partial overlap does not exist in
+ * pPoolSizes, so exactly one entry matches.
+ */
+ if (BITSET_EQUAL(shared_types, binding->mutable_descriptor_types))
+ return mutable_state;
+ }
+ unreachable("bad mutable descriptor binding");
+}
+
+static inline void
+vn_pool_restore_mutable_states(struct vn_descriptor_pool *pool,
+ const struct vn_descriptor_set_layout *layout,
+ uint32_t binding_index,
+ uint32_t descriptor_count)
+{
+ assert(layout->bindings[binding_index].type ==
+ VN_DESCRIPTOR_TYPE_MUTABLE_EXT);
+ assert(descriptor_count);
+ struct vn_descriptor_pool_state_mutable *mutable_state =
+ vn_get_mutable_state(pool, &layout->bindings[binding_index]);
+ assert(mutable_state && mutable_state->used >= descriptor_count);
+ mutable_state->used -= descriptor_count;
+}
+
static bool
vn_descriptor_pool_alloc_descriptors(
struct vn_descriptor_pool *pool,
const struct vn_descriptor_set_layout *layout,
uint32_t last_binding_descriptor_count)
{
- struct vn_descriptor_pool_state recovery;
-
- if (!pool->async_set_allocation)
- return true;
+ assert(pool->async_set_allocation);
if (pool->used.set_count == pool->max.set_count)
return false;
/* backup current pool state to recovery */
- recovery = pool->used;
+ struct vn_descriptor_pool_state recovery = pool->used;
+ pool->used.set_count++;
- ++pool->used.set_count;
-
- for (uint32_t i = 0; i <= layout->last_binding; i++) {
- const VkDescriptorType type = layout->bindings[i].type;
+ uint32_t i = 0;
+ for (; i <= layout->last_binding; i++) {
+ const struct vn_descriptor_set_layout_binding *binding =
+ &layout->bindings[i];
+ const enum vn_descriptor_type type = binding->type;
const uint32_t count = i == layout->last_binding
? last_binding_descriptor_count
- : layout->bindings[i].count;
+ : binding->count;
- pool->used.descriptor_counts[type] += count;
+ /* Skip resource accounting for either of the following:
+ * - reserved binding entry that has a valid type with a zero count
+ * - invalid binding entry from sparse binding indices
+ */
+ if (!count)
+ continue;
- if (pool->used.descriptor_counts[type] >
- pool->max.descriptor_counts[type]) {
- /* restore pool state before this allocation */
- pool->used = recovery;
- return false;
+ if (type == VN_DESCRIPTOR_TYPE_MUTABLE_EXT) {
+ /* A mutable descriptor can be allocated if both of the following hold:
+ * - vn_descriptor_pool_state_mutable::types is a superset
+ * - vn_descriptor_pool_state_mutable::{max - used} is enough
+ */
+ struct vn_descriptor_pool_state_mutable *mutable_state =
+ vn_get_mutable_state(pool, binding);
+ assert(mutable_state);
+ if (mutable_state->used + count > mutable_state->max)
+ goto restore;
+
+ mutable_state->used += count;
+ } else {
+ if (type == VN_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK &&
+ ++pool->used.iub_binding_count > pool->max.iub_binding_count)
+ goto restore;
+
+ pool->used.descriptor_counts[type] += count;
+ if (pool->used.descriptor_counts[type] >
+ pool->max.descriptor_counts[type])
+ goto restore;
}
}
return true;
+
+restore:
+ /* restore pool state before this allocation */
+ pool->used = recovery;
+ for (uint32_t j = 0; j < i; j++) {
+ /* the mutable state at binding i has not been updated yet */
+ const uint32_t count = layout->bindings[j].count;
+ if (count && layout->bindings[j].type == VN_DESCRIPTOR_TYPE_MUTABLE_EXT)
+ vn_pool_restore_mutable_states(pool, layout, j, count);
+ }
+ return false;
}
static void
@@ -321,27 +522,38 @@ vn_descriptor_pool_free_descriptors(
const struct vn_descriptor_set_layout *layout,
uint32_t last_binding_descriptor_count)
{
- if (!pool->async_set_allocation)
- return;
+ assert(pool->async_set_allocation);
for (uint32_t i = 0; i <= layout->last_binding; i++) {
const uint32_t count = i == layout->last_binding
? last_binding_descriptor_count
: layout->bindings[i].count;
+ if (!count)
+ continue;
+
+ const enum vn_descriptor_type type = layout->bindings[i].type;
+ if (type == VN_DESCRIPTOR_TYPE_MUTABLE_EXT) {
+ vn_pool_restore_mutable_states(pool, layout, i, count);
+ } else {
+ pool->used.descriptor_counts[type] -= count;
- pool->used.descriptor_counts[layout->bindings[i].type] -= count;
+ if (type == VN_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
+ pool->used.iub_binding_count--;
+ }
}
- --pool->used.set_count;
+ pool->used.set_count--;
}
-static void
+static inline void
vn_descriptor_pool_reset_descriptors(struct vn_descriptor_pool *pool)
{
- if (!pool->async_set_allocation)
- return;
+ assert(pool->async_set_allocation);
memset(&pool->used, 0, sizeof(pool->used));
+
+ for (uint32_t i = 0; i < pool->mutable_states_count; i++)
+ pool->mutable_states[i].used = 0;
}
VkResult
@@ -349,23 +561,21 @@ vn_ResetDescriptorPool(VkDevice device,
VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags)
{
+ VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(descriptorPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
- vn_async_vkResetDescriptorPool(dev->instance, device, descriptorPool,
+ vn_async_vkResetDescriptorPool(dev->primary_ring, device, descriptorPool,
flags);
list_for_each_entry_safe(struct vn_descriptor_set, set,
- &pool->descriptor_sets, head) {
- list_del(&set->head);
-
- vn_object_base_fini(&set->base);
- vk_free(alloc, set);
- }
+ &pool->descriptor_sets, head)
+ vn_descriptor_set_destroy(dev, set, alloc);
- vn_descriptor_pool_reset_descriptors(pool);
+ if (pool->async_set_allocation)
+ vn_descriptor_pool_reset_descriptors(pool);
return VK_SUCCESS;
}
@@ -381,8 +591,6 @@ vn_AllocateDescriptorSets(VkDevice device,
struct vn_descriptor_pool *pool =
vn_descriptor_pool_from_handle(pAllocateInfo->descriptorPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
- const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_info =
- NULL;
VkResult result;
/* 14.2.3. Allocation of Descriptor Sets
@@ -390,18 +598,17 @@ vn_AllocateDescriptorSets(VkDevice device,
* If descriptorSetCount is zero or this structure is not included in
* the pNext chain, then the variable lengths are considered to be zero.
*/
- variable_info = vk_find_struct_const(
- pAllocateInfo->pNext,
- DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
-
+ const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_info =
+ vk_find_struct_const(
+ pAllocateInfo->pNext,
+ DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
if (variable_info && !variable_info->descriptorSetCount)
variable_info = NULL;
- for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
- const struct vn_descriptor_set_layout *layout =
+ uint32_t i = 0;
+ for (; i < pAllocateInfo->descriptorSetCount; i++) {
+ struct vn_descriptor_set_layout *layout =
vn_descriptor_set_layout_from_handle(pAllocateInfo->pSetLayouts[i]);
- uint32_t last_binding_descriptor_count = 0;
- struct vn_descriptor_set *set = NULL;
/* 14.2.3. Allocation of Descriptor Sets
*
@@ -409,6 +616,7 @@ vn_AllocateDescriptorSets(VkDevice device,
* variable count descriptor binding, then pDescriptorCounts[i] is
* ignored.
*/
+ uint32_t last_binding_descriptor_count = 0;
if (!layout->has_variable_descriptor_count) {
last_binding_descriptor_count =
layout->bindings[layout->last_binding].count;
@@ -416,19 +624,21 @@ vn_AllocateDescriptorSets(VkDevice device,
last_binding_descriptor_count = variable_info->pDescriptorCounts[i];
}
- if (!vn_descriptor_pool_alloc_descriptors(
+ if (pool->async_set_allocation &&
+ !vn_descriptor_pool_alloc_descriptors(
pool, layout, last_binding_descriptor_count)) {
- pDescriptorSets[i] = VK_NULL_HANDLE;
result = VK_ERROR_OUT_OF_POOL_MEMORY;
goto fail;
}
- set = vk_zalloc(alloc, sizeof(*set), VN_DEFAULT_ALIGN,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ struct vn_descriptor_set *set =
+ vk_zalloc(alloc, sizeof(*set), VN_DEFAULT_ALIGN,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set) {
- vn_descriptor_pool_free_descriptors(pool, layout,
- last_binding_descriptor_count);
- pDescriptorSets[i] = VK_NULL_HANDLE;
+ if (pool->async_set_allocation) {
+ vn_descriptor_pool_free_descriptors(
+ pool, layout, last_binding_descriptor_count);
+ }
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail;
}
@@ -436,20 +646,33 @@ vn_AllocateDescriptorSets(VkDevice device,
vn_object_base_init(&set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET,
&dev->base);
- set->layout = layout;
+ /* We might reorder vkCmdBindDescriptorSets after
+ * vkDestroyDescriptorSetLayout due to batching. The spec says
+ *
+ * VkDescriptorSetLayout objects may be accessed by commands that
+ * operate on descriptor sets allocated using that layout, and those
+ * descriptor sets must not be updated with vkUpdateDescriptorSets
+ * after the descriptor set layout has been destroyed. Otherwise, a
+ * VkDescriptorSetLayout object passed as a parameter to create
+ * another object is not further accessed by that object after the
+ * duration of the command it is passed into.
+ *
+ * It is ambiguous but the reordering is likely invalid. Let's keep the
+ * layout alive with the set to defer vkDestroyDescriptorSetLayout.
+ */
+ set->layout = vn_descriptor_set_layout_ref(dev, layout);
set->last_binding_descriptor_count = last_binding_descriptor_count;
list_addtail(&set->head, &pool->descriptor_sets);
- VkDescriptorSet set_handle = vn_descriptor_set_to_handle(set);
- pDescriptorSets[i] = set_handle;
+ pDescriptorSets[i] = vn_descriptor_set_to_handle(set);
}
if (pool->async_set_allocation) {
- vn_async_vkAllocateDescriptorSets(dev->instance, device, pAllocateInfo,
- pDescriptorSets);
+ vn_async_vkAllocateDescriptorSets(dev->primary_ring, device,
+ pAllocateInfo, pDescriptorSets);
} else {
result = vn_call_vkAllocateDescriptorSets(
- dev->instance, device, pAllocateInfo, pDescriptorSets);
+ dev->primary_ring, device, pAllocateInfo, pDescriptorSets);
if (result != VK_SUCCESS)
goto fail;
}
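
In sketch form, the layout lifetime that results from the refcounting this patch introduces, with call sites simplified from the hunks above:

   /* vn_CreateDescriptorSetLayout: the creation reference */
   layout->refcount = VN_REFCOUNT_INIT(1);

   /* vn_AllocateDescriptorSets: each set takes a reference */
   set->layout = vn_descriptor_set_layout_ref(dev, layout);

   /* vn_DestroyDescriptorSetLayout: drops only the creation reference */
   vn_descriptor_set_layout_unref(dev, layout);

   /* vn_descriptor_set_destroy: the last unref reaches
    * vn_descriptor_set_layout_destroy, which emits the deferred host-side
    * vkDestroyDescriptorSetLayout */
   vn_descriptor_set_layout_unref(dev, set->layout);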
@@ -457,17 +680,16 @@ vn_AllocateDescriptorSets(VkDevice device,
return VK_SUCCESS;
fail:
- for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
+ for (uint32_t j = 0; j < i; j++) {
struct vn_descriptor_set *set =
- vn_descriptor_set_from_handle(pDescriptorSets[i]);
- if (!set)
- break;
+ vn_descriptor_set_from_handle(pDescriptorSets[j]);
- vn_descriptor_pool_free_descriptors(pool, set->layout,
- set->last_binding_descriptor_count);
- list_del(&set->head);
- vn_object_base_fini(&set->base);
- vk_free(alloc, set);
+ if (pool->async_set_allocation) {
+ vn_descriptor_pool_free_descriptors(
+ pool, set->layout, set->last_binding_descriptor_count);
+ }
+
+ vn_descriptor_set_destroy(dev, set, alloc);
}
memset(pDescriptorSets, 0,
@@ -487,7 +709,9 @@ vn_FreeDescriptorSets(VkDevice device,
vn_descriptor_pool_from_handle(descriptorPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
- vn_async_vkFreeDescriptorSets(dev->instance, device, descriptorPool,
+ assert(!pool->async_set_allocation);
+
+ vn_async_vkFreeDescriptorSets(dev->primary_ring, device, descriptorPool,
descriptorSetCount, pDescriptorSets);
for (uint32_t i = 0; i < descriptorSetCount; i++) {
@@ -497,52 +721,17 @@ vn_FreeDescriptorSets(VkDevice device,
if (!set)
continue;
- list_del(&set->head);
-
- vn_object_base_fini(&set->base);
- vk_free(alloc, set);
+ vn_descriptor_set_destroy(dev, set, alloc);
}
return VK_SUCCESS;
}
-static struct vn_update_descriptor_sets *
-vn_update_descriptor_sets_alloc(uint32_t write_count,
- uint32_t image_count,
- uint32_t buffer_count,
- uint32_t view_count,
- const VkAllocationCallbacks *alloc,
- VkSystemAllocationScope scope)
+uint32_t
+vn_descriptor_set_count_write_images(uint32_t write_count,
+ const VkWriteDescriptorSet *writes)
{
- const size_t writes_offset = sizeof(struct vn_update_descriptor_sets);
- const size_t images_offset =
- writes_offset + sizeof(VkWriteDescriptorSet) * write_count;
- const size_t buffers_offset =
- images_offset + sizeof(VkDescriptorImageInfo) * image_count;
- const size_t views_offset =
- buffers_offset + sizeof(VkDescriptorBufferInfo) * buffer_count;
- const size_t alloc_size = views_offset + sizeof(VkBufferView) * view_count;
-
- void *storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN, scope);
- if (!storage)
- return NULL;
-
- struct vn_update_descriptor_sets *update = storage;
- update->write_count = write_count;
- update->writes = storage + writes_offset;
- update->images = storage + images_offset;
- update->buffers = storage + buffers_offset;
- update->views = storage + views_offset;
-
- return update;
-}
-
-static struct vn_update_descriptor_sets *
-vn_update_descriptor_sets_parse_writes(uint32_t write_count,
- const VkWriteDescriptorSet *writes,
- const VkAllocationCallbacks *alloc)
-{
- uint32_t img_count = 0;
+ uint32_t img_info_count = 0;
for (uint32_t i = 0; i < write_count; i++) {
const VkWriteDescriptorSet *write = &writes[i];
switch (write->descriptorType) {
@@ -551,67 +740,58 @@ vn_update_descriptor_sets_parse_writes(uint32_t write_count,
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- img_count += write->descriptorCount;
+ img_info_count += write->descriptorCount;
break;
default:
break;
}
}
+ return img_info_count;
+}
- struct vn_update_descriptor_sets *update =
- vn_update_descriptor_sets_alloc(write_count, img_count, 0, 0, alloc,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (!update)
- return NULL;
+const VkWriteDescriptorSet *
+vn_descriptor_set_get_writes(uint32_t write_count,
+ const VkWriteDescriptorSet *writes,
+ VkPipelineLayout pipeline_layout_handle,
+ struct vn_descriptor_set_writes *local)
+{
+ const struct vn_pipeline_layout *pipeline_layout =
+ vn_pipeline_layout_from_handle(pipeline_layout_handle);
- /* the encoder does not ignore
- * VkWriteDescriptorSet::{pImageInfo,pBufferInfo,pTexelBufferView} when it
- * should
- *
- * TODO make the encoder smarter
- */
- memcpy(update->writes, writes, sizeof(*writes) * write_count);
- img_count = 0;
- for (uint32_t i = 0; i < write_count; i++) {
- const struct vn_descriptor_set *set =
- vn_descriptor_set_from_handle(writes[i].dstSet);
- const struct vn_descriptor_set_layout_binding *binding =
- &set->layout->bindings[writes[i].dstBinding];
- VkWriteDescriptorSet *write = &update->writes[i];
- VkDescriptorImageInfo *imgs = &update->images[img_count];
+ typed_memcpy(local->writes, writes, write_count);
+ uint32_t img_info_count = 0;
+ for (uint32_t i = 0; i < write_count; i++) {
+ const struct vn_descriptor_set_layout *set_layout =
+ pipeline_layout
+ ? pipeline_layout->push_descriptor_set_layout
+ : vn_descriptor_set_from_handle(writes[i].dstSet)->layout;
+ VkWriteDescriptorSet *write = &local->writes[i];
+ VkDescriptorImageInfo *img_infos = &local->img_infos[img_info_count];
+ bool ignore_sampler = true;
+ bool ignore_iview = false;
switch (write->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
+ ignore_iview = true;
+ FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ ignore_sampler =
+ set_layout->bindings[write->dstBinding].has_immutable_samplers;
+ FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- memcpy(imgs, write->pImageInfo,
- sizeof(*imgs) * write->descriptorCount);
- img_count += write->descriptorCount;
-
+ typed_memcpy(img_infos, write->pImageInfo, write->descriptorCount);
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- switch (write->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- imgs[j].imageView = VK_NULL_HANDLE;
- break;
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- if (binding->has_immutable_samplers)
- imgs[j].sampler = VK_NULL_HANDLE;
- break;
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- imgs[j].sampler = VK_NULL_HANDLE;
- break;
- default:
- break;
- }
+ if (ignore_sampler)
+ img_infos[j].sampler = VK_NULL_HANDLE;
+ if (ignore_iview)
+ img_infos[j].imageView = VK_NULL_HANDLE;
}
-
- write->pImageInfo = imgs;
+ write->pImageInfo = img_infos;
write->pBufferInfo = NULL;
write->pTexelBufferView = NULL;
+ img_info_count += write->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
@@ -625,6 +805,8 @@ vn_update_descriptor_sets_parse_writes(uint32_t write_count,
write->pImageInfo = NULL;
write->pTexelBufferView = NULL;
break;
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
+ case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
default:
write->pImageInfo = NULL;
write->pBufferInfo = NULL;
@@ -632,8 +814,7 @@ vn_update_descriptor_sets_parse_writes(uint32_t write_count,
break;
}
}
-
- return update;
+ return local->writes;
}
void
@@ -644,120 +825,66 @@ vn_UpdateDescriptorSets(VkDevice device,
const VkCopyDescriptorSet *pDescriptorCopies)
{
struct vn_device *dev = vn_device_from_handle(device);
- const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
-
- struct vn_update_descriptor_sets *update =
- vn_update_descriptor_sets_parse_writes(descriptorWriteCount,
- pDescriptorWrites, alloc);
- if (!update) {
- /* TODO update one-by-one? */
- vn_log(dev->instance, "TODO descriptor set update ignored due to OOM");
- return;
- }
-
- vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
- update->writes, descriptorCopyCount,
- pDescriptorCopies);
-
- vk_free(alloc, update);
+ const uint32_t img_info_count = vn_descriptor_set_count_write_images(
+ descriptorWriteCount, pDescriptorWrites);
+
+ STACK_ARRAY(VkWriteDescriptorSet, writes, descriptorWriteCount);
+ STACK_ARRAY(VkDescriptorImageInfo, img_infos, img_info_count);
+ struct vn_descriptor_set_writes local = {
+ .writes = writes,
+ .img_infos = img_infos,
+ };
+ pDescriptorWrites = vn_descriptor_set_get_writes(
+ descriptorWriteCount, pDescriptorWrites, VK_NULL_HANDLE, &local);
+
+ vn_async_vkUpdateDescriptorSets(dev->primary_ring, device,
+ descriptorWriteCount, pDescriptorWrites,
+ descriptorCopyCount, pDescriptorCopies);
+
+ STACK_ARRAY_FINISH(writes);
+ STACK_ARRAY_FINISH(img_infos);
}
/* descriptor update template commands */
-static struct vn_update_descriptor_sets *
-vn_update_descriptor_sets_parse_template(
- const VkDescriptorUpdateTemplateCreateInfo *create_info,
- const VkAllocationCallbacks *alloc,
- struct vn_descriptor_update_template_entry *entries)
+static void
+vn_descriptor_update_template_init(
+ struct vn_descriptor_update_template *templ,
+ const VkDescriptorUpdateTemplateCreateInfo *create_info)
{
- uint32_t img_count = 0;
- uint32_t buf_count = 0;
- uint32_t view_count = 0;
+ templ->entry_count = create_info->descriptorUpdateEntryCount;
for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
const VkDescriptorUpdateTemplateEntry *entry =
&create_info->pDescriptorUpdateEntries[i];
-
+ templ->entries[i] = *entry;
switch (entry->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- img_count += entry->descriptorCount;
+ templ->img_info_count += entry->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- view_count += entry->descriptorCount;
+ templ->bview_count += entry->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- buf_count += entry->descriptorCount;
+ templ->buf_info_count += entry->descriptorCount;
break;
- default:
- unreachable("unhandled descriptor type");
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
+ templ->iub_count += 1;
break;
- }
- }
-
- struct vn_update_descriptor_sets *update = vn_update_descriptor_sets_alloc(
- create_info->descriptorUpdateEntryCount, img_count, buf_count,
- view_count, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!update)
- return NULL;
-
- img_count = 0;
- buf_count = 0;
- view_count = 0;
- for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
- const VkDescriptorUpdateTemplateEntry *entry =
- &create_info->pDescriptorUpdateEntries[i];
- VkWriteDescriptorSet *write = &update->writes[i];
-
- write->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- write->pNext = NULL;
- write->dstBinding = entry->dstBinding;
- write->dstArrayElement = entry->dstArrayElement;
- write->descriptorCount = entry->descriptorCount;
- write->descriptorType = entry->descriptorType;
-
- entries[i].offset = entry->offset;
- entries[i].stride = entry->stride;
-
- switch (entry->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- write->pImageInfo = &update->images[img_count];
- write->pBufferInfo = NULL;
- write->pTexelBufferView = NULL;
- img_count += entry->descriptorCount;
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- write->pImageInfo = NULL;
- write->pBufferInfo = NULL;
- write->pTexelBufferView = &update->views[view_count];
- view_count += entry->descriptorCount;
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- write->pImageInfo = NULL;
- write->pBufferInfo = &update->buffers[buf_count];
- write->pTexelBufferView = NULL;
- buf_count += entry->descriptorCount;
+ case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
break;
default:
+ unreachable("unhandled descriptor type");
break;
}
}
-
- return update;
}
VkResult
@@ -767,13 +894,14 @@ vn_CreateDescriptorUpdateTemplate(
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
+ VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
const VkAllocationCallbacks *alloc =
pAllocator ? pAllocator : &dev->base.base.alloc;
const size_t templ_size =
offsetof(struct vn_descriptor_update_template,
- entries[pCreateInfo->descriptorUpdateEntryCount + 1]);
+ entries[pCreateInfo->descriptorUpdateEntryCount]);
struct vn_descriptor_update_template *templ = vk_zalloc(
alloc, templ_size, VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!templ)
@@ -782,19 +910,19 @@ vn_CreateDescriptorUpdateTemplate(
vn_object_base_init(&templ->base,
VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, &dev->base);
- templ->update = vn_update_descriptor_sets_parse_template(
- pCreateInfo, alloc, templ->entries);
- if (!templ->update) {
- vk_free(alloc, templ);
- return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ if (pCreateInfo->templateType ==
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
+ struct vn_pipeline_layout *pipeline_layout =
+ vn_pipeline_layout_from_handle(pCreateInfo->pipelineLayout);
+ templ->push.pipeline_bind_point = pCreateInfo->pipelineBindPoint;
+ templ->push.set_layout = pipeline_layout->push_descriptor_set_layout;
}
- mtx_init(&templ->mutex, mtx_plain);
+ vn_descriptor_update_template_init(templ, pCreateInfo);
/* no host object */
- VkDescriptorUpdateTemplate templ_handle =
+ *pDescriptorUpdateTemplate =
vn_descriptor_update_template_to_handle(templ);
- *pDescriptorUpdateTemplate = templ_handle;
return VK_SUCCESS;
}
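
A push-descriptor template reaches the new branch above when created like this sketch; `entry` and `pipeline_layout_handle` are hypothetical names, and the fields are standard VkDescriptorUpdateTemplateCreateInfo:

   /* dstSet is unused for push descriptors; the set layout is instead
    * taken from pipelineLayout, matching templ->push.set_layout above */
   const VkDescriptorUpdateTemplateCreateInfo info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
      .descriptorUpdateEntryCount = 1,
      .pDescriptorUpdateEntries = &entry,
      .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR,
      .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
      .pipelineLayout = pipeline_layout_handle,
      .set = 0,
   };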
@@ -805,6 +933,7 @@ vn_DestroyDescriptorUpdateTemplate(
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator)
{
+ VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
struct vn_descriptor_update_template *templ =
vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
@@ -815,92 +944,143 @@ vn_DestroyDescriptorUpdateTemplate(
return;
/* no host object */
- vk_free(alloc, templ->update);
- mtx_destroy(&templ->mutex);
-
vn_object_base_fini(&templ->base);
vk_free(alloc, templ);
}
void
-vn_UpdateDescriptorSetWithTemplate(
- VkDevice device,
- VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplate descriptorUpdateTemplate,
- const void *pData)
+vn_descriptor_set_fill_update_with_template(
+ struct vn_descriptor_update_template *templ,
+ VkDescriptorSet set_handle,
+ const uint8_t *data,
+ struct vn_descriptor_set_update *update)
{
- struct vn_device *dev = vn_device_from_handle(device);
- struct vn_descriptor_set *set =
- vn_descriptor_set_from_handle(descriptorSet);
- struct vn_descriptor_update_template *templ =
- vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
- struct vn_update_descriptor_sets *update = templ->update;
-
- /* duplicate update instead to avoid locking? */
- mtx_lock(&templ->mutex);
-
- for (uint32_t i = 0; i < update->write_count; i++) {
- const struct vn_descriptor_update_template_entry *entry =
- &templ->entries[i];
- const struct vn_descriptor_set_layout_binding *binding =
- &set->layout->bindings[update->writes[i].dstBinding];
- VkWriteDescriptorSet *write = &update->writes[i];
-
- write->dstSet = vn_descriptor_set_to_handle(set);
-
- switch (write->descriptorType) {
+ struct vn_descriptor_set *set = vn_descriptor_set_from_handle(set_handle);
+ const struct vn_descriptor_set_layout *set_layout =
+ templ->push.set_layout ? templ->push.set_layout : set->layout;
+
+ update->write_count = templ->entry_count;
+
+ uint32_t img_info_offset = 0;
+ uint32_t buf_info_offset = 0;
+ uint32_t bview_offset = 0;
+ uint32_t iub_offset = 0;
+ for (uint32_t i = 0; i < templ->entry_count; i++) {
+ const VkDescriptorUpdateTemplateEntry *entry = &templ->entries[i];
+ const uint8_t *ptr = data + entry->offset;
+ bool ignore_sampler = true;
+ bool ignore_iview = false;
+ VkDescriptorImageInfo *img_infos = NULL;
+ VkDescriptorBufferInfo *buf_infos = NULL;
+ VkBufferView *bview_handles = NULL;
+ VkWriteDescriptorSetInlineUniformBlock *iub = NULL;
+ switch (entry->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
+ ignore_iview = true;
+ FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ ignore_sampler =
+ set_layout->bindings[entry->dstBinding].has_immutable_samplers;
+ FALLTHROUGH;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- for (uint32_t j = 0; j < write->descriptorCount; j++) {
- const bool need_sampler =
- (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
- write->descriptorType ==
- VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
- !binding->has_immutable_samplers;
- const bool need_view =
- write->descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER;
- const VkDescriptorImageInfo *src =
- pData + entry->offset + entry->stride * j;
- VkDescriptorImageInfo *dst =
- (VkDescriptorImageInfo *)&write->pImageInfo[j];
-
- dst->sampler = need_sampler ? src->sampler : VK_NULL_HANDLE;
- dst->imageView = need_view ? src->imageView : VK_NULL_HANDLE;
- dst->imageLayout = src->imageLayout;
+ img_infos = &update->img_infos[img_info_offset];
+ for (uint32_t j = 0; j < entry->descriptorCount; j++) {
+ const VkDescriptorImageInfo *src = (const void *)ptr;
+ img_infos[j] = (VkDescriptorImageInfo){
+ .sampler = ignore_sampler ? VK_NULL_HANDLE : src->sampler,
+ .imageView = ignore_iview ? VK_NULL_HANDLE : src->imageView,
+ .imageLayout = src->imageLayout,
+ };
+ ptr += entry->stride;
}
+ img_info_offset += entry->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- for (uint32_t j = 0; j < write->descriptorCount; j++) {
- const VkBufferView *src =
- pData + entry->offset + entry->stride * j;
- VkBufferView *dst = (VkBufferView *)&write->pTexelBufferView[j];
- *dst = *src;
+ bview_handles = &update->bview_handles[bview_offset];
+ for (uint32_t j = 0; j < entry->descriptorCount; j++) {
+ bview_handles[j] = *(const VkBufferView *)ptr;
+ ptr += entry->stride;
}
+ bview_offset += entry->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- for (uint32_t j = 0; j < write->descriptorCount; j++) {
- const VkDescriptorBufferInfo *src =
- pData + entry->offset + entry->stride * j;
- VkDescriptorBufferInfo *dst =
- (VkDescriptorBufferInfo *)&write->pBufferInfo[j];
- *dst = *src;
+ buf_infos = &update->buf_infos[buf_info_offset];
+ for (uint32_t j = 0; j < entry->descriptorCount; j++) {
+ buf_infos[j] = *(const VkDescriptorBufferInfo *)ptr;
+ ptr += entry->stride;
}
+ buf_info_offset += entry->descriptorCount;
+ break;
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
+ iub = &update->iubs[iub_offset];
+ *iub = (VkWriteDescriptorSetInlineUniformBlock){
+ .sType =
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK,
+ .dataSize = entry->descriptorCount,
+ .pData = (const void *)ptr,
+ };
+ iub_offset++;
+ break;
+ case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
break;
default:
unreachable("unhandled descriptor type");
break;
}
+ update->writes[i] = (VkWriteDescriptorSet){
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .pNext = iub,
+ .dstSet = set_handle,
+ .dstBinding = entry->dstBinding,
+ .dstArrayElement = entry->dstArrayElement,
+ .descriptorCount = entry->descriptorCount,
+ .descriptorType = entry->descriptorType,
+ .pImageInfo = img_infos,
+ .pBufferInfo = buf_infos,
+ .pTexelBufferView = bview_handles,
+ };
}
+}
- vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
- update->writes, 0, NULL);
+void
+vn_UpdateDescriptorSetWithTemplate(
+ VkDevice device,
+ VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const void *pData)
+{
+ struct vn_device *dev = vn_device_from_handle(device);
+ struct vn_descriptor_update_template *templ =
+ vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
- mtx_unlock(&templ->mutex);
+ STACK_ARRAY(VkWriteDescriptorSet, writes, templ->entry_count);
+ STACK_ARRAY(VkDescriptorImageInfo, img_infos, templ->img_info_count);
+ STACK_ARRAY(VkDescriptorBufferInfo, buf_infos, templ->buf_info_count);
+ STACK_ARRAY(VkBufferView, bview_handles, templ->bview_count);
+ STACK_ARRAY(VkWriteDescriptorSetInlineUniformBlock, iubs,
+ templ->iub_count);
+ struct vn_descriptor_set_update update = {
+ .writes = writes,
+ .img_infos = img_infos,
+ .buf_infos = buf_infos,
+ .bview_handles = bview_handles,
+ .iubs = iubs,
+ };
+ vn_descriptor_set_fill_update_with_template(templ, descriptorSet, pData,
+ &update);
+
+ vn_async_vkUpdateDescriptorSets(
+ dev->primary_ring, device, update.write_count, update.writes, 0, NULL);
+
+ STACK_ARRAY_FINISH(writes);
+ STACK_ARRAY_FINISH(img_infos);
+ STACK_ARRAY_FINISH(buf_infos);
+ STACK_ARRAY_FINISH(bview_handles);
+ STACK_ARRAY_FINISH(iubs);
}
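
For reference, the offset/stride walk in vn_descriptor_set_fill_update_with_template consumes a caller-defined blob addressed per VkDescriptorUpdateTemplateEntry. A hypothetical layout, with names invented for illustration:

   /* matches two hypothetical template entries:
    * entry 0: dstBinding 0, descriptorCount 2, offset 0,
    *          stride sizeof(VkDescriptorImageInfo)
    * entry 1: dstBinding 1, descriptorCount 1,
    *          offset offsetof(struct blob, buf), stride 0 */
   struct blob {
      VkDescriptorImageInfo imgs[2];
      VkDescriptorBufferInfo buf;
   } data;

   vkUpdateDescriptorSetWithTemplate(dev_handle, set_handle, templ_handle,
                                     &data);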