Diffstat (limited to 'src/vulkan')
-rw-r--r--  src/vulkan/device-select-layer/VkLayer_MESA_device_select.json | 2
-rw-r--r--  src/vulkan/device-select-layer/device_select_layer.c | 290
-rw-r--r--  src/vulkan/device-select-layer/device_select_wayland.c | 6
-rw-r--r--  src/vulkan/device-select-layer/device_select_x11.c | 10
-rw-r--r--  src/vulkan/device-select-layer/meson.build | 12
-rw-r--r--  src/vulkan/meson.build | 63
-rw-r--r--  src/vulkan/overlay-layer/README | 52
-rw-r--r--  src/vulkan/overlay-layer/README.rst | 100
-rw-r--r--  src/vulkan/overlay-layer/VkLayer_MESA_overlay.json | 2
-rwxr-xr-x  src/vulkan/overlay-layer/mesa-overlay-control.py | 16
-rw-r--r--  src/vulkan/overlay-layer/meson.build | 6
-rw-r--r--  src/vulkan/overlay-layer/overlay.cpp | 222
-rw-r--r--  src/vulkan/overlay-layer/overlay_params.c | 18
-rw-r--r--  src/vulkan/overlay-layer/overlay_params.h | 2
-rwxr-xr-x  src/vulkan/registry/update-aliases.py | 191
-rw-r--r--  src/vulkan/registry/vk.xml | 14160
-rw-r--r--  src/vulkan/runtime/meson.build | 326
-rw-r--r--  src/vulkan/runtime/rmv/vk_rmv_common.c | 144
-rw-r--r--  src/vulkan/runtime/rmv/vk_rmv_common.h | 148
-rw-r--r--  src/vulkan/runtime/rmv/vk_rmv_exporter.c | 1727
-rw-r--r--  src/vulkan/runtime/rmv/vk_rmv_tokens.h | 304
-rw-r--r--  src/vulkan/runtime/vk_acceleration_structure.c | 94
-rw-r--r--  src/vulkan/runtime/vk_acceleration_structure.h (renamed from src/vulkan/wsi/wsi_common_win32.h) | 30
-rw-r--r--  src/vulkan/runtime/vk_android.c | 361
-rw-r--r--  src/vulkan/runtime/vk_android.h | 79
-rw-r--r--  src/vulkan/runtime/vk_blend.c | 115
-rw-r--r--  src/vulkan/runtime/vk_blend.h | 25
-rw-r--r--  src/vulkan/runtime/vk_buffer.c | 145
-rw-r--r--  src/vulkan/runtime/vk_buffer.h | 78
-rw-r--r--  src/vulkan/runtime/vk_buffer_view.c | 83
-rw-r--r--  src/vulkan/runtime/vk_buffer_view.h | 75
-rw-r--r--  src/vulkan/runtime/vk_cmd_copy.c (renamed from src/vulkan/util/vk_cmd_copy.c) | 72
-rw-r--r--  src/vulkan/runtime/vk_cmd_enqueue.c | 471
-rw-r--r--  src/vulkan/runtime/vk_command_buffer.c | 372
-rw-r--r--  src/vulkan/runtime/vk_command_buffer.h | 253
-rw-r--r--  src/vulkan/runtime/vk_command_pool.c | 266
-rw-r--r--  src/vulkan/runtime/vk_command_pool.h | 104
-rw-r--r--  src/vulkan/runtime/vk_debug_report.c (renamed from src/vulkan/util/vk_debug_report.c) | 11
-rw-r--r--  src/vulkan/runtime/vk_debug_report.h (renamed from src/vulkan/util/vk_debug_report.h) | 0
-rw-r--r--  src/vulkan/runtime/vk_debug_utils.c | 459
-rw-r--r--  src/vulkan/runtime/vk_debug_utils.h | 74
-rw-r--r--  src/vulkan/runtime/vk_deferred_operation.c (renamed from src/vulkan/util/vk_deferred_operation.c) | 0
-rw-r--r--  src/vulkan/runtime/vk_deferred_operation.h (renamed from src/vulkan/util/vk_deferred_operation.h) | 0
-rw-r--r--  src/vulkan/runtime/vk_descriptor_set_layout.c | 96
-rw-r--r--  src/vulkan/runtime/vk_descriptor_set_layout.h | 99
-rw-r--r--  src/vulkan/runtime/vk_descriptor_update_template.c | 97
-rw-r--r--  src/vulkan/runtime/vk_descriptor_update_template.h | 90
-rw-r--r--  src/vulkan/runtime/vk_descriptors.c (renamed from src/vulkan/util/vk_descriptors.c) | 47
-rw-r--r--  src/vulkan/runtime/vk_descriptors.h (renamed from src/vulkan/util/vk_descriptors.h) | 2
-rw-r--r--  src/vulkan/runtime/vk_device.c | 812
-rw-r--r--  src/vulkan/runtime/vk_device.h | 442
-rw-r--r--  src/vulkan/runtime/vk_device_memory.c | 221
-rw-r--r--  src/vulkan/runtime/vk_device_memory.h | 87
-rw-r--r--  src/vulkan/runtime/vk_drm_syncobj.c | 449
-rw-r--r--  src/vulkan/runtime/vk_drm_syncobj.h (renamed from src/vulkan/util/vk_physical_device.h) | 46
-rw-r--r--  src/vulkan/runtime/vk_fence.c | 491
-rw-r--r--  src/vulkan/runtime/vk_fence.h | 82
-rw-r--r--  src/vulkan/runtime/vk_format_info_gen.py | 245
-rw-r--r--  src/vulkan/runtime/vk_framebuffer.c | 83
-rw-r--r--  src/vulkan/runtime/vk_framebuffer.h (renamed from src/vulkan/wsi/wsi_common_x11.h) | 43
-rw-r--r--  src/vulkan/runtime/vk_graphics_state.c | 3280
-rw-r--r--  src/vulkan/runtime/vk_graphics_state.h | 1274
-rw-r--r--  src/vulkan/runtime/vk_image.c | 1040
-rw-r--r--  src/vulkan/runtime/vk_image.h (renamed from src/vulkan/util/vk_image.h) | 163
-rw-r--r--  src/vulkan/runtime/vk_instance.c | 644
-rw-r--r--  src/vulkan/runtime/vk_instance.h | 253
-rw-r--r--  src/vulkan/runtime/vk_limits.h | 99
-rw-r--r--  src/vulkan/runtime/vk_log.c | 357
-rw-r--r--  src/vulkan/runtime/vk_log.h | 97
-rw-r--r--  src/vulkan/runtime/vk_meta.c | 592
-rw-r--r--  src/vulkan/runtime/vk_meta.h | 299
-rw-r--r--  src/vulkan/runtime/vk_meta_blit_resolve.c | 1013
-rw-r--r--  src/vulkan/runtime/vk_meta_clear.c | 609
-rw-r--r--  src/vulkan/runtime/vk_meta_draw_rects.c | 337
-rw-r--r--  src/vulkan/runtime/vk_meta_private.h | 87
-rw-r--r--  src/vulkan/runtime/vk_nir.c | 203
-rw-r--r--  src/vulkan/runtime/vk_nir.h | 57
-rw-r--r--  src/vulkan/runtime/vk_nir_convert_ycbcr.c | 459
-rw-r--r--  src/vulkan/runtime/vk_nir_convert_ycbcr.h | 56
-rw-r--r--  src/vulkan/runtime/vk_object.c (renamed from src/vulkan/util/vk_object.c) | 123
-rw-r--r--  src/vulkan/runtime/vk_object.h (renamed from src/vulkan/util/vk_object.h) | 145
-rw-r--r--  src/vulkan/runtime/vk_physical_device.c (renamed from src/vulkan/util/vk_physical_device.c) | 75
-rw-r--r--  src/vulkan/runtime/vk_physical_device.h | 152
-rw-r--r--  src/vulkan/runtime/vk_pipeline.c | 2186
-rw-r--r--  src/vulkan/runtime/vk_pipeline.h | 210
-rw-r--r--  src/vulkan/runtime/vk_pipeline_cache.c | 852
-rw-r--r--  src/vulkan/runtime/vk_pipeline_cache.h | 314
-rw-r--r--  src/vulkan/runtime/vk_pipeline_layout.c | 144
-rw-r--r--  src/vulkan/runtime/vk_pipeline_layout.h | 118
-rw-r--r--  src/vulkan/runtime/vk_query_pool.c | 101
-rw-r--r--  src/vulkan/runtime/vk_query_pool.h (renamed from src/vulkan/util/vk_device.h) | 59
-rw-r--r--  src/vulkan/runtime/vk_queue.c | 1339
-rw-r--r--  src/vulkan/runtime/vk_queue.h | 250
-rw-r--r--  src/vulkan/runtime/vk_render_pass.c | 2500
-rw-r--r--  src/vulkan/runtime/vk_render_pass.h | 461
-rw-r--r--  src/vulkan/runtime/vk_sampler.c | 169
-rw-r--r--  src/vulkan/runtime/vk_sampler.h | 98
-rw-r--r--  src/vulkan/runtime/vk_semaphore.c | 723
-rw-r--r--  src/vulkan/runtime/vk_semaphore.h | 78
-rw-r--r--  src/vulkan/runtime/vk_shader.c | 573
-rw-r--r--  src/vulkan/runtime/vk_shader.h | 260
-rw-r--r--  src/vulkan/runtime/vk_shader_module.c | 147
-rw-r--r--  src/vulkan/runtime/vk_shader_module.h (renamed from src/vulkan/util/vk_shader_module.h) | 29
-rw-r--r--  src/vulkan/runtime/vk_standard_sample_locations.c | 156
-rw-r--r--  src/vulkan/runtime/vk_standard_sample_locations.h | 47
-rw-r--r--  src/vulkan/runtime/vk_sync.c | 446
-rw-r--r--  src/vulkan/runtime/vk_sync.h | 410
-rw-r--r--  src/vulkan/runtime/vk_sync_binary.c | 141
-rw-r--r--  src/vulkan/runtime/vk_sync_binary.h | 79
-rw-r--r--  src/vulkan/runtime/vk_sync_dummy.c | 59
-rw-r--r--  src/vulkan/runtime/vk_sync_dummy.h (renamed from src/vulkan/wsi/wsi_common_wayland.h) | 29
-rw-r--r--  src/vulkan/runtime/vk_sync_timeline.c | 541
-rw-r--r--  src/vulkan/runtime/vk_sync_timeline.h | 133
-rw-r--r--  src/vulkan/runtime/vk_synchronization.c | 473
-rw-r--r--  src/vulkan/runtime/vk_synchronization.h | 109
-rw-r--r--  src/vulkan/runtime/vk_texcompress_astc.c | 637
-rw-r--r--  src/vulkan/runtime/vk_texcompress_astc.h | 121
-rw-r--r--  src/vulkan/runtime/vk_texcompress_etc2.c | 565
-rw-r--r--  src/vulkan/runtime/vk_texcompress_etc2.h | 127
-rw-r--r--  src/vulkan/runtime/vk_video.c | 2072
-rw-r--r--  src/vulkan/runtime/vk_video.h | 348
-rw-r--r--  src/vulkan/runtime/vk_ycbcr_conversion.c | 112
-rw-r--r--  src/vulkan/runtime/vk_ycbcr_conversion.h | 55
-rw-r--r--  src/vulkan/util/gen_enum_to_str.py | 303
-rw-r--r--  src/vulkan/util/meson.build | 133
-rw-r--r--  src/vulkan/util/vk_alloc.c | 2
-rw-r--r--  src/vulkan/util/vk_alloc.h | 62
-rw-r--r--  src/vulkan/util/vk_cmd_queue_gen.py | 386
-rw-r--r--  src/vulkan/util/vk_commands_gen.py | 143
-rw-r--r--  src/vulkan/util/vk_device.c | 261
-rw-r--r--  src/vulkan/util/vk_dispatch_table_gen.py | 420
-rw-r--r--  src/vulkan/util/vk_dispatch_trampolines_gen.py | 193
-rw-r--r--  src/vulkan/util/vk_entrypoints.py | 147
-rw-r--r--  src/vulkan/util/vk_entrypoints_gen.py | 126
-rw-r--r--  src/vulkan/util/vk_extensions.py | 266
-rw-r--r--  src/vulkan/util/vk_extensions_gen.py | 174
-rw-r--r--  src/vulkan/util/vk_format.c | 520
-rw-r--r--  src/vulkan/util/vk_format.h | 176
-rw-r--r--  src/vulkan/util/vk_icd_gen.py | 16
-rw-r--r--  src/vulkan/util/vk_image.c | 606
-rw-r--r--  src/vulkan/util/vk_instance.c | 230
-rw-r--r--  src/vulkan/util/vk_instance.h | 97
-rw-r--r--  src/vulkan/util/vk_physical_device_features_gen.py | 473
-rw-r--r--  src/vulkan/util/vk_physical_device_properties_gen.py | 332
-rw-r--r--  src/vulkan/util/vk_render_pass.c | 291
-rw-r--r--  src/vulkan/util/vk_shader_module.c | 77
-rw-r--r--  src/vulkan/util/vk_struct_type_cast_gen.py | 116
-rw-r--r--  src/vulkan/util/vk_synchronization_helpers_gen.py | 225
-rw-r--r--  src/vulkan/util/vk_util.c | 6
-rw-r--r--  src/vulkan/util/vk_util.h | 152
-rw-r--r--  src/vulkan/vulkan-android.sym | 12
-rw-r--r--  src/vulkan/vulkan-icd-android-symbols.txt | 4
-rw-r--r--  src/vulkan/vulkan-icd-symbols.txt | 5
-rw-r--r--  src/vulkan/vulkan.sym | 11
-rw-r--r--  src/vulkan/vulkan_api.def.in | 4
-rw-r--r--  src/vulkan/wsi/meson.build | 63
-rw-r--r--  src/vulkan/wsi/wsi_common.c | 1833
-rw-r--r--  src/vulkan/wsi/wsi_common.h | 218
-rw-r--r--  src/vulkan/wsi/wsi_common_display.c | 1219
-rw-r--r--  src/vulkan/wsi/wsi_common_display.h | 130
-rw-r--r--  src/vulkan/wsi/wsi_common_drm.c | 1155
-rw-r--r--  src/vulkan/wsi/wsi_common_drm.h | 38
-rw-r--r--  src/vulkan/wsi/wsi_common_headless.c | 574
-rw-r--r--  src/vulkan/wsi/wsi_common_private.h | 310
-rw-r--r--  src/vulkan/wsi/wsi_common_queue.h | 8
-rw-r--r--  src/vulkan/wsi/wsi_common_wayland.c | 2027
-rw-r--r--  src/vulkan/wsi/wsi_common_win32.c | 680
-rw-r--r--  src/vulkan/wsi/wsi_common_win32.cpp | 1013
-rw-r--r--  src/vulkan/wsi/wsi_common_x11.c | 1897
169 files changed, 62267 insertions, 7952 deletions
diff --git a/src/vulkan/device-select-layer/VkLayer_MESA_device_select.json b/src/vulkan/device-select-layer/VkLayer_MESA_device_select.json
index 361ae9fe74e..86962ae5a05 100644
--- a/src/vulkan/device-select-layer/VkLayer_MESA_device_select.json
+++ b/src/vulkan/device-select-layer/VkLayer_MESA_device_select.json
@@ -4,7 +4,7 @@
"name": "VK_LAYER_MESA_device_select",
"type": "GLOBAL",
"library_path": "libVkLayer_MESA_device_select.so",
- "api_version": "1.2.73",
+ "api_version": "1.3.211",
"implementation_version": "1",
"description": "Linux device selection layer",
"functions": {
diff --git a/src/vulkan/device-select-layer/device_select_layer.c b/src/vulkan/device-select-layer/device_select_layer.c
index daed2d935c0..73bb4313437 100644
--- a/src/vulkan/device-select-layer/device_select_layer.c
+++ b/src/vulkan/device-select-layer/device_select_layer.c
@@ -30,6 +30,7 @@
*/
#include <vulkan/vk_layer.h>
+#include <vulkan/vulkan.h>
#include <assert.h>
#include <stdio.h>
@@ -38,17 +39,16 @@
#include <unistd.h>
#include "device_select.h"
-#include "c99_compat.h"
-#include "hash_table.h"
+#include "util/hash_table.h"
#include "vk_util.h"
-#include "c11/threads.h"
+#include "util/simple_mtx.h"
+#include "util/u_debug.h"
struct instance_info {
PFN_vkDestroyInstance DestroyInstance;
PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices;
PFN_vkEnumeratePhysicalDeviceGroups EnumeratePhysicalDeviceGroups;
PFN_vkGetInstanceProcAddr GetInstanceProcAddr;
- PFN_GetPhysicalDeviceProcAddr GetPhysicalDeviceProcAddr;
PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties;
PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties;
PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2;
@@ -57,46 +57,38 @@ struct instance_info {
};
static struct hash_table *device_select_instance_ht = NULL;
-static mtx_t device_select_mutex;
-
-static once_flag device_select_is_init = ONCE_FLAG_INIT;
-
-static void device_select_once_init(void) {
- mtx_init(&device_select_mutex, mtx_plain);
-}
+static simple_mtx_t device_select_mutex = SIMPLE_MTX_INITIALIZER;
static void
device_select_init_instances(void)
{
- call_once(&device_select_is_init, device_select_once_init);
-
- mtx_lock(&device_select_mutex);
+ simple_mtx_lock(&device_select_mutex);
if (!device_select_instance_ht)
device_select_instance_ht = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
- mtx_unlock(&device_select_mutex);
+ simple_mtx_unlock(&device_select_mutex);
}
static void
device_select_try_free_ht(void)
{
- mtx_lock(&device_select_mutex);
+ simple_mtx_lock(&device_select_mutex);
if (device_select_instance_ht) {
if (_mesa_hash_table_num_entries(device_select_instance_ht) == 0) {
_mesa_hash_table_destroy(device_select_instance_ht, NULL);
device_select_instance_ht = NULL;
}
}
- mtx_unlock(&device_select_mutex);
+ simple_mtx_unlock(&device_select_mutex);
}
static void
device_select_layer_add_instance(VkInstance instance, struct instance_info *info)
{
device_select_init_instances();
- mtx_lock(&device_select_mutex);
+ simple_mtx_lock(&device_select_mutex);
_mesa_hash_table_insert(device_select_instance_ht, instance, info);
- mtx_unlock(&device_select_mutex);
+ simple_mtx_unlock(&device_select_mutex);
}
static struct instance_info *
@@ -104,20 +96,20 @@ device_select_layer_get_instance(VkInstance instance)
{
struct hash_entry *entry;
struct instance_info *info = NULL;
- mtx_lock(&device_select_mutex);
+ simple_mtx_lock(&device_select_mutex);
entry = _mesa_hash_table_search(device_select_instance_ht, (void *)instance);
if (entry)
info = (struct instance_info *)entry->data;
- mtx_unlock(&device_select_mutex);
+ simple_mtx_unlock(&device_select_mutex);
return info;
}
static void
device_select_layer_remove_instance(VkInstance instance)
{
- mtx_lock(&device_select_mutex);
+ simple_mtx_lock(&device_select_mutex);
_mesa_hash_table_remove_key(device_select_instance_ht, instance);
- mtx_unlock(&device_select_mutex);
+ simple_mtx_unlock(&device_select_mutex);
device_select_try_free_ht();
}
@@ -168,7 +160,6 @@ static VkResult device_select_CreateInstance(const VkInstanceCreateInfo *pCreate
info->has_vulkan11 = pCreateInfo->pApplicationInfo &&
pCreateInfo->pApplicationInfo->apiVersion >= VK_MAKE_VERSION(1, 1, 0);
- info->GetPhysicalDeviceProcAddr = (PFN_GetPhysicalDeviceProcAddr)info->GetInstanceProcAddr(*pInstance, "vk_layerGetPhysicalDeviceProcAddr");
#define DEVSEL_GET_CB(func) info->func = (PFN_vk##func)info->GetInstanceProcAddr(*pInstance, "vk" #func)
DEVSEL_GET_CB(DestroyInstance);
DEVSEL_GET_CB(EnumeratePhysicalDevices);
@@ -207,8 +198,8 @@ static void print_gpu(const struct instance_info *info, unsigned index, VkPhysic
VkPhysicalDevicePCIBusInfoPropertiesEXT ext_pci_properties = (VkPhysicalDevicePCIBusInfoPropertiesEXT) {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT
};
- VkPhysicalDeviceProperties2KHR properties = (VkPhysicalDeviceProperties2KHR){
- .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR
+ VkPhysicalDeviceProperties2 properties = (VkPhysicalDeviceProperties2){
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2
};
if (info->has_vulkan11 && info->has_pci_bus)
properties.pNext = &ext_pci_properties;
@@ -234,7 +225,7 @@ static void print_gpu(const struct instance_info *info, unsigned index, VkPhysic
}
fprintf(stderr, " GPU %d: %x:%x \"%s\" %s", index, properties.properties.vendorID,
properties.properties.deviceID, properties.properties.deviceName, type);
- if (info->has_pci_bus)
+ if (info->has_vulkan11 && info->has_pci_bus)
fprintf(stderr, " %04x:%02x:%02x.%x", ext_pci_properties.pciDomain,
ext_pci_properties.pciBus, ext_pci_properties.pciDevice,
ext_pci_properties.pciFunction);
@@ -249,8 +240,8 @@ static bool fill_drm_device_info(const struct instance_info *info,
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT
};
- VkPhysicalDeviceProperties2KHR properties = (VkPhysicalDeviceProperties2KHR){
- .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR
+ VkPhysicalDeviceProperties2 properties = (VkPhysicalDeviceProperties2){
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2
};
if (info->has_vulkan11 && info->has_pci_bus)
@@ -260,7 +251,7 @@ static bool fill_drm_device_info(const struct instance_info *info,
drm_device->cpu_device = properties.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU;
drm_device->dev_info.vendor_id = properties.properties.vendorID;
drm_device->dev_info.device_id = properties.properties.deviceID;
- if (info->has_pci_bus) {
+ if (info->has_vulkan11 && info->has_pci_bus) {
drm_device->has_bus_info = true;
drm_device->bus_info.domain = ext_pci_properties.pciDomain;
drm_device->bus_info.bus = ext_pci_properties.pciBus;
@@ -293,6 +284,15 @@ static int device_select_find_dri_prime_tag_default(struct device_pci_info *pci_
const char *dri_prime)
{
int default_idx = -1;
+
+ /* Drop the trailing '!' if present. */
+ int ref = strlen("pci-xxxx_yy_zz_w");
+ int n = strlen(dri_prime);
+ if (n < ref)
+ return default_idx;
+ if (n == ref + 1 && dri_prime[n - 1] == '!')
+ n--;
+
for (unsigned i = 0; i < device_count; ++i) {
char *tag = NULL;
if (asprintf(&tag, "pci-%04x_%02x_%02x_%1u",
@@ -300,7 +300,7 @@ static int device_select_find_dri_prime_tag_default(struct device_pci_info *pci_
pci_infos[i].bus_info.bus,
pci_infos[i].bus_info.dev,
pci_infos[i].bus_info.func) >= 0) {
- if (strcmp(dri_prime, tag))
+ if (strncmp(dri_prime, tag, n) == 0)
default_idx = i;
}
free(tag);
@@ -308,6 +308,63 @@ static int device_select_find_dri_prime_tag_default(struct device_pci_info *pci_
return default_idx;
}
+static int device_select_find_boot_vga_vid_did(struct device_pci_info *pci_infos,
+ uint32_t device_count)
+{
+ char path[1024];
+ int fd;
+ int default_idx = -1;
+ uint8_t boot_vga = 0;
+ ssize_t size_ret;
+ #pragma pack(push, 1)
+ struct id {
+ uint16_t vid;
+ uint16_t did;
+ }id;
+ #pragma pack(pop)
+
+ for (unsigned i = 0; i < 64; i++) {
+ snprintf(path, 1023, "/sys/class/drm/card%d/device/boot_vga", i);
+ fd = open(path, O_RDONLY);
+ if (fd != -1) {
+ uint8_t val;
+ size_ret = read(fd, &val, 1);
+ close(fd);
+ if (size_ret == 1 && val == '1')
+ boot_vga = 1;
+ } else {
+ return default_idx;
+ }
+
+ if (boot_vga) {
+ snprintf(path, 1023, "/sys/class/drm/card%d/device/config", i);
+ fd = open(path, O_RDONLY);
+ if (fd != -1) {
+ size_ret = read(fd, &id, 4);
+ close(fd);
+ if (size_ret != 4)
+ return default_idx;
+ } else {
+ return default_idx;
+ }
+ break;
+ }
+ }
+
+ if (!boot_vga)
+ return default_idx;
+
+ for (unsigned i = 0; i < device_count; ++i) {
+ if (id.vid == pci_infos[i].dev_info.vendor_id &&
+ id.did == pci_infos[i].dev_info.device_id) {
+ default_idx = i;
+ break;
+ }
+ }
+
+ return default_idx;
+}
+
static int device_select_find_boot_vga_default(struct device_pci_info *pci_infos,
uint32_t device_count)
{
@@ -349,32 +406,50 @@ static int device_select_find_non_cpu(struct device_pci_info *pci_infos,
static int find_non_cpu_skip(struct device_pci_info *pci_infos,
uint32_t device_count,
- int skip_idx)
+ int skip_idx,
+ int skip_count)
{
for (unsigned i = 0; i < device_count; ++i) {
if (i == skip_idx)
continue;
if (pci_infos[i].cpu_device)
continue;
+ skip_count--;
+ if (skip_count > 0)
+ continue;
+
return i;
}
return -1;
}
+static bool should_debug_device_selection() {
+ return debug_get_bool_option("MESA_VK_DEVICE_SELECT_DEBUG", false) ||
+ debug_get_bool_option("DRI_PRIME_DEBUG", false);
+}
+
+static bool ends_with_exclamation_mark(const char *str) {
+ size_t n = strlen(str);
+ return n > 1 && str[n - 1] == '!';
+}
+
static uint32_t get_default_device(const struct instance_info *info,
const char *selection,
uint32_t physical_device_count,
- VkPhysicalDevice *pPhysicalDevices)
+ VkPhysicalDevice *pPhysicalDevices,
+ bool *expose_only_one_dev)
{
int default_idx = -1;
const char *dri_prime = getenv("DRI_PRIME");
- bool dri_prime_is_one = false;
+ bool debug = should_debug_device_selection();
+ int dri_prime_as_int = -1;
int cpu_count = 0;
- if (dri_prime && !strcmp(dri_prime, "1"))
- dri_prime_is_one = true;
+ if (dri_prime) {
+ if (strchr(dri_prime, ':') == NULL)
+ dri_prime_as_int = atoi(dri_prime);
- if (dri_prime && !dri_prime_is_one && !info->has_pci_bus) {
- fprintf(stderr, "device-select: cannot correctly use DRI_PRIME tag\n");
+ if (dri_prime_as_int < 0)
+ dri_prime_as_int = 0;
}
struct device_pci_info *pci_infos = (struct device_pci_info *)calloc(physical_device_count, sizeof(struct device_pci_info));
@@ -387,21 +462,80 @@ static uint32_t get_default_device(const struct instance_info *info,
if (selection)
default_idx = device_select_find_explicit_default(pci_infos, physical_device_count, selection);
- if (default_idx == -1 && info->has_pci_bus && dri_prime && !dri_prime_is_one)
- default_idx = device_select_find_dri_prime_tag_default(pci_infos, physical_device_count, dri_prime);
- if (default_idx == -1 && info->has_wayland)
+ if (default_idx != -1) {
+ *expose_only_one_dev = ends_with_exclamation_mark(selection);
+ }
+
+ if (default_idx == -1 && dri_prime && dri_prime_as_int == 0) {
+ /* Try DRI_PRIME=vendor_id:device_id */
+ default_idx = device_select_find_explicit_default(pci_infos, physical_device_count, dri_prime);
+ if (default_idx != -1) {
+ if (debug)
+ fprintf(stderr, "device-select: device_select_find_explicit_default selected %i\n", default_idx);
+ *expose_only_one_dev = ends_with_exclamation_mark(dri_prime);
+ }
+
+ if (default_idx == -1) {
+ /* Try DRI_PRIME=pci-xxxx_yy_zz_w */
+ if (!info->has_vulkan11 && !info->has_pci_bus)
+ fprintf(stderr, "device-select: cannot correctly use DRI_PRIME tag\n");
+ else
+ default_idx = device_select_find_dri_prime_tag_default(pci_infos, physical_device_count, dri_prime);
+
+ if (default_idx != -1) {
+ if (debug)
+ fprintf(stderr, "device-select: device_select_find_dri_prime_tag_default selected %i\n", default_idx);
+ *expose_only_one_dev = ends_with_exclamation_mark(dri_prime);
+ }
+ }
+ }
+ if (default_idx == -1 && info->has_wayland) {
default_idx = device_select_find_wayland_pci_default(pci_infos, physical_device_count);
- if (default_idx == -1 && info->has_xcb)
+ if (debug && default_idx != -1)
+ fprintf(stderr, "device-select: device_select_find_wayland_pci_default selected %i\n", default_idx);
+ }
+ if (default_idx == -1 && info->has_xcb) {
default_idx = device_select_find_xcb_pci_default(pci_infos, physical_device_count);
- if (default_idx == -1 && info->has_pci_bus)
- default_idx = device_select_find_boot_vga_default(pci_infos, physical_device_count);
- if (default_idx == -1 && cpu_count)
+ if (debug && default_idx != -1)
+ fprintf(stderr, "device-select: device_select_find_xcb_pci_default selected %i\n", default_idx);
+ }
+ if (default_idx == -1) {
+ if (info->has_vulkan11 && info->has_pci_bus)
+ default_idx = device_select_find_boot_vga_default(pci_infos, physical_device_count);
+ else
+ default_idx = device_select_find_boot_vga_vid_did(pci_infos, physical_device_count);
+ if (debug && default_idx != -1)
+ fprintf(stderr, "device-select: device_select_find_boot_vga selected %i\n", default_idx);
+ }
+ if (default_idx == -1 && cpu_count) {
default_idx = device_select_find_non_cpu(pci_infos, physical_device_count);
-
- /* DRI_PRIME=1 handling - pick any other device than default. */
- if (default_idx != -1 && dri_prime_is_one && physical_device_count > (cpu_count + 1)) {
- if (default_idx == 0 || default_idx == 1)
- default_idx = find_non_cpu_skip(pci_infos, physical_device_count, default_idx);
+ if (debug && default_idx != -1)
+ fprintf(stderr, "device-select: device_select_find_non_cpu selected %i\n", default_idx);
+ }
+ /* If no GPU has been selected so far, select the first non-CPU device. If none are available,
+ * pick the first CPU device.
+ */
+ if (default_idx == -1) {
+ default_idx = device_select_find_non_cpu(pci_infos, physical_device_count);
+ if (default_idx != -1) {
+ if (debug)
+ fprintf(stderr, "device-select: device_select_find_non_cpu selected %i\n", default_idx);
+ } else if (cpu_count) {
+ default_idx = 0;
+ }
+ }
+ /* DRI_PRIME=n handling - pick any other device than default. */
+ if (dri_prime_as_int > 0 && debug)
+ fprintf(stderr, "device-select: DRI_PRIME=%d, default_idx so far: %i\n", dri_prime_as_int, default_idx);
+ if (dri_prime_as_int > 0 && physical_device_count > (cpu_count + 1)) {
+ if (default_idx == 0 || default_idx == 1) {
+ default_idx = find_non_cpu_skip(pci_infos, physical_device_count, default_idx, dri_prime_as_int);
+ if (default_idx != -1) {
+ if (debug)
+ fprintf(stderr, "device-select: find_non_cpu_skip selected %i\n", default_idx);
+ *expose_only_one_dev = ends_with_exclamation_mark(dri_prime);
+ }
+ }
}
free(pci_infos);
return default_idx == -1 ? 0 : default_idx;
@@ -415,8 +549,9 @@ static VkResult device_select_EnumeratePhysicalDevices(VkInstance instance,
uint32_t physical_device_count = 0;
uint32_t selected_physical_device_count = 0;
const char* selection = getenv("MESA_VK_DEVICE_SELECT");
+ bool expose_only_one_dev = false;
VkResult result = info->EnumeratePhysicalDevices(instance, &physical_device_count, NULL);
- VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
+ VK_OUTARRAY_MAKE_TYPED(VkPhysicalDevice, out, pPhysicalDevices, pPhysicalDeviceCount);
if (result != VK_SUCCESS)
return result;
@@ -447,19 +582,22 @@ static VkResult device_select_EnumeratePhysicalDevices(VkInstance instance,
free(extensions);
}
}
- if (selection && strcmp(selection, "list") == 0) {
+ if (should_debug_device_selection() || (selection && strcmp(selection, "list") == 0)) {
fprintf(stderr, "selectable devices:\n");
for (unsigned i = 0; i < physical_device_count; ++i)
print_gpu(info, i, physical_devices[i]);
- exit(0);
- } else {
- unsigned selected_index = get_default_device(info, selection, physical_device_count, physical_devices);
- selected_physical_device_count = physical_device_count;
- selected_physical_devices[0] = physical_devices[selected_index];
- for (unsigned i = 0; i < physical_device_count - 1; ++i) {
- unsigned this_idx = i < selected_index ? i : i + 1;
- selected_physical_devices[i + 1] = physical_devices[this_idx];
- }
+
+ if (selection && strcmp(selection, "list") == 0)
+ exit(0);
+ }
+
+ unsigned selected_index = get_default_device(info, selection, physical_device_count,
+ physical_devices, &expose_only_one_dev);
+ selected_physical_device_count = physical_device_count;
+ selected_physical_devices[0] = physical_devices[selected_index];
+ for (unsigned i = 0; i < physical_device_count - 1; ++i) {
+ unsigned this_idx = i < selected_index ? i : i + 1;
+ selected_physical_devices[i + 1] = physical_devices[this_idx];
}
if (selected_physical_device_count == 0) {
@@ -468,8 +606,16 @@ static VkResult device_select_EnumeratePhysicalDevices(VkInstance instance,
assert(result == VK_SUCCESS);
+ /* do not give multiple device option to app if force default device */
+ const char *force_default_device = getenv("MESA_VK_DEVICE_SELECT_FORCE_DEFAULT_DEVICE");
+ if (force_default_device && !strcmp(force_default_device, "1") && selected_physical_device_count != 0)
+ expose_only_one_dev = true;
+
+ if (expose_only_one_dev)
+ selected_physical_device_count = 1;
+
for (unsigned i = 0; i < selected_physical_device_count; i++) {
- vk_outarray_append(&out, ent) {
+ vk_outarray_append_typed(VkPhysicalDevice, &out, ent) {
*ent = selected_physical_devices[i];
}
}
@@ -488,7 +634,7 @@ static VkResult device_select_EnumeratePhysicalDeviceGroups(VkInstance instance,
uint32_t physical_device_group_count = 0;
uint32_t selected_physical_device_group_count = 0;
VkResult result = info->EnumeratePhysicalDeviceGroups(instance, &physical_device_group_count, NULL);
- VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroups, pPhysicalDeviceGroupCount);
+ VK_OUTARRAY_MAKE_TYPED(VkPhysicalDeviceGroupProperties, out, pPhysicalDeviceGroups, pPhysicalDeviceGroupCount);
if (result != VK_SUCCESS)
return result;
@@ -501,6 +647,9 @@ static VkResult device_select_EnumeratePhysicalDeviceGroups(VkInstance instance,
goto out;
}
+ for (unsigned i = 0; i < physical_device_group_count; i++)
+ physical_device_groups[i].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
+
result = info->EnumeratePhysicalDeviceGroups(instance, &physical_device_group_count, physical_device_groups);
if (result != VK_SUCCESS)
goto out;
@@ -513,8 +662,8 @@ static VkResult device_select_EnumeratePhysicalDeviceGroups(VkInstance instance,
bool group_has_cpu_device = false;
for (unsigned j = 0; j < physical_device_groups[i].physicalDeviceCount; j++) {
VkPhysicalDevice physical_device = physical_device_groups[i].physicalDevices[j];
- VkPhysicalDeviceProperties2KHR properties = (VkPhysicalDeviceProperties2KHR){
- .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR
+ VkPhysicalDeviceProperties2 properties = (VkPhysicalDeviceProperties2){
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2
};
info->GetPhysicalDeviceProperties(physical_device, &properties.properties);
group_has_cpu_device = properties.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU;
@@ -532,7 +681,7 @@ static VkResult device_select_EnumeratePhysicalDeviceGroups(VkInstance instance,
assert(result == VK_SUCCESS);
for (unsigned i = 0; i < selected_physical_device_group_count; i++) {
- vk_outarray_append(&out, ent) {
+ vk_outarray_append_typed(VkPhysicalDeviceGroupProperties, &out, ent) {
*ent = selected_physical_device_groups[i];
}
}
@@ -543,12 +692,6 @@ out:
return result;
}
-static void (*get_pdevice_proc_addr(VkInstance instance, const char* name))()
-{
- struct instance_info *info = device_select_layer_get_instance(instance);
- return info->GetPhysicalDeviceProcAddr(instance, name);
-}
-
static void (*get_instance_proc_addr(VkInstance instance, const char* name))()
{
if (strcmp(name, "vkGetInstanceProcAddr") == 0)
@@ -566,14 +709,13 @@ static void (*get_instance_proc_addr(VkInstance instance, const char* name))()
return info->GetInstanceProcAddr(instance, name);
}
-VK_LAYER_EXPORT VkResult vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct)
+PUBLIC VkResult vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct)
{
if (pVersionStruct->loaderLayerInterfaceVersion < 2)
return VK_ERROR_INITIALIZATION_FAILED;
pVersionStruct->loaderLayerInterfaceVersion = 2;
pVersionStruct->pfnGetInstanceProcAddr = get_instance_proc_addr;
- pVersionStruct->pfnGetPhysicalDeviceProcAddr = get_pdevice_proc_addr;
return VK_SUCCESS;
}
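
The DRI_PRIME handling added above accepts three forms of value: a plain integer N (skip to the Nth non-CPU device other than the default), a "vendor_id:device_id" pair, or a "pci-xxxx_yy_zz_w" tag, with an optional trailing '!' meaning "expose only the selected device"; MESA_VK_DEVICE_SELECT still takes priority. The following is a minimal, illustrative sketch of that classification only; the names and types below are hypothetical and are not part of the layer:

   /* Illustrative sketch of how a DRI_PRIME value is interpreted by the
    * layer above. Only the semantics are taken from the change; the names
    * here are made up for clarity. */
   #include <stdbool.h>
   #include <stdlib.h>
   #include <string.h>

   enum dri_prime_kind { PRIME_INDEX, PRIME_VID_DID, PRIME_PCI_TAG };

   struct dri_prime_request {
      enum dri_prime_kind kind;
      int index;             /* only meaningful for PRIME_INDEX; 0 = no skip */
      bool expose_only_one;  /* trailing '!' on the value */
   };

   static struct dri_prime_request
   classify_dri_prime(const char *value)
   {
      struct dri_prime_request req = { PRIME_INDEX, 0, false };
      size_t n = strlen(value);

      if (n > 1 && value[n - 1] == '!')
         req.expose_only_one = true;

      if (strchr(value, ':')) {
         req.kind = PRIME_VID_DID;      /* e.g. DRI_PRIME=10de:2484 */
      } else if (strncmp(value, "pci-", 4) == 0) {
         req.kind = PRIME_PCI_TAG;      /* e.g. DRI_PRIME=pci-0000_03_00_0 */
      } else {
         req.index = atoi(value);       /* e.g. DRI_PRIME=1 */
         if (req.index < 0)
            req.index = 0;
      }
      return req;
   }
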
diff --git a/src/vulkan/device-select-layer/device_select_wayland.c b/src/vulkan/device-select-layer/device_select_wayland.c
index 161310b0292..a744cdbfb75 100644
--- a/src/vulkan/device-select-layer/device_select_wayland.c
+++ b/src/vulkan/device-select-layer/device_select_wayland.c
@@ -20,7 +20,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#include "macros.h"
+#include "util/macros.h"
#include <wayland-client.h>
#include "wayland-drm-client-protocol.h"
#include "device_select.h"
@@ -83,7 +83,7 @@ device_select_registry_global(void *data, struct wl_registry *registry, uint32_t
const char *interface, uint32_t version)
{
struct device_select_wayland_info *info = data;
- if (strcmp(interface, "wl_drm") == 0) {
+ if (strcmp(interface, wl_drm_interface.name) == 0) {
info->wl_drm = wl_registry_bind(registry, name, &wl_drm_interface, MIN2(version, 2));
wl_drm_add_listener(info->wl_drm, &ds_drm_listener, data);
}
@@ -137,6 +137,8 @@ int device_select_find_wayland_pci_default(struct device_pci_info *devices, uint
if (default_idx != -1)
break;
}
+
+ drmFreeDevice(&info.dev_info);
}
if (info.wl_drm)
diff --git a/src/vulkan/device-select-layer/device_select_x11.c b/src/vulkan/device-select-layer/device_select_x11.c
index 93b39f269a4..dbfd622cd78 100644
--- a/src/vulkan/device-select-layer/device_select_x11.c
+++ b/src/vulkan/device-select-layer/device_select_x11.c
@@ -67,12 +67,14 @@ int device_select_find_xcb_pci_default(struct device_pci_info *devices, uint32_t
int scrn;
xcb_connection_t *conn;
int default_idx = -1;
+ drmDevicePtr xdev = NULL;
+
conn = xcb_connect(NULL, &scrn);
if (!conn)
return -1;
xcb_query_extension_cookie_t dri3_cookie;
- xcb_query_extension_reply_t *dri3_reply;
+ xcb_query_extension_reply_t *dri3_reply = NULL;
dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
@@ -82,6 +84,7 @@ int device_select_find_xcb_pci_default(struct device_pci_info *devices, uint32_t
if (dri3_reply->present == 0)
goto out;
+
setup = xcb_get_setup(conn);
iter = xcb_setup_roots_iterator(setup);
@@ -91,8 +94,8 @@ int device_select_find_xcb_pci_default(struct device_pci_info *devices, uint32_t
if (dri3_fd == -1)
goto out;
- drmDevicePtr xdev;
int ret = drmGetDevice2(dri3_fd, 0, &xdev);
+ close(dri3_fd);
if (ret < 0)
goto out;
@@ -112,7 +115,10 @@ int device_select_find_xcb_pci_default(struct device_pci_info *devices, uint32_t
if (default_idx != -1)
break;
}
+
out:
+ free(dri3_reply);
+ drmFreeDevice(&xdev); /* Is NULL pointer safe. */
xcb_disconnect(conn);
return default_idx;
}
diff --git a/src/vulkan/device-select-layer/meson.build b/src/vulkan/device-select-layer/meson.build
index c1336b27c3f..e2da2cac034 100644
--- a/src/vulkan/device-select-layer/meson.build
+++ b/src/vulkan/device-select-layer/meson.build
@@ -28,14 +28,12 @@ vklayer_flags = []
if with_platform_x11
vklayer_files += files('device_select_x11.c')
vklayer_deps += dep_xcb_dri3
- vklayer_flags += [ '-DVK_USE_PLATFORM_XCB_KHR' ]
endif
if with_platform_wayland
vklayer_files += files('device_select_wayland.c')
vklayer_files += [ wayland_drm_client_protocol_h, wayland_drm_protocol_c ]
- vklayer_deps += [dep_wayland_client, dep_wl_protocols]
- vklayer_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
+ vklayer_deps += dep_wayland_client
endif
vklayer_mesa_device_select = shared_library(
@@ -43,8 +41,11 @@ vklayer_mesa_device_select = shared_library(
vklayer_files,
c_args : [no_override_init_args, vklayer_flags],
gnu_symbol_visibility : 'hidden',
- dependencies : [idep_vulkan_util, idep_mesautil, vklayer_deps, dep_libdrm, dep_dl],
- include_directories : [inc_include, inc_util, inc_src, inc_vulkan_wsi],
+ dependencies : [
+ idep_vulkan_util, idep_vulkan_wsi_headers, idep_mesautil, vklayer_deps,
+ dep_libdrm, dep_dl
+ ],
+ include_directories : [inc_include, inc_util, inc_src],
link_args : cc.get_supported_link_arguments(['-Wl,-Bsymbolic-functions', '-Wl,-z,relro']),
install : true
)
@@ -52,4 +53,5 @@ vklayer_mesa_device_select = shared_library(
install_data(
files('VkLayer_MESA_device_select.json'),
install_dir : join_paths(get_option('datadir'), 'vulkan', 'implicit_layer.d'),
+ install_tag : 'runtime',
)
diff --git a/src/vulkan/meson.build b/src/vulkan/meson.build
index 4590cfe4070..a94ed1af38c 100644
--- a/src/vulkan/meson.build
+++ b/src/vulkan/meson.build
@@ -19,50 +19,85 @@
# SOFTWARE.
vk_api_xml = files('registry/vk.xml')
-vulkan_icd_symbols = files('vulkan-icd-symbols.txt')
-inc_vulkan_wsi = include_directories('wsi')
+vulkan_icd_link_args = []
+vulkan_icd_link_depends = []
+
+if with_platform_android
+ vulkan_icd_symbols = files('vulkan-icd-android-symbols.txt')
+ if with_ld_version_script
+ vulkan_icd_link_args += ['-Wl,--version-script', join_paths(meson.current_source_dir(), 'vulkan-android.sym')]
+ vulkan_icd_link_depends += files('vulkan-android.sym')
+ endif
+else
+ vulkan_icd_symbols = files('vulkan-icd-symbols.txt')
+ if with_ld_version_script
+ vulkan_icd_link_args += ['-Wl,--version-script', join_paths(meson.current_source_dir(), 'vulkan.sym')]
+ vulkan_icd_link_depends += files('vulkan.sym')
+ endif
+endif
+
inc_vulkan_util = include_directories('util')
-vulkan_wsi_args = []
vulkan_wsi_deps = []
vulkan_wsi_list = []
+vulkan_api_def = custom_target(
+ 'vulkan_api.def',
+ input: 'vulkan_api.def.in',
+ output : 'vulkan_api.def',
+ command : gen_vs_module_defs_normal_command,
+)
+
if with_platform_x11
- vulkan_wsi_args += ['-DVK_USE_PLATFORM_XCB_KHR', '-DVK_USE_PLATFORM_XLIB_KHR']
vulkan_wsi_deps += [
dep_xcb,
dep_x11_xcb,
dep_xcb_dri3,
dep_xcb_present,
+ dep_xcb_xfixes,
dep_xcb_sync,
dep_xcb_xrandr,
dep_xcb_shm,
dep_xshmfence,
]
- vulkan_wsi_list += ['xcb', 'x11']
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_XCB_KHR'
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_XLIB_KHR'
+ if with_xcb_keysyms
+ vulkan_wsi_deps += dep_xcb_keysyms
+ endif
endif
if with_platform_wayland
- vulkan_wsi_args += ['-DVK_USE_PLATFORM_WAYLAND_KHR']
vulkan_wsi_deps += dep_wayland_client
- vulkan_wsi_list += ['wayland']
-endif
-if with_platform_windows
- vulkan_wsi_args += ['-DVK_USE_PLATFORM_WIN32_KHR']
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_WAYLAND_KHR'
endif
if system_has_kms_drm and not with_platform_android
- vulkan_wsi_args += '-DVK_USE_PLATFORM_DISPLAY_KHR'
vulkan_wsi_deps += [dep_libdrm]
- vulkan_wsi_list += ['drm']
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_DISPLAY_KHR'
endif
if with_xlib_lease
- vulkan_wsi_args += '-DVK_USE_PLATFORM_XLIB_XRANDR_EXT'
vulkan_wsi_deps += [dep_xlib_xrandr]
- vulkan_wsi_list += ['xlib_xrandr']
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_XLIB_XRANDR_EXT'
+endif
+if with_platform_android
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_ANDROID_KHR'
+endif
+if with_platform_windows
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_WIN32_KHR'
+endif
+if host_machine.system() == 'darwin'
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_MACOS_MVK'
+ vulkan_wsi_list += '-DVK_USE_PLATFORM_METAL_EXT'
endif
+idep_vulkan_wsi_defines = declare_dependency(
+ compile_args : vulkan_wsi_list,
+ dependencies : vulkan_wsi_deps,
+)
+vulkan_wsi_deps += idep_vulkan_wsi_defines
subdir('util')
+subdir('runtime')
subdir('wsi')
if with_vulkan_overlay_layer
subdir('overlay-layer')
diff --git a/src/vulkan/overlay-layer/README b/src/vulkan/overlay-layer/README
deleted file mode 100644
index 20ab73103e7..00000000000
--- a/src/vulkan/overlay-layer/README
+++ /dev/null
@@ -1,52 +0,0 @@
-A Vulkan layer to display information about the running application
-using an overlay.
-
-To turn on the layer run :
-
-VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay /path/to/my_vulkan_app
-
-List the available statistics :
-
-VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=help /path/to/my_vulkan_app
-
-Turn on some statistics :
-VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=submit,draw,pipeline_graphics /path/to/my_vulkan_app
-
-Position the layer :
-
-VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=submit,draw,pipeline_graphics,position=top-right /path/to/my_vulkan_app
-
-Dump statistics into a file:
-
-VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=position=top-right,output_file=/tmp/output.txt /path/to/my_vulkan_app
-
-Dump statistics into a file, controlling when such statistics will start
-to be captured:
-
-VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=position=top-right,output_file=/tmp/output.txt,control=mesa_overlay /path/to/my_vulkan_app
-
-The above command will open a unix socket with abstract path
-'mesa_overlay'. Once a client connects to the socket, the overlay layer
-will immediately send the following commands to the client:
-
-:MesaOverlayControlVersion=1;
-:DeviceName=<device name>;
-:MesaVersion=<mesa version>;
-
-The client connected to the overlay layer can enable statistics
-capturing by sending the command:
-
-:capture=1;
-
-And disable it by emitting
-
-:capture=0;
-
-By default, capture is enabled when an output_file is specified, but it
-will be disabled by default when a control socket is in use. In the
-latter case, it needs to be explicitly enabled through the sockets, by
-using the commands above.
-
-The provided script overlay-control.py can be used to start/stop
-capture. The --path option can be used to specify the socket path. By
-default, it will try to connect to a path named "mesa_overlay".
diff --git a/src/vulkan/overlay-layer/README.rst b/src/vulkan/overlay-layer/README.rst
new file mode 100644
index 00000000000..3ebd92a9463
--- /dev/null
+++ b/src/vulkan/overlay-layer/README.rst
@@ -0,0 +1,100 @@
+A Vulkan layer to display information about the running application using an overlay.
+
+Building
+=======
+
+The overlay layer will be built if :code:`overlay` is passed as a :code:`vulkan-layers` argument. For example:
+
+.. code-block:: sh
+
+ meson -Dvulkan-layers=device-select,overlay builddir/
+ ninja -C builddir/
+ sudo ninja -C builddir/ install
+
+See `docs/install.rst <https://gitlab.freedesktop.org/mesa/mesa/-/blob/master/docs/install.rst>`__ for more information.
+
+Basic Usage
+=======
+
+Turn on the layer:
+
+.. code-block:: sh
+
+ VK_LOADER_LAYERS_ENABLE=VK_LAYER_MESA_overlay /path/to/my_vulkan_app
+
+
+List the available statistics:
+
+.. code-block:: sh
+
+ VK_LOADER_LAYERS_ENABLE=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=help /path/to/my_vulkan_app
+
+
+Turn on some statistics:
+
+.. code-block:: sh
+
+ VK_LOADER_LAYERS_ENABLE=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=submit,draw,pipeline_graphics /path/to/my_vulkan_app
+
+Position the overlay:
+
+.. code-block:: sh
+
+ VK_LOADER_LAYERS_ENABLE=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=submit,draw,pipeline_graphics,position=top-right /path/to/my_vulkan_app
+
+Logging Statistics
+=======
+
+Log statistics to a file:
+
+.. code-block:: sh
+
+ VK_LOADER_LAYERS_ENABLE=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=output_file=/tmp/output.txt /path/to/my_vulkan_app
+
+Logging is enabled for the entire lifecycle of the process unless a control socket is specified (see below).
+
+**Note:** some statistics (e.g. :code:`frame_timing` and :code:`gpu_timing`) log values for the entire sample interval instead of per-frame.
+For these statistics, logging the :code:`frame` statistic allows one to compute per-frame statistics after capture.
+
+Log statistics to a file, controlling when such statistics will start to be captured:
+
+.. code-block:: sh
+
+ VK_LOADER_LAYERS_ENABLE=VK_LAYER_MESA_overlay VK_LAYER_MESA_OVERLAY_CONFIG=output_file=/tmp/output.txt,control=mesa_overlay /path/to/my_vulkan_app
+
+The command above will open a Unix socket with the abstract path :code:`mesa_overlay`. When a control socket is specified,
+logging must be explicitly enabled through the control socket. :code:`mesa-overlay-control.py` provides a convenient CLI:
+
+.. code-block:: sh
+
+ mesa-overlay-control.py start-capture
+
+.. code-block:: sh
+
+ mesa-overlay-control.py stop-capture
+
+Direct Socket Control
+------
+
+The Unix socket may be used directly if needed. Once a client connects to the socket, the overlay layer will immediately
+send the following commands to the client:
+
+.. code-block:: sh
+
+ :MesaOverlayControlVersion=1;
+ :DeviceName=<device name>;
+ :MesaVersion=<mesa version>;
+
+The client connected to the overlay layer can enable statistics capturing by sending the command:
+
+.. code-block:: sh
+
+ :capture=1;
+
+And disable it by sending:
+
+.. code-block:: sh
+
+ :capture=0;
+
+.. _docs/install.rst: ../../docs/install.rst
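
The control protocol described in the README above can also be exercised without mesa-overlay-control.py. The following is a minimal sketch of a client in C, under the assumption that the layer was configured with control=mesa_overlay (an abstract-namespace Unix socket) and that a single read of the greeting is enough for illustration; it is not part of this change:

   /* Minimal sketch of a control-socket client for the overlay layer.
    * mesa-overlay-control.py remains the supported client. */
   #include <stddef.h>
   #include <stdio.h>
   #include <string.h>
   #include <sys/socket.h>
   #include <sys/un.h>
   #include <unistd.h>

   int main(void)
   {
      int fd = socket(AF_UNIX, SOCK_STREAM, 0);
      if (fd < 0)
         return 1;

      /* Abstract socket: sun_path[0] stays '\0', the name follows it. */
      struct sockaddr_un addr = { .sun_family = AF_UNIX };
      const char *name = "mesa_overlay";
      memcpy(addr.sun_path + 1, name, strlen(name));
      socklen_t len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(name);

      if (connect(fd, (struct sockaddr *)&addr, len) < 0) {
         close(fd);
         return 1;
      }

      /* The layer immediately sends :MesaOverlayControlVersion=1; etc. */
      char buf[256];
      ssize_t n = read(fd, buf, sizeof(buf) - 1);
      if (n > 0) {
         buf[n] = '\0';
         fputs(buf, stdout);
      }

      /* Start capturing statistics to the configured output_file. */
      const char *cmd = ":capture=1;";
      if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
         fprintf(stderr, "short write on control socket\n");

      close(fd);
      return 0;
   }
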
diff --git a/src/vulkan/overlay-layer/VkLayer_MESA_overlay.json b/src/vulkan/overlay-layer/VkLayer_MESA_overlay.json
index 6877c3a3ecd..2fc73a58b8c 100644
--- a/src/vulkan/overlay-layer/VkLayer_MESA_overlay.json
+++ b/src/vulkan/overlay-layer/VkLayer_MESA_overlay.json
@@ -4,7 +4,7 @@
"name": "VK_LAYER_MESA_overlay",
"type": "GLOBAL",
"library_path": "libVkLayer_MESA_overlay.so",
- "api_version": "1.1.73",
+ "api_version": "1.3.211",
"implementation_version": "1",
"description": "Mesa Overlay layer"
}
diff --git a/src/vulkan/overlay-layer/mesa-overlay-control.py b/src/vulkan/overlay-layer/mesa-overlay-control.py
index 6947250cff8..aebb042ddca 100755
--- a/src/vulkan/overlay-layer/mesa-overlay-control.py
+++ b/src/vulkan/overlay-layer/mesa-overlay-control.py
@@ -1,11 +1,9 @@
#!/usr/bin/env python3
-import os
import socket
import sys
import select
from select import EPOLLIN, EPOLLPRI, EPOLLERR
import time
-from collections import namedtuple
import argparse
TIMEOUT = 1.0 # seconds
@@ -96,12 +94,12 @@ class MsgParser:
while remaining > 0 and ncmds > 0:
now = time.monotonic()
- if self.buffer == None:
+ if self.buffer is None:
self.buffer = self.conn.recv(remaining)
self.bufferpos = 0
# disconnected or error
- if self.buffer == None:
+ if self.buffer is None:
return None
for i in range(self.bufferpos, len(self.buffer)):
@@ -173,16 +171,14 @@ def control(args):
elif cmd == MESA_VERSION_HEADER:
mesa_version = param.decode('utf-8')
- if version != 1 or name == None or mesa_version == None:
+ if version != 1 or name is None or mesa_version is None:
print('ERROR: invalid protocol')
sys.exit(1)
-
if args.info:
- info = "Protocol Version: {}\n"
- info += "Device Name: {}\n"
- info += "Mesa Version: {}"
- print(info.format(version, name, mesa_version))
+ print(f"Protocol Version: {version}")
+ print(f"Device Name: {name}")
+ print(f"Mesa Version: {mesa_version}")
if args.cmd == 'start-capture':
conn.send(bytearray(':capture=1;', 'utf-8'))
diff --git a/src/vulkan/overlay-layer/meson.build b/src/vulkan/overlay-layer/meson.build
index 3225b3f512b..8a01300bd37 100644
--- a/src/vulkan/overlay-layer/meson.build
+++ b/src/vulkan/overlay-layer/meson.build
@@ -26,7 +26,7 @@ overlay_spv = []
foreach s : ['overlay.frag', 'overlay.vert']
overlay_spv += custom_target(
s + '.spv.h', input : s, output : s + '.spv.h',
- command : [prog_glslang, '-V', '-x', '-o', '@OUTPUT@', '@INPUT@'])
+ command : [prog_glslang, '-V', '-x', '-o', '@OUTPUT@', '@INPUT@'] + glslang_quiet)
endforeach
vklayer_files = files(
@@ -37,8 +37,7 @@ vklayer_files = files(
vklayer_mesa_overlay = shared_library(
'VkLayer_MESA_overlay',
vklayer_files, overlay_spv, sha1_h,
- c_args : [no_override_init_args, vulkan_wsi_args],
- cpp_args : [vulkan_wsi_args],
+ c_args : [no_override_init_args],
gnu_symbol_visibility : 'hidden',
dependencies : [idep_vulkan_util, idep_mesautil, vulkan_wsi_deps, libimgui_core_dep, dep_dl],
include_directories : [inc_include, inc_src],
@@ -49,6 +48,7 @@ vklayer_mesa_overlay = shared_library(
install_data(
files('VkLayer_MESA_overlay.json'),
install_dir : join_paths(get_option('datadir'), 'vulkan', 'explicit_layer.d'),
+ install_tag : 'runtime',
)
install_data(
diff --git a/src/vulkan/overlay-layer/overlay.cpp b/src/vulkan/overlay-layer/overlay.cpp
index 270dd9b39e1..328d8d89be3 100644
--- a/src/vulkan/overlay-layer/overlay.cpp
+++ b/src/vulkan/overlay-layer/overlay.cpp
@@ -25,7 +25,7 @@
#include <stdlib.h>
#include <assert.h>
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
#include <vulkan/vk_layer.h>
#include "git_sha1.h"
@@ -34,7 +34,7 @@
#include "overlay_params.h"
-#include "util/debug.h"
+#include "util/u_debug.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/ralloc.h"
@@ -65,6 +65,8 @@ struct instance_data {
/* Dumping of frame stats to a file has been enabled and started. */
bool capture_started;
+
+ int socket;
};
struct frame_stat {
@@ -89,6 +91,8 @@ struct device_data {
struct queue_data **queues;
uint32_t n_queues;
+ bool pipeline_statistics_enabled;
+
/* For a single frame */
struct frame_stat frame_stats;
};
@@ -216,7 +220,7 @@ static const VkQueryPipelineStatisticFlags overlay_query_flags =
#define OVERLAY_QUERY_COUNT (11)
static struct hash_table_u64 *vk_object_to_data = NULL;
-static simple_mtx_t vk_object_to_data_mutex = _SIMPLE_MTX_INITIALIZER_NP;
+static simple_mtx_t vk_object_to_data_mutex = SIMPLE_MTX_INITIALIZER;
thread_local ImGuiContext* __MesaImGui;
@@ -269,7 +273,7 @@ static void unmap_object(uint64_t obj)
static VkLayerInstanceCreateInfo *get_instance_chain_info(const VkInstanceCreateInfo *pCreateInfo,
VkLayerFunction func)
{
- vk_foreach_struct(item, pCreateInfo->pNext) {
+ vk_foreach_struct_const(item, pCreateInfo->pNext) {
if (item->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
((VkLayerInstanceCreateInfo *) item)->function == func)
return (VkLayerInstanceCreateInfo *) item;
@@ -281,7 +285,7 @@ static VkLayerInstanceCreateInfo *get_instance_chain_info(const VkInstanceCreate
static VkLayerDeviceCreateInfo *get_device_chain_info(const VkDeviceCreateInfo *pCreateInfo,
VkLayerFunction func)
{
- vk_foreach_struct(item, pCreateInfo->pNext) {
+ vk_foreach_struct_const(item, pCreateInfo->pNext) {
if (item->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO &&
((VkLayerDeviceCreateInfo *) item)->function == func)
return (VkLayerDeviceCreateInfo *)item;
@@ -290,6 +294,16 @@ static VkLayerDeviceCreateInfo *get_device_chain_info(const VkDeviceCreateInfo *
return NULL;
}
+static void
+free_chain(struct VkBaseOutStructure *chain)
+{
+ while (chain) {
+ void *node = chain;
+ chain = chain->pNext;
+ free(node);
+ }
+}
+
static struct VkBaseOutStructure *
clone_chain(const struct VkBaseInStructure *chain)
{
@@ -297,6 +311,11 @@ clone_chain(const struct VkBaseInStructure *chain)
vk_foreach_struct_const(item, chain) {
size_t item_size = vk_structure_type_size(item);
+ if (item_size == 0) {
+ free_chain(head);
+ return NULL;
+ }
+
struct VkBaseOutStructure *new_item =
(struct VkBaseOutStructure *)malloc(item_size);;
@@ -312,16 +331,6 @@ clone_chain(const struct VkBaseInStructure *chain)
return head;
}
-static void
-free_chain(struct VkBaseOutStructure *chain)
-{
- while (chain) {
- void *node = chain;
- chain = chain->pNext;
- free(node);
- }
-}
-
/**/
static struct instance_data *new_instance_data(VkInstance instance)
@@ -329,6 +338,7 @@ static struct instance_data *new_instance_data(VkInstance instance)
struct instance_data *data = rzalloc(NULL, struct instance_data);
data->instance = instance;
data->control_client = -1;
+ data->socket = -1;
map_object(HKEY(data->instance), data);
return data;
}
@@ -337,8 +347,8 @@ static void destroy_instance_data(struct instance_data *data)
{
if (data->params.output_file)
fclose(data->params.output_file);
- if (data->params.control >= 0)
- os_socket_close(data->params.control);
+ if (data->socket >= 0)
+ os_socket_close(data->socket);
unmap_object(HKEY(data->instance));
ralloc_free(data);
}
@@ -710,7 +720,7 @@ static void control_client_check(struct device_data *device_data)
if (instance_data->control_client >= 0)
return;
- int socket = os_socket_accept(instance_data->params.control);
+ int socket = os_socket_accept(instance_data->socket);
if (socket == -1) {
if (errno != EAGAIN && errno != EWOULDBLOCK && errno != ECONNABORTED)
fprintf(stderr, "ERROR on socket: %s\n", strerror(errno));
@@ -776,7 +786,18 @@ static void snapshot_swapchain_frame(struct swapchain_data *data)
uint32_t f_idx = data->n_frames % ARRAY_SIZE(data->frames_stats);
uint64_t now = os_time_get(); /* us */
- if (instance_data->params.control >= 0) {
+ if (instance_data->params.control && instance_data->socket < 0) {
+ int ret = os_socket_listen_abstract(instance_data->params.control, 1);
+ if (ret >= 0) {
+ os_socket_block(ret, false);
+ instance_data->socket = ret;
+ } else {
+ fprintf(stderr, "ERROR: Couldn't create socket pipe at '%s'\n", instance_data->params.control);
+ fprintf(stderr, "ERROR: '%s'\n", strerror(errno));
+ }
+ }
+
+ if (instance_data->socket >= 0) {
control_client_check(device_data);
process_control_socket(instance_data);
}
@@ -1218,8 +1239,8 @@ static struct overlay_draw *render_swapchain_display(struct swapchain_data *data
VK_SUBPASS_CONTENTS_INLINE);
/* Create/Resize vertex & index buffers */
- size_t vertex_size = ALIGN(draw_data->TotalVtxCount * sizeof(ImDrawVert), device_data->properties.limits.nonCoherentAtomSize);
- size_t index_size = ALIGN(draw_data->TotalIdxCount * sizeof(ImDrawIdx), device_data->properties.limits.nonCoherentAtomSize);
+ size_t vertex_size = align_uintptr(draw_data->TotalVtxCount * sizeof(ImDrawVert), device_data->properties.limits.nonCoherentAtomSize);
+ size_t index_size = align_uintptr(draw_data->TotalIdxCount * sizeof(ImDrawIdx), device_data->properties.limits.nonCoherentAtomSize);
if (draw->vertex_buffer_size < vertex_size) {
CreateOrResizeBuffer(device_data,
&draw->vertex_buffer,
@@ -2182,7 +2203,7 @@ static void overlay_CmdBindPipeline(
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_GRAPHICS: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_graphics]++; break;
case VK_PIPELINE_BIND_POINT_COMPUTE: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_compute]++; break;
- case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_raytracing]++; break;
+ case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_raytracing]++; break;
default: break;
}
struct device_data *device_data = cmd_buffer_data->device;
@@ -2203,34 +2224,44 @@ static VkResult overlay_BeginCommandBuffer(
* we have the right inheritance.
*/
if (cmd_buffer_data->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
- VkCommandBufferBeginInfo *begin_info = (VkCommandBufferBeginInfo *)
- clone_chain((const struct VkBaseInStructure *)pBeginInfo);
- VkCommandBufferInheritanceInfo *parent_inhe_info = (VkCommandBufferInheritanceInfo *)
- vk_find_struct(begin_info, COMMAND_BUFFER_INHERITANCE_INFO);
- VkCommandBufferInheritanceInfo inhe_info = {
- VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
- NULL,
- VK_NULL_HANDLE,
- 0,
- VK_NULL_HANDLE,
- VK_FALSE,
- 0,
- overlay_query_flags,
- };
+ VkCommandBufferBeginInfo begin_info = *pBeginInfo;
- if (parent_inhe_info)
- parent_inhe_info->pipelineStatistics = overlay_query_flags;
- else {
- inhe_info.pNext = begin_info->pNext;
- begin_info->pNext = &inhe_info;
- }
+ struct VkBaseOutStructure *new_pnext =
+ clone_chain((const struct VkBaseInStructure *)pBeginInfo->pNext);
+ VkCommandBufferInheritanceInfo inhe_info;
- VkResult result = device_data->vtable.BeginCommandBuffer(commandBuffer, pBeginInfo);
+ /* If there was no pNext chain given or we managed to copy it, we can
+ * add our stuff in there.
+ *
+ * Otherwise, keep the old pointer. We failed to copy the pNext chain,
+ * meaning there is an unknown extension somewhere in there.
+ */
+ if (new_pnext || pBeginInfo->pNext == NULL) {
+ begin_info.pNext = new_pnext;
+
+ VkCommandBufferInheritanceInfo *parent_inhe_info = (VkCommandBufferInheritanceInfo *)
+ vk_find_struct(new_pnext, COMMAND_BUFFER_INHERITANCE_INFO);
+ inhe_info = (VkCommandBufferInheritanceInfo) {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
+ NULL,
+ VK_NULL_HANDLE,
+ 0,
+ VK_NULL_HANDLE,
+ VK_FALSE,
+ 0,
+ overlay_query_flags,
+ };
+
+ if (parent_inhe_info)
+ parent_inhe_info->pipelineStatistics = overlay_query_flags;
+ else
+ __vk_append_struct(&begin_info, &inhe_info);
+ }
- if (!parent_inhe_info)
- begin_info->pNext = inhe_info.pNext;
+ VkResult result = device_data->vtable.BeginCommandBuffer(
+ commandBuffer, &begin_info);
- free_chain((struct VkBaseOutStructure *)begin_info);
+ free_chain(new_pnext);
return result;
}
@@ -2334,7 +2365,7 @@ static VkResult overlay_AllocateCommandBuffers(
VkQueryPool pipeline_query_pool = VK_NULL_HANDLE;
VkQueryPool timestamp_query_pool = VK_NULL_HANDLE;
- if (device_data->instance->pipeline_statistics_enabled &&
+ if (device_data->pipeline_statistics_enabled &&
pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
VkQueryPoolCreateInfo pool_info = {
VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
@@ -2452,6 +2483,46 @@ static VkResult overlay_QueueSubmit(
return device_data->vtable.QueueSubmit(queue, submitCount, pSubmits, fence);
}
+static VkResult overlay_QueueSubmit2(
+ VkQueue queue,
+ uint32_t submitCount,
+ const VkSubmitInfo2* pSubmits,
+ VkFence fence)
+{
+ struct queue_data *queue_data = FIND(struct queue_data, queue);
+ struct device_data *device_data = queue_data->device;
+
+ device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;
+
+ for (uint32_t s = 0; s < submitCount; s++) {
+ for (uint32_t c = 0; c < pSubmits[s].commandBufferInfoCount; c++) {
+ struct command_buffer_data *cmd_buffer_data =
+ FIND(struct command_buffer_data, pSubmits[s].pCommandBufferInfos[c].commandBuffer);
+
+ /* Merge the submitted command buffer stats into the device. */
+ for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
+ device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];
+
+ /* Attach the command buffer to the queue so we remember to read its
+ * pipeline statistics & timestamps at QueuePresent().
+ */
+ if (!cmd_buffer_data->pipeline_query_pool &&
+ !cmd_buffer_data->timestamp_query_pool)
+ continue;
+
+ if (list_is_empty(&cmd_buffer_data->link)) {
+ list_addtail(&cmd_buffer_data->link,
+ &queue_data->running_command_buffer);
+ } else {
+ fprintf(stderr, "Command buffer submitted multiple times before present.\n"
+ "This could lead to invalid data.\n");
+ }
+ }
+ }
+
+ return device_data->vtable.QueueSubmit2(queue, submitCount, pSubmits, fence);
+}
+
static VkResult overlay_CreateDevice(
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
@@ -2477,29 +2548,33 @@ static VkResult overlay_CreateDevice(
VkPhysicalDeviceFeatures device_features = {};
VkPhysicalDeviceFeatures *device_features_ptr = NULL;
- VkDeviceCreateInfo *device_info = (VkDeviceCreateInfo *)
- clone_chain((const struct VkBaseInStructure *)pCreateInfo);
+ VkDeviceCreateInfo create_info = *pCreateInfo;
- VkPhysicalDeviceFeatures2 *device_features2 = (VkPhysicalDeviceFeatures2 *)
- vk_find_struct(device_info, PHYSICAL_DEVICE_FEATURES_2);
- if (device_features2) {
- /* Can't use device_info->pEnabledFeatures when VkPhysicalDeviceFeatures2 is present */
- device_features_ptr = &device_features2->features;
- } else {
- if (device_info->pEnabledFeatures)
- device_features = *(device_info->pEnabledFeatures);
- device_features_ptr = &device_features;
- device_info->pEnabledFeatures = &device_features;
- }
+ struct VkBaseOutStructure *new_pnext =
+ clone_chain((const struct VkBaseInStructure *) pCreateInfo->pNext);
+ if (new_pnext != NULL) {
+ create_info.pNext = new_pnext;
- if (instance_data->pipeline_statistics_enabled) {
- device_features_ptr->inheritedQueries = true;
- device_features_ptr->pipelineStatisticsQuery = true;
- }
+ VkPhysicalDeviceFeatures2 *device_features2 = (VkPhysicalDeviceFeatures2 *)
+ vk_find_struct(new_pnext, PHYSICAL_DEVICE_FEATURES_2);
+ if (device_features2) {
+         /* Can't use create_info.pEnabledFeatures when VkPhysicalDeviceFeatures2 is present */
+ device_features_ptr = &device_features2->features;
+ } else {
+ if (create_info.pEnabledFeatures)
+ device_features = *(create_info.pEnabledFeatures);
+ device_features_ptr = &device_features;
+ create_info.pEnabledFeatures = &device_features;
+ }
+ if (instance_data->pipeline_statistics_enabled) {
+ device_features_ptr->inheritedQueries = true;
+ device_features_ptr->pipelineStatisticsQuery = true;
+ }
+ }
- VkResult result = fpCreateDevice(physicalDevice, device_info, pAllocator, pDevice);
- free_chain((struct VkBaseOutStructure *)device_info);
+ VkResult result = fpCreateDevice(physicalDevice, &create_info, pAllocator, pDevice);
+ free_chain(new_pnext);
if (result != VK_SUCCESS) return result;
struct device_data *device_data = new_device_data(*pDevice, instance_data);
@@ -2516,6 +2591,10 @@ static VkResult overlay_CreateDevice(
device_map_queues(device_data, pCreateInfo);
+ device_data->pipeline_statistics_enabled =
+ new_pnext != NULL &&
+ instance_data->pipeline_statistics_enabled;
+
return result;
}
@@ -2567,7 +2646,7 @@ static VkResult overlay_CreateInstance(
* capturing fps data right away.
*/
instance_data->capture_enabled =
- instance_data->params.output_file && instance_data->params.control < 0;
+ instance_data->params.output_file && instance_data->params.control == NULL;
instance_data->capture_started = instance_data->capture_enabled;
for (int i = OVERLAY_PARAM_ENABLED_vertices;
@@ -2626,6 +2705,7 @@ static const struct {
ADD_HOOK(AcquireNextImage2KHR),
ADD_HOOK(QueueSubmit),
+ ADD_HOOK(QueueSubmit2),
ADD_HOOK(CreateDevice),
ADD_HOOK(DestroyDevice),
@@ -2646,8 +2726,8 @@ static void *find_ptr(const char *name)
return NULL;
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev,
- const char *funcName)
+PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev,
+ const char *funcName)
{
void *ptr = find_ptr(funcName);
if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);
@@ -2659,8 +2739,8 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
return device_data->vtable.GetDeviceProcAddr(dev, funcName);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance,
- const char *funcName)
+PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance,
+ const char *funcName)
{
void *ptr = find_ptr(funcName);
if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);
diff --git a/src/vulkan/overlay-layer/overlay_params.c b/src/vulkan/overlay-layer/overlay_params.c
index 48a9f7e3b5a..d055d32a4e0 100644
--- a/src/vulkan/overlay-layer/overlay_params.c
+++ b/src/vulkan/overlay-layer/overlay_params.c
@@ -51,19 +51,17 @@ parse_output_file(const char *str)
return fopen(str, "w+");
}
-static int
+static const char *
parse_control(const char *str)
{
- int ret = os_socket_listen_abstract(str, 1);
- if (ret < 0) {
- fprintf(stderr, "ERROR: Couldn't create socket pipe at '%s'\n", str);
- fprintf(stderr, "ERROR: '%s'\n", strerror(errno));
- return ret;
+ static char control_str[64];
+ if (strlen(str) > 63) {
+      fprintf(stderr, "ERROR: control string too long. Must be < 64 chars\n");
+ return NULL;
}
+ strcpy(control_str, str);
- os_socket_block(ret, false);
-
- return ret;
+ return control_str;
}
static uint32_t
@@ -169,7 +167,7 @@ parse_overlay_env(struct overlay_params *params,
params->enabled[OVERLAY_PARAM_ENABLED_format] = true;
params->fps_sampling_period = 500000; /* 500ms */
params->width = params->height = 300;
- params->control = -1;
+ params->control = NULL;
if (!env)
return;
diff --git a/src/vulkan/overlay-layer/overlay_params.h b/src/vulkan/overlay-layer/overlay_params.h
index e919f74ff6c..0c7290120af 100644
--- a/src/vulkan/overlay-layer/overlay_params.h
+++ b/src/vulkan/overlay-layer/overlay_params.h
@@ -94,7 +94,7 @@ struct overlay_params {
bool enabled[OVERLAY_PARAM_ENABLED_MAX];
enum overlay_param_position position;
FILE *output_file;
- int control;
+ const char *control;
uint32_t fps_sampling_period; /* us */
bool help;
bool no_display;
diff --git a/src/vulkan/registry/update-aliases.py b/src/vulkan/registry/update-aliases.py
new file mode 100755
index 00000000000..935bcea5f59
--- /dev/null
+++ b/src/vulkan/registry/update-aliases.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python3
+"""
+Check for and replace aliases with their new names from vk.xml
+"""
+
+import argparse
+import pathlib
+import subprocess
+import sys
+import xml.etree.ElementTree as et
+
+THIS_FILE = pathlib.Path(__file__)
+CWD = pathlib.Path.cwd()
+
+VK_XML = THIS_FILE.parent / 'vk.xml'
+EXCLUDE_PATHS = [
+ VK_XML.relative_to(CWD).as_posix(),
+
+    # These files come from other repos; there's no point checking and
+    # fixing them here, as that would be overwritten in the next sync.
+ 'src/amd/vulkan/radix_sort/',
+ 'src/virtio/venus-protocol/',
+]
+
+
+def get_aliases(xml_file: pathlib.Path):
+ """
+ Get all the aliases defined in vk.xml
+ """
+ xml = et.parse(xml_file)
+
+ for node in ([]
+ + xml.findall('.//enum[@alias]')
+ + xml.findall('.//type[@alias]')
+ + xml.findall('.//command[@alias]')
+ ):
+ # Some renames only apply to some APIs
+ if 'api' in node.attrib and 'vulkan' not in node.attrib['api'].split(','):
+ continue
+
+ yield node.attrib['name'], node.attrib['alias']
+
+
+def remove_prefix(string: str, prefix: str):
+ """
+    Remove the prefix if the string starts with it; otherwise return the
+    string unchanged.
+ """
+ if not string.startswith(prefix):
+ return string
+ return string[len(prefix):]
+
+
+# Function from https://stackoverflow.com/a/312464
+def chunks(lst: list, n: int):
+ """
+ Yield successive n-sized chunks from lst.
+ """
+ for i in range(0, len(lst), n):
+ yield lst[i:i + n]
+
+
+def main(paths: list[str]):
+ """
+ Entrypoint; perform the search for all the aliases and replace them.
+ """
+ def prepare_identifier(identifier: str) -> str:
+ prefixes_seen = []
+ for prefix in [
+ # Various macros prepend these, so they will not appear in the code using them.
+                # List generated using these commands:
+ # $ prefixes=$(git grep -woiE 'VK_\w+_' -- src/ ':!src/vulkan/registry/' | cut -d: -f2 | sort -u)
+ # $ for prefix in $prefixes; do grep -q $prefix src/vulkan/registry/vk.xml && echo "'$prefix',"; done
+ # (the second part eliminates prefixes used only in mesa code and not upstream)
+ 'VK_BLEND_FACTOR_',
+ 'VK_BLEND_OP_',
+ 'VK_BORDER_COLOR_',
+ 'VK_COMMAND_BUFFER_RESET_',
+ 'VK_COMMAND_POOL_RESET_',
+ 'VK_COMPARE_OP_',
+ 'VK_COMPONENT_SWIZZLE_',
+ 'VK_DESCRIPTOR_TYPE_',
+ 'VK_DRIVER_ID_',
+ 'VK_DYNAMIC_STATE_',
+ 'VK_FORMAT_',
+ 'VK_IMAGE_ASPECT_MEMORY_PLANE_',
+ 'VK_IMAGE_ASPECT_PLANE_',
+ 'VK_IMAGE_USAGE_',
+ 'VK_NV_',
+ 'VK_PERFORMANCE_COUNTER_UNIT_',
+ 'VK_PIPELINE_BIND_POINT_',
+ 'VK_SAMPLER_ADDRESS_MODE_',
+ 'VK_SHADER_STAGE_TESSELLATION_',
+ 'VK_SHADER_STAGE_',
+ 'VK_STENCIL_OP_',
+ 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_',
+ 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_',
+ 'VK_STRUCTURE_TYPE_',
+ 'VK_USE_PLATFORM_',
+ 'VK_VERSION_',
+
+ # Many places use the identifier without the `vk` prefix
+            # (e.g. with the driver name as a prefix instead)
+ 'VK_',
+ 'Vk',
+ 'vk',
+ ]:
+ # The order matters! A shorter substring will match before a longer
+ # one, hiding its matches.
+ for prefix_seen in prefixes_seen:
+                assert not prefix.startswith(prefix_seen), f'{prefix} must come before {prefix_seen}'
+ prefixes_seen.append(prefix)
+
+ identifier = remove_prefix(identifier, prefix)
+
+ return identifier
+
+ aliases = {}
+ for old_name, alias_for in get_aliases(VK_XML):
+ old_name = prepare_identifier(old_name)
+ alias_for = prepare_identifier(alias_for)
+ aliases[old_name] = alias_for
+
+ print(f'Found {len(aliases)} aliases in {VK_XML.name}')
+
+ # Some aliases have aliases
+ recursion_needs_checking = True
+ while recursion_needs_checking:
+ recursion_needs_checking = False
+ for old, new in aliases.items():
+ if new in aliases:
+ aliases[old] = aliases[new]
+ recursion_needs_checking = True
+
+ # Doing the whole search in a single command breaks grep, so only
+ # look for 500 aliases at a time. Searching them one at a time would
+ # be extremely slow.
+ files_with_aliases = set()
+ for aliases_chunk in chunks([*aliases], 500):
+ grep_cmd = [
+ 'git',
+ 'grep',
+ '-rlP',
+ '|'.join(aliases_chunk),
+ ] + paths
+ search_output = subprocess.run(
+ grep_cmd,
+ check=False,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.DEVNULL,
+ ).stdout.decode()
+ files_with_aliases.update(search_output.splitlines())
+
+
+ def file_matches_path(file: str, path: str) -> bool:
+        # if path is a folder, match any file within it
+ if path.endswith('/') and file.startswith(path):
+ return True
+ return file == path
+
+ for excluded_path in EXCLUDE_PATHS:
+ files_with_aliases = {
+ file for file in files_with_aliases
+ if not file_matches_path(file, excluded_path)
+ }
+
+ if not files_with_aliases:
+ print('No alias found in any file.')
+ sys.exit(0)
+
+ print(f'{len(files_with_aliases)} files contain aliases:')
+ print('\n'.join(f'- {file}' for file in sorted(files_with_aliases)))
+
+ command = [
+ 'sed',
+ '-i',
+ ";".join([f's/{old}/{new}/g' for old, new in aliases.items()]),
+ ]
+ command += files_with_aliases
+ subprocess.check_call(command, stderr=subprocess.DEVNULL)
+ print('All aliases have been replaced')
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('paths',
+ nargs=argparse.ZERO_OR_MORE,
+ default=['src/'],
+ help='Limit script to these paths (default: `src/`)')
+ args = parser.parse_args()
+ main(**vars(args))
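
For illustration only (not part of the patch): a minimal, self-contained sketch of the
replacement strategy update-aliases.py relies on, i.e. collapsing alias-of-alias chains
to a fixed point and then joining the result into one sed expression. The alias names
below are made up for the example; the real mapping is parsed from vk.xml.

# Toy illustration of the alias flattening and sed-expression construction
# performed by update-aliases.py. The alias names here are hypothetical.

# old name -> preferred name (note the chain A -> B -> C)
aliases = {
    'VK_OLD_A': 'VK_OLD_B',
    'VK_OLD_B': 'VK_NEW_C',
    'VK_STRUCTURE_TYPE_FOO_EXT': 'VK_STRUCTURE_TYPE_FOO',
}

# Some aliases have aliases: keep collapsing until nothing changes.
changed = True
while changed:
    changed = False
    for old, new in aliases.items():
        if new in aliases:
            aliases[old] = aliases[new]
            changed = True

assert aliases['VK_OLD_A'] == 'VK_NEW_C'

# A single sed expression replaces every alias in a file in one pass.
sed_expr = ';'.join(f's/{old}/{new}/g' for old, new in aliases.items())
print(sed_expr)
# -> s/VK_OLD_A/VK_NEW_C/g;s/VK_OLD_B/VK_NEW_C/g;s/VK_STRUCTURE_TYPE_FOO_EXT/VK_STRUCTURE_TYPE_FOO/g
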
diff --git a/src/vulkan/registry/vk.xml b/src/vulkan/registry/vk.xml
index 50d55cda392..c8464a2eae5 100644
--- a/src/vulkan/registry/vk.xml
+++ b/src/vulkan/registry/vk.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<registry>
<comment>
-Copyright 2015-2021 The Khronos Group Inc.
+Copyright 2015-2024 The Khronos Group Inc.
SPDX-License-Identifier: Apache-2.0 OR MIT
</comment>
@@ -32,12 +32,13 @@ branch of the member gitlab server.
<platform name="metal" protect="VK_USE_PLATFORM_METAL_EXT" comment="Metal on CoreAnimation on Apple platforms"/>
<platform name="fuchsia" protect="VK_USE_PLATFORM_FUCHSIA" comment="Fuchsia"/>
<platform name="ggp" protect="VK_USE_PLATFORM_GGP" comment="Google Games Platform"/>
+ <platform name="sci" protect="VK_USE_PLATFORM_SCI" comment="NVIDIA SCI"/>
<platform name="provisional" protect="VK_ENABLE_BETA_EXTENSIONS" comment="Enable declarations for beta/provisional extensions"/>
<platform name="screen" protect="VK_USE_PLATFORM_SCREEN_QNX" comment="QNX Screen Graphics Subsystem"/>
</platforms>
<tags comment="Vulkan vendor/author tags for extensions and layers">
- <tag name="IMG" author="Imagination Technologies" contact="Michael Worcester @michaelworcester"/>
+ <tag name="IMG" author="Imagination Technologies" contact="Andrew Garrard @fluppeteer"/>
<tag name="AMD" author="Advanced Micro Devices, Inc." contact="Daniel Rakos @drakos-amd"/>
<tag name="AMDX" author="Advanced Micro Devices, Inc." contact="Daniel Rakos @drakos-amd"/>
<tag name="ARM" author="ARM Limited" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm"/>
@@ -56,6 +57,7 @@ branch of the member gitlab server.
<tag name="GOOGLE" author="Google LLC" contact="Jesse Hall @critsec"/>
<tag name="QCOM" author="Qualcomm Technologies, Inc." contact="Jeff Leger @jackohounhd"/>
<tag name="LUNARG" author="LunarG, Inc." contact="Karen Ghavam @karenghavam-lunarg"/>
+ <tag name="NZXT" author="NZXT Inc." contact="Jacob Kiesel @xaeroxe"/>
<tag name="SAMSUNG" author="Samsung Electronics Co., Ltd." contact="Alon Or-bach @alonorbach"/>
<tag name="SEC" author="Samsung Electronics Co., Ltd." contact="Alon Or-bach @alonorbach"/>
<tag name="TIZEN" author="Samsung Electronics Co., Ltd." contact="Alon Or-bach @alonorbach"/>
@@ -65,13 +67,15 @@ branch of the member gitlab server.
<tag name="KHR" author="Khronos" contact="Tom Olson @tomolson"/>
<tag name="KHX" author="Khronos" contact="Tom Olson @tomolson"/>
<tag name="EXT" author="Multivendor" contact="Jon Leech @oddhack"/>
- <tag name="MESA" author="Mesa open source project" contact="Chad Versace @chadversary, Daniel Stone @fooishbar, David Airlie @airlied, Jason Ekstrand @jekstrand"/>
+ <tag name="MESA" author="Mesa open source project" contact="Lina Versace @versalinyaa, Daniel Stone @fooishbar, David Airlie @airlied, Faith Ekstrand @gfxstrand"/>
<tag name="INTEL" author="Intel Corporation" contact="Slawek Grajewski @sgrajewski"/>
- <tag name="HUAWEI" author="Huawei Technologies Co. Ltd." contact="Hueilong Wang @wyvernathuawei, Yunpeng Zhu @yunxingzhu"/>
+ <tag name="HUAWEI" author="Huawei Technologies Co. Ltd." contact="Pan Gao @PanGao-h, Juntao Li @Lawrenceleehw"/>
<tag name="VALVE" author="Valve Corporation" contact="Pierre-Loup Griffais @plagman, Joshua Ashton @Joshua-Ashton, Hans-Kristian Arntzen @HansKristian-Work"/>
- <tag name="QNX" author="BlackBerry Limited" contact="Mike Gorchak @mgorchak-blackberry"/>
+ <tag name="QNX" author="BlackBerry Limited" contact="Mike Gorchak @mgorchak-blackberry, Aaron Ruby @aruby-blackberry"/>
<tag name="JUICE" author="Juice Technologies, Inc." contact="David McCloskey @damcclos, Dean Beeler @canadacow"/>
<tag name="FB" author="Facebook, Inc" contact="Artem Bolgar @artyom17"/>
+ <tag name="RASTERGRID" author="RasterGrid Kft." contact="Daniel Rakos @aqnuep"/>
+ <tag name="MSFT" author="Microsoft Corporation" contact="Jesse Natalie @jenatali"/>
</tags>
<types comment="Vulkan type definitions">
@@ -88,6 +92,8 @@ branch of the member gitlab server.
<type category="include" name="zircon/types.h"/>
<type category="include" name="ggp_c/vulkan_types.h"/>
<type category="include" name="screen/screen.h"/>
+ <type category="include" name="nvscisync.h"/>
+ <type category="include" name="nvscibuf.h"/>
<comment>
In the current header structure, each platform's interfaces
are confined to a platform-specific header (vulkan_xlib.h,
@@ -128,43 +134,63 @@ branch of the member gitlab server.
<type requires="ggp_c/vulkan_types.h" name="GgpFrameToken"/>
<type requires="screen/screen.h" name="_screen_context"/>
<type requires="screen/screen.h" name="_screen_window"/>
+ <type requires="screen/screen.h" name="_screen_buffer"/>
+ <type requires="nvscisync.h" name="NvSciSyncAttrList"/>
+ <type requires="nvscisync.h" name="NvSciSyncObj"/>
+ <type requires="nvscisync.h" name="NvSciSyncFence"/>
+ <type requires="nvscibuf.h" name="NvSciBufAttrList"/>
+ <type requires="nvscibuf.h" name="NvSciBufObj"/>
- <type category="define">// DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead.
+ <type category="define" deprecated="true">// DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead.
#define <name>VK_MAKE_VERSION</name>(major, minor, patch) \
- ((((uint32_t)(major)) &lt;&lt; 22) | (((uint32_t)(minor)) &lt;&lt; 12) | ((uint32_t)(patch)))</type>
- <type category="define">// DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead.
-#define <name>VK_VERSION_MAJOR</name>(version) ((uint32_t)(version) &gt;&gt; 22)</type>
- <type category="define">// DEPRECATED: This define is deprecated. VK_API_VERSION_MINOR should be used instead.
-#define <name>VK_VERSION_MINOR</name>(version) (((uint32_t)(version) &gt;&gt; 12) &amp; 0x3FFU)</type>
- <type category="define">// DEPRECATED: This define is deprecated. VK_API_VERSION_PATCH should be used instead.
+ ((((uint32_t)(major)) &lt;&lt; 22U) | (((uint32_t)(minor)) &lt;&lt; 12U) | ((uint32_t)(patch)))</type>
+ <type category="define" deprecated="true">// DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead.
+#define <name>VK_VERSION_MAJOR</name>(version) ((uint32_t)(version) &gt;&gt; 22U)</type>
+ <type category="define" deprecated="true">// DEPRECATED: This define is deprecated. VK_API_VERSION_MINOR should be used instead.
+#define <name>VK_VERSION_MINOR</name>(version) (((uint32_t)(version) &gt;&gt; 12U) &amp; 0x3FFU)</type>
+ <type category="define" deprecated="true">// DEPRECATED: This define is deprecated. VK_API_VERSION_PATCH should be used instead.
#define <name>VK_VERSION_PATCH</name>(version) ((uint32_t)(version) &amp; 0xFFFU)</type>
<type category="define">#define <name>VK_MAKE_API_VERSION</name>(variant, major, minor, patch) \
- ((((uint32_t)(variant)) &lt;&lt; 29) | (((uint32_t)(major)) &lt;&lt; 22) | (((uint32_t)(minor)) &lt;&lt; 12) | ((uint32_t)(patch)))</type>
- <type category="define">#define <name>VK_API_VERSION_VARIANT</name>(version) ((uint32_t)(version) &gt;&gt; 29)</type>
- <type category="define">#define <name>VK_API_VERSION_MAJOR</name>(version) (((uint32_t)(version) &gt;&gt; 22) &amp; 0x7FU)</type>
- <type category="define">#define <name>VK_API_VERSION_MINOR</name>(version) (((uint32_t)(version) &gt;&gt; 12) &amp; 0x3FFU)</type>
+ ((((uint32_t)(variant)) &lt;&lt; 29U) | (((uint32_t)(major)) &lt;&lt; 22U) | (((uint32_t)(minor)) &lt;&lt; 12U) | ((uint32_t)(patch)))</type>
+ <type category="define">#define <name>VK_API_VERSION_VARIANT</name>(version) ((uint32_t)(version) &gt;&gt; 29U)</type>
+ <type category="define">#define <name>VK_API_VERSION_MAJOR</name>(version) (((uint32_t)(version) &gt;&gt; 22U) &amp; 0x7FU)</type>
+ <type category="define">#define <name>VK_API_VERSION_MINOR</name>(version) (((uint32_t)(version) &gt;&gt; 12U) &amp; 0x3FFU)</type>
<type category="define">#define <name>VK_API_VERSION_PATCH</name>(version) ((uint32_t)(version) &amp; 0xFFFU)</type>
+ <type category="define" requires="VK_HEADER_VERSION">// Vulkan SC variant number
+#define <name>VKSC_API_VARIANT</name> 1</type>
+
<type category="define">// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead.
-//#define <name>VK_API_VERSION</name> <type>VK_MAKE_VERSION</type>(1, 0, 0) // Patch version should always be set to 0</type>
- <type category="define" requires="VK_MAKE_API_VERSION">// Vulkan 1.0 version number
+//#define <name>VK_API_VERSION</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 0, 0) // Patch version should always be set to 0</type>
+ <type category="define">// Vulkan 1.0 version number
#define <name>VK_API_VERSION_1_0</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 0, 0)// Patch version should always be set to 0</type>
- <type category="define" requires="VK_MAKE_API_VERSION">// Vulkan 1.1 version number
+ <type category="define">// Vulkan 1.1 version number
#define <name>VK_API_VERSION_1_1</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 1, 0)// Patch version should always be set to 0</type>
- <type category="define" requires="VK_MAKE_API_VERSION">// Vulkan 1.2 version number
+ <type category="define">// Vulkan 1.2 version number
#define <name>VK_API_VERSION_1_2</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 2, 0)// Patch version should always be set to 0</type>
- <type category="define">// Version of this file
-#define <name>VK_HEADER_VERSION</name> 190</type>
- <type category="define" requires="VK_HEADER_VERSION">// Complete version of this file
-#define <name>VK_HEADER_VERSION_COMPLETE</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 2, VK_HEADER_VERSION)</type>
+ <type category="define" requires="VK_MAKE_API_VERSION">// Vulkan 1.3 version number
+#define <name>VK_API_VERSION_1_3</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 3, 0)// Patch version should always be set to 0</type>
+ <type category="define" requires="VKSC_API_VARIANT">// Vulkan SC 1.0 version number
+#define <name>VKSC_API_VERSION_1_0</name> <type>VK_MAKE_API_VERSION</type>(VKSC_API_VARIANT, 1, 0, 0)// Patch version should always be set to 0</type>
+
+ <type api="vulkan" category="define">// Version of this file
+#define <name>VK_HEADER_VERSION</name> 278</type>
+ <type api="vulkan" category="define" requires="VK_HEADER_VERSION">// Complete version of this file
+#define <name>VK_HEADER_VERSION_COMPLETE</name> <type>VK_MAKE_API_VERSION</type>(0, 1, 3, VK_HEADER_VERSION)</type>
+ <type api="vulkansc" category="define">// Version of this file
+#define <name>VK_HEADER_VERSION</name> 14</type>
+ <type api="vulkansc" category="define" requires="VKSC_API_VARIANT">// Complete version of this file
+#define <name>VK_HEADER_VERSION_COMPLETE</name> <type>VK_MAKE_API_VERSION</type>(VKSC_API_VARIANT, 1, 0, VK_HEADER_VERSION)</type>
- <type category="define">
+ <type api="vulkan" category="define">
#define <name>VK_DEFINE_HANDLE</name>(object) typedef struct object##_T* object;</type>
+ <type api="vulkansc" category="define" comment="Extra parenthesis are a MISRA-C requirement that exposes a bug in MSVC">
+#define <name>VK_DEFINE_HANDLE</name>(object) typedef struct object##_T* (object);</type>
<type category="define" name="VK_USE_64_BIT_PTR_DEFINES">
#ifndef VK_USE_64_BIT_PTR_DEFINES
- #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) &amp;&amp; !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) &amp;&amp; !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) || (defined(__riscv) &amp;&amp; __riscv_xlen == 64)
#define VK_USE_64_BIT_PTR_DEFINES 1
#else
#define VK_USE_64_BIT_PTR_DEFINES 0
@@ -185,7 +211,7 @@ branch of the member gitlab server.
#ifndef VK_NULL_HANDLE
#define VK_NULL_HANDLE 0
#endif</type>
- <type category="define" requires="VK_NULL_HANDLE" name="VK_DEFINE_NON_DISPATCHABLE_HANDLE">
+ <type api="vulkan" category="define" requires="VK_NULL_HANDLE" name="VK_DEFINE_NON_DISPATCHABLE_HANDLE">
#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE
#if (VK_USE_64_BIT_PTR_DEFINES==1)
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
@@ -193,15 +219,53 @@ branch of the member gitlab server.
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
#endif
#endif</type>
+ <type api="vulkansc" category="define" requires="VK_NULL_HANDLE" name="VK_DEFINE_NON_DISPATCHABLE_HANDLE" comment="Extra parenthesis are a MISRA-C requirement that exposes a bug in MSVC">
+#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE
+ #if (VK_USE_64_BIT_PTR_DEFINES==1)
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *(object);
+ #else
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t (object);
+ #endif
+#endif</type>
<type category="basetype">struct <name>ANativeWindow</name>;</type>
<type category="basetype">struct <name>AHardwareBuffer</name>;</type>
- <type category="basetype">
-#ifdef __OBJC__
+ <type category="basetype">#ifdef __OBJC__
@class CAMetalLayer;
#else
typedef void <name>CAMetalLayer</name>;
#endif</type>
+ <type category="basetype">#ifdef __OBJC__
+@protocol MTLDevice;
+typedef id&lt;MTLDevice&gt; MTLDevice_id;
+#else
+typedef void* <name>MTLDevice_id</name>;
+#endif</type>
+ <type category="basetype">#ifdef __OBJC__
+@protocol MTLCommandQueue;
+typedef id&lt;MTLCommandQueue&gt; MTLCommandQueue_id;
+#else
+typedef void* <name>MTLCommandQueue_id</name>;
+#endif</type>
+ <type category="basetype">#ifdef __OBJC__
+@protocol MTLBuffer;
+typedef id&lt;MTLBuffer&gt; MTLBuffer_id;
+#else
+typedef void* <name>MTLBuffer_id</name>;
+#endif</type>
+ <type category="basetype">#ifdef __OBJC__
+@protocol MTLTexture;
+typedef id&lt;MTLTexture&gt; MTLTexture_id;
+#else
+typedef void* <name>MTLTexture_id</name>;
+#endif</type>
+ <type category="basetype">#ifdef __OBJC__
+@protocol MTLSharedEvent;
+typedef id&lt;MTLSharedEvent&gt; MTLSharedEvent_id;
+#else
+typedef void* <name>MTLSharedEvent_id</name>;
+#endif</type>
+ <type category="basetype">typedef struct __IOSurface* <name>IOSurfaceRef</name>;</type>
<type category="basetype">typedef <type>uint32_t</type> <name>VkSampleMask</name>;</type>
<type category="basetype">typedef <type>uint32_t</type> <name>VkBool32</name>;</type>
@@ -231,11 +295,13 @@ typedef void <name>CAMetalLayer</name>;
<type category="bitmask">typedef <type>VkFlags</type> <name>VkQueryPoolCreateFlags</name>;</type>
<type requires="VkRenderPassCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkRenderPassCreateFlags</name>;</type>
<type requires="VkSamplerCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkSamplerCreateFlags</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineLayoutCreateFlags</name>;</type>
+ <type requires="VkPipelineLayoutCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineLayoutCreateFlags</name>;</type>
<type requires="VkPipelineCacheCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineCacheCreateFlags</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineDepthStencilStateCreateFlags</name>;</type>
+ <type api="vulkan" requires="VkPipelineDepthStencilStateCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineDepthStencilStateCreateFlags</name>;</type>
+ <type api="vulkansc" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineDepthStencilStateCreateFlags</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineDynamicStateCreateFlags</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineColorBlendStateCreateFlags</name>;</type>
+ <type api="vulkan" requires="VkPipelineColorBlendStateCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineColorBlendStateCreateFlags</name>;</type>
+ <type api="vulkansc" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineColorBlendStateCreateFlags</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineMultisampleStateCreateFlags</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineRasterizationStateCreateFlags</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineViewportStateCreateFlags</name>;</type>
@@ -245,7 +311,7 @@ typedef void <name>CAMetalLayer</name>;
<type requires="VkPipelineShaderStageCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineShaderStageCreateFlags</name>;</type>
<type requires="VkDescriptorSetLayoutCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkDescriptorSetLayoutCreateFlags</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkBufferViewCreateFlags</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkInstanceCreateFlags</name>;</type>
+ <type requires="VkInstanceCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkInstanceCreateFlags</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkDeviceCreateFlags</name>;</type>
<type requires="VkDeviceQueueCreateFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkDeviceQueueCreateFlags</name>;</type>
<type requires="VkQueueFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkQueueFlags</name>;</type>
@@ -272,7 +338,8 @@ typedef void <name>CAMetalLayer</name>;
<type requires="VkCommandBufferResetFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkCommandBufferResetFlags</name>;</type>
<type requires="VkCommandBufferUsageFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkCommandBufferUsageFlags</name>;</type>
<type requires="VkQueryPipelineStatisticFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkQueryPipelineStatisticFlags</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkMemoryMapFlags</name>;</type>
+ <type requires="VkMemoryMapFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkMemoryMapFlags</name>;</type>
+ <type requires="VkMemoryUnmapFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkMemoryUnmapFlagsKHR</name>;</type>
<type requires="VkImageAspectFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkImageAspectFlags</name>;</type>
<type requires="VkSparseMemoryBindFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkSparseMemoryBindFlags</name>;</type>
<type requires="VkSparseImageFormatFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkSparseImageFormatFlags</name>;</type>
@@ -294,11 +361,13 @@ typedef void <name>CAMetalLayer</name>;
<type category="bitmask" name="VkGeometryInstanceFlagsNV" alias="VkGeometryInstanceFlagsKHR"/>
<type requires="VkBuildAccelerationStructureFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkBuildAccelerationStructureFlagsKHR</name>;</type>
<type category="bitmask" name="VkBuildAccelerationStructureFlagsNV" alias="VkBuildAccelerationStructureFlagsKHR"/>
- <type requires="VkPrivateDataSlotCreateFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkPrivateDataSlotCreateFlagsEXT</name>;</type>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkPrivateDataSlotCreateFlags</name>;</type>
+ <type category="bitmask" name="VkPrivateDataSlotCreateFlagsEXT" alias="VkPrivateDataSlotCreateFlags"/>
<type requires="VkAccelerationStructureCreateFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkAccelerationStructureCreateFlagsKHR</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkDescriptorUpdateTemplateCreateFlags</name>;</type>
<type category="bitmask" name="VkDescriptorUpdateTemplateCreateFlagsKHR" alias="VkDescriptorUpdateTemplateCreateFlags"/>
- <type requires="VkPipelineCreationFeedbackFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineCreationFeedbackFlagsEXT</name>;</type>
+ <type requires="VkPipelineCreationFeedbackFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineCreationFeedbackFlags</name>;</type>
+ <type category="bitmask" name="VkPipelineCreationFeedbackFlagsEXT" alias="VkPipelineCreationFeedbackFlags"/>
<type requires="VkPerformanceCounterDescriptionFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkPerformanceCounterDescriptionFlagsKHR</name>;</type>
<type requires="VkAcquireProfilingLockFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkAcquireProfilingLockFlagsKHR</name>;</type>
<type requires="VkSemaphoreWaitFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkSemaphoreWaitFlags</name>;</type>
@@ -306,10 +375,23 @@ typedef void <name>CAMetalLayer</name>;
<type requires="VkPipelineCompilerControlFlagBitsAMD" category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineCompilerControlFlagsAMD</name>;</type>
<type requires="VkShaderCorePropertiesFlagBitsAMD" category="bitmask">typedef <type>VkFlags</type> <name>VkShaderCorePropertiesFlagsAMD</name>;</type>
<type requires="VkDeviceDiagnosticsConfigFlagBitsNV" category="bitmask">typedef <type>VkFlags</type> <name>VkDeviceDiagnosticsConfigFlagsNV</name>;</type>
- <type bitvalues="VkAccessFlagBits2KHR" category="bitmask">typedef <type>VkFlags64</type> <name>VkAccessFlags2KHR</name>;</type>
- <type bitvalues="VkPipelineStageFlagBits2KHR" category="bitmask">typedef <type>VkFlags64</type> <name>VkPipelineStageFlags2KHR</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkAccelerationStructureMotionInfoFlagsNV</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkAccelerationStructureMotionInstanceFlagsNV</name>;</type>
+ <type requires="VkRefreshObjectFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkRefreshObjectFlagsKHR</name>;</type>
+ <type bitvalues="VkAccessFlagBits2" category="bitmask">typedef <type>VkFlags64</type> <name>VkAccessFlags2</name>;</type>
+ <type category="bitmask" name="VkAccessFlags2KHR" alias="VkAccessFlags2"/>
+ <type bitvalues="VkPipelineStageFlagBits2" category="bitmask">typedef <type>VkFlags64</type> <name>VkPipelineStageFlags2</name>;</type>
+ <type category="bitmask" name="VkPipelineStageFlags2KHR" alias="VkPipelineStageFlags2"/>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkAccelerationStructureMotionInfoFlagsNV</name>;</type>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkAccelerationStructureMotionInstanceFlagsNV</name>;</type>
+ <type bitvalues="VkFormatFeatureFlagBits2" category="bitmask">typedef <type>VkFlags64</type> <name>VkFormatFeatureFlags2</name>;</type>
+ <type category="bitmask" name="VkFormatFeatureFlags2KHR" alias="VkFormatFeatureFlags2"/>
+ <type requires="VkRenderingFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkRenderingFlags</name>;</type>
+ <type bitvalues="VkMemoryDecompressionMethodFlagBitsNV" category="bitmask">typedef <type>VkFlags64</type> <name>VkMemoryDecompressionMethodFlagsNV</name>;</type>
+ <type category="bitmask" name="VkRenderingFlagsKHR" alias="VkRenderingFlags"/>
+ <type requires="VkBuildMicromapFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkBuildMicromapFlagsEXT</name>;</type>
+ <type requires="VkMicromapCreateFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkMicromapCreateFlagsEXT</name>;</type>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkDirectDriverLoadingFlagsLUNARG</name>;</type>
+ <type bitvalues="VkPipelineCreateFlagBits2KHR" category="bitmask">typedef <type>VkFlags64</type> <name>VkPipelineCreateFlags2KHR</name>;</type>
+ <type bitvalues="VkBufferUsageFlagBits2KHR" category="bitmask">typedef <type>VkFlags64</type> <name>VkBufferUsageFlags2KHR</name>;</type>
<comment>WSI extensions</comment>
<type requires="VkCompositeAlphaFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkCompositeAlphaFlagsKHR</name>;</type>
@@ -380,40 +462,67 @@ typedef void <name>CAMetalLayer</name>;
<type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineRasterizationStateStreamCreateFlagsEXT</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkPipelineRasterizationDepthClipStateCreateFlagsEXT</name>;</type>
<type requires="VkSwapchainImageUsageFlagBitsANDROID" category="bitmask">typedef <type>VkFlags</type> <name>VkSwapchainImageUsageFlagsANDROID</name>;</type>
- <type requires="VkToolPurposeFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkToolPurposeFlagsEXT</name>;</type>
- <type requires="VkSubmitFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkSubmitFlagsKHR</name>;</type>
+ <type requires="VkToolPurposeFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkToolPurposeFlags</name>;</type>
+ <type category="bitmask" name="VkToolPurposeFlagsEXT" alias="VkToolPurposeFlags"/>
+ <type requires="VkSubmitFlagBits" category="bitmask">typedef <type>VkFlags</type> <name>VkSubmitFlags</name>;</type>
+ <type category="bitmask" name="VkSubmitFlagsKHR" alias="VkSubmitFlags"/>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkImageFormatConstraintsFlagsFUCHSIA</name>;</type>
+ <type requires="VkHostImageCopyFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkHostImageCopyFlagsEXT</name>;</type>
+ <type requires="VkImageConstraintsInfoFlagBitsFUCHSIA" category="bitmask">typedef <type>VkFlags</type> <name>VkImageConstraintsInfoFlagsFUCHSIA</name>;</type>
+ <type requires="VkGraphicsPipelineLibraryFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkGraphicsPipelineLibraryFlagsEXT</name>;</type>
+ <type requires="VkImageCompressionFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkImageCompressionFlagsEXT</name>;</type>
+ <type requires="VkImageCompressionFixedRateFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkImageCompressionFixedRateFlagsEXT</name>;</type>
+ <type requires="VkExportMetalObjectTypeFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkExportMetalObjectTypeFlagsEXT</name>;</type>
+ <type requires="VkDeviceAddressBindingFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkDeviceAddressBindingFlagsEXT</name>;</type>
+ <type requires="VkOpticalFlowGridSizeFlagBitsNV" category="bitmask">typedef <type>VkFlags</type> <name>VkOpticalFlowGridSizeFlagsNV</name>;</type>
+ <type requires="VkOpticalFlowUsageFlagBitsNV" category="bitmask">typedef <type>VkFlags</type> <name>VkOpticalFlowUsageFlagsNV</name>;</type>
+ <type requires="VkOpticalFlowSessionCreateFlagBitsNV" category="bitmask">typedef <type>VkFlags</type> <name>VkOpticalFlowSessionCreateFlagsNV</name>;</type>
+ <type requires="VkOpticalFlowExecuteFlagBitsNV" category="bitmask">typedef <type>VkFlags</type> <name>VkOpticalFlowExecuteFlagsNV</name>;</type>
+ <type requires="VkFrameBoundaryFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkFrameBoundaryFlagsEXT</name>;</type>
+ <type requires="VkPresentScalingFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkPresentScalingFlagsEXT</name>;</type>
+ <type requires="VkPresentGravityFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkPresentGravityFlagsEXT</name>;</type>
+ <type requires="VkShaderCreateFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkShaderCreateFlagsEXT</name>;</type>
+ <type bitvalues="VkPhysicalDeviceSchedulingControlsFlagBitsARM" category="bitmask">typedef <type>VkFlags64</type> <name>VkPhysicalDeviceSchedulingControlsFlagsARM</name>;</type>
<comment>Video Core extension</comment>
<type requires="VkVideoCodecOperationFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoCodecOperationFlagsKHR</name>;</type>
<type requires="VkVideoCapabilityFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoCapabilityFlagsKHR</name>;</type>
<type requires="VkVideoSessionCreateFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoSessionCreateFlagsKHR</name>;</type>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoSessionParametersCreateFlagsKHR</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoBeginCodingFlagsKHR</name>;</type>
<type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEndCodingFlagsKHR</name>;</type>
- <type requires="VkVideoCodingQualityPresetFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoCodingQualityPresetFlagsKHR</name>;</type>
<type requires="VkVideoCodingControlFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoCodingControlFlagsKHR</name>;</type>
<comment>Video Decode Core extension</comment>
- <type requires="VkVideoDecodeFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeFlagsKHR</name>;</type>
+ <type requires="VkVideoDecodeUsageFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeUsageFlagsKHR</name>;</type>
+ <type requires="VkVideoDecodeCapabilityFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeCapabilityFlagsKHR</name>;</type>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeFlagsKHR</name>;</type>
<comment>Video Decode H.264 extension</comment>
- <type requires="VkVideoDecodeH264PictureLayoutFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeH264PictureLayoutFlagsEXT</name>;</type>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeH264CreateFlagsEXT</name>;</type>
-
- <comment>Video Decode H.265 extension</comment>
- <type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeH265CreateFlagsEXT</name>;</type>
+ <type requires="VkVideoDecodeH264PictureLayoutFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoDecodeH264PictureLayoutFlagsKHR</name>;</type>
<comment>Video Encode Core extension</comment>
<type requires="VkVideoEncodeFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeFlagsKHR</name>;</type>
- <type requires="VkVideoEncodeRateControlFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeRateControlFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeUsageFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeUsageFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeContentFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeContentFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeCapabilityFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeCapabilityFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeFeedbackFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeFeedbackFlagsKHR</name>;</type>
+ <type category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeRateControlFlagsKHR</name>;</type>
<type requires="VkVideoEncodeRateControlModeFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeRateControlModeFlagsKHR</name>;</type>
<type requires="VkVideoChromaSubsamplingFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoChromaSubsamplingFlagsKHR</name>;</type>
<type requires="VkVideoComponentBitDepthFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoComponentBitDepthFlagsKHR</name>;</type>
<comment>Video Encode H.264 extension</comment>
- <type requires="VkVideoEncodeH264CapabilityFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264CapabilityFlagsEXT</name>;</type>
- <type requires="VkVideoEncodeH264InputModeFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264InputModeFlagsEXT</name>;</type>
- <type requires="VkVideoEncodeH264OutputModeFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264OutputModeFlagsEXT</name>;</type>
- <type requires="VkVideoEncodeH264CreateFlagBitsEXT" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264CreateFlagsEXT</name>;</type>
+ <type requires="VkVideoEncodeH264CapabilityFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264CapabilityFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeH264StdFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264StdFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeH264RateControlFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH264RateControlFlagsKHR</name>;</type>
+
+ <comment>Video Encode H.265 extension</comment>
+ <type requires="VkVideoEncodeH265CapabilityFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH265CapabilityFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeH265StdFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH265StdFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeH265RateControlFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH265RateControlFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeH265CtbSizeFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH265CtbSizeFlagsKHR</name>;</type>
+ <type requires="VkVideoEncodeH265TransformBlockSizeFlagBitsKHR" category="bitmask">typedef <type>VkFlags</type> <name>VkVideoEncodeH265TransformBlockSizeFlagsKHR</name>;</type>
<comment>Types which can be void pointers or class pointers, selected at compile time</comment>
<type category="handle" objtypeenum="VK_OBJECT_TYPE_INSTANCE"><type>VK_DEFINE_HANDLE</type>(<name>VkInstance</name>)</type>
@@ -450,16 +559,21 @@ typedef void <name>CAMetalLayer</name>;
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkAccelerationStructureKHR</name>)</type>
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkAccelerationStructureNV</name>)</type>
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkPerformanceConfigurationINTEL</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkBufferCollectionFUCHSIA</name>)</type>
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkDeferredOperationKHR</name>)</type>
- <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkPrivateDataSlotEXT</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_PRIVATE_DATA_SLOT"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkPrivateDataSlot</name>)</type>
+ <type category="handle" name="VkPrivateDataSlotEXT" alias="VkPrivateDataSlot"/>
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_CU_MODULE_NVX"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkCuModuleNVX</name>)</type>
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_CU_FUNCTION_NVX"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkCuFunctionNVX</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_OPTICAL_FLOW_SESSION_NV"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkOpticalFlowSessionNV</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_MICROMAP_EXT"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkMicromapEXT</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_SHADER_EXT"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkShaderEXT</name>)</type>
<comment>WSI extensions</comment>
<type category="handle" parent="VkPhysicalDevice" objtypeenum="VK_OBJECT_TYPE_DISPLAY_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkDisplayKHR</name>)</type>
<type category="handle" parent="VkDisplayKHR" objtypeenum="VK_OBJECT_TYPE_DISPLAY_MODE_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkDisplayModeKHR</name>)</type>
<type category="handle" parent="VkInstance" objtypeenum="VK_OBJECT_TYPE_SURFACE_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkSurfaceKHR</name>)</type>
- <type category="handle" parent="VkSurfaceKHR" objtypeenum="VK_OBJECT_TYPE_SWAPCHAIN_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkSwapchainKHR</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_SWAPCHAIN_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkSwapchainKHR</name>)</type>
<type category="handle" parent="VkInstance" objtypeenum="VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkDebugReportCallbackEXT</name>)</type>
<type category="handle" parent="VkInstance" objtypeenum="VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkDebugUtilsMessengerEXT</name>)</type>
@@ -467,6 +581,9 @@ typedef void <name>CAMetalLayer</name>;
<type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_VIDEO_SESSION_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkVideoSessionKHR</name>)</type>
<type category="handle" parent="VkVideoSessionKHR" objtypeenum="VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkVideoSessionParametersKHR</name>)</type>
+ <comment>VK_NV_external_sci_sync2</comment>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_SEMAPHORE_SCI_SYNC_POOL_NV"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkSemaphoreSciSyncPoolNV</name>)</type>
+
<comment>Types generated from corresponding enums tags below</comment>
<type name="VkAttachmentLoadOp" category="enum"/>
<type name="VkAttachmentStoreOp" category="enum"/>
@@ -502,6 +619,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkFormat" category="enum"/>
<type name="VkFormatFeatureFlagBits" category="enum"/>
<type name="VkFrontFace" category="enum"/>
+ <type name="VkMemoryMapFlagBits" category="enum"/>
<type name="VkImageAspectFlagBits" category="enum"/>
<type name="VkImageCreateFlagBits" category="enum"/>
<type name="VkImageLayout" category="enum"/>
@@ -548,12 +666,14 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkEventCreateFlagBits" category="enum"/>
<type name="VkPipelineLayoutCreateFlagBits" category="enum"/>
<type name="VkSemaphoreCreateFlagBits" category="enum"/>
+ <type name="VkRayTracingInvocationReorderModeNV" category="enum"/>
<comment>Extensions</comment>
<type name="VkIndirectCommandsLayoutUsageFlagBitsNV" category="enum"/>
<type name="VkIndirectCommandsTokenTypeNV" category="enum"/>
<type name="VkIndirectStateFlagBitsNV" category="enum"/>
- <type name="VkPrivateDataSlotCreateFlagBitsEXT" category="enum"/>
+ <type name="VkPrivateDataSlotCreateFlagBits" category="enum"/>
+ <type category="enum" name="VkPrivateDataSlotCreateFlagBitsEXT" alias="VkPrivateDataSlotCreateFlagBits"/>
<type name="VkDescriptorUpdateTemplateType" category="enum"/>
<type category="enum" name="VkDescriptorUpdateTemplateTypeKHR" alias="VkDescriptorUpdateTemplateType"/>
<type name="VkViewportCoordinateSwizzleNV" category="enum"/>
@@ -565,8 +685,10 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkCoverageReductionModeNV" category="enum"/>
<type name="VkValidationCacheHeaderVersionEXT" category="enum"/>
<type name="VkShaderInfoTypeAMD" category="enum"/>
- <type name="VkQueueGlobalPriorityEXT" category="enum"/>
- <type name="VkTimeDomainEXT" category="enum"/>
+ <type name="VkQueueGlobalPriorityKHR" category="enum"/>
+ <type name="VkQueueGlobalPriorityEXT" category="enum" alias="VkQueueGlobalPriorityKHR"/>
+ <type name="VkTimeDomainKHR" category="enum"/>
+ <type name="VkTimeDomainEXT" category="enum" alias="VkTimeDomainKHR"/>
<type name="VkConservativeRasterizationModeEXT" category="enum"/>
<type name="VkResolveModeFlagBits" category="enum"/>
<type category="enum" name="VkResolveModeFlagBitsKHR" alias="VkResolveModeFlagBits"/>
@@ -596,10 +718,9 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkAccelerationStructureCompatibilityKHR" category="enum"/>
<type name="VkShaderGroupShaderKHR" category="enum"/>
<type name="VkMemoryOverallocationBehaviorAMD" category="enum"/>
- <type name="VkScopeNV" category="enum"/>
- <type name="VkComponentTypeNV" category="enum"/>
<type name="VkDeviceDiagnosticsConfigFlagBitsNV" category="enum"/>
- <type name="VkPipelineCreationFeedbackFlagBitsEXT" category="enum"/>
+ <type name="VkPipelineCreationFeedbackFlagBits" category="enum"/>
+ <type category="enum" name="VkPipelineCreationFeedbackFlagBitsEXT" alias="VkPipelineCreationFeedbackFlagBits"/>
<type name="VkPerformanceCounterScopeKHR" category="enum"/>
<type name="VkPerformanceCounterUnitKHR" category="enum"/>
<type name="VkPerformanceCounterStorageKHR" category="enum"/>
@@ -612,16 +733,69 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPerformanceOverrideTypeINTEL" category="enum"/>
<type name="VkPerformanceParameterTypeINTEL" category="enum"/>
<type name="VkPerformanceValueTypeINTEL" category="enum"/>
- <type name="VkLineRasterizationModeEXT" category="enum"/>
+ <type name="VkLineRasterizationModeKHR" category="enum"/>
+ <type name="VkLineRasterizationModeEXT" category="enum" alias="VkLineRasterizationModeKHR"/>
<type name="VkShaderModuleCreateFlagBits" category="enum"/>
<type name="VkPipelineCompilerControlFlagBitsAMD" category="enum"/>
<type name="VkShaderCorePropertiesFlagBitsAMD" category="enum"/>
- <type name="VkToolPurposeFlagBitsEXT" category="enum"/>
+ <type name="VkRefreshObjectFlagBitsKHR" category="enum"/>
+ <type name="VkFaultLevel" category="enum"/>
+ <type name="VkFaultType" category="enum"/>
+ <type name="VkFaultQueryBehavior" category="enum"/>
+ <type name="VkPipelineMatchControl" category="enum"/>
+ <type name="VkSciSyncClientTypeNV" category="enum"/>
+ <type name="VkSciSyncPrimitiveTypeNV" category="enum"/>
+ <type name="VkToolPurposeFlagBits" category="enum"/>
+ <type category="enum" name="VkToolPurposeFlagBitsEXT" alias="VkToolPurposeFlagBits"/>
<type name="VkFragmentShadingRateNV" category="enum"/>
<type name="VkFragmentShadingRateTypeNV" category="enum"/>
- <type name="VkAccessFlagBits2KHR" category="enum"/>
- <type name="VkPipelineStageFlagBits2KHR" category="enum"/>
+ <type name="VkSubpassMergeStatusEXT" category="enum"/>
+ <type name="VkAccessFlagBits2" category="enum"/>
+ <type category="enum" name="VkAccessFlagBits2KHR" alias="VkAccessFlagBits2"/>
+ <type name="VkPipelineStageFlagBits2" category="enum"/>
+ <type category="enum" name="VkPipelineStageFlagBits2KHR" alias="VkPipelineStageFlagBits2"/>
<type name="VkProvokingVertexModeEXT" category="enum"/>
+ <type name="VkPipelineCacheValidationVersion" category="enum"/>
+ <type name="VkImageFormatConstraintsFlagBitsFUCHSIA" category="enum"/>
+ <type name="VkHostImageCopyFlagBitsEXT" category="enum"/>
+ <type name="VkImageConstraintsInfoFlagBitsFUCHSIA" category="enum"/>
+ <type name="VkFormatFeatureFlagBits2" category="enum"/>
+ <type category="enum" name="VkFormatFeatureFlagBits2KHR" alias="VkFormatFeatureFlagBits2"/>
+ <type name="VkRenderingFlagBits" category="enum"/>
+ <type category="enum" name="VkRenderingFlagBitsKHR" alias="VkRenderingFlagBits"/>
+ <type name="VkPipelineDepthStencilStateCreateFlagBits" category="enum"/>
+ <type name="VkPipelineColorBlendStateCreateFlagBits" category="enum"/>
+ <type name="VkImageCompressionFlagBitsEXT" category="enum"/>
+ <type name="VkImageCompressionFixedRateFlagBitsEXT" category="enum"/>
+ <type name="VkExportMetalObjectTypeFlagBitsEXT" category="enum"/>
+ <type name="VkPipelineRobustnessBufferBehaviorEXT" category="enum"/>
+ <type name="VkPipelineRobustnessImageBehaviorEXT" category="enum"/>
+ <type name="VkDeviceAddressBindingFlagBitsEXT" category="enum"/>
+ <type name="VkDeviceAddressBindingTypeEXT" category="enum"/>
+ <type name="VkMicromapTypeEXT" category="enum"/>
+ <type name="VkBuildMicromapModeEXT" category="enum"/>
+ <type name="VkCopyMicromapModeEXT" category="enum"/>
+ <type name="VkBuildMicromapFlagBitsEXT" category="enum"/>
+ <type name="VkMicromapCreateFlagBitsEXT" category="enum"/>
+ <type name="VkOpacityMicromapFormatEXT" category="enum"/>
+ <type name="VkOpacityMicromapSpecialIndexEXT" category="enum"/>
+ <type name="VkDeviceFaultVendorBinaryHeaderVersionEXT" category="enum"/>
+ <type name="VkFrameBoundaryFlagBitsEXT" category="enum"/>
+ <type name="VkMemoryDecompressionMethodFlagBitsNV" category="enum"/>
+ <type name="VkDepthBiasRepresentationEXT" category="enum"/>
+ <type name="VkDirectDriverLoadingModeLUNARG" category="enum"/>
+ <type name="VkPipelineCreateFlagBits2KHR" category="enum"/>
+ <type name="VkBufferUsageFlagBits2KHR" category="enum"/>
+ <type name="VkDisplacementMicromapFormatNV" category="enum"/>
+ <type name="VkShaderCreateFlagBitsEXT" category="enum"/>
+ <type name="VkShaderCodeTypeEXT" category="enum"/>
+ <type name="VkScopeKHR" category="enum"/>
+ <type name="VkComponentTypeKHR" category="enum"/>
+ <type category="enum" name="VkScopeNV" alias="VkScopeKHR"/>
+ <type category="enum" name="VkComponentTypeNV" alias="VkComponentTypeKHR"/>
+ <type name="VkCubicFilterWeightsQCOM" category="enum"/>
+ <type name="VkBlockMatchWindowCompareModeQCOM" category="enum"/>
+ <type name="VkLayeredDriverUnderlyingApiMSFT" category="enum"/>
<comment>WSI extensions</comment>
<type name="VkColorSpaceKHR" category="enum"/>
@@ -683,7 +857,23 @@ typedef void <name>CAMetalLayer</name>;
<type category="enum" name="VkShaderFloatControlsIndependenceKHR" alias="VkShaderFloatControlsIndependence"/>
<type name="VkSwapchainImageUsageFlagBitsANDROID" category="enum"/>
<type name="VkFragmentShadingRateCombinerOpKHR" category="enum"/>
- <type name="VkSubmitFlagBitsKHR" category="enum"/>
+ <type name="VkSubmitFlagBits" category="enum"/>
+ <type category="enum" name="VkSubmitFlagBitsKHR" alias="VkSubmitFlagBits"/>
+ <type name="VkGraphicsPipelineLibraryFlagBitsEXT" category="enum"/>
+ <type name="VkOpticalFlowGridSizeFlagBitsNV" category="enum"/>
+ <type name="VkOpticalFlowUsageFlagBitsNV" category="enum"/>
+ <type name="VkOpticalFlowPerformanceLevelNV" category="enum"/>
+ <type name="VkOpticalFlowSessionBindingPointNV" category="enum"/>
+ <type name="VkOpticalFlowSessionCreateFlagBitsNV" category="enum"/>
+ <type name="VkOpticalFlowExecuteFlagBitsNV" category="enum"/>
+ <type name="VkDeviceFaultAddressTypeEXT" category="enum"/>
+ <type name="VkPresentScalingFlagBitsEXT" category="enum"/>
+ <type name="VkPresentGravityFlagBitsEXT" category="enum"/>
+ <type name="VkLayerSettingTypeEXT" category="enum"/>
+ <type name="VkLatencyMarkerNV" category="enum"/>
+ <type name="VkOutOfBandQueueTypeNV" category="enum"/>
+ <type name="VkPhysicalDeviceSchedulingControlsFlagBitsARM" category="enum"/>
+ <type name="VkMemoryUnmapFlagBitsKHR" category="enum"/>
<comment>Enumerated types in the header, but not used by the API</comment>
<type name="VkVendorId" category="enum"/>
@@ -699,28 +889,38 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkVideoComponentBitDepthFlagBitsKHR" category="enum"/>
<type name="VkVideoCapabilityFlagBitsKHR" category="enum"/>
<type name="VkVideoSessionCreateFlagBitsKHR" category="enum"/>
- <type name="VkVideoCodingQualityPresetFlagBitsKHR" category="enum"/>
<type name="VkVideoCodingControlFlagBitsKHR" category="enum"/>
<type name="VkQueryResultStatusKHR" category="enum"/>
<comment>Video Decode extensions</comment>
- <type name="VkVideoDecodeFlagBitsKHR" category="enum"/>
+ <type name="VkVideoDecodeUsageFlagBitsKHR" category="enum"/>
+ <type name="VkVideoDecodeCapabilityFlagBitsKHR" category="enum"/>
<comment>Video H.264 Decode extensions</comment>
- <type name="VkVideoDecodeH264PictureLayoutFlagBitsEXT" category="enum"/>
+ <type name="VkVideoDecodeH264PictureLayoutFlagBitsKHR" category="enum"/>
<comment>Video H.265 Decode extensions</comment>
<comment>Video Encode extensions</comment>
<type name="VkVideoEncodeFlagBitsKHR" category="enum"/>
- <type name="VkVideoEncodeRateControlFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeUsageFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeContentFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeTuningModeKHR" category="enum"/>
+ <type name="VkVideoEncodeCapabilityFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeFeedbackFlagBitsKHR" category="enum"/>
<type name="VkVideoEncodeRateControlModeFlagBitsKHR" category="enum"/>
<comment>Video H.264 Encode extensions</comment>
- <type name="VkVideoEncodeH264CapabilityFlagBitsEXT" category="enum"/>
- <type name="VkVideoEncodeH264InputModeFlagBitsEXT" category="enum"/>
- <type name="VkVideoEncodeH264OutputModeFlagBitsEXT" category="enum"/>
- <type name="VkVideoEncodeH264CreateFlagBitsEXT" category="enum"/>
+ <type name="VkVideoEncodeH264CapabilityFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeH264StdFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeH264RateControlFlagBitsKHR" category="enum"/>
+
+ <comment>Video H.265 Encode extensions</comment>
+ <type name="VkVideoEncodeH265CapabilityFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeH265StdFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeH265RateControlFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeH265CtbSizeFlagBitsKHR" category="enum"/>
+ <type name="VkVideoEncodeH265TransformBlockSizeFlagBitsKHR" category="enum"/>
<comment>The PFN_vk*Function types are used by VkAllocationCallbacks below</comment>
<type category="funcpointer">typedef void (VKAPI_PTR *<name>PFN_vkInternalAllocationNotification</name>)(
@@ -769,11 +969,26 @@ typedef void <name>CAMetalLayer</name>;
const <type>VkDebugUtilsMessengerCallbackDataEXT</type>* pCallbackData,
<type>void</type>* pUserData);</type>
+ <comment>The PFN_vkFaultCallbackFunction type is used by VKSC_VERSION_1_0</comment>
+ <type category="funcpointer">typedef void (VKAPI_PTR *<name>PFN_vkFaultCallbackFunction</name>)(
+ <type>VkBool32</type> unrecordedFaults,
+ <type>uint32_t</type> faultCount,
+ const <type>VkFaultData</type>* pFaults);</type>
+
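+ A minimal sketch of an application-side callback matching the PFN_vkFaultCallbackFunction signature above; only the counts are reported, so nothing beyond the declared parameters is assumed (the Vulkan SC headers are assumed to provide the types):
+
+     #include <stdio.h>
+     /* Vulkan SC headers (assumed) provide VkBool32, VkFaultData and VKAPI_PTR. */
+
+     static void VKAPI_PTR app_fault_callback(VkBool32 unrecordedFaults,
+                                              uint32_t faultCount,
+                                              const VkFaultData *pFaults)
+     {
+         (void)pFaults;  /* fault records not inspected in this sketch */
+         fprintf(stderr, "device reported %u fault(s)%s\n", faultCount,
+                 unrecordedFaults ? " (some faults were not recorded)" : "");
+     }
+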
<comment>The PFN_vkDeviceMemoryReportCallbackEXT type is used by the VK_EXT_device_memory_report extension</comment>
<type category="funcpointer" requires="VkDeviceMemoryReportCallbackDataEXT">typedef void (VKAPI_PTR *<name>PFN_vkDeviceMemoryReportCallbackEXT</name>)(
const <type>VkDeviceMemoryReportCallbackDataEXT</type>* pCallbackData,
<type>void</type>* pUserData);</type>
+ <comment>The PFN_vkGetInstanceProcAddrLUNARG type is used by the
+ VkDirectDriverLoadingInfoLUNARG structure.
+ We cannot introduce an explicit dependency on the
+ equivalent PFN_vkGetInstanceProcAddr type, even though
+ it is implicitly generated in the C header, because
+ that results in multiple definitions.</comment>
+ <type category="funcpointer" requires="VkInstance">typedef PFN_vkVoidFunction (VKAPI_PTR *<name>PFN_vkGetInstanceProcAddrLUNARG</name>)(
+ <type>VkInstance</type> instance, const <type>char</type>* pName);</type>
+
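+ A minimal sketch of how that function pointer is consumed, with the VK_LUNARG_direct_driver_loading structure members and enum names assumed from that extension; driver_get_instance_proc_addr is a hypothetical entry point exported by the driver being loaded:
+
+     /* Hypothetical driver entry point; signature matches the LUNARG funcpointer above. */
+     extern PFN_vkVoidFunction VKAPI_PTR
+     driver_get_instance_proc_addr(VkInstance instance, const char *pName);
+
+     static const VkDirectDriverLoadingInfoLUNARG driver_info = {
+         .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG,
+         .pfnGetInstanceProcAddr = driver_get_instance_proc_addr,
+     };
+     static const VkDirectDriverLoadingListLUNARG driver_list = {
+         .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG,
+         .mode = VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG,
+         .driverCount = 1,
+         .pDrivers = &driver_info,
+     };
+     /* driver_list is then chained into VkInstanceCreateInfo::pNext. */
+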
<comment>Struct types</comment>
<type category="struct" name="VkBaseOutStructure">
<member><type>VkStructureType</type> <name>sType</name></member>
@@ -830,20 +1045,20 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="noauto"><type>uint32_t</type> <name>vendorID</name></member>
<member limittype="noauto"><type>uint32_t</type> <name>deviceID</name></member>
<member limittype="noauto"><type>VkPhysicalDeviceType</type> <name>deviceType</name></member>
- <member limittype="noauto"><type>char</type> <name>deviceName</name>[<enum>VK_MAX_PHYSICAL_DEVICE_NAME_SIZE</enum>]</member>
+ <member limittype="noauto" len="null-terminated"><type>char</type> <name>deviceName</name>[<enum>VK_MAX_PHYSICAL_DEVICE_NAME_SIZE</enum>]</member>
<member limittype="noauto"><type>uint8_t</type> <name>pipelineCacheUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
<member limittype="struct"><type>VkPhysicalDeviceLimits</type> <name>limits</name></member>
<member limittype="struct"><type>VkPhysicalDeviceSparseProperties</type> <name>sparseProperties</name></member>
</type>
<type category="struct" name="VkExtensionProperties" returnedonly="true">
- <member><type>char</type> <name>extensionName</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]<comment>extension name</comment></member>
- <member><type>uint32_t</type> <name>specVersion</name><comment>version of the extension specification implemented</comment></member>
+ <member len="null-terminated"><type>char</type> <name>extensionName</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]<comment>extension name</comment></member>
+ <member><type>uint32_t</type> <name>specVersion</name><comment>version of the extension specification implemented</comment></member>
</type>
<type category="struct" name="VkLayerProperties" returnedonly="true">
- <member><type>char</type> <name>layerName</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]<comment>layer name</comment></member>
- <member><type>uint32_t</type> <name>specVersion</name><comment>version of the layer specification implemented</comment></member>
- <member><type>uint32_t</type> <name>implementationVersion</name><comment>build or release version of the layer's library</comment></member>
- <member><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]<comment>Free-form description of the layer</comment></member>
+ <member len="null-terminated"><type>char</type> <name>layerName</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]<comment>layer name</comment></member>
+ <member><type>uint32_t</type> <name>specVersion</name><comment>version of the layer specification implemented</comment></member>
+ <member><type>uint32_t</type> <name>implementationVersion</name><comment>build or release version of the layer's library</comment></member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]<comment>Free-form description of the layer</comment></member>
</type>
<type category="struct" name="VkApplicationInfo">
<member values="VK_STRUCTURE_TYPE_APPLICATION_INFO"><type>VkStructureType</type> <name>sType</name></member>
@@ -876,8 +1091,8 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>VkDeviceCreateFlags</type> <name>flags</name></member>
<member><type>uint32_t</type> <name>queueCreateInfoCount</name></member>
<member len="queueCreateInfoCount">const <type>VkDeviceQueueCreateInfo</type>* <name>pQueueCreateInfos</name></member>
- <member optional="true"><type>uint32_t</type> <name>enabledLayerCount</name></member>
- <member len="enabledLayerCount,null-terminated">const <type>char</type>* const* <name>ppEnabledLayerNames</name><comment>Ordered list of layer names to be enabled</comment></member>
+ <member optional="true" deprecated="ignored"><type>uint32_t</type> <name>enabledLayerCount</name></member>
+ <member len="enabledLayerCount,null-terminated" deprecated="ignored">const <type>char</type>* const* <name>ppEnabledLayerNames</name><comment>Ordered list of layer names to be enabled</comment></member>
<member optional="true"><type>uint32_t</type> <name>enabledExtensionCount</name></member>
<member len="enabledExtensionCount,null-terminated">const <type>char</type>* const* <name>ppEnabledExtensionNames</name></member>
<member optional="true">const <type>VkPhysicalDeviceFeatures</type>* <name>pEnabledFeatures</name></member>
@@ -893,16 +1108,16 @@ typedef void <name>CAMetalLayer</name>;
<member len="enabledExtensionCount,null-terminated">const <type>char</type>* const* <name>ppEnabledExtensionNames</name><comment>Extension names to be enabled</comment></member>
</type>
<type category="struct" name="VkQueueFamilyProperties" returnedonly="true">
- <member optional="true"><type>VkQueueFlags</type> <name>queueFlags</name><comment>Queue flags</comment></member>
- <member><type>uint32_t</type> <name>queueCount</name></member>
- <member><type>uint32_t</type> <name>timestampValidBits</name></member>
- <member><type>VkExtent3D</type> <name>minImageTransferGranularity</name><comment>Minimum alignment requirement for image transfers</comment></member>
+ <member optional="true" limittype="bitmask"><type>VkQueueFlags</type> <name>queueFlags</name><comment>Queue flags</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>queueCount</name></member>
+ <member limittype="bits"><type>uint32_t</type> <name>timestampValidBits</name></member>
+ <member limittype="min,mul"><type>VkExtent3D</type> <name>minImageTransferGranularity</name><comment>Minimum alignment requirement for image transfers</comment></member>
</type>
<type category="struct" name="VkPhysicalDeviceMemoryProperties" returnedonly="true">
- <member><type>uint32_t</type> <name>memoryTypeCount</name></member>
- <member><type>VkMemoryType</type> <name>memoryTypes</name>[<enum>VK_MAX_MEMORY_TYPES</enum>]</member>
- <member><type>uint32_t</type> <name>memoryHeapCount</name></member>
- <member><type>VkMemoryHeap</type> <name>memoryHeaps</name>[<enum>VK_MAX_MEMORY_HEAPS</enum>]</member>
+ <member><type>uint32_t</type> <name>memoryTypeCount</name></member>
+ <member len="memoryTypeCount"><type>VkMemoryType</type> <name>memoryTypes</name>[<enum>VK_MAX_MEMORY_TYPES</enum>]</member>
+ <member><type>uint32_t</type> <name>memoryHeapCount</name></member>
+ <member len="memoryHeapCount"><type>VkMemoryHeap</type> <name>memoryHeaps</name>[<enum>VK_MAX_MEMORY_HEAPS</enum>]</member>
</type>
<type category="struct" name="VkMemoryAllocateInfo">
<member values="VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
@@ -916,9 +1131,9 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>memoryTypeBits</name><comment>Bitmask of the allowed memory type indices into memoryTypes[] for this object</comment></member>
</type>
<type category="struct" name="VkSparseImageFormatProperties" returnedonly="true">
- <member optional="true"><type>VkImageAspectFlags</type> <name>aspectMask</name></member>
- <member><type>VkExtent3D</type> <name>imageGranularity</name></member>
- <member optional="true"><type>VkSparseImageFormatFlags</type> <name>flags</name></member>
+ <member limittype="bitmask" optional="true"><type>VkImageAspectFlags</type> <name>aspectMask</name></member>
+ <member limittype="min,mul"><type>VkExtent3D</type> <name>imageGranularity</name></member>
+ <member limittype="bitmask" optional="true"><type>VkSparseImageFormatFlags</type> <name>flags</name></member>
</type>
<type category="struct" name="VkSparseImageMemoryRequirements" returnedonly="true">
<member><type>VkSparseImageFormatProperties</type> <name>formatProperties</name></member>
@@ -943,9 +1158,9 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkDeviceSize</type> <name>size</name><comment>Size of the range within the memory object</comment></member>
</type>
<type category="struct" name="VkFormatProperties" returnedonly="true">
- <member optional="true"><type>VkFormatFeatureFlags</type> <name>linearTilingFeatures</name><comment>Format features in case of linear tiling</comment></member>
- <member optional="true"><type>VkFormatFeatureFlags</type> <name>optimalTilingFeatures</name><comment>Format features in case of optimal tiling</comment></member>
- <member optional="true"><type>VkFormatFeatureFlags</type> <name>bufferFeatures</name><comment>Format features supported by buffers</comment></member>
+ <member optional="true" limittype="bitmask"><type>VkFormatFeatureFlags</type> <name>linearTilingFeatures</name><comment>Format features in case of linear tiling</comment></member>
+ <member optional="true" limittype="bitmask"><type>VkFormatFeatureFlags</type> <name>optimalTilingFeatures</name><comment>Format features in case of optimal tiling</comment></member>
+ <member optional="true" limittype="bitmask"><type>VkFormatFeatureFlags</type> <name>bufferFeatures</name><comment>Format features supported by buffers</comment></member>
</type>
<type category="struct" name="VkImageFormatProperties" returnedonly="true">
<member><type>VkExtent3D</type> <name>maxExtent</name><comment>max image dimensions for this resource type</comment></member>
@@ -987,12 +1202,17 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>dstArrayElement</name><comment>Array element within the destination binding to copy to</comment></member>
<member><type>uint32_t</type> <name>descriptorCount</name><comment>Number of descriptors to write (determines the size of the array pointed by pDescriptors)</comment></member>
</type>
+ <type category="struct" name="VkBufferUsageFlags2CreateInfoKHR" structextends="VkBufferViewCreateInfo,VkBufferCreateInfo,VkPhysicalDeviceExternalBufferInfo,VkDescriptorBufferBindingInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBufferUsageFlags2KHR</type> <name>usage</name></member>
+ </type>
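+ A minimal sketch of how this structure is meant to be chained, which is also why the core usage member just below is marked noautovalidity; the VK_BUFFER_USAGE_2_* flag names are assumed from VK_KHR_maintenance5:
+
+     VkBufferUsageFlags2CreateInfoKHR usage2 = {
+         .sType = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR,
+         .usage = VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR |
+                  VK_BUFFER_USAGE_2_TRANSFER_DST_BIT_KHR,   /* assumed flag names */
+     };
+     VkBufferCreateInfo buffer_info = {
+         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+         .pNext = &usage2,
+         .size  = 64 * 1024,
+         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+         /* legacy .usage left 0: superseded by the chained 64-bit flags */
+     };
+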
<type category="struct" name="VkBufferCreateInfo">
<member values="VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkBufferCreateFlags</type> <name>flags</name><comment>Buffer creation flags</comment></member>
<member><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
- <member><type>VkBufferUsageFlags</type> <name>usage</name><comment>Buffer usage flags</comment></member>
+ <member noautovalidity="true"><type>VkBufferUsageFlags</type> <name>usage</name><comment>Buffer usage flags</comment></member>
<member><type>VkSharingMode</type> <name>sharingMode</name></member>
<member optional="true"><type>uint32_t</type> <name>queueFamilyIndexCount</name></member>
<member noautovalidity="true" len="queueFamilyIndexCount">const <type>uint32_t</type>* <name>pQueueFamilyIndices</name></member>
@@ -1000,7 +1220,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkBufferViewCreateInfo">
<member values="VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkBufferViewCreateFlags</type><name>flags</name></member>
+ <member optional="true"><type>VkBufferViewCreateFlags</type> <name>flags</name></member>
<member><type>VkBuffer</type> <name>buffer</name></member>
<member><type>VkFormat</type> <name>format</name><comment>Optionally specifies format of elements</comment></member>
<member><type>VkDeviceSize</type> <name>offset</name><comment>Specified in bytes</comment></member>
@@ -1070,7 +1290,7 @@ typedef void <name>CAMetalLayer</name>;
<member noautovalidity="true" len="queueFamilyIndexCount">const <type>uint32_t</type>* <name>pQueueFamilyIndices</name><comment>Array of queue family indices to share across</comment></member>
<member><type>VkImageLayout</type> <name>initialLayout</name><comment>Initial image layout for all subresources</comment></member>
</type>
- <type category="struct" name="VkSubresourceLayout" returnedonly="true">
+ <type category="struct" name="VkSubresourceLayout">
<member><type>VkDeviceSize</type> <name>offset</name><comment>Specified in bytes</comment></member>
<member><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
<member><type>VkDeviceSize</type> <name>rowPitch</name><comment>Specified in bytes</comment></member>
@@ -1097,7 +1317,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
<member optional="true"><type>VkDeviceMemory</type> <name>memory</name></member>
<member><type>VkDeviceSize</type> <name>memoryOffset</name><comment>Specified in bytes</comment></member>
- <member optional="true"><type>VkSparseMemoryBindFlags</type><name>flags</name></member>
+ <member optional="true"><type>VkSparseMemoryBindFlags</type> <name>flags</name></member>
</type>
<type category="struct" name="VkSparseImageMemoryBind">
<member><type>VkImageSubresource</type> <name>subresource</name></member>
@@ -1105,7 +1325,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkExtent3D</type> <name>extent</name></member>
<member optional="true"><type>VkDeviceMemory</type> <name>memory</name></member>
<member><type>VkDeviceSize</type> <name>memoryOffset</name><comment>Specified in bytes</comment></member>
- <member optional="true"><type>VkSparseMemoryBindFlags</type><name>flags</name></member>
+ <member optional="true"><type>VkSparseMemoryBindFlags</type> <name>flags</name></member>
</type>
<type category="struct" name="VkSparseBufferMemoryBindInfo">
<member><type>VkBuffer</type> <name>buffer</name></member>
@@ -1157,6 +1377,19 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkOffset3D</type> <name>imageOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
<member><type>VkExtent3D</type> <name>imageExtent</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
</type>
+ <type category="struct" name="VkCopyMemoryIndirectCommandNV">
+ <member><type>VkDeviceAddress</type> <name>srcAddress</name></member>
+ <member><type>VkDeviceAddress</type> <name>dstAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
+ </type>
+ <type category="struct" name="VkCopyMemoryToImageIndirectCommandNV">
+ <member><type>VkDeviceAddress</type> <name>srcAddress</name></member>
+ <member><type>uint32_t</type> <name>bufferRowLength</name><comment>Specified in texels</comment></member>
+ <member><type>uint32_t</type> <name>bufferImageHeight</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>imageSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>imageOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ <member><type>VkExtent3D</type> <name>imageExtent</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ </type>
<type category="struct" name="VkImageResolve">
<member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
<member><type>VkOffset3D</type> <name>srcOffset</name></member>
@@ -1164,9 +1397,9 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkOffset3D</type> <name>dstOffset</name></member>
<member><type>VkExtent3D</type> <name>extent</name></member>
</type>
- <type category="struct" name="VkShaderModuleCreateInfo">
+ <type category="struct" name="VkShaderModuleCreateInfo" structextends="VkPipelineShaderStageCreateInfo">
<member values="VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true" optional="true">const <type>void</type>* <name>pNext</name><comment>noautovalidity because this structure can be either an explicit parameter, or passed in a pNext chain</comment></member>
<member optional="true"><type>VkShaderModuleCreateFlags</type> <name>flags</name></member>
<member><type>size_t</type> <name>codeSize</name><comment>Specified in bytes</comment></member>
<member len="latexmath:[\textrm{codeSize} \over 4]" altlen="codeSize / 4">const <type>uint32_t</type>* <name>pCode</name><comment>Binary code of size codeSize</comment></member>
@@ -1194,7 +1427,7 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkDescriptorPoolCreateFlags</type> <name>flags</name></member>
<member><type>uint32_t</type> <name>maxSets</name></member>
- <member><type>uint32_t</type> <name>poolSizeCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>poolSizeCount</name></member>
<member len="poolSizeCount">const <type>VkDescriptorPoolSize</type>* <name>pPoolSizes</name></member>
</type>
<type category="struct" name="VkDescriptorSetAllocateInfo">
@@ -1220,19 +1453,32 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkPipelineShaderStageCreateFlags</type> <name>flags</name></member>
<member><type>VkShaderStageFlagBits</type> <name>stage</name><comment>Shader stage</comment></member>
- <member><type>VkShaderModule</type> <name>module</name><comment>Module containing entry point</comment></member>
- <member len="null-terminated">const <type>char</type>* <name>pName</name><comment>Null-terminated entry point name</comment></member>
+ <member optional="true"><type>VkShaderModule</type> <name>module</name><comment>Module containing entry point</comment></member>
+ <member api="vulkan" len="null-terminated">const <type>char</type>* <name>pName</name><comment>Null-terminated entry point name</comment></member>
+ <member api="vulkansc" optional="true" len="null-terminated">const <type>char</type>* <name>pName</name><comment>Null-terminated entry point name</comment></member>
<member optional="true">const <type>VkSpecializationInfo</type>* <name>pSpecializationInfo</name></member>
</type>
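 module is optional here because VkShaderModuleCreateInfo (made a structextends of this structure above) can supply the SPIR-V directly through the pNext chain; a minimal sketch, assuming this ability comes from VK_EXT_graphics_pipeline_library and that the SPIR-V words are already loaded:

     /* spirv_words / spirv_size_bytes: SPIR-V binary assumed already loaded. */
     VkShaderModuleCreateInfo module_info = {
         .sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
         .codeSize = spirv_size_bytes,
         .pCode    = spirv_words,
     };
     VkPipelineShaderStageCreateInfo stage_info = {
         .sType  = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .pNext  = &module_info,             /* code provided inline */
         .stage  = VK_SHADER_STAGE_FRAGMENT_BIT,
         .module = VK_NULL_HANDLE,           /* allowed because module is optional */
         .pName  = "main",
     };
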
<type category="struct" name="VkComputePipelineCreateInfo">
<member values="VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
+ <member noautovalidity="true" optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
<member><type>VkPipelineShaderStageCreateInfo</type> <name>stage</name></member>
<member><type>VkPipelineLayout</type> <name>layout</name><comment>Interface layout of the pipeline</comment></member>
<member noautovalidity="true" optional="true"><type>VkPipeline</type> <name>basePipelineHandle</name><comment>If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is nonzero, it specifies the handle of the base pipeline this is a derivative of</comment></member>
<member><type>int32_t</type> <name>basePipelineIndex</name><comment>If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is not -1, it specifies an index into pCreateInfos of the base pipeline this is a derivative of</comment></member>
</type>
+ <type category="struct" name="VkComputePipelineIndirectBufferInfoNV">
+ <member values="VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_INDIRECT_BUFFER_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceAddress</type> <name>deviceAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name></member>
+ <member><type>VkDeviceAddress</type> <name>pipelineDeviceAddressCaptureReplay</name></member>
+ </type>
+ <type category="struct" name="VkPipelineCreateFlags2CreateInfoKHR" structextends="VkComputePipelineCreateInfo,VkGraphicsPipelineCreateInfo,VkRayTracingPipelineCreateInfoNV,VkRayTracingPipelineCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkPipelineCreateFlags2KHR</type> <name>flags</name></member>
+ </type>
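+ Analogously to the buffer usage case, a minimal sketch of chaining the 64-bit pipeline create flags (the reason the legacy flags member of VkComputePipelineCreateInfo above is noautovalidity); the VK_PIPELINE_CREATE_2_* flag name is assumed from VK_KHR_maintenance5, and stage_info / layout are assumed to already exist:
+
+     VkPipelineCreateFlags2CreateInfoKHR flags2 = {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR,
+         .flags = VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR,  /* assumed name */
+     };
+     VkComputePipelineCreateInfo compute_info = {
+         .sType  = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+         .pNext  = &flags2,
+         .stage  = stage_info,   /* assumed VkPipelineShaderStageCreateInfo */
+         .layout = layout,       /* assumed VkPipelineLayout */
+         .basePipelineIndex = -1,
+         /* legacy .flags left 0: superseded by the chained 64-bit flags */
+     };
+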
<type category="struct" name="VkVertexInputBindingDescription">
<member><type>uint32_t</type> <name>binding</name><comment>Vertex buffer binding id</comment></member>
<member><type>uint32_t</type> <name>stride</name><comment>Distance between vertices in bytes (0 = no advancement)</comment></member>
@@ -1318,7 +1564,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>logicOpEnable</name></member>
<member noautovalidity="true"><type>VkLogicOp</type> <name>logicOp</name></member>
<member optional="true"><type>uint32_t</type> <name>attachmentCount</name><comment># of pAttachments</comment></member>
- <member len="attachmentCount">const <type>VkPipelineColorBlendAttachmentState</type>* <name>pAttachments</name></member>
+ <member optional="true" len="attachmentCount">const <type>VkPipelineColorBlendAttachmentState</type>* <name>pAttachments</name></member>
<member><type>float</type> <name>blendConstants</name>[4]</member>
</type>
<type category="struct" name="VkPipelineDynamicStateCreateInfo">
@@ -1354,21 +1600,22 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkGraphicsPipelineCreateInfo">
<member values="VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
- <member><type>uint32_t</type> <name>stageCount</name></member>
- <member len="stageCount">const <type>VkPipelineShaderStageCreateInfo</type>* <name>pStages</name><comment>One entry for each active shader stage</comment></member>
+ <member noautovalidity="true" optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
+ <member noautovalidity="true" optional="true"><type>uint32_t</type> <name>stageCount</name></member>
+ <member api="vulkan" noautovalidity="true" len="stageCount" optional="true">const <type>VkPipelineShaderStageCreateInfo</type>* <name>pStages</name><comment>One entry for each active shader stage</comment></member>
+ <member api="vulkansc" noautovalidity="true" len="stageCount">const <type>VkPipelineShaderStageCreateInfo</type>* <name>pStages</name><comment>One entry for each active shader stage</comment></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineVertexInputStateCreateInfo</type>* <name>pVertexInputState</name></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineInputAssemblyStateCreateInfo</type>* <name>pInputAssemblyState</name></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineTessellationStateCreateInfo</type>* <name>pTessellationState</name></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineViewportStateCreateInfo</type>* <name>pViewportState</name></member>
- <member>const <type>VkPipelineRasterizationStateCreateInfo</type>* <name>pRasterizationState</name></member>
+ <member noautovalidity="true" optional="true">const <type>VkPipelineRasterizationStateCreateInfo</type>* <name>pRasterizationState</name></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineMultisampleStateCreateInfo</type>* <name>pMultisampleState</name></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineDepthStencilStateCreateInfo</type>* <name>pDepthStencilState</name></member>
<member noautovalidity="true" optional="true">const <type>VkPipelineColorBlendStateCreateInfo</type>* <name>pColorBlendState</name></member>
<member optional="true">const <type>VkPipelineDynamicStateCreateInfo</type>* <name>pDynamicState</name></member>
- <member><type>VkPipelineLayout</type> <name>layout</name><comment>Interface layout of the pipeline</comment></member>
- <member><type>VkRenderPass</type> <name>renderPass</name></member>
- <member><type>uint32_t</type> <name>subpass</name></member>
+ <member noautovalidity="true" optional="true"><type>VkPipelineLayout</type> <name>layout</name><comment>Interface layout of the pipeline</comment></member>
+ <member noautovalidity="true" optional="true"><type>VkRenderPass</type> <name>renderPass</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>subpass</name></member>
<member noautovalidity="true" optional="true"><type>VkPipeline</type> <name>basePipelineHandle</name><comment>If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is nonzero, it specifies the handle of the base pipeline this is a derivative of</comment></member>
<member><type>int32_t</type> <name>basePipelineIndex</name><comment>If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is not -1, it specifies an index into pCreateInfos of the base pipeline this is a derivative of</comment></member>
</type>
@@ -1376,7 +1623,8 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkPipelineCacheCreateFlags</type> <name>flags</name></member>
- <member optional="true"><type>size_t</type> <name>initialDataSize</name><comment>Size of initial data to populate cache, in bytes</comment></member>
+ <member api="vulkan" optional="true"><type>size_t</type> <name>initialDataSize</name><comment>Size of initial data to populate cache, in bytes</comment></member>
+ <member api="vulkansc"><type>size_t</type> <name>initialDataSize</name><comment>Size of initial data to populate cache, in bytes</comment></member>
<member len="initialDataSize">const <type>void</type>* <name>pInitialData</name><comment>Initial data to populate cache</comment></member>
</type>
<type category="struct" name="VkPipelineCacheHeaderVersionOne">
@@ -1387,17 +1635,41 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>deviceID</name></member>
<member><type>uint8_t</type> <name>pipelineCacheUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
</type>
+ <type category="struct" name="VkPipelineCacheStageValidationIndexEntry">
+ <comment>The fields in this structure are non-normative since structure packing is implementation-defined in C. The specification defines the normative layout.</comment>
+ <member><type>uint64_t</type> <name>codeSize</name></member>
+ <member><type>uint64_t</type> <name>codeOffset</name></member>
+ </type>
+ <type category="struct" name="VkPipelineCacheSafetyCriticalIndexEntry">
+ <comment>The fields in this structure are non-normative since structure packing is implementation-defined in C. The specification defines the normative layout.</comment>
+ <member><type>uint8_t</type> <name>pipelineIdentifier</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member><type>uint64_t</type> <name>pipelineMemorySize</name></member>
+ <member><type>uint64_t</type> <name>jsonSize</name></member>
+ <member><type>uint64_t</type> <name>jsonOffset</name></member>
+ <member><type>uint32_t</type> <name>stageIndexCount</name></member>
+ <member><type>uint32_t</type> <name>stageIndexStride</name></member>
+ <member><type>uint64_t</type> <name>stageIndexOffset</name></member>
+ </type>
+ <type category="struct" name="VkPipelineCacheHeaderVersionSafetyCriticalOne">
+ <comment>The fields in this structure are non-normative since structure packing is implementation-defined in C. The specification defines the normative layout.</comment>
+ <member><type>VkPipelineCacheHeaderVersionOne</type> <name>headerVersionOne</name></member>
+ <member><type>VkPipelineCacheValidationVersion</type> <name>validationVersion</name></member>
+ <member><type>uint32_t</type> <name>implementationData</name></member>
+ <member><type>uint32_t</type> <name>pipelineIndexCount</name></member>
+ <member><type>uint32_t</type> <name>pipelineIndexStride</name></member>
+ <member><type>uint64_t</type> <name>pipelineIndexOffset</name></member>
+ </type>
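+ Because the comments above state that these C structures are non-normative, a tool reading a cache blob should parse fields at the byte offsets the specification defines rather than casting to the structs; a minimal sketch for the version-one base header, assuming the spec's little-endian field encoding (offsets 0/4/8/12 for headerSize, headerVersion, vendorID, deviceID, followed by the 16-byte UUID):
+
+     #include <stdint.h>
+
+     static uint32_t read_u32_le(const uint8_t *p)
+     {
+         return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
+                ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
+     }
+
+     /* Reads two base-header fields from a pipeline cache blob. */
+     static void read_cache_header_v1(const uint8_t *blob,
+                                      uint32_t *vendor_id, uint32_t *device_id)
+     {
+         *vendor_id = read_u32_le(blob + 8);
+         *device_id = read_u32_le(blob + 12);
+     }
+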
<type category="struct" name="VkPushConstantRange">
<member><type>VkShaderStageFlags</type> <name>stageFlags</name><comment>Which stages use the range</comment></member>
<member><type>uint32_t</type> <name>offset</name><comment>Start of the range, in bytes</comment></member>
<member><type>uint32_t</type> <name>size</name><comment>Size of the range, in bytes</comment></member>
</type>
- <type category="struct" name="VkPipelineLayoutCreateInfo">
+ <type category="struct" name="VkPipelineLayoutCreateInfo" structextends="VkBindDescriptorSetsInfoKHR,VkPushConstantsInfoKHR,VkPushDescriptorSetInfoKHR,VkPushDescriptorSetWithTemplateInfoKHR,VkSetDescriptorBufferOffsetsInfoEXT,VkBindDescriptorBufferEmbeddedSamplersInfoEXT">
<member values="VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkPipelineLayoutCreateFlags</type> <name>flags</name></member>
<member optional="true"><type>uint32_t</type> <name>setLayoutCount</name><comment>Number of descriptor sets interfaced by the pipeline</comment></member>
- <member len="setLayoutCount">const <type>VkDescriptorSetLayout</type>* <name>pSetLayouts</name><comment>Array of setCount number of descriptor set layout objects defining the layout of the</comment></member>
+ <member optional="false,true" len="setLayoutCount">const <type>VkDescriptorSetLayout</type>* <name>pSetLayouts</name><comment>Array of setCount number of descriptor set layout objects defining the layout of the</comment></member>
<member optional="true"><type>uint32_t</type> <name>pushConstantRangeCount</name><comment>Number of push-constant ranges used by the pipeline</comment></member>
<member len="pushConstantRangeCount">const <type>VkPushConstantRange</type>* <name>pPushConstantRanges</name><comment>Array of pushConstantRangeCount number of ranges used by various shader stages</comment></member>
</type>
@@ -1545,7 +1817,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>dualSrcBlend</name><comment>blend operations which take two sources</comment></member>
<member><type>VkBool32</type> <name>logicOp</name><comment>logic operations</comment></member>
<member><type>VkBool32</type> <name>multiDrawIndirect</name><comment>multi draw indirect</comment></member>
- <member><type>VkBool32</type> <name>drawIndirectFirstInstance</name><comment>indirect draws can use non-zero firstInstance</comment></member>
+ <member><type>VkBool32</type> <name>drawIndirectFirstInstance</name><comment>indirect drawing can use non-zero firstInstance</comment></member>
<member><type>VkBool32</type> <name>depthClamp</name><comment>depth clamping</comment></member>
<member><type>VkBool32</type> <name>depthBiasClamp</name><comment>depth bias clamping</comment></member>
<member><type>VkBool32</type> <name>fillModeNonSolid</name><comment>point and wireframe fill modes</comment></member>
@@ -1592,11 +1864,11 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>inheritedQueries</name><comment>Queries may be inherited from primary to secondary command buffers</comment></member>
</type>
<type category="struct" name="VkPhysicalDeviceSparseProperties" returnedonly="true">
- <member limittype="bitmask"><type>VkBool32</type> <name>residencyStandard2DBlockShape</name><comment>Sparse resources support: GPU will access all 2D (single sample) sparse resources using the standard sparse image block shapes (based on pixel format)</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>residencyStandard2DMultisampleBlockShape</name><comment>Sparse resources support: GPU will access all 2D (multisample) sparse resources using the standard sparse image block shapes (based on pixel format)</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>residencyStandard3DBlockShape</name><comment>Sparse resources support: GPU will access all 3D sparse resources using the standard sparse image block shapes (based on pixel format)</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>residencyAlignedMipSize</name><comment>Sparse resources support: Images with mip level dimensions that are NOT a multiple of the sparse image block dimensions will be placed in the mip tail</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>residencyNonResidentStrict</name><comment>Sparse resources support: GPU can consistently access non-resident regions of a resource, all reads return as if data is 0, writes are discarded</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>residencyStandard2DBlockShape</name><comment>Sparse resources support: GPU will access all 2D (single sample) sparse resources using the standard sparse image block shapes (based on pixel format)</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>residencyStandard2DMultisampleBlockShape</name><comment>Sparse resources support: GPU will access all 2D (multisample) sparse resources using the standard sparse image block shapes (based on pixel format)</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>residencyStandard3DBlockShape</name><comment>Sparse resources support: GPU will access all 3D sparse resources using the standard sparse image block shapes (based on pixel format)</comment></member>
+ <member limittype="not"><type>VkBool32</type> <name>residencyAlignedMipSize</name><comment>Sparse resources support: Images with mip level dimensions that are NOT a multiple of the sparse image block dimensions will be placed in the mip tail</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>residencyNonResidentStrict</name><comment>Sparse resources support: GPU can consistently access non-resident regions of a resource, all reads return as if data is 0, writes are discarded</comment></member>
</type>
<type category="struct" name="VkPhysicalDeviceLimits" returnedonly="true">
<comment>resource maximum sizes</comment>
@@ -1612,7 +1884,7 @@ typedef void <name>CAMetalLayer</name>;
<comment>memory limits</comment>
<member limittype="max"><type>uint32_t</type> <name>maxMemoryAllocationCount</name><comment>max number of device memory allocations supported</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxSamplerAllocationCount</name><comment>max number of samplers that can be allocated on a device</comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>bufferImageGranularity</name><comment>Granularity (in bytes) at which buffers and images can be bound to adjacent memory for simultaneous usage</comment></member>
+ <member limittype="min,mul"><type>VkDeviceSize</type> <name>bufferImageGranularity</name><comment>Granularity (in bytes) at which buffers and images can be bound to adjacent memory for simultaneous usage</comment></member>
<member limittype="max"><type>VkDeviceSize</type> <name>sparseAddressSpaceSize</name><comment>Total address space available for sparse allocations (bytes)</comment></member>
<comment>descriptor set limits</comment>
<member limittype="max"><type>uint32_t</type> <name>maxBoundDescriptorSets</name><comment>max number of descriptors sets that can be bound to a pipeline</comment></member>
@@ -1663,28 +1935,28 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>uint32_t</type> <name>maxComputeWorkGroupCount</name>[3]<comment>max num of compute work groups that may be dispatched by a single command (x,y,z)</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxComputeWorkGroupInvocations</name><comment>max total compute invocations in a single local work group</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxComputeWorkGroupSize</name>[3]<comment>max local size of a compute work group (x,y,z)</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>subPixelPrecisionBits</name><comment>number bits of subpixel precision in screen x and y</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>subTexelPrecisionBits</name><comment>number bits of precision for selecting texel weights</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>mipmapPrecisionBits</name><comment>number bits of precision for selecting mipmap weights</comment></member>
+ <member limittype="bits"><type>uint32_t</type> <name>subPixelPrecisionBits</name><comment>number bits of subpixel precision in screen x and y</comment></member>
+ <member limittype="bits"><type>uint32_t</type> <name>subTexelPrecisionBits</name><comment>number bits of precision for selecting texel weights</comment></member>
+ <member limittype="bits"><type>uint32_t</type> <name>mipmapPrecisionBits</name><comment>number bits of precision for selecting mipmap weights</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxDrawIndexedIndexValue</name><comment>max index value for indexed draw calls (for 32-bit indices)</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDrawIndirectCount</name><comment>max draw count for indirect draw calls</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDrawIndirectCount</name><comment>max draw count for indirect drawing calls</comment></member>
<member limittype="max"><type>float</type> <name>maxSamplerLodBias</name><comment>max absolute sampler LOD bias</comment></member>
<member limittype="max"><type>float</type> <name>maxSamplerAnisotropy</name><comment>max degree of sampler anisotropy</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxViewports</name><comment>max number of active viewports</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxViewportDimensions</name>[2]<comment>max viewport dimensions (x,y)</comment></member>
- <member limittype="range"><type>float</type> <name>viewportBoundsRange</name>[2]<comment>viewport bounds range (min,max)</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>viewportSubPixelBits</name><comment>number bits of subpixel precision for viewport</comment></member>
- <member limittype="noauto"><type>size_t</type> <name>minMemoryMapAlignment</name><comment>min required alignment of pointers returned by MapMemory (bytes)</comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>minTexelBufferOffsetAlignment</name><comment>min required alignment for texel buffer offsets (bytes) </comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>minUniformBufferOffsetAlignment</name><comment>min required alignment for uniform buffer sizes and offsets (bytes)</comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>minStorageBufferOffsetAlignment</name><comment>min required alignment for storage buffer offsets (bytes)</comment></member>
+ <member limittype="range"><type>float</type> <name>viewportBoundsRange</name>[2]<comment>viewport bounds range (min,max)</comment></member>
+ <member limittype="bits"><type>uint32_t</type> <name>viewportSubPixelBits</name><comment>number bits of subpixel precision for viewport</comment></member>
+ <member limittype="min,pot"><type>size_t</type> <name>minMemoryMapAlignment</name><comment>min required alignment of pointers returned by MapMemory (bytes)</comment></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>minTexelBufferOffsetAlignment</name><comment>min required alignment for texel buffer offsets (bytes) </comment></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>minUniformBufferOffsetAlignment</name><comment>min required alignment for uniform buffer sizes and offsets (bytes)</comment></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>minStorageBufferOffsetAlignment</name><comment>min required alignment for storage buffer offsets (bytes)</comment></member>
<member limittype="min"><type>int32_t</type> <name>minTexelOffset</name><comment>min texel offset for OpTextureSampleOffset</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxTexelOffset</name><comment>max texel offset for OpTextureSampleOffset</comment></member>
<member limittype="min"><type>int32_t</type> <name>minTexelGatherOffset</name><comment>min texel offset for OpTextureGatherOffset</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxTexelGatherOffset</name><comment>max texel offset for OpTextureGatherOffset</comment></member>
<member limittype="min"><type>float</type> <name>minInterpolationOffset</name><comment>furthest negative offset for interpolateAtOffset</comment></member>
<member limittype="max"><type>float</type> <name>maxInterpolationOffset</name><comment>furthest positive offset for interpolateAtOffset</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>subPixelInterpolationOffsetBits</name><comment>number of subpixel bits for interpolateAtOffset</comment></member>
+ <member limittype="bits"><type>uint32_t</type> <name>subPixelInterpolationOffsetBits</name><comment>number of subpixel bits for interpolateAtOffset</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxFramebufferWidth</name><comment>max width for a framebuffer</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxFramebufferHeight</name><comment>max height for a framebuffer</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxFramebufferLayers</name><comment>max layer count for a layered framebuffer</comment></member>
@@ -1692,28 +1964,28 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>framebufferDepthSampleCounts</name><comment>supported depth sample counts for a framebuffer</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>framebufferStencilSampleCounts</name><comment>supported stencil sample counts for a framebuffer</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>framebufferNoAttachmentsSampleCounts</name><comment>supported sample counts for a subpass which uses no attachments</comment></member>
- <member limittype="bitmask"><type>uint32_t</type> <name>maxColorAttachments</name><comment>max number of color attachments per subpass</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxColorAttachments</name><comment>max number of color attachments per subpass</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>sampledImageColorSampleCounts</name><comment>supported color sample counts for a non-integer sampled image</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>sampledImageIntegerSampleCounts</name><comment>supported sample counts for an integer image</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>sampledImageDepthSampleCounts</name><comment>supported depth sample counts for a sampled image</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>sampledImageStencilSampleCounts</name><comment>supported stencil sample counts for a sampled image</comment></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>storageImageSampleCounts</name><comment>supported sample counts for a storage image</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxSampleMaskWords</name><comment>max number of sample mask words</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>timestampComputeAndGraphics</name><comment>timestamps on graphics and compute queues</comment></member>
- <member limittype="noauto"><type>float</type> <name>timestampPeriod</name><comment>number of nanoseconds it takes for timestamp query value to increment by 1</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>timestampComputeAndGraphics</name><comment>timestamps on graphics and compute queues</comment></member>
+ <member limittype="min,mul"><type>float</type> <name>timestampPeriod</name><comment>number of nanoseconds it takes for timestamp query value to increment by 1</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxClipDistances</name><comment>max number of clip distances</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxCullDistances</name><comment>max number of cull distances</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxCombinedClipAndCullDistances</name><comment>max combined number of user clipping</comment></member>
<member limittype="max"><type>uint32_t</type> <name>discreteQueuePriorities</name><comment>distinct queue priorities available </comment></member>
- <member limittype="range"><type>float</type> <name>pointSizeRange</name>[2]<comment>range (min,max) of supported point sizes</comment></member>
- <member limittype="range"><type>float</type> <name>lineWidthRange</name>[2]<comment>range (min,max) of supported line widths</comment></member>
- <member limittype="max"><type>float</type> <name>pointSizeGranularity</name><comment>granularity of supported point sizes</comment></member>
- <member limittype="max"><type>float</type> <name>lineWidthGranularity</name><comment>granularity of supported line widths</comment></member>
- <member limittype="noauto"><type>VkBool32</type> <name>strictLines</name><comment>line rasterization follows preferred rules</comment></member>
- <member limittype="noauto"><type>VkBool32</type> <name>standardSampleLocations</name><comment>supports standard sample locations for all supported sample counts</comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>optimalBufferCopyOffsetAlignment</name><comment>optimal offset of buffer copies</comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>optimalBufferCopyRowPitchAlignment</name><comment>optimal pitch of buffer copies</comment></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>nonCoherentAtomSize</name><comment>minimum size and alignment for non-coherent host-mapped device memory access</comment></member>
+ <member limittype="range"><type>float</type> <name>pointSizeRange</name>[2]<comment>range (min,max) of supported point sizes</comment></member>
+ <member limittype="range"><type>float</type> <name>lineWidthRange</name>[2]<comment>range (min,max) of supported line widths</comment></member>
+ <member limittype="min,mul"><type>float</type> <name>pointSizeGranularity</name><comment>granularity of supported point sizes</comment></member>
+ <member limittype="min,mul"><type>float</type> <name>lineWidthGranularity</name><comment>granularity of supported line widths</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>strictLines</name><comment>line rasterization follows preferred rules</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>standardSampleLocations</name><comment>supports standard sample locations for all supported sample counts</comment></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>optimalBufferCopyOffsetAlignment</name><comment>optimal offset of buffer copies</comment></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>optimalBufferCopyRowPitchAlignment</name><comment>optimal pitch of buffer copies</comment></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>nonCoherentAtomSize</name><comment>minimum size and alignment for non-coherent host-mapped device memory access</comment></member>
</type>
<type category="struct" name="VkSemaphoreCreateInfo">
<member values="VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
@@ -1732,7 +2004,7 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkFramebufferCreateFlags</type> <name>flags</name></member>
- <member><type>VkRenderPass</type> <name>renderPass</name></member>
+ <member><type>VkRenderPass</type> <name>renderPass</name></member>
<member optional="true"><type>uint32_t</type> <name>attachmentCount</name></member>
<member noautovalidity="true" len="attachmentCount">const <type>VkImageView</type>* <name>pAttachments</name></member>
<member><type>uint32_t</type> <name>width</name></member>
@@ -1771,7 +2043,7 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>uint32_t</type> <name>waitSemaphoreCount</name></member>
<member len="waitSemaphoreCount">const <type>VkSemaphore</type>* <name>pWaitSemaphores</name></member>
- <member len="waitSemaphoreCount">const <type>VkPipelineStageFlags</type>* <name>pWaitDstStageMask</name></member>
+ <member optional="false,true" len="waitSemaphoreCount">const <type>VkPipelineStageFlags</type>* <name>pWaitDstStageMask</name></member>
<member optional="true"><type>uint32_t</type> <name>commandBufferCount</name></member>
<member len="commandBufferCount">const <type>VkCommandBuffer</type>* <name>pCommandBuffers</name></member>
<member optional="true"><type>uint32_t</type> <name>signalSemaphoreCount</name></member>
@@ -1935,7 +2207,8 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkCompositeAlphaFlagBitsKHR</type> <name>compositeAlpha</name><comment>The alpha blending mode used when compositing this surface with other surfaces in the window system</comment></member>
<member><type>VkPresentModeKHR</type> <name>presentMode</name><comment>Which presentation mode to use for presents on this swap chain</comment></member>
<member><type>VkBool32</type> <name>clipped</name><comment>Specifies whether presentable images may be affected by window clip regions</comment></member>
- <member optional="true"><type>VkSwapchainKHR</type> <name>oldSwapchain</name><comment>Existing swap chain to replace, if any</comment></member>
+ <member api="vulkan" optional="true"><type>VkSwapchainKHR</type> <name>oldSwapchain</name><comment>Existing swap chain to replace, if any</comment></member>
+ <member api="vulkansc" noautovalidity="true" optional="true"><type>VkSwapchainKHR</type> <name>oldSwapchain</name><comment>Existing swap chain to replace, if any</comment></member>
</type>
<type category="struct" name="VkPresentInfoKHR">
<member values="VK_STRUCTURE_TYPE_PRESENT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
@@ -1968,6 +2241,27 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>uint32_t</type> <name>disabledValidationFeatureCount</name><comment>Number of validation features to disable</comment></member>
<member len="disabledValidationFeatureCount">const <type>VkValidationFeatureDisableEXT</type>* <name>pDisabledValidationFeatures</name><comment>Validation features to disable</comment></member>
</type>
+ <type category="struct" name="VkLayerSettingsCreateInfoEXT" allowduplicate="true" structextends="VkInstanceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name><comment>Must be VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT</comment></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>settingCount</name><comment>Number of settings to configure</comment></member>
+ <member len="settingCount">const <type>VkLayerSettingEXT</type>* <name>pSettings</name><comment>Validation features to enable</comment></member>
+ </type>
+ <type category="struct" name="VkLayerSettingEXT">
+ <member len="null-terminated">const <type>char</type>* <name>pLayerName</name></member>
+ <member len="null-terminated">const <type>char</type>* <name>pSettingName</name></member>
+ <member><type>VkLayerSettingTypeEXT</type> <name>type</name><comment>The type of the object</comment></member>
+ <member optional="true"><type>uint32_t</type> <name>valueCount</name><comment>Number of values of the setting</comment></member>
+ <member len="valueCount">const <type>void</type>* <name>pValues</name><comment>Values to pass for a setting</comment></member>
+ </type>
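For reference, the two VK_EXT_layer_settings structures added above are consumed through the VkInstanceCreateInfo pNext chain. A minimal C sketch of that usage follows (types from <vulkan/vulkan.h>); the layer name, setting name, and value are hypothetical placeholders, not values defined by this registry:

    /* Configure one hypothetical boolean setting on a hypothetical layer. */
    VkBool32 enable = VK_TRUE;
    VkLayerSettingEXT setting = {
        .pLayerName   = "VK_LAYER_EXAMPLE_name",   /* hypothetical layer name */
        .pSettingName = "example_setting",         /* hypothetical setting name */
        .type         = VK_LAYER_SETTING_TYPE_BOOL32_EXT,
        .valueCount   = 1,
        .pValues      = &enable,
    };
    VkLayerSettingsCreateInfoEXT layer_settings = {
        .sType        = VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT,
        .settingCount = 1,
        .pSettings    = &setting,
    };
    VkInstanceCreateInfo instance_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pNext = &layer_settings,  /* allowduplicate="true": several may be chained */
    };

Because the struct is marked allowduplicate="true", more than one VkLayerSettingsCreateInfoEXT may appear in the same pNext chain.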
+ <type category="struct" name="VkApplicationParametersEXT" allowduplicate="true" structextends="VkApplicationInfo,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_APPLICATION_PARAMETERS_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>vendorID</name></member>
+ <member optional="true"><type>uint32_t</type> <name>deviceID</name></member>
+ <member><type>uint32_t</type> <name>key</name></member>
+ <member><type>uint64_t</type> <name>value</name></member>
+ </type>
<type category="struct" name="VkPipelineRasterizationStateRasterizationOrderAMD" structextends="VkPipelineRasterizationStateCreateInfo">
<member values="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
@@ -2039,7 +2333,36 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>SECURITY_ATTRIBUTES</type>* <name>pAttributes</name></member>
<member optional="true"><type>DWORD</type> <name>dwAccess</name></member>
</type>
- <type category="struct" name="VkWin32KeyedMutexAcquireReleaseInfoNV" structextends="VkSubmitInfo,VkSubmitInfo2KHR">
+ <type category="struct" name="VkExportMemorySciBufInfoNV" structextends="VkMemoryAllocateInfo">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_MEMORY_SCI_BUF_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>NvSciBufAttrList</type> <name>pAttributes</name></member>
+ </type>
+ <type category="struct" name="VkImportMemorySciBufInfoNV" structextends="VkMemoryAllocateInfo">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_MEMORY_SCI_BUF_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></member>
+ <member><type>NvSciBufObj</type> <name>handle</name></member>
+ </type>
+ <type category="struct" name="VkMemoryGetSciBufInfoNV">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_GET_SCI_BUF_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceMemory</type> <name>memory</name></member>
+ <member><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></member>
+ </type>
+ <type category="struct" name="VkMemorySciBufPropertiesNV">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_SCI_BUF_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>memoryTypeBits</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalMemorySciBufFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCI_BUF_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>sciBufImport</name></member>
+ <member><type>VkBool32</type> <name>sciBufExport</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalSciBufFeaturesNV" alias="VkPhysicalDeviceExternalMemorySciBufFeaturesNV"/>
+ <type category="struct" name="VkWin32KeyedMutexAcquireReleaseInfoNV" structextends="VkSubmitInfo,VkSubmitInfo2">
<member values="VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>uint32_t</type> <name>acquireCount</name></member>
@@ -2051,25 +2374,35 @@ typedef void <name>CAMetalLayer</name>;
<member len="releaseCount">const <type>uint64_t</type>* <name>pReleaseKeys</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>deviceGeneratedCommands</name></member>
</type>
- <type category="struct" name="VkDevicePrivateDataCreateInfoEXT" allowduplicate="true" structextends="VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>deviceGeneratedCompute</name></member>
+ <member><type>VkBool32</type> <name>deviceGeneratedComputePipelines</name></member>
+ <member><type>VkBool32</type> <name>deviceGeneratedComputeCaptureReplay</name></member>
+ </type>
+ <type category="struct" name="VkDevicePrivateDataCreateInfo" allowduplicate="true" structextends="VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>privateDataSlotRequestCount</name></member>
</type>
- <type category="struct" name="VkPrivateDataSlotCreateInfoEXT">
- <member values="VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkDevicePrivateDataCreateInfoEXT" alias="VkDevicePrivateDataCreateInfo"/>
+ <type category="struct" name="VkPrivateDataSlotCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkPrivateDataSlotCreateFlagsEXT</type> <name>flags</name></member>
+ <member><type>VkPrivateDataSlotCreateFlags</type> <name>flags</name></member>
</type>
- <type category="struct" name="VkPhysicalDevicePrivateDataFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPrivateDataSlotCreateInfoEXT" alias="VkPrivateDataSlotCreateInfo"/>
+ <type category="struct" name="VkPhysicalDevicePrivateDataFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>privateData</name></member>
</type>
+ <type category="struct" name="VkPhysicalDevicePrivateDataFeaturesEXT" alias="VkPhysicalDevicePrivateDataFeatures"/>
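Like other feature structs marked structextends="VkPhysicalDeviceFeatures2", the promoted private-data feature struct is queried by chaining it into vkGetPhysicalDeviceFeatures2. A minimal C sketch, assuming an existing VkPhysicalDevice handle named physical_device:

    VkPhysicalDevicePrivateDataFeatures private_data = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES,
    };
    VkPhysicalDeviceFeatures2 features2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &private_data,
    };
    vkGetPhysicalDeviceFeatures2(physical_device, &features2);
    /* private_data.privateData is VK_TRUE when the feature is supported. */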
<type category="struct" name="VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -2079,9 +2412,9 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>uint32_t</type> <name>maxIndirectCommandsStreamCount</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxIndirectCommandsTokenOffset</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxIndirectCommandsStreamStride</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>minSequencesCountBufferOffsetAlignment</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>minSequencesIndexBufferOffsetAlignment</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>minIndirectCommandsBufferOffsetAlignment</name></member>
+ <member limittype="min"><type>uint32_t</type> <name>minSequencesCountBufferOffsetAlignment</name></member>
+ <member limittype="min"><type>uint32_t</type> <name>minSequencesIndexBufferOffsetAlignment</name></member>
+ <member limittype="min"><type>uint32_t</type> <name>minIndirectCommandsBufferOffsetAlignment</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceMultiDrawPropertiesEXT" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
@@ -2098,8 +2431,8 @@ typedef void <name>CAMetalLayer</name>;
</type>
<type category="struct" name="VkGraphicsPipelineShaderGroupsCreateInfoNV" structextends="VkGraphicsPipelineCreateInfo">
<member values="VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>groupCount</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>groupCount</name></member>
<member len="groupCount">const <type>VkGraphicsShaderGroupCreateInfoNV</type>* <name>pGroups</name></member>
<member optional="true"><type>uint32_t</type> <name>pipelineCount</name></member>
<member len="pipelineCount">const <type>VkPipeline</type>* <name>pPipelines</name></member>
@@ -2172,10 +2505,19 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></member>
- <member><type>VkPipeline</type> <name>pipeline</name></member>
+ <member optional="true"><type>VkPipeline</type> <name>pipeline</name></member>
<member><type>VkIndirectCommandsLayoutNV</type> <name>indirectCommandsLayout</name></member>
<member><type>uint32_t</type> <name>maxSequencesCount</name></member>
</type>
+ <type category="struct" name="VkPipelineIndirectDeviceAddressInfoNV">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_INDIRECT_DEVICE_ADDRESS_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></member>
+ <member><type>VkPipeline</type> <name>pipeline</name></member>
+ </type>
+ <type category="struct" name="VkBindPipelineIndirectCommandNV">
+ <member><type>VkDeviceAddress</type> <name>pipelineAddress</name></member>
+ </type>
<type category="struct" name="VkPhysicalDeviceFeatures2" structextends="VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -2213,7 +2555,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkQueueFamilyProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkQueueFamilyProperties</type> <name>queueFamilyProperties</name></member>
+ <member limittype="struct"><type>VkQueueFamilyProperties</type> <name>queueFamilyProperties</name></member>
</type>
<type category="struct" name="VkQueueFamilyProperties2KHR" alias="VkQueueFamilyProperties2"/>
<type category="struct" name="VkPhysicalDeviceMemoryProperties2" returnedonly="true">
@@ -2225,7 +2567,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkSparseImageFormatProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkSparseImageFormatProperties</type> <name>properties</name></member>
+ <member limittype="struct"><type>VkSparseImageFormatProperties</type> <name>properties</name></member>
</type>
<type category="struct" name="VkSparseImageFormatProperties2KHR" alias="VkSparseImageFormatProperties2"/>
<type category="struct" name="VkPhysicalDeviceSparseImageFormatInfo2">
@@ -2253,10 +2595,10 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceDriverProperties" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkDriverId</type> <name>driverID</name></member>
- <member limittype="noauto"><type>char</type> <name>driverName</name>[<enum>VK_MAX_DRIVER_NAME_SIZE</enum>]</member>
- <member limittype="noauto"><type>char</type> <name>driverInfo</name>[<enum>VK_MAX_DRIVER_INFO_SIZE</enum>]</member>
- <member limittype="noauto"><type>VkConformanceVersion</type> <name>conformanceVersion</name></member>
+ <member limittype="exact"><type>VkDriverId</type> <name>driverID</name></member>
+ <member limittype="exact" len="null-terminated"><type>char</type> <name>driverName</name>[<enum>VK_MAX_DRIVER_NAME_SIZE</enum>]</member>
+ <member limittype="exact" len="null-terminated"><type>char</type> <name>driverInfo</name>[<enum>VK_MAX_DRIVER_INFO_SIZE</enum>]</member>
+ <member limittype="exact"><type>VkConformanceVersion</type> <name>conformanceVersion</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceDriverPropertiesKHR" alias="VkPhysicalDeviceDriverProperties"/>
<type category="struct" name="VkPresentRegionsKHR" structextends="VkPresentInfoKHR">
@@ -2305,7 +2647,7 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkBufferCreateFlags</type> <name>flags</name></member>
- <member><type>VkBufferUsageFlags</type> <name>usage</name></member>
+ <member optional="true" noautovalidity="true"><type>VkBufferUsageFlags</type> <name>usage</name></member>
<member><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceExternalBufferInfoKHR" alias="VkPhysicalDeviceExternalBufferInfo"/>
@@ -2318,11 +2660,11 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceIDProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint8_t</type> <name>deviceUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
- <member limittype="noauto"><type>uint8_t</type> <name>driverUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
- <member limittype="noauto"><type>uint8_t</type> <name>deviceLUID</name>[<enum>VK_LUID_SIZE</enum>]</member>
- <member limittype="noauto"><type>uint32_t</type> <name>deviceNodeMask</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>deviceLUIDValid</name></member>
+ <member limittype="noauto"><type>uint8_t</type> <name>deviceUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member limittype="noauto"><type>uint8_t</type> <name>driverUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member limittype="noauto"><type>uint8_t</type> <name>deviceLUID</name>[<enum>VK_LUID_SIZE</enum>]</member>
+ <member limittype="noauto"><type>uint32_t</type> <name>deviceNodeMask</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>deviceLUIDValid</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceIDPropertiesKHR" alias="VkPhysicalDeviceIDProperties"/>
<type category="struct" name="VkExternalMemoryImageCreateInfo" structextends="VkImageCreateInfo">
@@ -2402,7 +2744,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkDeviceMemory</type> <name>memory</name></member>
<member><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></member>
</type>
- <type category="struct" name="VkWin32KeyedMutexAcquireReleaseInfoKHR" structextends="VkSubmitInfo,VkSubmitInfo2KHR">
+ <type category="struct" name="VkWin32KeyedMutexAcquireReleaseInfoKHR" structextends="VkSubmitInfo,VkSubmitInfo2">
<member values="VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>uint32_t</type> <name>acquireCount</name></member>
@@ -2547,6 +2889,80 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkFence</type> <name>fence</name></member>
<member><type>VkExternalFenceHandleTypeFlagBits</type> <name>handleType</name></member>
</type>
+ <type category="struct" name="VkExportFenceSciSyncInfoNV" structextends="VkFenceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_FENCE_SCI_SYNC_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>NvSciSyncAttrList</type> <name>pAttributes</name></member>
+ </type>
+ <type category="struct" name="VkImportFenceSciSyncInfoNV">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_FENCE_SCI_SYNC_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member externsync="true"><type>VkFence</type> <name>fence</name></member>
+ <member><type>VkExternalFenceHandleTypeFlagBits</type> <name>handleType</name></member>
+ <member><type>void</type>* <name>handle</name></member>
+ </type>
+ <type category="struct" name="VkFenceGetSciSyncInfoNV">
+ <member values="VK_STRUCTURE_TYPE_FENCE_GET_SCI_SYNC_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkFence</type> <name>fence</name></member>
+ <member><type>VkExternalFenceHandleTypeFlagBits</type> <name>handleType</name></member>
+ </type>
+ <type category="struct" name="VkExportSemaphoreSciSyncInfoNV" structextends="VkSemaphoreCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_SCI_SYNC_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>NvSciSyncAttrList</type> <name>pAttributes</name></member>
+ </type>
+ <type category="struct" name="VkImportSemaphoreSciSyncInfoNV">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_SCI_SYNC_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member externsync="true"><type>VkSemaphore</type> <name>semaphore</name></member>
+ <member><type>VkExternalSemaphoreHandleTypeFlagBits</type> <name>handleType</name></member>
+ <member><type>void</type>* <name>handle</name></member>
+ </type>
+ <type category="struct" name="VkSemaphoreGetSciSyncInfoNV">
+ <member values="VK_STRUCTURE_TYPE_SEMAPHORE_GET_SCI_SYNC_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkSemaphore</type> <name>semaphore</name></member>
+ <member><type>VkExternalSemaphoreHandleTypeFlagBits</type> <name>handleType</name></member>
+ </type>
+ <type category="struct" name="VkSciSyncAttributesInfoNV">
+ <member values="VK_STRUCTURE_TYPE_SCI_SYNC_ATTRIBUTES_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkSciSyncClientTypeNV</type> <name>clientType</name></member>
+ <member><type>VkSciSyncPrimitiveTypeNV</type> <name>primitiveType</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalSciSyncFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SCI_SYNC_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>sciSyncFence</name></member>
+ <member><type>VkBool32</type> <name>sciSyncSemaphore</name></member>
+ <member><type>VkBool32</type> <name>sciSyncImport</name></member>
+ <member><type>VkBool32</type> <name>sciSyncExport</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalSciSync2FeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SCI_SYNC_2_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>sciSyncFence</name></member>
+ <member><type>VkBool32</type> <name>sciSyncSemaphore2</name></member>
+ <member><type>VkBool32</type> <name>sciSyncImport</name></member>
+ <member><type>VkBool32</type> <name>sciSyncExport</name></member>
+ </type>
+ <type category="struct" name="VkSemaphoreSciSyncPoolCreateInfoNV">
+ <member values="VK_STRUCTURE_TYPE_SEMAPHORE_SCI_SYNC_POOL_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>NvSciSyncObj</type> <name>handle</name></member>
+ </type>
+ <type category="struct" name="VkSemaphoreSciSyncCreateInfoNV" structextends="VkSemaphoreCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_SEMAPHORE_SCI_SYNC_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkSemaphoreSciSyncPoolNV</type> <name>semaphorePool</name></member>
+ <member>const <type>NvSciSyncFence</type>* <name>pFence</name></member>
+ </type>
+ <type category="struct" name="VkDeviceSemaphoreSciSyncPoolReservationCreateInfoNV" allowduplicate="true" structextends="VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_SEMAPHORE_SCI_SYNC_POOL_RESERVATION_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>semaphoreSciSyncPoolRequestCount</name></member>
+ </type>
<type category="struct" name="VkPhysicalDeviceMultiviewFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -2612,7 +3028,7 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>physicalDeviceCount</name></member>
- <member><type>VkPhysicalDevice</type> <name>physicalDevices</name>[<enum>VK_MAX_DEVICE_GROUP_SIZE</enum>]</member>
+ <member len="physicalDeviceCount"><type>VkPhysicalDevice</type> <name>physicalDevices</name>[<enum>VK_MAX_DEVICE_GROUP_SIZE</enum>]</member>
<member><type>VkBool32</type> <name>subsetAllocation</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceGroupPropertiesKHR" alias="VkPhysicalDeviceGroupProperties"/>
@@ -2655,7 +3071,7 @@ typedef void <name>CAMetalLayer</name>;
<member len="splitInstanceBindRegionCount">const <type>VkRect2D</type>* <name>pSplitInstanceBindRegions</name></member>
</type>
<type category="struct" name="VkBindImageMemoryDeviceGroupInfoKHR" alias="VkBindImageMemoryDeviceGroupInfo"/>
- <type category="struct" name="VkDeviceGroupRenderPassBeginInfo" structextends="VkRenderPassBeginInfo">
+ <type category="struct" name="VkDeviceGroupRenderPassBeginInfo" structextends="VkRenderPassBeginInfo,VkRenderingInfo">
<member values="VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>deviceMask</name></member>
@@ -2750,7 +3166,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkDescriptorUpdateTemplateType</type> <name>templateType</name></member>
<member noautovalidity="true"><type>VkDescriptorSetLayout</type> <name>descriptorSetLayout</name></member>
<member noautovalidity="true"><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></member>
- <member noautovalidity="true"><type>VkPipelineLayout</type><name>pipelineLayout</name><comment>If used for push descriptors, this is the only allowed layout</comment></member>
+ <member noautovalidity="true"><type>VkPipelineLayout</type> <name>pipelineLayout</name><comment>If used for push descriptors, this is the only allowed layout</comment></member>
<member noautovalidity="true"><type>uint32_t</type> <name>set</name></member>
</type>
<type category="struct" name="VkDescriptorUpdateTemplateCreateInfoKHR" alias="VkDescriptorUpdateTemplateCreateInfo"/>
@@ -2895,7 +3311,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceSurfaceInfo2KHR">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkSurfaceKHR</type> <name>surface</name></member>
+ <member noautovalidity="true" optional="true"><type>VkSurfaceKHR</type> <name>surface</name></member>
</type>
<type category="struct" name="VkSurfaceCapabilities2KHR" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
@@ -2950,7 +3366,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceSubgroupProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto" noautovalidity="true"><type>uint32_t</type> <name>subgroupSize</name><comment>The size of a subgroup for this queue.</comment></member>
+ <member limittype="max,pot" noautovalidity="true"><type>uint32_t</type> <name>subgroupSize</name><comment>The size of a subgroup for this queue.</comment></member>
<member limittype="bitmask" noautovalidity="true"><type>VkShaderStageFlags</type> <name>supportedStages</name><comment>Bitfield of what shader stages support subgroup operations</comment></member>
<member limittype="bitmask" noautovalidity="true"><type>VkSubgroupFeatureFlags</type> <name>supportedOperations</name><comment>Bitfield of what subgroup operations are supported.</comment></member>
<member limittype="bitmask" noautovalidity="true"><type>VkBool32</type> <name>quadOperationsInAllStages</name><comment>Flag to specify whether quad operations are available in all stages.</comment></member>
@@ -2967,6 +3383,12 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBuffer</type> <name>buffer</name></member>
</type>
<type category="struct" name="VkBufferMemoryRequirementsInfo2KHR" alias="VkBufferMemoryRequirementsInfo2"/>
+ <type category="struct" name="VkDeviceBufferMemoryRequirements">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>VkBufferCreateInfo</type>* <name>pCreateInfo</name></member>
+ </type>
+ <type category="struct" name="VkDeviceBufferMemoryRequirementsKHR" alias="VkDeviceBufferMemoryRequirements"/>
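VkDeviceBufferMemoryRequirements feeds the maintenance4-style query that returns memory requirements for a buffer description without creating the buffer. A minimal C sketch, assuming a VkDevice named device and a Vulkan 1.3 device (the KHR alias is used the same way with vkGetDeviceBufferMemoryRequirementsKHR):

    VkBufferCreateInfo buffer_info = {
        .sType       = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .size        = 4096,
        .usage       = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    };
    VkDeviceBufferMemoryRequirements req_info = {
        .sType       = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
        .pCreateInfo = &buffer_info,
    };
    VkMemoryRequirements2 reqs = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
    };
    vkGetDeviceBufferMemoryRequirements(device, &req_info, &reqs);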
<type category="struct" name="VkImageMemoryRequirementsInfo2">
<member values="VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
@@ -2979,6 +3401,13 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkImage</type> <name>image</name></member>
</type>
<type category="struct" name="VkImageSparseMemoryRequirementsInfo2KHR" alias="VkImageSparseMemoryRequirementsInfo2"/>
+ <type category="struct" name="VkDeviceImageMemoryRequirements">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>VkImageCreateInfo</type>* <name>pCreateInfo</name></member>
+ <member optional="true"><type>VkImageAspectFlagBits</type> <name>planeAspect</name></member>
+ </type>
+ <type category="struct" name="VkDeviceImageMemoryRequirementsKHR" alias="VkDeviceImageMemoryRequirements"/>
<type category="struct" name="VkMemoryRequirements2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -2994,7 +3423,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDevicePointClippingProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkPointClippingBehavior</type> <name>pointClippingBehavior</name></member>
+ <member limittype="exact"><type>VkPointClippingBehavior</type> <name>pointClippingBehavior</name></member>
</type>
<type category="struct" name="VkPhysicalDevicePointClippingPropertiesKHR" alias="VkPhysicalDevicePointClippingProperties"/>
<type category="struct" name="VkMemoryDedicatedRequirements" returnedonly="true" structextends="VkMemoryRequirements2">
@@ -3016,6 +3445,12 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkImageUsageFlags</type> <name>usage</name></member>
</type>
+ <type category="struct" name="VkImageViewSlicedCreateInfoEXT" structextends="VkImageViewCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_VIEW_SLICED_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>sliceOffset</name></member>
+ <member><type>uint32_t</type> <name>sliceCount</name></member>
+ </type>
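The sliced-view struct added above extends VkImageViewCreateInfo to expose a sub-range of a 3D image's slices. A minimal C sketch, assuming an existing VkImage named image created as VK_IMAGE_TYPE_3D with a compatible format; the offsets and counts are arbitrary example values:

    VkImageViewSlicedCreateInfoEXT sliced = {
        .sType       = VK_STRUCTURE_TYPE_IMAGE_VIEW_SLICED_CREATE_INFO_EXT,
        .sliceOffset = 4,   /* first 3D slice visible through the view */
        .sliceCount  = 8,   /* number of slices included in the view */
    };
    VkImageViewCreateInfo view_info = {
        .sType    = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .pNext    = &sliced,
        .image    = image,
        .viewType = VK_IMAGE_VIEW_TYPE_3D,
        .format   = VK_FORMAT_R8G8B8A8_UNORM,
        .subresourceRange = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .levelCount = 1,
            .layerCount = 1,
        },
    };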
<type category="struct" name="VkImageViewUsageCreateInfoKHR" alias="VkImageViewUsageCreateInfo"/>
<type category="struct" name="VkPipelineTessellationDomainOriginStateCreateInfo" structextends="VkPipelineTessellationStateCreateInfo">
<member values="VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
@@ -3091,7 +3526,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceProtectedMemoryProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>protectedNoFault</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>protectedNoFault</name></member>
</type>
<type category="struct" name="VkDeviceQueueInfo2">
<member values="VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
@@ -3118,7 +3553,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>float</type> <name>x</name></member>
<member><type>float</type> <name>y</name></member>
</type>
- <type category="struct" name="VkSampleLocationsInfoEXT" structextends="VkImageMemoryBarrier,VkImageMemoryBarrier2KHR">
+ <type category="struct" name="VkSampleLocationsInfoEXT" structextends="VkImageMemoryBarrier,VkImageMemoryBarrier2">
<member values="VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member noautovalidity="true"><type>VkSampleCountFlagBits</type> <name>sampleLocationsPerPixel</name></member>
@@ -3154,7 +3589,7 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="bitmask"><type>VkSampleCountFlags</type> <name>sampleLocationSampleCounts</name></member>
<member limittype="max"><type>VkExtent2D</type> <name>maxSampleLocationGridSize</name></member>
<member limittype="range"><type>float</type> <name>sampleLocationCoordinateRange</name>[2]</member>
- <member limittype="noauto"><type>uint32_t</type> <name>sampleLocationSubPixelBits</name></member>
+ <member limittype="bits"><type>uint32_t</type> <name>sampleLocationSubPixelBits</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>variableSampleLocations</name></member>
</type>
<type category="struct" name="VkMultisamplePropertiesEXT" returnedonly="true">
@@ -3195,14 +3630,15 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>dstPremultiplied</name></member>
<member><type>VkBlendOverlapEXT</type> <name>blendOverlap</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceInlineUniformBlockFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <type category="struct" name="VkPhysicalDeviceInlineUniformBlockFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>inlineUniformBlock</name></member>
<member><type>VkBool32</type> <name>descriptorBindingInlineUniformBlockUpdateAfterBind</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceInlineUniformBlockPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceInlineUniformBlockFeaturesEXT" alias="VkPhysicalDeviceInlineUniformBlockFeatures"/>
+ <type category="struct" name="VkPhysicalDeviceInlineUniformBlockProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxInlineUniformBlockSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorInlineUniformBlocks</name></member>
@@ -3210,17 +3646,20 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetInlineUniformBlocks</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindInlineUniformBlocks</name></member>
</type>
- <type category="struct" name="VkWriteDescriptorSetInlineUniformBlockEXT" structextends="VkWriteDescriptorSet">
- <member values="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>dataSize</name></member>
- <member len="dataSize">const <type>void</type>* <name>pData</name></member>
+ <type category="struct" name="VkPhysicalDeviceInlineUniformBlockPropertiesEXT" alias="VkPhysicalDeviceInlineUniformBlockProperties"/>
+ <type category="struct" name="VkWriteDescriptorSetInlineUniformBlock" structextends="VkWriteDescriptorSet">
+ <member values="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>dataSize</name></member>
+ <member len="dataSize">const <type>void</type>* <name>pData</name></member>
</type>
- <type category="struct" name="VkDescriptorPoolInlineUniformBlockCreateInfoEXT" structextends="VkDescriptorPoolCreateInfo">
- <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>maxInlineUniformBlockBindings</name></member>
+ <type category="struct" name="VkWriteDescriptorSetInlineUniformBlockEXT" alias="VkWriteDescriptorSetInlineUniformBlock"/>
+ <type category="struct" name="VkDescriptorPoolInlineUniformBlockCreateInfo" structextends="VkDescriptorPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>maxInlineUniformBlockBindings</name></member>
</type>
+ <type category="struct" name="VkDescriptorPoolInlineUniformBlockCreateInfoEXT" alias="VkDescriptorPoolInlineUniformBlockCreateInfo"/>
<type category="struct" name="VkPipelineCoverageModulationStateCreateInfoNV" structextends="VkPipelineMultisampleStateCreateInfo">
<member values="VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
@@ -3244,7 +3683,7 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>size_t</type> <name>initialDataSize</name></member>
<member len="initialDataSize">const <type>void</type>* <name>pInitialData</name></member>
</type>
- <type category="struct" name="VkShaderModuleValidationCacheCreateInfoEXT" structextends="VkShaderModuleCreateInfo">
+ <type category="struct" name="VkShaderModuleValidationCacheCreateInfoEXT" structextends="VkShaderModuleCreateInfo,VkPipelineShaderStageCreateInfo">
<member values="VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkValidationCacheEXT</type> <name>validationCache</name></member>
@@ -3256,6 +3695,54 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>VkDeviceSize</type> <name>maxMemoryAllocationSize</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceMaintenance3PropertiesKHR" alias="VkPhysicalDeviceMaintenance3Properties"/>
+ <type category="struct" name="VkPhysicalDeviceMaintenance4Features" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>maintenance4</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMaintenance4FeaturesKHR" alias="VkPhysicalDeviceMaintenance4Features"/>
+ <type category="struct" name="VkPhysicalDeviceMaintenance4Properties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>maxBufferSize</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMaintenance4PropertiesKHR" alias="VkPhysicalDeviceMaintenance4Properties"/>
+ <type category="struct" name="VkPhysicalDeviceMaintenance5FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>maintenance5</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMaintenance5PropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>earlyFragmentMultisampleCoverageAfterSampleCounting</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>earlyFragmentSampleMaskTestBeforeSampleCounting</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>depthStencilSwizzleOneSupport</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>polygonModePointSize</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>nonStrictSinglePixelWideLinesUseParallelogram</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>nonStrictWideLinesUseParallelogram</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMaintenance6FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>maintenance6</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMaintenance6PropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>blockTexelViewCompatibleMultipleLayers</name></member>
+ <member><type>uint32_t</type> <name>maxCombinedImageSamplerDescriptorCount</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>fragmentShadingRateClampCombinerInputs</name></member>
+ </type>
+ <type category="struct" name="VkRenderingAreaInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>viewMask</name></member>
+ <member optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member noautovalidity="true" len="colorAttachmentCount">const <type>VkFormat</type>* <name>pColorAttachmentFormats</name></member>
+ <member noautovalidity="true"><type>VkFormat</type> <name>depthAttachmentFormat</name></member>
+ <member noautovalidity="true"><type>VkFormat</type> <name>stencilAttachmentFormat</name></member>
+ </type>
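VkRenderingAreaInfoKHR describes a dynamic-rendering attachment layout so the render-area granularity can be queried up front. A minimal C sketch, assuming a VkDevice named device and the VK_KHR_maintenance5 entry point vkGetRenderingAreaGranularityKHR:

    VkFormat color_format = VK_FORMAT_B8G8R8A8_UNORM;
    VkRenderingAreaInfoKHR area_info = {
        .sType                   = VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR,
        .viewMask                = 0,
        .colorAttachmentCount    = 1,
        .pColorAttachmentFormats = &color_format,
        .depthAttachmentFormat   = VK_FORMAT_UNDEFINED,
        .stencilAttachmentFormat = VK_FORMAT_UNDEFINED,
    };
    VkExtent2D granularity;
    vkGetRenderingAreaGranularityKHR(device, &area_info, &granularity);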
<type category="struct" name="VkDescriptorSetLayoutSupport" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -3279,8 +3766,8 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceFloatControlsProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkShaderFloatControlsIndependence</type> <name>denormBehaviorIndependence</name></member>
- <member limittype="noauto"><type>VkShaderFloatControlsIndependence</type> <name>roundingModeIndependence</name></member>
+ <member limittype="exact"><type>VkShaderFloatControlsIndependence</type> <name>denormBehaviorIndependence</name></member>
+ <member limittype="exact"><type>VkShaderFloatControlsIndependence</type> <name>roundingModeIndependence</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat16</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
<member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat32</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
<member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat64</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
@@ -3343,23 +3830,26 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>numAvailableSgprs</name></member>
<member><type>uint32_t</type> <name>computeWorkGroupSize</name>[3]</member>
</type>
- <type category="struct" name="VkDeviceQueueGlobalPriorityCreateInfoEXT" structextends="VkDeviceQueueCreateInfo">
- <member values="VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkDeviceQueueGlobalPriorityCreateInfoKHR" structextends="VkDeviceQueueCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkQueueGlobalPriorityEXT</type> <name>globalPriority</name></member>
+ <member><type>VkQueueGlobalPriorityKHR</type> <name>globalPriority</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkDeviceQueueGlobalPriorityCreateInfoEXT" alias="VkDeviceQueueGlobalPriorityCreateInfoKHR"/>
+ <type category="struct" name="VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>globalPriorityQuery</name></member>
</type>
- <type category="struct" name="VkQueueFamilyGlobalPriorityPropertiesEXT" structextends="VkQueueFamilyProperties2">
- <member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>priorityCount</name></member>
- <member><type>VkQueueGlobalPriorityEXT</type> <name>priorities</name>[<enum>VK_MAX_GLOBAL_PRIORITY_SIZE_EXT</enum>]</member>
+ <type category="struct" name="VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT" alias="VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR"/>
+ <type category="struct" name="VkQueueFamilyGlobalPriorityPropertiesKHR" structextends="VkQueueFamilyProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>priorityCount</name></member>
+ <member limittype="bitmask" len="priorityCount"><type>VkQueueGlobalPriorityKHR</type> <name>priorities</name>[<enum>VK_MAX_GLOBAL_PRIORITY_SIZE_KHR</enum>]</member>
</type>
- <type category="struct" name="VkDebugUtilsObjectNameInfoEXT">
+ <type category="struct" name="VkQueueFamilyGlobalPriorityPropertiesEXT" alias="VkQueueFamilyGlobalPriorityPropertiesKHR"/>
+ <type category="struct" name="VkDebugUtilsObjectNameInfoEXT" structextends="VkPipelineShaderStageCreateInfo">
<member values="VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkObjectType</type> <name>objectType</name></member>
@@ -3396,7 +3886,7 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>VkDebugUtilsMessengerCallbackDataFlagsEXT</type> <name>flags</name></member>
<member optional="true" len="null-terminated">const <type>char</type>* <name>pMessageIdName</name></member>
<member><type>int32_t</type> <name>messageIdNumber</name></member>
- <member len="null-terminated">const <type>char</type>* <name>pMessage</name></member>
+ <member optional="true" len="null-terminated">const <type>char</type>* <name>pMessage</name></member>
<member optional="true"><type>uint32_t</type> <name>queueLabelCount</name></member>
<member len="queueLabelCount">const <type>VkDebugUtilsLabelEXT</type>* <name>pQueueLabels</name></member>
<member optional="true"><type>uint32_t</type> <name>cmdBufLabelCount</name></member>
@@ -3424,14 +3914,14 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint64_t</type> <name>memoryObjectId</name></member>
<member><type>VkDeviceSize</type> <name>size</name></member>
<member><type>VkObjectType</type> <name>objectType</name></member>
- <member><type>uint64_t</type> <name>objectHandle</name></member>
+ <member objecttype="objectType"><type>uint64_t</type> <name>objectHandle</name></member>
<member><type>uint32_t</type> <name>heapIndex</name></member>
</type>
<type category="struct" name="VkImportMemoryHostPointerInfoEXT" structextends="VkMemoryAllocateInfo">
<member values="VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></member>
- <member optional="false"><type>void</type>* <name>pHostPointer</name></member>
+ <member><type>void</type>* <name>pHostPointer</name></member>
</type>
<type category="struct" name="VkMemoryHostPointerPropertiesEXT" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
@@ -3441,43 +3931,44 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceExternalMemoryHostPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>minImportedHostPointerAlignment</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>minImportedHostPointerAlignment</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceConservativeRasterizationPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>float</type> <name>primitiveOverestimationSize</name><comment>The size in pixels the primitive is enlarged at each edge during conservative rasterization</comment></member>
- <member limittype="max"><type>float</type> <name>maxExtraPrimitiveOverestimationSize</name><comment>The maximum additional overestimation the client can specify in the pipeline state</comment></member>
- <member limittype="noauto"><type>float</type> <name>extraPrimitiveOverestimationSizeGranularity</name><comment>The granularity of extra overestimation sizes the implementations supports between 0 and maxExtraOverestimationSize</comment></member>
+ <member limittype="exact"><type>float</type> <name>primitiveOverestimationSize</name><comment>The size in pixels the primitive is enlarged at each edge during conservative rasterization</comment></member>
+ <member limittype="max"><type>float</type> <name>maxExtraPrimitiveOverestimationSize</name><comment>The maximum additional overestimation the client can specify in the pipeline state</comment></member>
+ <member limittype="min,mul"><type>float</type> <name>extraPrimitiveOverestimationSizeGranularity</name><comment>The granularity of extra overestimation sizes the implementations supports between 0 and maxExtraOverestimationSize</comment></member>
<member limittype="bitmask"><type>VkBool32</type> <name>primitiveUnderestimation</name><comment>true if the implementation supports conservative rasterization underestimation mode</comment></member>
- <member limittype="noauto"><type>VkBool32</type> <name>conservativePointAndLineRasterization</name><comment>true if conservative rasterization also applies to points and lines</comment></member>
- <member limittype="noauto"><type>VkBool32</type> <name>degenerateTrianglesRasterized</name><comment>true if degenerate triangles (those with zero area after snap) are rasterized</comment></member>
- <member limittype="noauto"><type>VkBool32</type> <name>degenerateLinesRasterized</name><comment>true if degenerate lines (those with zero length after snap) are rasterized</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>conservativePointAndLineRasterization</name><comment>true if conservative rasterization also applies to points and lines</comment></member>
+ <member limittype="exact"><type>VkBool32</type> <name>degenerateTrianglesRasterized</name><comment>true if degenerate triangles (those with zero area after snap) are rasterized</comment></member>
+ <member limittype="exact"><type>VkBool32</type> <name>degenerateLinesRasterized</name><comment>true if degenerate lines (those with zero length after snap) are rasterized</comment></member>
<member limittype="bitmask"><type>VkBool32</type> <name>fullyCoveredFragmentShaderInputVariable</name><comment>true if the implementation supports the FullyCoveredEXT SPIR-V builtin fragment shader input variable</comment></member>
<member limittype="bitmask"><type>VkBool32</type> <name>conservativeRasterizationPostDepthCoverage</name><comment>true if the implementation supports both conservative rasterization and post depth coverage sample coverage mask</comment></member>
</type>
- <type category="struct" name="VkCalibratedTimestampInfoEXT">
- <member values="VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkCalibratedTimestampInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkTimeDomainEXT</type> <name>timeDomain</name></member>
+ <member><type>VkTimeDomainKHR</type> <name>timeDomain</name></member>
</type>
+ <type category="struct" name="VkCalibratedTimestampInfoEXT" alias="VkCalibratedTimestampInfoKHR"/>
<type category="struct" name="VkPhysicalDeviceShaderCorePropertiesAMD" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="max"><type>uint32_t</type> <name>shaderEngineCount</name><comment>number of shader engines</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>shaderArraysPerEngineCount</name><comment>number of shader arrays</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>computeUnitsPerShaderArray</name><comment>number of physical CUs per shader array</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>simdPerComputeUnit</name><comment>number of SIMDs per compute unit</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>wavefrontsPerSimd</name><comment>number of wavefront slots in each SIMD</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>wavefrontSize</name><comment>maximum number of threads per wavefront</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>sgprsPerSimd</name><comment>number of physical SGPRs per SIMD</comment></member>
- <member limittype="min"><type>uint32_t</type> <name>minSgprAllocation</name><comment>minimum number of SGPRs that can be allocated by a wave</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>maxSgprAllocation</name><comment>number of available SGPRs</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>sgprAllocationGranularity</name><comment>SGPRs are allocated in groups of this size</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>vgprsPerSimd</name><comment>number of physical VGPRs per SIMD</comment></member>
- <member limittype="min"><type>uint32_t</type> <name>minVgprAllocation</name><comment>minimum number of VGPRs that can be allocated by a wave</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>maxVgprAllocation</name><comment>number of available VGPRs</comment></member>
- <member limittype="noauto"><type>uint32_t</type> <name>vgprAllocationGranularity</name><comment>VGPRs are allocated in groups of this size</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderEngineCount</name><comment>number of shader engines</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderArraysPerEngineCount</name><comment>number of shader arrays</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>computeUnitsPerShaderArray</name><comment>number of physical CUs per shader array</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>simdPerComputeUnit</name><comment>number of SIMDs per compute unit</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>wavefrontsPerSimd</name><comment>number of wavefront slots in each SIMD</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>wavefrontSize</name><comment>maximum number of threads per wavefront</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>sgprsPerSimd</name><comment>number of physical SGPRs per SIMD</comment></member>
+ <member limittype="min"><type>uint32_t</type> <name>minSgprAllocation</name><comment>minimum number of SGPRs that can be allocated by a wave</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxSgprAllocation</name><comment>number of available SGPRs</comment></member>
+ <member limittype="min,mul"><type>uint32_t</type> <name>sgprAllocationGranularity</name><comment>SGPRs are allocated in groups of this size</comment></member>
+ <member limittype="exact"><type>uint32_t</type> <name>vgprsPerSimd</name><comment>number of physical VGPRs per SIMD</comment></member>
+ <member limittype="min"><type>uint32_t</type> <name>minVgprAllocation</name><comment>minimum number of VGPRs that can be allocated by a wave</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxVgprAllocation</name><comment>number of available VGPRs</comment></member>
+ <member limittype="min,mul"><type>uint32_t</type> <name>vgprAllocationGranularity</name><comment>VGPRs are allocated in groups of this size</comment></member>
</type>
<type category="struct" name="VkPhysicalDeviceShaderCoreProperties2AMD" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD"><type>VkStructureType</type> <name>sType</name></member>
@@ -3685,21 +4176,29 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint64_t</type> <name>value</name></member>
</type>
<type category="struct" name="VkSemaphoreSignalInfoKHR" alias="VkSemaphoreSignalInfo"/>
- <type category="struct" name="VkVertexInputBindingDivisorDescriptionEXT">
+ <type category="struct" name="VkVertexInputBindingDivisorDescriptionKHR">
<member><type>uint32_t</type> <name>binding</name></member>
<member><type>uint32_t</type> <name>divisor</name></member>
</type>
- <type category="struct" name="VkPipelineVertexInputDivisorStateCreateInfoEXT" structextends="VkPipelineVertexInputStateCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkVertexInputBindingDivisorDescriptionEXT" alias="VkVertexInputBindingDivisorDescriptionKHR"/>
+ <type category="struct" name="VkPipelineVertexInputDivisorStateCreateInfoKHR" structextends="VkPipelineVertexInputStateCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>vertexBindingDivisorCount</name></member>
- <member len="vertexBindingDivisorCount">const <type>VkVertexInputBindingDivisorDescriptionEXT</type>* <name>pVertexBindingDivisors</name></member>
+ <member len="vertexBindingDivisorCount">const <type>VkVertexInputBindingDivisorDescriptionKHR</type>* <name>pVertexBindingDivisors</name></member>
</type>
+ <type category="struct" name="VkPipelineVertexInputDivisorStateCreateInfoEXT" alias="VkPipelineVertexInputDivisorStateCreateInfoKHR"/>
<type category="struct" name="VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxVertexAttribDivisor</name><comment>max value of vertex attribute divisor</comment></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxVertexAttribDivisor</name><comment>max value of vertex attribute divisor</comment></member>
+ <member limittype="max"><type>VkBool32</type> <name>supportsNonZeroFirstInstance</name></member>
+ </type>
<type category="struct" name="VkPhysicalDevicePCIBusInfoPropertiesEXT" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -3746,7 +4245,7 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>conditionalRenderingEnable</name><comment>Whether this secondary command buffer may be executed during an active conditional rendering</comment></member>
</type>
- <type category="struct" name="VkExternalFormatANDROID" structextends="VkImageCreateInfo,VkSamplerYcbcrConversionCreateInfo">
+ <type category="struct" name="VkExternalFormatANDROID" structextends="VkImageCreateInfo,VkSamplerYcbcrConversionCreateInfo,VkAttachmentDescription2,VkGraphicsPipelineCreateInfo,VkCommandBufferInheritanceInfo">
<member values="VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>uint64_t</type> <name>externalFormat</name></member>
@@ -3812,16 +4311,17 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>shaderImageFloat32AtomicMinMax</name></member>
<member><type>VkBool32</type> <name>sparseImageFloat32AtomicMinMax</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>vertexAttributeInstanceRateDivisor</name></member>
<member><type>VkBool32</type> <name>vertexAttributeInstanceRateZeroDivisor</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT" alias="VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR"/>
<type category="struct" name="VkQueueFamilyCheckpointPropertiesNV" structextends="VkQueueFamilyProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkPipelineStageFlags</type> <name>checkpointExecutionStageMask</name></member>
+ <member limittype="bitmask"><type>VkPipelineStageFlags</type> <name>checkpointExecutionStageMask</name></member>
</type>
<type category="struct" name="VkCheckpointDataNV" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV"><type>VkStructureType</type> <name>sType</name></member>
@@ -3870,7 +4370,7 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>VkDeviceSize</type> <name>maxTransformFeedbackBufferSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxTransformFeedbackStreamDataSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxTransformFeedbackBufferDataSize</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>maxTransformFeedbackBufferDataStride</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTransformFeedbackBufferDataStride</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>transformFeedbackQueries</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>transformFeedbackStreamsLinesTriangles</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>transformFeedbackRasterizationStreamSelect</name></member>
@@ -3883,7 +4383,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>rasterizationStream</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>representativeFragmentTest</name></member>
</type>
@@ -3914,11 +4414,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>computeDerivativeGroupQuads</name></member>
<member><type>VkBool32</type> <name>computeDerivativeGroupLinear</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>fragmentShaderBarycentric</name></member>
- </type>
+ <type category="struct" name="VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV" alias="VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR"/>
<type category="struct" name="VkPhysicalDeviceShaderImageFootprintFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -3929,6 +4425,27 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>dedicatedAllocationImageAliasing</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceCopyMemoryIndirectFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>indirectCopy</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceCopyMemoryIndirectPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask" noautovalidity="true"><type>VkQueueFlags</type> <name>supportedQueues</name><comment>Bitfield of which queues are supported for indirect copy</comment></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMemoryDecompressionFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>memoryDecompression</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMemoryDecompressionPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkMemoryDecompressionMethodFlagsNV</type> <name>decompressionMethods</name></member>
+ <member limittype="max"><type>uint64_t</type> <name>maxDecompressionIndirectCount</name></member>
+ </type>
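
A hedged sketch of querying the NV indirect-copy and memory-decompression structures above through the usual features2/properties2 chain; `physical_device` is assumed to exist and the corresponding NV extensions to be advertised.

/* Hypothetical sketch: chain the NV feature/property structs and query them. */
VkPhysicalDeviceCopyMemoryIndirectFeaturesNV copy_features = {
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV,
};
VkPhysicalDeviceMemoryDecompressionFeaturesNV decomp_features = {
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV,
    .pNext = &copy_features,
};
VkPhysicalDeviceFeatures2 features2 = {
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
    .pNext = &decomp_features,
};
vkGetPhysicalDeviceFeatures2(physical_device, &features2);

VkPhysicalDeviceCopyMemoryIndirectPropertiesNV copy_props = {
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_NV,
};
VkPhysicalDeviceProperties2 props2 = {
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
    .pNext = &copy_props,
};
vkGetPhysicalDeviceProperties2(physical_device, &props2);
/* copy_props.supportedQueues reports which queue families accept the
 * indirect copy commands when copy_features.indirectCopy is VK_TRUE. */
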
<type category="struct" name="VkShadingRatePaletteNV">
<member><type>uint32_t</type> <name>shadingRatePaletteEntryCount</name></member>
<member len="shadingRatePaletteEntryCount">const <type>VkShadingRatePaletteEntryNV</type>* <name>pShadingRatePaletteEntries</name></member>
@@ -3949,7 +4466,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceShadingRateImagePropertiesNV" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkExtent2D</type> <name>shadingRateTexelSize</name></member>
+ <member limittype="exact"><type>VkExtent2D</type> <name>shadingRateTexelSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>shadingRatePaletteSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>shadingRateMaxCoarseSamples</name></member>
</type>
@@ -3996,13 +4513,59 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>uint32_t</type> <name>maxMeshOutputVertices</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxMeshOutputPrimitives</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxMeshMultiviewViewCount</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>meshOutputPerVertexGranularity</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>meshOutputPerPrimitiveGranularity</name></member>
+ <member limittype="min,mul"><type>uint32_t</type> <name>meshOutputPerVertexGranularity</name></member>
+ <member limittype="min,mul"><type>uint32_t</type> <name>meshOutputPerPrimitiveGranularity</name></member>
</type>
<type category="struct" name="VkDrawMeshTasksIndirectCommandNV">
<member><type>uint32_t</type> <name>taskCount</name></member>
<member><type>uint32_t</type> <name>firstTask</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceMeshShaderFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>taskShader</name></member>
+ <member><type>VkBool32</type> <name>meshShader</name></member>
+ <member><type>VkBool32</type> <name>multiviewMeshShader</name></member>
+ <member><type>VkBool32</type> <name>primitiveFragmentShadingRateMeshShader</name></member>
+ <member><type>VkBool32</type> <name>meshShaderQueries</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMeshShaderPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskWorkGroupTotalCount</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskWorkGroupCount</name>[3]</member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskWorkGroupInvocations</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskWorkGroupSize</name>[3]</member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskPayloadSize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskSharedMemorySize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxTaskPayloadAndSharedMemorySize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshWorkGroupTotalCount</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshWorkGroupCount</name>[3]</member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshWorkGroupInvocations</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshWorkGroupSize</name>[3]</member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshSharedMemorySize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshPayloadAndSharedMemorySize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshOutputMemorySize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshPayloadAndOutputMemorySize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshOutputComponents</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshOutputVertices</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshOutputPrimitives</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshOutputLayers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxMeshMultiviewViewCount</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>meshOutputPerVertexGranularity</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>meshOutputPerPrimitiveGranularity</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPreferredTaskWorkGroupInvocations</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPreferredMeshWorkGroupInvocations</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>prefersLocalInvocationVertexOutput</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>prefersLocalInvocationPrimitiveOutput</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>prefersCompactVertexOutput</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>prefersCompactPrimitiveOutput</name></member>
+ </type>
+ <type category="struct" name="VkDrawMeshTasksIndirectCommandEXT">
+ <member noautovalidity="true"><type>uint32_t</type> <name>groupCountX</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>groupCountY</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>groupCountZ</name></member>
+ </type>
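
A small sketch of an indirect EXT mesh dispatch using the command struct above (assumptions: `cmd` is a command buffer recording inside a render pass, `indirect_buffer` is a buffer with indirect usage whose mapping is `mapped_indirect_buffer`, and VK_EXT_mesh_shader is enabled; the group counts must respect the maxMeshWorkGroupCount/maxTaskWorkGroupTotalCount limits listed above):

/* Hypothetical sketch: write one indirect command and launch it. */
VkDrawMeshTasksIndirectCommandEXT *draw =
    (VkDrawMeshTasksIndirectCommandEXT *)mapped_indirect_buffer;
draw->groupCountX = 64;
draw->groupCountY = 1;
draw->groupCountZ = 1;

vkCmdDrawMeshTasksIndirectEXT(cmd, indirect_buffer,
                              0,  /* offset */
                              1,  /* drawCount */
                              sizeof(VkDrawMeshTasksIndirectCommandEXT) /* stride */);
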
<type category="struct" name="VkRayTracingShaderGroupCreateInfoNV">
<member values="VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
@@ -4025,7 +4588,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkRayTracingPipelineCreateInfoNV">
<member values="VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
+ <member noautovalidity="true" optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
<member><type>uint32_t</type> <name>stageCount</name></member>
<member len="stageCount">const <type>VkPipelineShaderStageCreateInfo</type>* <name>pStages</name><comment>One entry for each active shader stage</comment></member>
<member><type>uint32_t</type> <name>groupCount</name></member>
@@ -4038,7 +4601,7 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkRayTracingPipelineCreateInfoKHR">
<member values="VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
+ <member noautovalidity="true" optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name><comment>Pipeline creation flags</comment></member>
<member optional="true"><type>uint32_t</type> <name>stageCount</name></member>
<member len="stageCount">const <type>VkPipelineShaderStageCreateInfo</type>* <name>pStages</name><comment>One entry for each active shader stage</comment></member>
<member optional="true"><type>uint32_t</type> <name>groupCount</name></member>
@@ -4089,7 +4652,7 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkAccelerationStructureTypeNV</type> <name>type</name></member>
- <member optional="true"><type>VkBuildAccelerationStructureFlagsNV</type><name>flags</name></member>
+ <member optional="true"><type>VkBuildAccelerationStructureFlagsNV</type> <name>flags</name></member>
<member optional="true"><type>uint32_t</type> <name>instanceCount</name></member>
<member optional="true"><type>uint32_t</type> <name>geometryCount</name></member>
<member len="geometryCount">const <type>VkGeometryNV</type>* <name>pGeometries</name></member>
@@ -4165,22 +4728,22 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceRayTracingPipelinePropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>shaderGroupHandleSize</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderGroupHandleSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxRayRecursionDepth</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxShaderGroupStride</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>shaderGroupBaseAlignment</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>shaderGroupHandleCaptureReplaySize</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderGroupBaseAlignment</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderGroupHandleCaptureReplaySize</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxRayDispatchInvocationCount</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>shaderGroupHandleAlignment</name></member>
+ <member limittype="min,pot"><type>uint32_t</type> <name>shaderGroupHandleAlignment</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxRayHitAttributeSize</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceRayTracingPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>shaderGroupHandleSize</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderGroupHandleSize</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxRecursionDepth</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxShaderGroupStride</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>shaderGroupBaseAlignment</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>shaderGroupBaseAlignment</name></member>
<member limittype="max"><type>uint64_t</type> <name>maxGeometryCount</name></member>
<member limittype="max"><type>uint64_t</type> <name>maxInstanceCount</name></member>
<member limittype="max"><type>uint64_t</type> <name>maxTriangleCount</name></member>
@@ -4196,11 +4759,33 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>height</name></member>
<member><type>uint32_t</type> <name>depth</name></member>
</type>
+ <type category="struct" name="VkTraceRaysIndirectCommand2KHR">
+ <member><type>VkDeviceAddress</type> <name>raygenShaderRecordAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>raygenShaderRecordSize</name></member>
+ <member><type>VkDeviceAddress</type> <name>missShaderBindingTableAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>missShaderBindingTableSize</name></member>
+ <member><type>VkDeviceSize</type> <name>missShaderBindingTableStride</name></member>
+ <member><type>VkDeviceAddress</type> <name>hitShaderBindingTableAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>hitShaderBindingTableSize</name></member>
+ <member><type>VkDeviceSize</type> <name>hitShaderBindingTableStride</name></member>
+ <member><type>VkDeviceAddress</type> <name>callableShaderBindingTableAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>callableShaderBindingTableSize</name></member>
+ <member><type>VkDeviceSize</type> <name>callableShaderBindingTableStride</name></member>
+ <member><type>uint32_t</type> <name>width</name></member>
+ <member><type>uint32_t</type> <name>height</name></member>
+ <member><type>uint32_t</type> <name>depth</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>rayTracingMaintenance1</name></member>
+ <member><type>VkBool32</type> <name>rayTracingPipelineTraceRaysIndirect2</name></member>
+ </type>
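
A hedged sketch of the new indirect trace path enabled by rayTracingPipelineTraceRaysIndirect2 (assumptions: a ray tracing pipeline is bound on `cmd`, the `*_sbt_address` and `handle_size_aligned` values come from the application's shader binding table setup, and `indirect_address` is the device address of a buffer holding the filled command):

/* Hypothetical sketch: host-side fill of the launch parameters. */
VkTraceRaysIndirectCommand2KHR launch = {
    .raygenShaderRecordAddress     = rgen_sbt_address,
    .raygenShaderRecordSize        = handle_size_aligned,
    .missShaderBindingTableAddress = miss_sbt_address,
    .missShaderBindingTableSize    = handle_size_aligned,
    .missShaderBindingTableStride  = handle_size_aligned,
    .hitShaderBindingTableAddress  = hit_sbt_address,
    .hitShaderBindingTableSize     = handle_size_aligned,
    .hitShaderBindingTableStride   = handle_size_aligned,
    .width = 1920, .height = 1080, .depth = 1,
    /* callable table left zeroed/unused in this sketch */
};
/* `launch` is assumed to have been uploaded to the buffer at `indirect_address`. */
vkCmdTraceRaysIndirect2KHR(cmd, indirect_address);
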
<type category="struct" name="VkDrmFormatModifierPropertiesListEXT" returnedonly="true" structextends="VkFormatProperties2">
<member values="VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member optional="true"><type>uint32_t</type> <name>drmFormatModifierCount</name></member>
- <member optional="true,false" len="drmFormatModifierCount"><type>VkDrmFormatModifierPropertiesEXT</type>* <name>pDrmFormatModifierProperties</name></member>
+ <member optional="true" len="drmFormatModifierCount"><type>VkDrmFormatModifierPropertiesEXT</type>* <name>pDrmFormatModifierProperties</name></member>
</type>
<type category="struct" name="VkDrmFormatModifierPropertiesEXT" returnedonly="true">
<member><type>uint64_t</type> <name>drmFormatModifier</name></member>
@@ -4225,7 +4810,7 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>uint64_t</type> <name>drmFormatModifier</name></member>
- <member optional="false"><type>uint32_t</type> <name>drmFormatModifierPlaneCount</name></member>
+ <member><type>uint32_t</type> <name>drmFormatModifierPlaneCount</name></member>
<member len="drmFormatModifierPlaneCount">const <type>VkSubresourceLayout</type>* <name>pPlaneLayouts</name></member>
</type>
<type category="struct" name="VkImageDrmFormatModifierPropertiesEXT" returnedonly="true">
@@ -4256,26 +4841,42 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>fragmentDensityMapDeferred</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>fragmentDensityMapOffset</name></member>
+ </type>
<type category="struct" name="VkPhysicalDeviceFragmentDensityMapPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="min"><type>VkExtent2D</type> <name>minFragmentDensityTexelSize</name></member>
<member limittype="max"><type>VkExtent2D</type> <name>maxFragmentDensityTexelSize</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>fragmentDensityInvocations</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>fragmentDensityInvocations</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceFragmentDensityMap2PropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>subsampledLoads</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>subsampledCoarseReconstructionEarlyAccess</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>subsampledLoads</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>subsampledCoarseReconstructionEarlyAccess</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxSubsampledArrayLayers</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetSubsampledSamplers</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="min,mul"><type>VkExtent2D</type> <name>fragmentDensityOffsetGranularity</name></member>
+ </type>
<type category="struct" name="VkRenderPassFragmentDensityMapCreateInfoEXT" structextends="VkRenderPassCreateInfo,VkRenderPassCreateInfo2">
<member values="VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkAttachmentReference</type> <name>fragmentDensityMapAttachment</name></member>
</type>
+ <type category="struct" name="VkSubpassFragmentDensityMapOffsetEndInfoQCOM" structextends="VkSubpassEndInfo">
+ <member values="VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>fragmentDensityOffsetCount</name></member>
+ <member len="fragmentDensityOffsetCount">const <type>VkOffset2D</type>* <name>pFragmentDensityOffsets</name></member>
+ </type>
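
A tentative sketch of how the QCOM offset struct chains into the end of a subpass (assumptions: `cmd` is inside a render pass begun with vkCmdBeginRenderPass2, the fragment density map offset feature is enabled, and the offsets are multiples of the fragmentDensityOffsetGranularity limit above):

/* Hypothetical sketch: end the subpass with one fragment density offset. */
VkOffset2D offsets[1] = { { .x = 64, .y = 32 } };
VkSubpassFragmentDensityMapOffsetEndInfoQCOM offset_info = {
    .sType = VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM,
    .fragmentDensityOffsetCount = 1,
    .pFragmentDensityOffsets = offsets,
};
VkSubpassEndInfo end_info = {
    .sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
    .pNext = &offset_info,
};
vkCmdEndRenderPass2(cmd, &end_info);
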
<type category="struct" name="VkPhysicalDeviceScalarBlockLayoutFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -4320,6 +4921,11 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>float</type> <name>priority</name></member>
</type>
+ <type category="struct" name="VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>pageableDeviceLocalMemory</name></member>
+ </type>
<type category="struct" name="VkPhysicalDeviceBufferDeviceAddressFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -4397,11 +5003,12 @@ typedef void <name>CAMetalLayer</name>;
<member len="attachmentCount">const <type>VkImageView</type>* <name>pAttachments</name></member>
</type>
<type category="struct" name="VkRenderPassAttachmentBeginInfoKHR" alias="VkRenderPassAttachmentBeginInfo"/>
- <type category="struct" name="VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <type category="struct" name="VkPhysicalDeviceTextureCompressionASTCHDRFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>textureCompressionASTC_HDR</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT" alias="VkPhysicalDeviceTextureCompressionASTCHDRFeatures"/>
<type category="struct" name="VkPhysicalDeviceCooperativeMatrixFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -4413,7 +5020,7 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="bitmask"><type>VkShaderStageFlags</type> <name>cooperativeMatrixSupportedStages</name></member>
</type>
- <type category="struct" name="VkCooperativeMatrixPropertiesNV">
+ <type category="struct" name="VkCooperativeMatrixPropertiesNV" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>MSize</name></member>
@@ -4448,17 +5055,19 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>GgpFrameToken</type> <name>frameToken</name></member>
</type>
- <type category="struct" name="VkPipelineCreationFeedbackEXT" returnedonly="true">
- <member><type>VkPipelineCreationFeedbackFlagsEXT</type> <name>flags</name></member>
+ <type category="struct" name="VkPipelineCreationFeedback" returnedonly="true">
+ <member><type>VkPipelineCreationFeedbackFlags</type> <name>flags</name></member>
<member><type>uint64_t</type> <name>duration</name></member>
</type>
- <type category="struct" name="VkPipelineCreationFeedbackCreateInfoEXT" structextends="VkGraphicsPipelineCreateInfo,VkComputePipelineCreateInfo,VkRayTracingPipelineCreateInfoNV,VkRayTracingPipelineCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkPipelineCreationFeedbackEXT</type>* <name>pPipelineCreationFeedback</name><comment>Output pipeline creation feedback.</comment></member>
- <member><type>uint32_t</type> <name>pipelineStageCreationFeedbackCount</name></member>
- <member len="pipelineStageCreationFeedbackCount"><type>VkPipelineCreationFeedbackEXT</type>* <name>pPipelineStageCreationFeedbacks</name><comment>One entry for each shader stage specified in the parent Vk*PipelineCreateInfo struct</comment></member>
+ <type category="struct" name="VkPipelineCreationFeedbackEXT" alias="VkPipelineCreationFeedback"/>
+ <type category="struct" name="VkPipelineCreationFeedbackCreateInfo" structextends="VkGraphicsPipelineCreateInfo,VkComputePipelineCreateInfo,VkRayTracingPipelineCreateInfoNV,VkRayTracingPipelineCreateInfoKHR,VkExecutionGraphPipelineCreateInfoAMDX">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkPipelineCreationFeedback</type>* <name>pPipelineCreationFeedback</name><comment>Output pipeline creation feedback.</comment></member>
+ <member optional="true"><type>uint32_t</type> <name>pipelineStageCreationFeedbackCount</name></member>
+ <member len="pipelineStageCreationFeedbackCount"><type>VkPipelineCreationFeedback</type>* <name>pPipelineStageCreationFeedbacks</name><comment>One entry for each shader stage specified in the parent Vk*PipelineCreateInfo struct</comment></member>
</type>
+ <type category="struct" name="VkPipelineCreationFeedbackCreateInfoEXT" alias="VkPipelineCreationFeedbackCreateInfo"/>
<type category="struct" name="VkSurfaceFullScreenExclusiveInfoEXT" structextends="VkPhysicalDeviceSurfaceInfo2KHR,VkSwapchainCreateInfoKHR">
<member values="VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -4474,6 +5083,21 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>fullScreenExclusiveSupported</name></member>
</type>
+ <type category="struct" name="VkPhysicalDevicePresentBarrierFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_BARRIER_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>presentBarrier</name></member>
+ </type>
+ <type category="struct" name="VkSurfaceCapabilitiesPresentBarrierNV" structextends="VkSurfaceCapabilities2KHR">
+ <member values="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_PRESENT_BARRIER_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>presentBarrierSupported</name></member>
+ </type>
+ <type category="struct" name="VkSwapchainPresentBarrierCreateInfoNV" structextends="VkSwapchainCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_BARRIER_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>presentBarrierEnable</name></member>
+ </type>
<type category="struct" name="VkPhysicalDevicePerformanceQueryFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -4497,9 +5121,9 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkPerformanceCounterDescriptionFlagsKHR</type> <name>flags</name></member>
- <member><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>char</type> <name>category</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>category</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
</type>
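
A hedged sketch of enumerating the counters these description structs describe (assumptions: `physical_device` exists, queue family 0 supports performance queries, and <stdio.h>/<stdlib.h> are included):

/* Hypothetical sketch: list performance counters of queue family 0. */
uint32_t count = 0;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
    physical_device, 0, &count, NULL, NULL);

VkPerformanceCounterKHR *counters = calloc(count, sizeof(*counters));
VkPerformanceCounterDescriptionKHR *descs = calloc(count, sizeof(*descs));
for (uint32_t i = 0; i < count; i++) {
    counters[i].sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
    descs[i].sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
    physical_device, 0, &count, counters, descs);

for (uint32_t i = 0; i < count; i++)
    printf("%s: %s\n", descs[i].category, descs[i].name);
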
<type category="struct" name="VkQueryPoolPerformanceCreateInfoKHR" structextends="VkQueryPoolCreateInfo">
<member values="VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
@@ -4522,18 +5146,23 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>VkAcquireProfilingLockFlagsKHR</type> <name>flags</name><comment>Acquire profiling lock flags</comment></member>
<member><type>uint64_t</type> <name>timeout</name></member>
</type>
- <type category="struct" name="VkPerformanceQuerySubmitInfoKHR" structextends="VkSubmitInfo,VkSubmitInfo2KHR">
+ <type category="struct" name="VkPerformanceQuerySubmitInfoKHR" structextends="VkSubmitInfo,VkSubmitInfo2">
<member values="VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>counterPassIndex</name><comment>Index for which counter pass to submit</comment></member>
</type>
+ <type category="struct" name="VkPerformanceQueryReservationInfoKHR" allowduplicate="true" structextends="VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_RESERVATION_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>maxPerformanceQueriesPerPool</name><comment>Maximum number of VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR queries in a query pool</comment></member>
+ </type>
<type category="struct" name="VkHeadlessSurfaceCreateInfoEXT">
<member values="VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkHeadlessSurfaceCreateFlagsEXT</type> <name>flags</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceCoverageReductionModeFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>coverageReductionMode</name></member>
</type>
@@ -4563,7 +5192,7 @@ typedef void <name>CAMetalLayer</name>;
<member selection="VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL"><type>VkBool32</type> <name>valueBool</name></member>
<member selection="VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL" len="null-terminated">const <type>char</type>* <name>valueString</name></member>
</type>
- <type category="struct" name="VkPerformanceValueINTEL">
+ <type category="struct" name="VkPerformanceValueINTEL" returnedonly="true">
<member><type>VkPerformanceValueTypeINTEL</type> <name>type</name></member>
<member selector="type" noautovalidity="true"><type>VkPerformanceValueDataINTEL</type> <name>data</name></member>
</type>
@@ -4606,11 +5235,12 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>shaderSubgroupClock</name></member>
<member><type>VkBool32</type> <name>shaderDeviceClock</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceIndexTypeUint8FeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceIndexTypeUint8FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>indexTypeUint8</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceIndexTypeUint8FeaturesEXT" alias="VkPhysicalDeviceIndexTypeUint8FeaturesKHR"/>
<type category="struct" name="VkPhysicalDeviceShaderSMBuiltinsPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -4618,7 +5248,7 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="max"><type>uint32_t</type> <name>shaderWarpsPerSM</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceShaderSMBuiltinsFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>shaderSMBuiltins</name></member>
</type>
@@ -4630,13 +5260,13 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>fragmentShaderShadingRateInterlock</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>separateDepthStencilLayouts</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR" alias="VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures"/>
<type category="struct" name="VkAttachmentReferenceStencilLayout" structextends="VkAttachmentReference2">
- <member values="VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkImageLayout</type> <name>stencilLayout</name></member>
</type>
@@ -4648,7 +5278,7 @@ typedef void <name>CAMetalLayer</name>;
</type>
<type category="struct" name="VkAttachmentReferenceStencilLayoutKHR" alias="VkAttachmentReferenceStencilLayout"/>
<type category="struct" name="VkAttachmentDescriptionStencilLayout" structextends="VkAttachmentDescription2">
- <member values="VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkImageLayout</type> <name>stencilInitialLayout</name></member>
<member><type>VkImageLayout</type> <name>stencilFinalLayout</name></member>
@@ -4664,13 +5294,14 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkPipeline</type> <name>pipeline</name></member>
</type>
+ <type category="struct" name="VkPipelineInfoEXT" alias="VkPipelineInfoKHR"/>
<type category="struct" name="VkPipelineExecutablePropertiesKHR" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkShaderStageFlags</type> <name>stages</name></member>
- <member><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>uint32_t</type> <name>subgroupSize</name></member>
+ <member><type>VkShaderStageFlags</type> <name>stages</name></member>
+ <member len="null-terminated"><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member><type>uint32_t</type> <name>subgroupSize</name></member>
</type>
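
A hedged sketch of reading these returned-only properties back for a compiled pipeline (assumptions: `device` and `pipeline` exist, the pipelineExecutableInfo feature was enabled, and up to 8 executables are of interest):

/* Hypothetical sketch: query the executables behind a pipeline. */
VkPipelineInfoKHR pipeline_info = {
    .sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR,
    .pipeline = pipeline,
};
uint32_t exe_count = 0;
vkGetPipelineExecutablePropertiesKHR(device, &pipeline_info, &exe_count, NULL);
if (exe_count > 8)
    exe_count = 8;

VkPipelineExecutablePropertiesKHR props[8] = {{0}};
for (uint32_t i = 0; i < exe_count; i++)
    props[i].sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR;
vkGetPipelineExecutablePropertiesKHR(device, &pipeline_info, &exe_count, props);
/* props[i].name, props[i].description and props[i].subgroupSize describe each
 * hardware executable the driver produced for this pipeline. */
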
<type category="struct" name="VkPipelineExecutableInfoKHR">
<member values="VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
@@ -4686,59 +5317,65 @@ typedef void <name>CAMetalLayer</name>;
</type>
<type category="struct" name="VkPipelineExecutableStatisticKHR" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member len="null-terminated"><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
<member><type>VkPipelineExecutableStatisticFormatKHR</type> <name>format</name></member>
<member selector="format" noautovalidity="true"><type>VkPipelineExecutableStatisticValueKHR</type> <name>value</name></member>
</type>
<type category="struct" name="VkPipelineExecutableInternalRepresentationKHR" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>VkBool32</type> <name>isText</name></member>
- <member><type>size_t</type> <name>dataSize</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member len="null-terminated"><type>char</type> <name>name</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member><type>VkBool32</type> <name>isText</name></member>
+ <member><type>size_t</type> <name>dataSize</name></member>
<member optional="true" len="dataSize"><type>void</type>* <name>pData</name></member>
</type>
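(Aside, not part of the registry diff: the VkPipelineExecutableProperties/Statistic/InternalRepresentation structs above are consumed through the usual two-call enumeration pattern. A minimal C sketch of such a query follows; the device and pipeline handles are assumed to come from the caller, and the VK_KHR_pipeline_executable_properties entry point is assumed to be resolved, e.g. via vkGetDeviceProcAddr.)

    #include <stdio.h>
    #include <stdlib.h>
    #include <vulkan/vulkan.h>

    static void print_pipeline_executables(VkDevice device, VkPipeline pipeline)
    {
       const VkPipelineInfoKHR info = {
          .sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR,
          .pipeline = pipeline,
       };

       /* First call: query only the executable count. */
       uint32_t count = 0;
       if (vkGetPipelineExecutablePropertiesKHR(device, &info, &count, NULL) != VK_SUCCESS || count == 0)
          return;

       /* Second call: fill the array; each element needs its sType set. */
       VkPipelineExecutablePropertiesKHR *props = calloc(count, sizeof(*props));
       for (uint32_t i = 0; i < count; i++)
          props[i].sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR;

       if (vkGetPipelineExecutablePropertiesKHR(device, &info, &count, props) == VK_SUCCESS) {
          for (uint32_t i = 0; i < count; i++)
             printf("%s: %s (subgroup size %u)\n",
                    props[i].name, props[i].description, props[i].subgroupSize);
       }
       free(props);
    }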
- <type category="struct" name="VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>shaderDemoteToHelperInvocation</name></member>
+ <type category="struct" name="VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderDemoteToHelperInvocation</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT" alias="VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures"/>
<type category="struct" name="VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>texelBufferAlignment</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceTexelBufferAlignmentProperties" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>storageTexelBufferOffsetAlignmentBytes</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>storageTexelBufferOffsetSingleTexelAlignment</name></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>uniformTexelBufferOffsetAlignmentBytes</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>uniformTexelBufferOffsetSingleTexelAlignment</name></member>
- </type>
- <type category="struct" name="VkPhysicalDeviceSubgroupSizeControlFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>storageTexelBufferOffsetAlignmentBytes</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>storageTexelBufferOffsetSingleTexelAlignment</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>uniformTexelBufferOffsetAlignmentBytes</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>uniformTexelBufferOffsetSingleTexelAlignment</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT" alias="VkPhysicalDeviceTexelBufferAlignmentProperties"/>
+ <type category="struct" name="VkPhysicalDeviceSubgroupSizeControlFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>subgroupSizeControl</name></member>
+ <member><type>VkBool32</type> <name>computeFullSubgroups</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceSubgroupSizeControlFeaturesEXT" alias="VkPhysicalDeviceSubgroupSizeControlFeatures"/>
+ <type category="struct" name="VkPhysicalDeviceSubgroupSizeControlProperties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>subgroupSizeControl</name></member>
- <member><type>VkBool32</type> <name>computeFullSubgroups</name></member>
- </type>
- <type category="struct" name="VkPhysicalDeviceSubgroupSizeControlPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="min" noautovalidity="true"><type>uint32_t</type> <name>minSubgroupSize</name><comment>The minimum subgroup size supported by this device</comment></member>
- <member limittype="max" noautovalidity="true"><type>uint32_t</type> <name>maxSubgroupSize</name><comment>The maximum subgroup size supported by this device</comment></member>
+ <member limittype="min,pot" noautovalidity="true"><type>uint32_t</type> <name>minSubgroupSize</name><comment>The minimum subgroup size supported by this device</comment></member>
+ <member limittype="max,pot" noautovalidity="true"><type>uint32_t</type> <name>maxSubgroupSize</name><comment>The maximum subgroup size supported by this device</comment></member>
<member limittype="max" noautovalidity="true"><type>uint32_t</type> <name>maxComputeWorkgroupSubgroups</name><comment>The maximum number of subgroups supported in a workgroup</comment></member>
- <member limittype="bitmask"><type>VkShaderStageFlags</type> <name>requiredSubgroupSizeStages</name><comment>The shader stages that support specifying a subgroup size</comment></member>
+ <member limittype="bitmask"><type>VkShaderStageFlags</type> <name>requiredSubgroupSizeStages</name><comment>The shader stages that support specifying a subgroup size</comment></member>
</type>
- <type category="struct" name="VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT" returnedonly="true" structextends="VkPipelineShaderStageCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <type category="struct" name="VkPhysicalDeviceSubgroupSizeControlPropertiesEXT" alias="VkPhysicalDeviceSubgroupSizeControlProperties"/>
+ <type category="struct" name="VkPipelineShaderStageRequiredSubgroupSizeCreateInfo" returnedonly="true" structextends="VkPipelineShaderStageCreateInfo,VkShaderCreateInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>requiredSubgroupSize</name></member>
</type>
- <type category="struct" name="VkSubpassShadingPipelineCreateInfoHUAWEI" returnedonly="true" structextends="VkComputePipelineCreateInfo">
+ <type category="struct" name="VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT" alias="VkPipelineShaderStageRequiredSubgroupSizeCreateInfo"/>
+ <type category="struct" name="VkShaderRequiredSubgroupSizeCreateInfoEXT" alias="VkPipelineShaderStageRequiredSubgroupSizeCreateInfo"/>
+ <type category="struct" name="VkSubpassShadingPipelineCreateInfoHUAWEI" structextends="VkComputePipelineCreateInfo">
<member values="VK_STRUCTURE_TYPE_SUBPASS_SHADING_PIPELINE_CREATE_INFO_HUAWEI"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkRenderPass</type> <name>renderPass</name></member>
@@ -4747,7 +5384,15 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceSubpassShadingPropertiesHUAWEI" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_PROPERTIES_HUAWEI"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>maxSubpassShadingWorkgroupSizeAspectRatio</name></member>
+ <member limittype="max,pot"><type>uint32_t</type> <name>maxSubpassShadingWorkgroupSizeAspectRatio</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceClusterCullingShaderPropertiesHUAWEI" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max,pot"><type>uint32_t</type> <name>maxWorkGroupCount</name>[3]</member>
+ <member limittype="max,pot"><type>uint32_t</type> <name>maxWorkGroupSize</name>[3]</member>
+ <member limittype="max"><type>uint32_t</type> <name>maxOutputClusterCount</name></member>
+ <member limittype="exact"><type>VkDeviceSize</type> <name>indirectBufferOffsetAlignment</name></member>
</type>
<type category="struct" name="VkMemoryOpaqueCaptureAddressAllocateInfo" structextends="VkMemoryAllocateInfo">
<member values="VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
@@ -4761,8 +5406,8 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkDeviceMemory</type> <name>memory</name></member>
</type>
<type category="struct" name="VkDeviceMemoryOpaqueCaptureAddressInfoKHR" alias="VkDeviceMemoryOpaqueCaptureAddressInfo"/>
- <type category="struct" name="VkPhysicalDeviceLineRasterizationFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceLineRasterizationFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>rectangularLines</name></member>
<member><type>VkBool32</type> <name>bresenhamLines</name></member>
@@ -4771,26 +5416,30 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>stippledBresenhamLines</name></member>
<member><type>VkBool32</type> <name>stippledSmoothLines</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceLineRasterizationPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceLineRasterizationFeaturesEXT" alias="VkPhysicalDeviceLineRasterizationFeaturesKHR"/>
+ <type category="struct" name="VkPhysicalDeviceLineRasterizationPropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>lineSubPixelPrecisionBits</name></member>
+ <member limittype="bits"><type>uint32_t</type> <name>lineSubPixelPrecisionBits</name></member>
</type>
- <type category="struct" name="VkPipelineRasterizationLineStateCreateInfoEXT" structextends="VkPipelineRasterizationStateCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkLineRasterizationModeEXT</type> <name>lineRasterizationMode</name></member>
+ <type category="struct" name="VkPhysicalDeviceLineRasterizationPropertiesEXT" alias="VkPhysicalDeviceLineRasterizationPropertiesKHR"/>
+ <type category="struct" name="VkPipelineRasterizationLineStateCreateInfoKHR" structextends="VkPipelineRasterizationStateCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkLineRasterizationModeKHR</type> <name>lineRasterizationMode</name></member>
<member><type>VkBool32</type> <name>stippledLineEnable</name></member>
<member><type>uint32_t</type> <name>lineStippleFactor</name></member>
<member><type>uint16_t</type> <name>lineStipplePattern</name></member>
</type>
- <type category="struct" name="VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPipelineRasterizationLineStateCreateInfoEXT" alias="VkPipelineRasterizationLineStateCreateInfoKHR"/>
+ <type category="struct" name="VkPhysicalDevicePipelineCreationCacheControlFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>pipelineCreationCacheControl</name></member>
+ <member><type>VkBool32</type> <name>pipelineCreationCacheControl</name></member>
</type>
+ <type category="struct" name="VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT" alias="VkPhysicalDevicePipelineCreationCacheControlFeatures"/>
<type category="struct" name="VkPhysicalDeviceVulkan11Features" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>storageBuffer16BitAccess</name><comment>16-bit integer/floating-point variables supported in BufferBlock</comment></member>
<member><type>VkBool32</type> <name>uniformAndStorageBuffer16BitAccess</name><comment>16-bit integer/floating-point variables supported in BufferBlock and Block</comment></member>
@@ -4806,26 +5455,26 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>shaderDrawParameters</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceVulkan11Properties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint8_t</type> <name>deviceUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
- <member limittype="noauto"><type>uint8_t</type> <name>driverUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
- <member limittype="noauto"><type>uint8_t</type> <name>deviceLUID</name>[<enum>VK_LUID_SIZE</enum>]</member>
- <member limittype="noauto"><type>uint32_t</type> <name>deviceNodeMask</name></member>
- <member limittype="noauto"><type>VkBool32</type> <name>deviceLUIDValid</name></member>
- <member limittype="noauto" noautovalidity="true"><type>uint32_t</type> <name>subgroupSize</name><comment>The size of a subgroup for this queue.</comment></member>
+ <member limittype="exact"><type>uint8_t</type> <name>deviceUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member limittype="exact"><type>uint8_t</type> <name>driverUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member limittype="exact"><type>uint8_t</type> <name>deviceLUID</name>[<enum>VK_LUID_SIZE</enum>]</member>
+ <member limittype="exact"><type>uint32_t</type> <name>deviceNodeMask</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>deviceLUIDValid</name></member>
+ <member limittype="max,pot" noautovalidity="true"><type>uint32_t</type> <name>subgroupSize</name><comment>The size of a subgroup for this queue.</comment></member>
<member limittype="bitmask" noautovalidity="true"><type>VkShaderStageFlags</type> <name>subgroupSupportedStages</name><comment>Bitfield of what shader stages support subgroup operations</comment></member>
<member limittype="bitmask" noautovalidity="true"><type>VkSubgroupFeatureFlags</type> <name>subgroupSupportedOperations</name><comment>Bitfield of what subgroup operations are supported.</comment></member>
<member limittype="bitmask" noautovalidity="true"><type>VkBool32</type> <name>subgroupQuadOperationsInAllStages</name><comment>Flag to specify whether quad operations are available in all stages.</comment></member>
- <member limittype="noauto"><type>VkPointClippingBehavior</type> <name>pointClippingBehavior</name></member>
+ <member limittype="exact"><type>VkPointClippingBehavior</type> <name>pointClippingBehavior</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxMultiviewViewCount</name><comment>max number of views in a subpass</comment></member>
<member limittype="max"><type>uint32_t</type> <name>maxMultiviewInstanceIndex</name><comment>max instance index for a draw in a multiview subpass</comment></member>
- <member limittype="noauto"><type>VkBool32</type> <name>protectedNoFault</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>protectedNoFault</name></member>
<member limittype="max"><type>uint32_t</type> <name>maxPerSetDescriptors</name></member>
<member limittype="max"><type>VkDeviceSize</type> <name>maxMemoryAllocationSize</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceVulkan12Features" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>samplerMirrorClampToEdge</name></member>
<member><type>VkBool32</type> <name>drawIndirectCount</name></member>
@@ -4876,62 +5525,130 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>subgroupBroadcastDynamicId</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceVulkan12Properties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="noauto"><type>VkDriverId</type> <name>driverID</name></member>
- <member limittype="noauto"><type>char</type> <name>driverName</name>[<enum>VK_MAX_DRIVER_NAME_SIZE</enum>]</member>
- <member limittype="noauto"><type>char</type> <name>driverInfo</name>[<enum>VK_MAX_DRIVER_INFO_SIZE</enum>]</member>
+ <member limittype="noauto" len="null-terminated"><type>char</type> <name>driverName</name>[<enum>VK_MAX_DRIVER_NAME_SIZE</enum>]</member>
+ <member limittype="noauto" len="null-terminated"><type>char</type> <name>driverInfo</name>[<enum>VK_MAX_DRIVER_INFO_SIZE</enum>]</member>
<member limittype="noauto"><type>VkConformanceVersion</type> <name>conformanceVersion</name></member>
- <member limittype="noauto"><type>VkShaderFloatControlsIndependence</type><name>denormBehaviorIndependence</name></member>
- <member limittype="noauto"><type>VkShaderFloatControlsIndependence</type><name>roundingModeIndependence</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat16</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat32</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat64</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormPreserveFloat16</name><comment>An implementation can preserve denormals</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormPreserveFloat32</name><comment>An implementation can preserve denormals</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormPreserveFloat64</name><comment>An implementation can preserve denormals</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormFlushToZeroFloat16</name><comment>An implementation can flush to zero denormals</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormFlushToZeroFloat32</name><comment>An implementation can flush to zero denormals</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormFlushToZeroFloat64</name><comment>An implementation can flush to zero denormals</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTEFloat16</name><comment>An implementation can support RTE</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTEFloat32</name><comment>An implementation can support RTE</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTEFloat64</name><comment>An implementation can support RTE</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTZFloat16</name><comment>An implementation can support RTZ</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTZFloat32</name><comment>An implementation can support RTZ</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTZFloat64</name><comment>An implementation can support RTZ</comment></member>
- <member limittype="max"><type>uint32_t</type> <name>maxUpdateAfterBindDescriptorsInAllPools</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderUniformBufferArrayNonUniformIndexingNative</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderSampledImageArrayNonUniformIndexingNative</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderStorageBufferArrayNonUniformIndexingNative</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderStorageImageArrayNonUniformIndexingNative</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>shaderInputAttachmentArrayNonUniformIndexingNative</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>robustBufferAccessUpdateAfterBind</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>quadDivergentImplicitLod</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindSamplers</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindUniformBuffers</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindStorageBuffers</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindSampledImages</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindStorageImages</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindInputAttachments</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxPerStageUpdateAfterBindResources</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindSamplers</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindUniformBuffers</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindUniformBuffersDynamic</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindStorageBuffers</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindStorageBuffersDynamic</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindSampledImages</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindStorageImages</name></member>
- <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindInputAttachments</name></member>
- <member limittype="bitmask"><type>VkResolveModeFlags</type> <name>supportedDepthResolveModes</name><comment>supported depth resolve modes</comment></member>
- <member limittype="bitmask"><type>VkResolveModeFlags</type> <name>supportedStencilResolveModes</name><comment>supported stencil resolve modes</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>independentResolveNone</name><comment>depth and stencil resolve modes can be set independently if one of them is none</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>independentResolve</name><comment>depth and stencil resolve modes can be set independently</comment></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>filterMinmaxSingleComponentFormats</name></member>
- <member limittype="bitmask"><type>VkBool32</type> <name>filterMinmaxImageComponentMapping</name></member>
- <member limittype="max"><type>uint64_t</type> <name>maxTimelineSemaphoreValueDifference</name></member>
+ <member limittype="exact"><type>VkShaderFloatControlsIndependence</type> <name>denormBehaviorIndependence</name></member>
+ <member limittype="exact"><type>VkShaderFloatControlsIndependence</type> <name>roundingModeIndependence</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat16</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat32</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderSignedZeroInfNanPreserveFloat64</name><comment>An implementation can preserve signed zero, nan, inf</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormPreserveFloat16</name><comment>An implementation can preserve denormals</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormPreserveFloat32</name><comment>An implementation can preserve denormals</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormPreserveFloat64</name><comment>An implementation can preserve denormals</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormFlushToZeroFloat16</name><comment>An implementation can flush to zero denormals</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormFlushToZeroFloat32</name><comment>An implementation can flush to zero denormals</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderDenormFlushToZeroFloat64</name><comment>An implementation can flush to zero denormals</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTEFloat16</name><comment>An implementation can support RTE</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTEFloat32</name><comment>An implementation can support RTE</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTEFloat64</name><comment>An implementation can support RTE</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTZFloat16</name><comment>An implementation can support RTZ</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTZFloat32</name><comment>An implementation can support RTZ</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderRoundingModeRTZFloat64</name><comment>An implementation can support RTZ</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxUpdateAfterBindDescriptorsInAllPools</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderUniformBufferArrayNonUniformIndexingNative</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderSampledImageArrayNonUniformIndexingNative</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderStorageBufferArrayNonUniformIndexingNative</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderStorageImageArrayNonUniformIndexingNative</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>shaderInputAttachmentArrayNonUniformIndexingNative</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>robustBufferAccessUpdateAfterBind</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>quadDivergentImplicitLod</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindSamplers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindUniformBuffers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindStorageBuffers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindSampledImages</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindStorageImages</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindInputAttachments</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageUpdateAfterBindResources</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindSamplers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindUniformBuffers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindUniformBuffersDynamic</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindStorageBuffers</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindStorageBuffersDynamic</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindSampledImages</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindStorageImages</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindInputAttachments</name></member>
+ <member limittype="bitmask"><type>VkResolveModeFlags</type> <name>supportedDepthResolveModes</name><comment>supported depth resolve modes</comment></member>
+ <member limittype="bitmask"><type>VkResolveModeFlags</type> <name>supportedStencilResolveModes</name><comment>supported stencil resolve modes</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>independentResolveNone</name><comment>depth and stencil resolve modes can be set independently if one of them is none</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>independentResolve</name><comment>depth and stencil resolve modes can be set independently</comment></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>filterMinmaxSingleComponentFormats</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>filterMinmaxImageComponentMapping</name></member>
+ <member limittype="max"><type>uint64_t</type> <name>maxTimelineSemaphoreValueDifference</name></member>
<member limittype="bitmask" optional="true"><type>VkSampleCountFlags</type> <name>framebufferIntegerColorSampleCounts</name></member>
</type>
- <type category="struct" name="VkPipelineCompilerControlCreateInfoAMD" structextends="VkGraphicsPipelineCreateInfo,VkComputePipelineCreateInfo">
+ <type category="struct" name="VkPhysicalDeviceVulkan13Features" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>robustImageAccess</name></member>
+ <member><type>VkBool32</type> <name>inlineUniformBlock</name></member>
+ <member><type>VkBool32</type> <name>descriptorBindingInlineUniformBlockUpdateAfterBind</name></member>
+ <member><type>VkBool32</type> <name>pipelineCreationCacheControl</name></member>
+ <member><type>VkBool32</type> <name>privateData</name></member>
+ <member><type>VkBool32</type> <name>shaderDemoteToHelperInvocation</name></member>
+ <member><type>VkBool32</type> <name>shaderTerminateInvocation</name></member>
+ <member><type>VkBool32</type> <name>subgroupSizeControl</name></member>
+ <member><type>VkBool32</type> <name>computeFullSubgroups</name></member>
+ <member><type>VkBool32</type> <name>synchronization2</name></member>
+ <member><type>VkBool32</type> <name>textureCompressionASTC_HDR</name></member>
+ <member><type>VkBool32</type> <name>shaderZeroInitializeWorkgroupMemory</name></member>
+ <member><type>VkBool32</type> <name>dynamicRendering</name></member>
+ <member><type>VkBool32</type> <name>shaderIntegerDotProduct</name></member>
+ <member><type>VkBool32</type> <name>maintenance4</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceVulkan13Properties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="min,pot" noautovalidity="true"><type>uint32_t</type> <name>minSubgroupSize</name><comment>The minimum subgroup size supported by this device</comment></member>
+ <member limittype="max,pot" noautovalidity="true"><type>uint32_t</type> <name>maxSubgroupSize</name><comment>The maximum subgroup size supported by this device</comment></member>
+ <member limittype="max" noautovalidity="true"><type>uint32_t</type> <name>maxComputeWorkgroupSubgroups</name><comment>The maximum number of subgroups supported in a workgroup</comment></member>
+ <member limittype="bitmask"><type>VkShaderStageFlags</type> <name>requiredSubgroupSizeStages</name><comment>The shader stages that support specifying a subgroup size</comment></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxInlineUniformBlockSize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorInlineUniformBlocks</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetInlineUniformBlocks</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetUpdateAfterBindInlineUniformBlocks</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxInlineUniformTotalSize</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct8BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct8BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct8BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct4x8BitPackedUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct4x8BitPackedSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct4x8BitPackedMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct16BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct16BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct16BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct32BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct32BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct32BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct64BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct64BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct64BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating8BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating8BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating16BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating16BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating32BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating32BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating64BitUnsignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating64BitSignedAccelerated</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>storageTexelBufferOffsetAlignmentBytes</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>storageTexelBufferOffsetSingleTexelAlignment</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>uniformTexelBufferOffsetAlignmentBytes</name></member>
+ <member limittype="exact"><type>VkBool32</type> <name>uniformTexelBufferOffsetSingleTexelAlignment</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>maxBufferSize</name></member>
+ </type>
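(Aside: the promoted Vulkan 1.3 feature and property blocks above are read back through the standard pNext chaining on vkGetPhysicalDeviceFeatures2/Properties2. A minimal sketch; the physical-device handle and the 1 GiB buffer-size threshold are assumptions for illustration.)

    #include <stdbool.h>
    #include <vulkan/vulkan.h>

    static bool has_vk13_core_features(VkPhysicalDevice pdev)
    {
       VkPhysicalDeviceVulkan13Features feat13 = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,
       };
       VkPhysicalDeviceFeatures2 feat2 = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
          .pNext = &feat13,
       };
       vkGetPhysicalDeviceFeatures2(pdev, &feat2);

       VkPhysicalDeviceVulkan13Properties props13 = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES,
       };
       VkPhysicalDeviceProperties2 props2 = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
          .pNext = &props13,
       };
       vkGetPhysicalDeviceProperties2(pdev, &props2);

       /* e.g. gate a renderer on dynamic rendering + synchronization2 and a
          reasonable maxBufferSize. */
       return feat13.dynamicRendering && feat13.synchronization2 &&
              props13.maxBufferSize >= (1ull << 30);
    }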
+ <type category="struct" name="VkPipelineCompilerControlCreateInfoAMD" structextends="VkGraphicsPipelineCreateInfo,VkComputePipelineCreateInfo,VkExecutionGraphPipelineCreateInfoAMDX">
<member values="VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkPipelineCompilerControlFlagsAMD</type> <name>compilerControlFlags</name></member>
@@ -4941,15 +5658,29 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>deviceCoherentMemory</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceToolPropertiesEXT" returnedonly="true">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>char</type> <name>name</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]</member>
- <member><type>char</type> <name>version</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]</member>
- <member><type>VkToolPurposeFlagsEXT</type> <name>purposes</name></member>
- <member><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
- <member><type>char</type> <name>layer</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]</member>
+ <type category="struct" name="VkFaultData" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_FAULT_DATA"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkFaultLevel</type> <name>faultLevel</name></member>
+ <member><type>VkFaultType</type> <name>faultType</name></member>
+ </type>
+ <type category="struct" name="VkFaultCallbackInfo" structextends="VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_FAULT_CALLBACK_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>faultCount</name></member>
+ <member optional="true" len="faultCount"><type>VkFaultData</type>*<name>pFaults</name></member>
+ <member><type>PFN_vkFaultCallbackFunction</type> <name>pfnFaultCallback</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceToolProperties" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member len="null-terminated"><type>char</type> <name>name</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>version</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]</member>
+ <member><type>VkToolPurposeFlags</type> <name>purposes</name></member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member len="null-terminated"><type>char</type> <name>layer</name>[<enum>VK_MAX_EXTENSION_NAME_SIZE</enum>]</member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceToolPropertiesEXT" alias="VkPhysicalDeviceToolProperties"/>
<type category="struct" name="VkSamplerCustomBorderColorCreateInfoEXT" structextends="VkSamplerCreateInfo">
<member values="VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
@@ -4967,6 +5698,18 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>customBorderColors</name></member>
<member><type>VkBool32</type> <name>customBorderColorWithoutFormat</name></member>
</type>
+ <type category="struct" name="VkSamplerBorderColorComponentMappingCreateInfoEXT" structextends="VkSamplerCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkComponentMapping</type> <name>components</name></member>
+ <member><type>VkBool32</type> <name>srgb</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceBorderColorSwizzleFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>borderColorSwizzle</name></member>
+ <member><type>VkBool32</type> <name>borderColorSwizzleFromImage</name></member>
+ </type>
<type category="union" name="VkDeviceOrHostAddressKHR">
<member noautovalidity="true"><type>VkDeviceAddress</type> <name>deviceAddress</name></member>
<member noautovalidity="true"><type>void</type>* <name>hostAddress</name></member>
@@ -4975,6 +5718,10 @@ typedef void <name>CAMetalLayer</name>;
<member noautovalidity="true"><type>VkDeviceAddress</type> <name>deviceAddress</name></member>
<member noautovalidity="true">const <type>void</type>* <name>hostAddress</name></member>
</type>
+ <type category="union" name="VkDeviceOrHostAddressConstAMDX">
+ <member noautovalidity="true"><type>VkDeviceAddress</type> <name>deviceAddress</name></member>
+ <member noautovalidity="true">const <type>void</type>* <name>hostAddress</name></member>
+ </type>
<type category="struct" name="VkAccelerationStructureGeometryTrianglesDataKHR">
<member values="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
@@ -5099,12 +5846,23 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>maxPipelineRayPayloadSize</name></member>
<member><type>uint32_t</type> <name>maxPipelineRayHitAttributeSize</name></member>
</type>
- <type category="struct" name="VkPipelineLibraryCreateInfoKHR">
+ <type category="struct" name="VkPipelineLibraryCreateInfoKHR" structextends="VkGraphicsPipelineCreateInfo">
<member values="VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>uint32_t</type> <name>libraryCount</name></member>
<member len="libraryCount">const <type>VkPipeline</type>* <name>pLibraries</name></member>
</type>
+ <type category="struct" name="VkRefreshObjectKHR">
+ <member><type>VkObjectType</type> <name>objectType</name></member>
+ <member objecttype="objectType" externsync="true"><type>uint64_t</type> <name>objectHandle</name></member>
+ <member optional="true"><type>VkRefreshObjectFlagsKHR</type> <name>flags</name></member>
+ </type>
+ <type category="struct" name="VkRefreshObjectListKHR">
+ <member values="VK_STRUCTURE_TYPE_REFRESH_OBJECT_LIST_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>objectCount</name></member>
+ <member len="objectCount">const <type>VkRefreshObjectKHR</type>* <name>pObjects</name></member>
+ </type>
<type category="struct" name="VkPhysicalDeviceExtendedDynamicStateFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -5117,12 +5875,67 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>extendedDynamicState2LogicOp</name></member>
<member><type>VkBool32</type> <name>extendedDynamicState2PatchControlPoints</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceExtendedDynamicState3FeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3TessellationDomainOrigin</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3DepthClampEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3PolygonMode</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3RasterizationSamples</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3SampleMask</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3AlphaToCoverageEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3AlphaToOneEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3LogicOpEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ColorBlendEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ColorBlendEquation</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ColorWriteMask</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3RasterizationStream</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ConservativeRasterizationMode</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ExtraPrimitiveOverestimationSize</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3DepthClipEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3SampleLocationsEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ColorBlendAdvanced</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ProvokingVertexMode</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3LineRasterizationMode</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3LineStippleEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3DepthClipNegativeOneToOne</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ViewportWScalingEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ViewportSwizzle</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3CoverageToColorEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3CoverageToColorLocation</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3CoverageModulationMode</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3CoverageModulationTableEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3CoverageModulationTable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3CoverageReductionMode</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3RepresentativeFragmentTestEnable</name></member>
+ <member><type>VkBool32</type> <name>extendedDynamicState3ShadingRateImageEnable</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExtendedDynamicState3PropertiesEXT" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>dynamicPrimitiveTopologyUnrestricted</name></member>
+ </type>
+ <type category="struct" name="VkColorBlendEquationEXT">
+ <member><type>VkBlendFactor</type> <name>srcColorBlendFactor</name></member>
+ <member><type>VkBlendFactor</type> <name>dstColorBlendFactor</name></member>
+ <member><type>VkBlendOp</type> <name>colorBlendOp</name></member>
+ <member><type>VkBlendFactor</type> <name>srcAlphaBlendFactor</name></member>
+ <member><type>VkBlendFactor</type> <name>dstAlphaBlendFactor</name></member>
+ <member><type>VkBlendOp</type> <name>alphaBlendOp</name></member>
+ </type>
+ <type category="struct" name="VkColorBlendAdvancedEXT">
+ <member><type>VkBlendOp</type> <name>advancedBlendOp</name></member>
+ <member><type>VkBool32</type> <name>srcPremultiplied</name></member>
+ <member><type>VkBool32</type> <name>dstPremultiplied</name></member>
+ <member><type>VkBlendOverlapEXT</type> <name>blendOverlap</name></member>
+ <member><type>VkBool32</type> <name>clampResults</name></member>
+ </type>
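(Aside: the two plain structs above are passed directly to the VK_EXT_extended_dynamic_state3 commands rather than chained via pNext. A sketch of setting a classic alpha-blend equation on attachment 0; the command-buffer handle and the already-resolved PFN_vkCmdSetColorBlendEquationEXT pointer are assumed, as is extendedDynamicState3ColorBlendEquation being enabled.)

    #include <vulkan/vulkan.h>

    static void set_alpha_blend(VkCommandBuffer cmd,
                                PFN_vkCmdSetColorBlendEquationEXT cmd_set_color_blend_equation)
    {
       const VkColorBlendEquationEXT eq = {
          .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA,
          .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
          .colorBlendOp        = VK_BLEND_OP_ADD,
          .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
          .dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
          .alphaBlendOp        = VK_BLEND_OP_ADD,
       };
       /* firstAttachment = 0, attachmentCount = 1 */
       cmd_set_color_blend_equation(cmd, 0, 1, &eq);
    }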
<type category="struct" name="VkRenderPassTransformBeginInfoQCOM" structextends="VkRenderPassBeginInfo">
<member values="VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name><comment>Pointer to next structure</comment></member>
<member noautovalidity="true"><type>VkSurfaceTransformFlagBitsKHR</type> <name>transform</name></member>
</type>
- <type category="struct" name="VkCopyCommandTransformInfoQCOM" structextends="VkBufferImageCopy2KHR,VkImageBlit2KHR">
+ <type category="struct" name="VkCopyCommandTransformInfoQCOM" structextends="VkBufferImageCopy2,VkImageBlit2">
<member values="VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
<member noautovalidity="true"><type>VkSurfaceTransformFlagBitsKHR</type> <name>transform</name></member>
@@ -5134,7 +5947,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkRect2D</type> <name>renderArea</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceDiagnosticsConfigFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>diagnosticsConfig</name></member>
</type>
@@ -5143,11 +5956,19 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member optional="true"><type>VkDeviceDiagnosticsConfigFlagsNV</type> <name>flags</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPipelineOfflineCreateInfo" structextends="VkGraphicsPipelineCreateInfo,VkComputePipelineCreateInfo,VkRayTracingPipelineCreateInfoKHR,VkRayTracingPipelineCreateInfoNV">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_OFFLINE_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint8_t</type> <name>pipelineIdentifier</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member><type>VkPipelineMatchControl</type> <name>matchControl</name></member>
+ <member><type>VkDeviceSize</type> <name>poolEntrySize</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>shaderZeroInitializeWorkgroupMemory</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR" alias="VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures"/>
<type category="struct" name="VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -5155,22 +5976,23 @@ typedef void <name>CAMetalLayer</name>;
</type>
<type category="struct" name="VkPhysicalDeviceRobustness2FeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>robustBufferAccess2</name></member>
<member><type>VkBool32</type> <name>robustImageAccess2</name></member>
<member><type>VkBool32</type> <name>nullDescriptor</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceRobustness2PropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>robustStorageBufferAccessSizeAlignment</name></member>
- <member limittype="noauto"><type>VkDeviceSize</type> <name>robustUniformBufferAccessSizeAlignment</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>robustStorageBufferAccessSizeAlignment</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>robustUniformBufferAccessSizeAlignment</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceImageRobustnessFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <type category="struct" name="VkPhysicalDeviceImageRobustnessFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>robustImageAccess</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceImageRobustnessFeaturesEXT" alias="VkPhysicalDeviceImageRobustnessFeatures"/>
<type category="struct" name="VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
@@ -5201,119 +6023,141 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDevicePortabilitySubsetPropertiesKHR" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>minVertexInputBindingStrideAlignment</name></member>
+ <member limittype="min,pot"><type>uint32_t</type> <name>minVertexInputBindingStrideAlignment</name></member>
</type>
<type category="struct" name="VkPhysicalDevice4444FormatsFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>formatA4R4G4B4</name></member>
<member><type>VkBool32</type> <name>formatA4B4G4R4</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceSubpassShadingFeaturesHUAWEI" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>subpassShading</name></member>
</type>
- <type category="struct" name="VkBufferCopy2KHR">
- <member values="VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkDeviceSize</type> <name>srcOffset</name><comment>Specified in bytes</comment></member>
- <member><type>VkDeviceSize</type> <name>dstOffset</name><comment>Specified in bytes</comment></member>
- <member noautovalidity="true"><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
+ <type category="struct" name="VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>clustercullingShader</name></member>
+ <member><type>VkBool32</type> <name>multiviewClusterCullingShader</name></member>
</type>
- <type category="struct" name="VkImageCopy2KHR">
- <member values="VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
- <member><type>VkOffset3D</type> <name>srcOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- <member><type>VkImageSubresourceLayers</type> <name>dstSubresource</name></member>
- <member><type>VkOffset3D</type> <name>dstOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- <member><type>VkExtent3D</type> <name>extent</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- </type>
- <type category="struct" name="VkImageBlit2KHR">
- <member values="VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
- <member><type>VkOffset3D</type> <name>srcOffsets</name>[2]<comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- <member><type>VkImageSubresourceLayers</type> <name>dstSubresource</name></member>
- <member><type>VkOffset3D</type> <name>dstOffsets</name>[2]<comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- </type>
- <type category="struct" name="VkBufferImageCopy2KHR">
- <member values="VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkDeviceSize</type> <name>bufferOffset</name><comment>Specified in bytes</comment></member>
- <member><type>uint32_t</type> <name>bufferRowLength</name><comment>Specified in texels</comment></member>
- <member><type>uint32_t</type> <name>bufferImageHeight</name></member>
- <member><type>VkImageSubresourceLayers</type> <name>imageSubresource</name></member>
- <member><type>VkOffset3D</type> <name>imageOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- <member><type>VkExtent3D</type> <name>imageExtent</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
- </type>
- <type category="struct" name="VkImageResolve2KHR">
- <member values="VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
- <member><type>VkOffset3D</type> <name>srcOffset</name></member>
- <member><type>VkImageSubresourceLayers</type> <name>dstSubresource</name></member>
- <member><type>VkOffset3D</type> <name>dstOffset</name></member>
- <member><type>VkExtent3D</type> <name>extent</name></member>
+ <type category="struct" name="VkPhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI" structextends="VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_VRS_FEATURES_HUAWEI"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>clusterShadingRate</name></member>
</type>
- <type category="struct" name="VkCopyBufferInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkBuffer</type> <name>srcBuffer</name></member>
- <member><type>VkBuffer</type> <name>dstBuffer</name></member>
- <member><type>uint32_t</type> <name>regionCount</name></member>
- <member len="regionCount">const <type>VkBufferCopy2KHR</type>* <name>pRegions</name></member>
- </type>
- <type category="struct" name="VkCopyImageInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImage</type> <name>srcImage</name></member>
- <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
- <member><type>VkImage</type> <name>dstImage</name></member>
- <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
- <member><type>uint32_t</type> <name>regionCount</name></member>
- <member len="regionCount">const <type>VkImageCopy2KHR</type>* <name>pRegions</name></member>
- </type>
- <type category="struct" name="VkBlitImageInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImage</type> <name>srcImage</name></member>
- <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
- <member><type>VkImage</type> <name>dstImage</name></member>
- <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
- <member><type>uint32_t</type> <name>regionCount</name></member>
- <member len="regionCount">const <type>VkImageBlit2KHR</type>* <name>pRegions</name></member>
- <member><type>VkFilter</type> <name>filter</name></member>
- </type>
- <type category="struct" name="VkCopyBufferToImageInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkBuffer</type> <name>srcBuffer</name></member>
- <member><type>VkImage</type> <name>dstImage</name></member>
- <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
- <member><type>uint32_t</type> <name>regionCount</name></member>
- <member len="regionCount">const <type>VkBufferImageCopy2KHR</type>* <name>pRegions</name></member>
- </type>
- <type category="struct" name="VkCopyImageToBufferInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImage</type> <name>srcImage</name></member>
- <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
- <member><type>VkBuffer</type> <name>dstBuffer</name></member>
- <member><type>uint32_t</type> <name>regionCount</name></member>
- <member len="regionCount">const <type>VkBufferImageCopy2KHR</type>* <name>pRegions</name></member>
- </type>
- <type category="struct" name="VkResolveImageInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkImage</type> <name>srcImage</name></member>
- <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
- <member><type>VkImage</type> <name>dstImage</name></member>
- <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
- <member><type>uint32_t</type> <name>regionCount</name></member>
- <member len="regionCount">const <type>VkImageResolve2KHR</type>* <name>pRegions</name></member>
+ <type category="struct" name="VkBufferCopy2">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_COPY_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>srcOffset</name><comment>Specified in bytes</comment></member>
+ <member><type>VkDeviceSize</type> <name>dstOffset</name><comment>Specified in bytes</comment></member>
+ <member noautovalidity="true"><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
</type>
+ <type category="struct" name="VkBufferCopy2KHR" alias="VkBufferCopy2"/>
+ <type category="struct" name="VkImageCopy2">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_COPY_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>srcOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ <member><type>VkImageSubresourceLayers</type> <name>dstSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>dstOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ <member><type>VkExtent3D</type> <name>extent</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ </type>
+ <type category="struct" name="VkImageCopy2KHR" alias="VkImageCopy2"/>
+ <type category="struct" name="VkImageBlit2">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_BLIT_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>srcOffsets</name>[2]<comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ <member><type>VkImageSubresourceLayers</type> <name>dstSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>dstOffsets</name>[2]<comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ </type>
+ <type category="struct" name="VkImageBlit2KHR" alias="VkImageBlit2"/>
+ <type category="struct" name="VkBufferImageCopy2">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>bufferOffset</name><comment>Specified in bytes</comment></member>
+ <member><type>uint32_t</type> <name>bufferRowLength</name><comment>Specified in texels</comment></member>
+ <member><type>uint32_t</type> <name>bufferImageHeight</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>imageSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>imageOffset</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ <member><type>VkExtent3D</type> <name>imageExtent</name><comment>Specified in pixels for both compressed and uncompressed images</comment></member>
+ </type>
+ <type category="struct" name="VkBufferImageCopy2KHR" alias="VkBufferImageCopy2"/>
+ <type category="struct" name="VkImageResolve2">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>srcSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>srcOffset</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>dstSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>dstOffset</name></member>
+ <member><type>VkExtent3D</type> <name>extent</name></member>
+ </type>
+ <type category="struct" name="VkImageResolve2KHR" alias="VkImageResolve2"/>
+ <type category="struct" name="VkCopyBufferInfo2">
+ <member values="VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBuffer</type> <name>srcBuffer</name></member>
+ <member><type>VkBuffer</type> <name>dstBuffer</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkBufferCopy2</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkCopyBufferInfo2KHR" alias="VkCopyBufferInfo2"/>
+ <type category="struct" name="VkCopyImageInfo2">
+ <member values="VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>srcImage</name></member>
+ <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
+ <member><type>VkImage</type> <name>dstImage</name></member>
+ <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkImageCopy2</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkCopyImageInfo2KHR" alias="VkCopyImageInfo2"/>
+ <type category="struct" name="VkBlitImageInfo2">
+ <member values="VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>srcImage</name></member>
+ <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
+ <member><type>VkImage</type> <name>dstImage</name></member>
+ <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkImageBlit2</type>* <name>pRegions</name></member>
+ <member><type>VkFilter</type> <name>filter</name></member>
+ </type>
+ <type category="struct" name="VkBlitImageInfo2KHR" alias="VkBlitImageInfo2"/>
+ <type category="struct" name="VkCopyBufferToImageInfo2">
+ <member values="VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBuffer</type> <name>srcBuffer</name></member>
+ <member><type>VkImage</type> <name>dstImage</name></member>
+ <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkBufferImageCopy2</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkCopyBufferToImageInfo2KHR" alias="VkCopyBufferToImageInfo2"/>
+ <type category="struct" name="VkCopyImageToBufferInfo2">
+ <member values="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>srcImage</name></member>
+ <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
+ <member><type>VkBuffer</type> <name>dstBuffer</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkBufferImageCopy2</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkCopyImageToBufferInfo2KHR" alias="VkCopyImageToBufferInfo2"/>
+ <type category="struct" name="VkResolveImageInfo2">
+ <member values="VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>srcImage</name></member>
+ <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
+ <member><type>VkImage</type> <name>dstImage</name></member>
+ <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkImageResolve2</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkResolveImageInfo2KHR" alias="VkResolveImageInfo2"/>
<type category="struct" name="VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -5330,7 +6174,7 @@ typedef void <name>CAMetalLayer</name>;
<member values="VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkExtent2D</type> <name>fragmentSize</name></member>
- <member><type>VkFragmentShadingRateCombinerOpKHR</type> <name>combinerOps</name>[2]</member>
+ <member noautovalidity="true"><type>VkFragmentShadingRateCombinerOpKHR</type> <name>combinerOps</name>[2]</member>
</type>
<type category="struct" name="VkPhysicalDeviceFragmentShadingRateFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
@@ -5344,13 +6188,13 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="min"><type>VkExtent2D</type> <name>minFragmentShadingRateAttachmentTexelSize</name></member>
<member limittype="max"><type>VkExtent2D</type> <name>maxFragmentShadingRateAttachmentTexelSize</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>maxFragmentShadingRateAttachmentTexelSizeAspectRatio</name></member>
+ <member limittype="max,pot"><type>uint32_t</type> <name>maxFragmentShadingRateAttachmentTexelSizeAspectRatio</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>primitiveFragmentShadingRateWithMultipleViewports</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>layeredShadingRateAttachments</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>fragmentShadingRateNonTrivialCombinerOps</name></member>
<member limittype="max"><type>VkExtent2D</type> <name>maxFragmentSize</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>maxFragmentSizeAspectRatio</name></member>
- <member limittype="noauto"><type>uint32_t</type> <name>maxFragmentShadingRateCoverageSamples</name></member>
+ <member limittype="max,pot"><type>uint32_t</type> <name>maxFragmentSizeAspectRatio</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxFragmentShadingRateCoverageSamples</name></member>
<member limittype="max"><type>VkSampleCountFlagBits</type> <name>maxFragmentShadingRateRasterizationSamples</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>fragmentShadingRateWithShaderDepthStencilWrites</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>fragmentShadingRateWithSampleMask</name></member>
@@ -5366,14 +6210,15 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkSampleCountFlags</type> <name>sampleCounts</name></member>
<member><type>VkExtent2D</type> <name>fragmentSize</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR"><type>VkStructureType</type><name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceShaderTerminateInvocationFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>shaderTerminateInvocation</name></member>
+ <member><type>VkBool32</type> <name>shaderTerminateInvocation</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR" alias="VkPhysicalDeviceShaderTerminateInvocationFeatures"/>
<type category="struct" name="VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>fragmentShadingRateEnums</name></member>
<member><type>VkBool32</type> <name>supersampleFragmentShadingRates</name></member>
<member><type>VkBool32</type> <name>noInvocationFragmentShadingRates</name></member>
@@ -5381,36 +6226,65 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member limittype="bitmask"><type>VkSampleCountFlagBits</type> <name>maxFragmentShadingRateInvocationCount</name></member>
+ <member limittype="max"><type>VkSampleCountFlagBits</type> <name>maxFragmentShadingRateInvocationCount</name></member>
</type>
<type category="struct" name="VkPipelineFragmentShadingRateEnumStateCreateInfoNV" structextends="VkGraphicsPipelineCreateInfo">
<member values="VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkFragmentShadingRateTypeNV</type> <name>shadingRateType</name></member>
- <member><type>VkFragmentShadingRateNV</type> <name>shadingRate</name></member>
- <member><type>VkFragmentShadingRateCombinerOpKHR</type> <name>combinerOps</name>[2]</member>
+ <member noautovalidity="true"><type>VkFragmentShadingRateTypeNV</type> <name>shadingRateType</name></member>
+ <member noautovalidity="true"><type>VkFragmentShadingRateNV</type> <name>shadingRate</name></member>
+ <member noautovalidity="true"><type>VkFragmentShadingRateCombinerOpKHR</type> <name>combinerOps</name>[2]</member>
</type>
- <type category="struct" name="VkAccelerationStructureBuildSizesInfoKHR">
+ <type category="struct" name="VkAccelerationStructureBuildSizesInfoKHR" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkDeviceSize</type> <name>accelerationStructureSize</name></member>
<member><type>VkDeviceSize</type> <name>updateScratchSize</name></member>
<member><type>VkDeviceSize</type> <name>buildScratchSize</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceImage2DViewOf3DFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>image2DViewOf3D</name></member>
+ <member><type>VkBool32</type> <name>sampler2DViewOf3D</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>imageSlicedViewOf3D</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>attachmentFeedbackLoopDynamicState</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>mutableDescriptorType</name></member>
</type>
- <type category="struct" name="VkMutableDescriptorTypeListVALVE">
- <member optional="true"><type>uint32_t</type> <name>descriptorTypeCount</name></member>
+ <type category="struct" name="VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE" alias="VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT"/>
+ <type category="struct" name="VkMutableDescriptorTypeListEXT">
+ <member optional="true"><type>uint32_t</type> <name>descriptorTypeCount</name></member>
<member len="descriptorTypeCount">const <type>VkDescriptorType</type>* <name>pDescriptorTypes</name></member>
</type>
- <type category="struct" name="VkMutableDescriptorTypeCreateInfoVALVE" structextends="VkDescriptorSetLayoutCreateInfo,VkDescriptorPoolCreateInfo">
- <member values="VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>uint32_t</type> <name>mutableDescriptorTypeListCount</name></member>
- <member len="mutableDescriptorTypeListCount">const <type>VkMutableDescriptorTypeListVALVE</type>* <name>pMutableDescriptorTypeLists</name></member>
+ <type category="struct" name="VkMutableDescriptorTypeListVALVE" alias="VkMutableDescriptorTypeListEXT"/>
+ <type category="struct" name="VkMutableDescriptorTypeCreateInfoEXT" structextends="VkDescriptorSetLayoutCreateInfo,VkDescriptorPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>mutableDescriptorTypeListCount</name></member>
+ <member len="mutableDescriptorTypeListCount">const <type>VkMutableDescriptorTypeListEXT</type>* <name>pMutableDescriptorTypeLists</name></member>
+ </type>
+ <type category="struct" name="VkMutableDescriptorTypeCreateInfoVALVE" alias="VkMutableDescriptorTypeCreateInfoEXT"/>
+ <type category="struct" name="VkPhysicalDeviceDepthClipControlFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>depthClipControl</name></member>
+ </type>
+ <type category="struct" name="VkPipelineViewportDepthClipControlCreateInfoEXT" structextends="VkPipelineViewportStateCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>negativeOneToOne</name></member>
</type>
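[Editor's note, not part of the registry XML: VkPipelineViewportDepthClipControlCreateInfoEXT selects an OpenGL-style [-1, 1] clip-space depth range when chained into the viewport state. A non-normative sketch in C, assuming VK_EXT_depth_clip_control and its depthClipControl feature are enabled:]

#include <vulkan/vulkan.h>
#include <stddef.h>

/* Fill a viewport state that requests negativeOneToOne depth clipping;
 * both out-structs must stay alive until vkCreateGraphicsPipelines. */
void fill_viewport_state(const VkViewport *viewport, const VkRect2D *scissor,
                         VkPipelineViewportDepthClipControlCreateInfoEXT *clipControl,
                         VkPipelineViewportStateCreateInfo *viewportState)
{
    clipControl->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT;
    clipControl->pNext = NULL;
    clipControl->negativeOneToOne = VK_TRUE;

    viewportState->sType         = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    viewportState->pNext         = clipControl;
    viewportState->flags         = 0;
    viewportState->viewportCount = 1;
    viewportState->pViewports    = viewport;
    viewportState->scissorCount  = 1;
    viewportState->pScissors     = scissor;
}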
<type category="struct" name="VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
@@ -5423,7 +6297,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkBool32</type> <name>externalMemoryRDMA</name></member>
</type>
<type category="struct" name="VkVertexInputBindingDescription2EXT">
- <member values="VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>binding</name></member>
<member><type>uint32_t</type> <name>stride</name></member>
@@ -5431,7 +6305,7 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>divisor</name></member>
</type>
<type category="struct" name="VkVertexInputAttributeDescription2EXT">
- <member values="VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT"><type>VkStructureType</type><name>sType</name></member>
+ <member values="VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
<member><type>uint32_t</type> <name>location</name><comment>location of the shader vertex attrib</comment></member>
<member><type>uint32_t</type> <name>binding</name><comment>Vertex buffer binding id</comment></member>
@@ -5449,181 +6323,426 @@ typedef void <name>CAMetalLayer</name>;
<member optional="true"><type>uint32_t</type> <name>attachmentCount</name><comment># of pAttachments</comment></member>
<member len="attachmentCount">const <type>VkBool32</type>* <name>pColorWriteEnables</name></member>
</type>
- <type category="struct" name="VkMemoryBarrier2KHR" structextends="VkSubpassDependency2">
- <member values="VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>srcStageMask</name></member>
- <member optional="true"><type>VkAccessFlags2KHR</type> <name>srcAccessMask</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>dstStageMask</name></member>
- <member optional="true"><type>VkAccessFlags2KHR</type> <name>dstAccessMask</name></member>
- </type>
- <type category="struct" name="VkImageMemoryBarrier2KHR">
- <member values="VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>srcStageMask</name></member>
- <member optional="true"><type>VkAccessFlags2KHR</type> <name>srcAccessMask</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>dstStageMask</name></member>
- <member optional="true"><type>VkAccessFlags2KHR</type> <name>dstAccessMask</name></member>
- <member><type>VkImageLayout</type> <name>oldLayout</name></member>
- <member><type>VkImageLayout</type> <name>newLayout</name></member>
- <member><type>uint32_t</type> <name>srcQueueFamilyIndex</name></member>
- <member><type>uint32_t</type> <name>dstQueueFamilyIndex</name></member>
- <member><type>VkImage</type> <name>image</name></member>
- <member><type>VkImageSubresourceRange</type> <name>subresourceRange</name></member>
- </type>
- <type category="struct" name="VkBufferMemoryBarrier2KHR">
- <member values="VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>srcStageMask</name></member>
- <member optional="true"><type>VkAccessFlags2KHR</type> <name>srcAccessMask</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>dstStageMask</name></member>
- <member optional="true"><type>VkAccessFlags2KHR</type> <name>dstAccessMask</name></member>
- <member><type>uint32_t</type> <name>srcQueueFamilyIndex</name></member>
- <member><type>uint32_t</type> <name>dstQueueFamilyIndex</name></member>
- <member><type>VkBuffer</type> <name>buffer</name></member>
- <member><type>VkDeviceSize</type> <name>offset</name></member>
- <member><type>VkDeviceSize</type> <name>size</name></member>
- </type>
- <type category="struct" name="VkDependencyInfoKHR">
- <member values="VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkDependencyFlags</type> <name>dependencyFlags</name></member>
- <member optional="true"><type>uint32_t</type> <name>memoryBarrierCount</name></member>
- <member len="memoryBarrierCount">const <type>VkMemoryBarrier2KHR</type>* <name>pMemoryBarriers</name></member>
- <member optional="true"><type>uint32_t</type> <name>bufferMemoryBarrierCount</name></member>
- <member len="bufferMemoryBarrierCount">const <type>VkBufferMemoryBarrier2KHR</type>* <name>pBufferMemoryBarriers</name></member>
- <member optional="true"><type>uint32_t</type> <name>imageMemoryBarrierCount</name></member>
- <member len="imageMemoryBarrierCount">const <type>VkImageMemoryBarrier2KHR</type>* <name>pImageMemoryBarriers</name></member>
- </type>
- <type category="struct" name="VkSemaphoreSubmitInfoKHR">
- <member values="VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkMemoryBarrier2" structextends="VkSubpassDependency2">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_BARRIER_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>srcStageMask</name></member>
+ <member optional="true"><type>VkAccessFlags2</type> <name>srcAccessMask</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>dstStageMask</name></member>
+ <member optional="true"><type>VkAccessFlags2</type> <name>dstAccessMask</name></member>
+ </type>
+ <type category="struct" name="VkMemoryBarrier2KHR" alias="VkMemoryBarrier2"/>
+ <type category="struct" name="VkImageMemoryBarrier2">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>srcStageMask</name></member>
+ <member optional="true"><type>VkAccessFlags2</type> <name>srcAccessMask</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>dstStageMask</name></member>
+ <member optional="true"><type>VkAccessFlags2</type> <name>dstAccessMask</name></member>
+ <member><type>VkImageLayout</type> <name>oldLayout</name></member>
+ <member><type>VkImageLayout</type> <name>newLayout</name></member>
+ <member><type>uint32_t</type> <name>srcQueueFamilyIndex</name></member>
+ <member><type>uint32_t</type> <name>dstQueueFamilyIndex</name></member>
+ <member><type>VkImage</type> <name>image</name></member>
+ <member><type>VkImageSubresourceRange</type> <name>subresourceRange</name></member>
+ </type>
+ <type category="struct" name="VkImageMemoryBarrier2KHR" alias="VkImageMemoryBarrier2"/>
+ <type category="struct" name="VkBufferMemoryBarrier2">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>srcStageMask</name></member>
+ <member optional="true"><type>VkAccessFlags2</type> <name>srcAccessMask</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>dstStageMask</name></member>
+ <member optional="true"><type>VkAccessFlags2</type> <name>dstAccessMask</name></member>
+ <member><type>uint32_t</type> <name>srcQueueFamilyIndex</name></member>
+ <member><type>uint32_t</type> <name>dstQueueFamilyIndex</name></member>
+ <member><type>VkBuffer</type> <name>buffer</name></member>
+ <member><type>VkDeviceSize</type> <name>offset</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name></member>
+ </type>
+ <type category="struct" name="VkBufferMemoryBarrier2KHR" alias="VkBufferMemoryBarrier2"/>
+ <type category="struct" name="VkDependencyInfo">
+ <member values="VK_STRUCTURE_TYPE_DEPENDENCY_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkDependencyFlags</type> <name>dependencyFlags</name></member>
+ <member optional="true"><type>uint32_t</type> <name>memoryBarrierCount</name></member>
+ <member len="memoryBarrierCount">const <type>VkMemoryBarrier2</type>* <name>pMemoryBarriers</name></member>
+ <member optional="true"><type>uint32_t</type> <name>bufferMemoryBarrierCount</name></member>
+ <member len="bufferMemoryBarrierCount">const <type>VkBufferMemoryBarrier2</type>* <name>pBufferMemoryBarriers</name></member>
+ <member optional="true"><type>uint32_t</type> <name>imageMemoryBarrierCount</name></member>
+ <member len="imageMemoryBarrierCount">const <type>VkImageMemoryBarrier2</type>* <name>pImageMemoryBarriers</name></member>
+ </type>
+ <type category="struct" name="VkDependencyInfoKHR" alias="VkDependencyInfo"/>
+ <type category="struct" name="VkSemaphoreSubmitInfo">
+ <member values="VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkSemaphore</type> <name>semaphore</name></member>
<member><type>uint64_t</type> <name>value</name></member>
- <member optional="true"><type>VkPipelineStageFlags2KHR</type> <name>stageMask</name></member>
+ <member optional="true"><type>VkPipelineStageFlags2</type> <name>stageMask</name></member>
<member><type>uint32_t</type> <name>deviceIndex</name></member>
</type>
- <type category="struct" name="VkCommandBufferSubmitInfoKHR">
- <member values="VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkSemaphoreSubmitInfoKHR" alias="VkSemaphoreSubmitInfo"/>
+ <type category="struct" name="VkCommandBufferSubmitInfo">
+ <member values="VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
<member><type>VkCommandBuffer</type> <name>commandBuffer</name></member>
<member><type>uint32_t</type> <name>deviceMask</name></member>
</type>
- <type category="struct" name="VkSubmitInfo2KHR">
- <member values="VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkCommandBufferSubmitInfoKHR" alias="VkCommandBufferSubmitInfo"/>
+ <type category="struct" name="VkSubmitInfo2">
+ <member values="VK_STRUCTURE_TYPE_SUBMIT_INFO_2"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkSubmitFlagsKHR</type> <name>flags</name></member>
+ <member optional="true"><type>VkSubmitFlags</type> <name>flags</name></member>
<member optional="true"><type>uint32_t</type> <name>waitSemaphoreInfoCount</name></member>
- <member len="waitSemaphoreInfoCount">const <type>VkSemaphoreSubmitInfoKHR</type>* <name>pWaitSemaphoreInfos</name></member>
+ <member len="waitSemaphoreInfoCount">const <type>VkSemaphoreSubmitInfo</type>* <name>pWaitSemaphoreInfos</name></member>
<member optional="true"><type>uint32_t</type> <name>commandBufferInfoCount</name></member>
- <member len="commandBufferInfoCount">const <type>VkCommandBufferSubmitInfoKHR</type>* <name>pCommandBufferInfos</name></member>
+ <member len="commandBufferInfoCount">const <type>VkCommandBufferSubmitInfo</type>* <name>pCommandBufferInfos</name></member>
<member optional="true"><type>uint32_t</type> <name>signalSemaphoreInfoCount</name></member>
- <member len="signalSemaphoreInfoCount">const <type>VkSemaphoreSubmitInfoKHR</type>* <name>pSignalSemaphoreInfos</name></member>
+ <member len="signalSemaphoreInfoCount">const <type>VkSemaphoreSubmitInfo</type>* <name>pSignalSemaphoreInfos</name></member>
</type>
+ <type category="struct" name="VkSubmitInfo2KHR" alias="VkSubmitInfo2"/>
<type category="struct" name="VkQueueFamilyCheckpointProperties2NV" structextends="VkQueueFamilyProperties2" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkPipelineStageFlags2KHR</type> <name>checkpointExecutionStageMask</name></member>
+ <member limittype="bitmask"><type>VkPipelineStageFlags2</type> <name>checkpointExecutionStageMask</name></member>
</type>
<type category="struct" name="VkCheckpointData2NV" returnedonly="true">
<member values="VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkPipelineStageFlags2KHR</type> <name>stage</name></member>
+ <member><type>VkPipelineStageFlags2</type> <name>stage</name></member>
<member noautovalidity="true"><type>void</type>* <name>pCheckpointMarker</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceSynchronization2FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceSynchronization2Features" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>synchronization2</name></member>
</type>
- <type category="struct" name="VkVideoQueueFamilyProperties2KHR" structextends="VkQueueFamilyProperties2">
- <member values="VK_STRUCTURE_TYPE_VIDEO_QUEUE_FAMILY_PROPERTIES_2_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoCodecOperationFlagsKHR</type> <name>videoCodecOperations</name></member>
+ <type category="struct" name="VkPhysicalDeviceSynchronization2FeaturesKHR" alias="VkPhysicalDeviceSynchronization2Features"/>
+ <type category="struct" name="VkPhysicalDeviceHostImageCopyFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>hostImageCopy</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceHostImageCopyPropertiesEXT" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true" limittype="noauto"><type>uint32_t</type> <name>copySrcLayoutCount</name></member>
+ <member optional="true" limittype="noauto" len="copySrcLayoutCount"><type>VkImageLayout</type>* <name>pCopySrcLayouts</name></member>
+ <member optional="true" limittype="noauto"><type>uint32_t</type> <name>copyDstLayoutCount</name></member>
+ <member optional="true" limittype="noauto" len="copyDstLayoutCount"><type>VkImageLayout</type>* <name>pCopyDstLayouts</name></member>
+ <member optional="true" limittype="noauto"><type>uint8_t</type> <name>optimalTilingLayoutUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>identicalMemoryTypeRequirements</name></member>
+ </type>
+ <type category="struct" name="VkMemoryToImageCopyEXT">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>void</type>* <name>pHostPointer</name></member>
+ <member><type>uint32_t</type> <name>memoryRowLength</name><comment>Specified in texels</comment></member>
+ <member><type>uint32_t</type> <name>memoryImageHeight</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>imageSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>imageOffset</name></member>
+ <member><type>VkExtent3D</type> <name>imageExtent</name></member>
+ </type>
+ <type category="struct" name="VkImageToMemoryCopyEXT">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>void</type>* <name>pHostPointer</name></member>
+ <member><type>uint32_t</type> <name>memoryRowLength</name><comment>Specified in texels</comment></member>
+ <member><type>uint32_t</type> <name>memoryImageHeight</name></member>
+ <member><type>VkImageSubresourceLayers</type> <name>imageSubresource</name></member>
+ <member><type>VkOffset3D</type> <name>imageOffset</name></member>
+ <member><type>VkExtent3D</type> <name>imageExtent</name></member>
+ </type>
+ <type category="struct" name="VkCopyMemoryToImageInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkHostImageCopyFlagsEXT</type> <name>flags</name></member>
+ <member><type>VkImage</type> <name>dstImage</name></member>
+ <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkMemoryToImageCopyEXT</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkCopyImageToMemoryInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkHostImageCopyFlagsEXT</type> <name>flags</name></member>
+ <member><type>VkImage</type> <name>srcImage</name></member>
+ <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkImageToMemoryCopyEXT</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkCopyImageToImageInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkHostImageCopyFlagsEXT</type> <name>flags</name></member>
+ <member><type>VkImage</type> <name>srcImage</name></member>
+ <member><type>VkImageLayout</type> <name>srcImageLayout</name></member>
+ <member><type>VkImage</type> <name>dstImage</name></member>
+ <member><type>VkImageLayout</type> <name>dstImageLayout</name></member>
+ <member><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkImageCopy2</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkHostImageLayoutTransitionInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>image</name></member>
+ <member><type>VkImageLayout</type> <name>oldLayout</name></member>
+ <member><type>VkImageLayout</type> <name>newLayout</name></member>
+ <member><type>VkImageSubresourceRange</type> <name>subresourceRange</name></member>
+ </type>
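The structs above are consumed by the host-side copy entry points of VK_EXT_host_image_copy. As a minimal usage sketch in C (not part of this patch) — assuming the extension is enabled, the image was created with VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT, and the helper/parameter names are illustrative — a CPU-side upload looks like:

    #include <vulkan/vulkan.h>

    static VkResult upload_pixels_host(VkDevice dev, VkImage img,
                                       const void *pixels, VkExtent3D extent)
    {
       /* Transition the subresource on the host, no command buffer needed. */
       VkHostImageLayoutTransitionInfoEXT to_general = {
          .sType = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT,
          .image = img,
          .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
          .newLayout = VK_IMAGE_LAYOUT_GENERAL,
          .subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 },
       };
       VkResult res = vkTransitionImageLayoutEXT(dev, 1, &to_general);
       if (res != VK_SUCCESS)
          return res;

       /* One tightly packed region from host memory into mip 0, layer 0. */
       VkMemoryToImageCopyEXT region = {
          .sType = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT,
          .pHostPointer = pixels,
          .memoryRowLength = 0,       /* 0 = tightly packed rows */
          .memoryImageHeight = 0,
          .imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 },
          .imageOffset = { 0, 0, 0 },
          .imageExtent = extent,
       };
       VkCopyMemoryToImageInfoEXT info = {
          .sType = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT,
          .dstImage = img,
          .dstImageLayout = VK_IMAGE_LAYOUT_GENERAL,
          .regionCount = 1,
          .pRegions = &region,
       };
       return vkCopyMemoryToImageEXT(dev, &info);
    }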
+ <type category="struct" name="VkSubresourceHostMemcpySizeEXT" returnedonly="true" structextends="VkSubresourceLayout2KHR">
+ <member values="VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name><comment>Specified in bytes</comment></member>
+ </type>
+ <type category="struct" name="VkHostImageCopyDevicePerformanceQueryEXT" returnedonly="true" structextends="VkImageFormatProperties2">
+ <member values="VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>optimalDeviceAccess</name><comment>Specifies if device access is optimal</comment></member>
+ <member><type>VkBool32</type> <name>identicalMemoryLayout</name><comment>Specifies if memory layout is identical</comment></member>
+ </type>
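VkHostImageCopyDevicePerformanceQueryEXT is returned through the image-format-properties query. A short sketch (format and usage flags are placeholders) of how an application would check whether host copies keep optimal device access:

    #include <vulkan/vulkan.h>

    static VkBool32 host_copy_is_optimal(VkPhysicalDevice phys)
    {
       VkHostImageCopyDevicePerformanceQueryEXT perf = {
          .sType = VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT,
       };
       VkImageFormatProperties2 props = {
          .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
          .pNext = &perf,
       };
       VkPhysicalDeviceImageFormatInfo2 fmt = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
          .format = VK_FORMAT_R8G8B8A8_UNORM,
          .type = VK_IMAGE_TYPE_2D,
          .tiling = VK_IMAGE_TILING_OPTIMAL,
          .usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT,
       };
       if (vkGetPhysicalDeviceImageFormatProperties2(phys, &fmt, &props) != VK_SUCCESS)
          return VK_FALSE;
       /* perf.identicalMemoryLayout can be checked the same way. */
       return perf.optimalDeviceAccess;
    }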
+ <type category="struct" name="VkPhysicalDeviceVulkanSC10Properties" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_SC_1_0_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>deviceNoDynamicHostAllocations</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>deviceDestroyFreesMemory</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>commandPoolMultipleCommandBuffersRecording</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>commandPoolResetCommandBuffer</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>commandBufferSimultaneousUse</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>secondaryCommandBufferNullOrImagelessFramebuffer</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>recycleDescriptorSetMemory</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>recyclePipelineMemory</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxRenderPassSubpasses</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxRenderPassDependencies</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxSubpassInputAttachments</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxSubpassPreserveAttachments</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxFramebufferAttachments</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorSetLayoutBindings</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxQueryFaultCount</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxCallbackFaultCount</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxCommandPoolCommandBuffers</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>maxCommandBufferSize</name></member>
+ </type>
+ <type category="struct" name="VkPipelinePoolSize">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_POOL_SIZE"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>poolEntrySize</name></member>
+ <member><type>uint32_t</type> <name>poolEntryCount</name></member>
+ </type>
+ <type category="struct" name="VkDeviceObjectReservationCreateInfo" allowduplicate="true" structextends="VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_OBJECT_RESERVATION_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>pipelineCacheCreateInfoCount</name></member>
+ <member len="pipelineCacheCreateInfoCount">const <type>VkPipelineCacheCreateInfo</type>* <name>pPipelineCacheCreateInfos</name></member>
+ <member optional="true"><type>uint32_t</type> <name>pipelinePoolSizeCount</name></member>
+ <member len="pipelinePoolSizeCount">const <type>VkPipelinePoolSize</type>* <name>pPipelinePoolSizes</name></member>
+ <member optional="true"><type>uint32_t</type> <name>semaphoreRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>commandBufferRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>fenceRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>deviceMemoryRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>bufferRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>imageRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>eventRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>queryPoolRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>bufferViewRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>imageViewRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>layeredImageViewRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>pipelineCacheRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>pipelineLayoutRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>renderPassRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>graphicsPipelineRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>computePipelineRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>descriptorSetLayoutRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>samplerRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>descriptorPoolRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>descriptorSetRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>framebufferRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>commandPoolRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>samplerYcbcrConversionRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>surfaceRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>swapchainRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>displayModeRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>subpassDescriptionRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>attachmentDescriptionRequestCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>descriptorSetLayoutBindingRequestCount</name></member>
+ <member><type>uint32_t</type> <name>descriptorSetLayoutBindingLimit</name></member>
+ <member><type>uint32_t</type> <name>maxImageViewMipLevels</name></member>
+ <member><type>uint32_t</type> <name>maxImageViewArrayLayers</name></member>
+ <member><type>uint32_t</type> <name>maxLayeredImageViewMipLevels</name></member>
+ <member><type>uint32_t</type> <name>maxOcclusionQueriesPerPool</name></member>
+ <member><type>uint32_t</type> <name>maxPipelineStatisticsQueriesPerPool</name></member>
+ <member><type>uint32_t</type> <name>maxTimestampQueriesPerPool</name></member>
+ <member><type>uint32_t</type> <name>maxImmutableSamplersPerDescriptorSetLayout</name></member>
+ </type>
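VkDeviceObjectReservationCreateInfo is the Vulkan SC mechanism for declaring object budgets at device creation. A minimal sketch, assuming a Vulkan SC implementation and its vulkan_sc_core.h header; all counts below are illustrative placeholders, and members left out stay zero via the designated initializer:

    #include <vulkan/vulkan_sc_core.h>

    /* Chain the reservation into VkDeviceCreateInfo::pNext. */
    VkDeviceObjectReservationCreateInfo reservation = {
       .sType = VK_STRUCTURE_TYPE_DEVICE_OBJECT_RESERVATION_CREATE_INFO,
       .commandPoolRequestCount = 2,
       .commandBufferRequestCount = 16,
       .fenceRequestCount = 8,
       .semaphoreRequestCount = 8,
       .imageRequestCount = 32,
       .imageViewRequestCount = 32,
       .maxImageViewMipLevels = 12,
       .maxImageViewArrayLayers = 1,
    };
    VkDeviceCreateInfo device_info = {
       .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
       .pNext = &reservation,
       /* queueCreateInfoCount / pQueueCreateInfos etc. as usual */
    };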
+ <type category="struct" name="VkCommandPoolMemoryReservationCreateInfo" structextends="VkCommandPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_COMMAND_POOL_MEMORY_RESERVATION_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>commandPoolReservedSize</name></member>
+ <member><type>uint32_t</type> <name>commandPoolMaxCommandBuffers</name></member>
+ </type>
+ <type category="struct" name="VkCommandPoolMemoryConsumption" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_COMMAND_POOL_MEMORY_CONSUMPTION"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>commandPoolAllocated</name></member>
+ <member><type>VkDeviceSize</type> <name>commandPoolReservedSize</name></member>
+ <member><type>VkDeviceSize</type> <name>commandBufferAllocated</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceVulkanSC10Features" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_SC_1_0_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderAtomicInstructions</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>primitivesGeneratedQuery</name></member>
+ <member><type>VkBool32</type> <name>primitivesGeneratedQueryWithRasterizerDiscard</name></member>
+ <member><type>VkBool32</type> <name>primitivesGeneratedQueryWithNonZeroStreams</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceLegacyDitheringFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>legacyDithering</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>multisampledRenderToSingleSampled</name></member>
+ </type>
+ <type category="struct" name="VkSubpassResolvePerformanceQueryEXT" returnedonly="true" structextends="VkFormatProperties2">
+ <member values="VK_STRUCTURE_TYPE_SUBPASS_RESOLVE_PERFORMANCE_QUERY_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>optimal</name></member>
+ </type>
+ <type category="struct" name="VkMultisampledRenderToSingleSampledInfoEXT" structextends="VkSubpassDescription2,VkRenderingInfo">
+ <member values="VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>multisampledRenderToSingleSampledEnable</name></member>
+ <member><type>VkSampleCountFlagBits</type> <name>rasterizationSamples</name></member>
</type>
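VkMultisampledRenderToSingleSampledInfoEXT can be chained into dynamic rendering as well as subpass descriptions. A sketch of the dynamic-rendering path, assuming VK_EXT_multisampled_render_to_single_sampled is enabled; the function and parameter names are illustrative:

    #include <vulkan/vulkan.h>

    static void begin_msrtss_rendering(VkCommandBuffer cmd,
                                       const VkRenderingAttachmentInfo *color,
                                       VkRect2D area)
    {
       /* Rasterize at 4x while the bound color attachment stays single-sampled. */
       VkMultisampledRenderToSingleSampledInfoEXT msrtss = {
          .sType = VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT,
          .multisampledRenderToSingleSampledEnable = VK_TRUE,
          .rasterizationSamples = VK_SAMPLE_COUNT_4_BIT,
       };
       VkRenderingInfo info = {
          .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
          .pNext = &msrtss,
          .renderArea = area,
          .layerCount = 1,
          .colorAttachmentCount = 1,
          .pColorAttachments = color,
       };
       vkCmdBeginRendering(cmd, &info);
    }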
- <type category="struct" name="VkVideoProfilesKHR" structextends="VkFormatProperties2,VkImageCreateInfo,VkImageViewCreateInfo,VkBufferCreateInfo">
- <member values="VK_STRUCTURE_TYPE_VIDEO_PROFILES_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>profileCount</name></member>
- <member>const <type>VkVideoProfileKHR</type>* <name>pProfiles</name></member>
+ <type category="struct" name="VkPhysicalDevicePipelineProtectedAccessFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>pipelineProtectedAccess</name></member>
+ </type>
+ <type category="struct" name="VkQueueFamilyVideoPropertiesKHR" returnedonly="true" structextends="VkQueueFamilyProperties2">
+ <member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_VIDEO_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkVideoCodecOperationFlagsKHR</type> <name>videoCodecOperations</name></member>
+ </type>
+ <type category="struct" name="VkQueueFamilyQueryResultStatusPropertiesKHR" returnedonly="true" structextends="VkQueueFamilyProperties2">
+ <member values="VK_STRUCTURE_TYPE_QUEUE_FAMILY_QUERY_RESULT_STATUS_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>queryResultStatusSupport</name></member>
+ </type>
+ <type category="struct" name="VkVideoProfileListInfoKHR" structextends="VkPhysicalDeviceImageFormatInfo2,VkPhysicalDeviceVideoFormatInfoKHR,VkImageCreateInfo,VkBufferCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_PROFILE_LIST_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>profileCount</name></member>
+ <member len="profileCount">const <type>VkVideoProfileInfoKHR</type>* <name>pProfiles</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceVideoFormatInfoKHR" returnedonly="true">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkImageUsageFlags</type> <name>imageUsage</name></member>
- <member>const <type>VkVideoProfilesKHR</type>* <name>pVideoProfiles</name></member>
+ <type category="struct" name="VkPhysicalDeviceVideoFormatInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageUsageFlags</type> <name>imageUsage</name></member>
</type>
<type category="struct" name="VkVideoFormatPropertiesKHR" returnedonly="true">
- <member values="VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkFormat</type> <name>format</name></member>
- </type>
- <type category="struct" name="VkVideoProfileKHR" structextends="VkQueryPoolCreateInfo,VkFormatProperties2,VkImageCreateInfo,VkImageViewCreateInfo,VkBufferCreateInfo">
- <member values="VK_STRUCTURE_TYPE_VIDEO_PROFILE_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoCodecOperationFlagBitsKHR</type> <name>videoCodecOperation</name></member>
- <member><type>VkVideoChromaSubsamplingFlagsKHR</type> <name>chromaSubsampling</name></member>
- <member><type>VkVideoComponentBitDepthFlagsKHR</type> <name>lumaBitDepth</name></member>
- <member><type>VkVideoComponentBitDepthFlagsKHR</type> <name>chromaBitDepth</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkFormat</type> <name>format</name></member>
+ <member><type>VkComponentMapping</type> <name>componentMapping</name></member>
+ <member><type>VkImageCreateFlags</type> <name>imageCreateFlags</name></member>
+ <member><type>VkImageType</type> <name>imageType</name></member>
+ <member><type>VkImageTiling</type> <name>imageTiling</name></member>
+ <member><type>VkImageUsageFlags</type> <name>imageUsageFlags</name></member>
+ </type>
+ <type category="struct" name="VkVideoProfileInfoKHR" structextends="VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_PROFILE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoCodecOperationFlagBitsKHR</type> <name>videoCodecOperation</name></member>
+ <member><type>VkVideoChromaSubsamplingFlagsKHR</type> <name>chromaSubsampling</name></member>
+ <member><type>VkVideoComponentBitDepthFlagsKHR</type> <name>lumaBitDepth</name></member>
+ <member optional="true"><type>VkVideoComponentBitDepthFlagsKHR</type> <name>chromaBitDepth</name></member>
</type>
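The profile-list and video-format structs above are used together for format enumeration. A sketch of the standard two-call pattern, assuming the caller already chained the codec-specific profile struct(s) into the list; the helper name is illustrative:

    #include <stdlib.h>
    #include <vulkan/vulkan.h>

    static VkResult list_video_formats(VkPhysicalDevice phys,
                                       const VkVideoProfileListInfoKHR *profiles,
                                       VkImageUsageFlags usage)
    {
       VkPhysicalDeviceVideoFormatInfoKHR info = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR,
          .pNext = profiles,
          .imageUsage = usage,
       };
       uint32_t count = 0;
       VkResult res = vkGetPhysicalDeviceVideoFormatPropertiesKHR(phys, &info, &count, NULL);
       if (res != VK_SUCCESS)
          return res;

       VkVideoFormatPropertiesKHR *props = calloc(count, sizeof(*props));
       if (!props)
          return VK_ERROR_OUT_OF_HOST_MEMORY;
       for (uint32_t i = 0; i < count; i++)
          props[i].sType = VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR;
       res = vkGetPhysicalDeviceVideoFormatPropertiesKHR(phys, &info, &count, props);
       /* props[i].format / imageTiling / imageUsageFlags describe each option. */
       free(props);
       return res;
    }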
<type category="struct" name="VkVideoCapabilitiesKHR" returnedonly="true">
- <member values="VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoCapabilityFlagsKHR</type> <name>capabilityFlags</name></member>
- <member><type>VkDeviceSize</type> <name>minBitstreamBufferOffsetAlignment</name></member>
- <member><type>VkDeviceSize</type> <name>minBitstreamBufferSizeAlignment</name></member>
- <member><type>VkExtent2D</type> <name>videoPictureExtentGranularity</name></member>
- <member><type>VkExtent2D</type> <name>minExtent</name></member>
- <member><type>VkExtent2D</type> <name>maxExtent</name></member>
- <member><type>uint32_t</type> <name>maxReferencePicturesSlotsCount</name></member>
- <member><type>uint32_t</type> <name>maxReferencePicturesActiveCount</name></member>
- </type>
- <type category="struct" name="VkVideoGetMemoryPropertiesKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_GET_MEMORY_PROPERTIES_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>memoryBindIndex</name></member>
- <member><type>VkMemoryRequirements2</type>* <name>pMemoryRequirements</name></member>
- </type>
- <type category="struct" name="VkVideoBindMemoryKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_BIND_MEMORY_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>memoryBindIndex</name></member>
- <member><type>VkDeviceMemory</type> <name>memory</name></member>
- <member><type>VkDeviceSize</type> <name>memoryOffset</name></member>
- <member><type>VkDeviceSize</type> <name>memorySize</name></member>
- </type>
- <type category="struct" name="VkVideoPictureResourceKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkOffset2D</type> <name>codedOffset</name><comment>The offset to be used for the picture resource, currently only used in field mode</comment></member>
- <member><type>VkExtent2D</type> <name>codedExtent</name><comment>The extent to be used for the picture resource</comment></member>
- <member><type>uint32_t</type> <name>baseArrayLayer</name><comment>TThe first array layer to be accessed for the Decode or Encode Operations</comment></member>
- <member><type>VkImageView</type> <name>imageViewBinding</name><comment>The ImageView binding of the resource</comment></member>
- </type>
- <type category="struct" name="VkVideoReferenceSlotKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>int8_t</type> <name>slotIndex</name><comment>The reference slot index</comment></member>
- <member>const <type>VkVideoPictureResourceKHR</type>* <name>pPictureResource</name><comment>The reference picture resource</comment></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoCapabilityFlagsKHR</type> <name>flags</name></member>
+ <member><type>VkDeviceSize</type> <name>minBitstreamBufferOffsetAlignment</name></member>
+ <member><type>VkDeviceSize</type> <name>minBitstreamBufferSizeAlignment</name></member>
+ <member><type>VkExtent2D</type> <name>pictureAccessGranularity</name></member>
+ <member><type>VkExtent2D</type> <name>minCodedExtent</name></member>
+ <member><type>VkExtent2D</type> <name>maxCodedExtent</name></member>
+ <member><type>uint32_t</type> <name>maxDpbSlots</name></member>
+ <member><type>uint32_t</type> <name>maxActiveReferencePictures</name></member>
+ <member><type>VkExtensionProperties</type> <name>stdHeaderVersion</name></member>
+ </type>
+ <type category="struct" name="VkVideoSessionMemoryRequirementsKHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_MEMORY_REQUIREMENTS_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>memoryBindIndex</name></member>
+ <member><type>VkMemoryRequirements</type> <name>memoryRequirements</name></member>
+ </type>
+ <type category="struct" name="VkBindVideoSessionMemoryInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_BIND_VIDEO_SESSION_MEMORY_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>memoryBindIndex</name></member>
+ <member><type>VkDeviceMemory</type> <name>memory</name></member>
+ <member><type>VkDeviceSize</type> <name>memoryOffset</name></member>
+ <member><type>VkDeviceSize</type> <name>memorySize</name></member>
+ </type>
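These two structs drive video session memory binding. A sketch of query-then-bind, where allocate_device_memory() is a hypothetical stand-in for the application's allocator and the fixed-size scratch array is just for brevity:

    #include <vulkan/vulkan.h>

    /* Hypothetical helper provided elsewhere by the application. */
    VkDeviceMemory allocate_device_memory(VkDevice dev, const VkMemoryRequirements *reqs);

    static VkResult bind_session_memory(VkDevice dev, VkVideoSessionKHR session)
    {
       uint32_t count = 0;
       vkGetVideoSessionMemoryRequirementsKHR(dev, session, &count, NULL);
       if (count > 8)
          count = 8;   /* sketch only: assume at most 8 bindings */

       VkVideoSessionMemoryRequirementsKHR reqs[8] = {0};
       for (uint32_t i = 0; i < count; i++)
          reqs[i].sType = VK_STRUCTURE_TYPE_VIDEO_SESSION_MEMORY_REQUIREMENTS_KHR;
       vkGetVideoSessionMemoryRequirementsKHR(dev, session, &count, reqs);

       VkBindVideoSessionMemoryInfoKHR binds[8];
       for (uint32_t i = 0; i < count; i++) {
          VkDeviceMemory mem = allocate_device_memory(dev, &reqs[i].memoryRequirements);
          binds[i] = (VkBindVideoSessionMemoryInfoKHR){
             .sType = VK_STRUCTURE_TYPE_BIND_VIDEO_SESSION_MEMORY_INFO_KHR,
             .memoryBindIndex = reqs[i].memoryBindIndex,
             .memory = mem,
             .memoryOffset = 0,
             .memorySize = reqs[i].memoryRequirements.size,
          };
       }
       return vkBindVideoSessionMemoryKHR(dev, session, count, binds);
    }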
+ <type category="struct" name="VkVideoPictureResourceInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkOffset2D</type> <name>codedOffset</name><comment>The offset to be used for the picture resource, currently only used in field mode</comment></member>
+ <member><type>VkExtent2D</type> <name>codedExtent</name><comment>The extent to be used for the picture resource</comment></member>
+ <member><type>uint32_t</type> <name>baseArrayLayer</name><comment>The first array layer to be accessed for the Decode or Encode Operations</comment></member>
+ <member><type>VkImageView</type> <name>imageViewBinding</name><comment>The ImageView binding of the resource</comment></member>
+ </type>
+ <type category="struct" name="VkVideoReferenceSlotInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>int32_t</type> <name>slotIndex</name><comment>The reference slot index</comment></member>
+ <member optional="true">const <type>VkVideoPictureResourceInfoKHR</type>* <name>pPictureResource</name><comment>The reference picture resource</comment></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeCapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>VkVideoDecodeCapabilityFlagsKHR</type> <name>flags</name></member>
+ </type>
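Capabilities are queried per profile, with decode-specific capabilities chained onto the generic struct. A sketch, assuming the caller passes a profile that already carries its codec-specific pNext chain (such as the H.264 decode profile defined further below):

    #include <vulkan/vulkan.h>

    static VkResult query_decode_caps(VkPhysicalDevice phys,
                                      const VkVideoProfileInfoKHR *profile,
                                      VkVideoCapabilitiesKHR *caps,
                                      VkVideoDecodeCapabilitiesKHR *decode_caps)
    {
       decode_caps->sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR;
       decode_caps->pNext = NULL;
       caps->sType = VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR;
       caps->pNext = decode_caps;
       /* caps->maxDpbSlots, caps->maxActiveReferencePictures and
        * decode_caps->flags then feed into session creation. */
       return vkGetPhysicalDeviceVideoCapabilitiesKHR(phys, profile, caps);
    }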
+ <type category="struct" name="VkVideoDecodeUsageInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_USAGE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoDecodeUsageFlagsKHR</type> <name>videoUsageHints</name></member>
</type>
<type category="struct" name="VkVideoDecodeInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkVideoDecodeFlagsKHR</type> <name>flags</name></member>
- <member><type>VkOffset2D</type> <name>codedOffset</name></member>
- <member><type>VkExtent2D</type> <name>codedExtent</name></member>
- <member><type>VkBuffer</type> <name>srcBuffer</name></member>
- <member><type>VkDeviceSize</type> <name>srcBufferOffset</name></member>
- <member><type>VkDeviceSize</type> <name>srcBufferRange</name></member>
- <member><type>VkVideoPictureResourceKHR</type> <name>dstPictureResource</name></member>
- <member>const <type>VkVideoReferenceSlotKHR</type>* <name>pSetupReferenceSlot</name></member>
- <member><type>uint32_t</type> <name>referenceSlotCount</name></member>
- <member len="referenceSlotCount">const <type>VkVideoReferenceSlotKHR</type>* <name>pReferenceSlots</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoDecodeFlagsKHR</type> <name>flags</name></member>
+ <member><type>VkBuffer</type> <name>srcBuffer</name></member>
+ <member><type>VkDeviceSize</type> <name>srcBufferOffset</name></member>
+ <member><type>VkDeviceSize</type> <name>srcBufferRange</name></member>
+ <member><type>VkVideoPictureResourceInfoKHR</type> <name>dstPictureResource</name></member>
+ <member optional="true">const <type>VkVideoReferenceSlotInfoKHR</type>* <name>pSetupReferenceSlot</name></member>
+ <member optional="true"><type>uint32_t</type> <name>referenceSlotCount</name></member>
+ <member len="referenceSlotCount">const <type>VkVideoReferenceSlotInfoKHR</type>* <name>pReferenceSlots</name></member>
+ </type>
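VkVideoDecodeInfoKHR describes one decode operation. A sketch of recording it, assuming the command buffer is already inside a video coding scope and the bitstream buffer, output picture and optional setup slot were prepared by the caller:

    #include <vulkan/vulkan.h>

    static void record_decode(VkCommandBuffer cmd, VkBuffer bitstream,
                              VkDeviceSize offset, VkDeviceSize range,
                              const VkVideoPictureResourceInfoKHR *dst_picture,
                              const VkVideoReferenceSlotInfoKHR *setup_slot)
    {
       VkVideoDecodeInfoKHR decode = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR,
          .srcBuffer = bitstream,
          .srcBufferOffset = offset,
          .srcBufferRange = range,
          .dstPictureResource = *dst_picture,
          .pSetupReferenceSlot = setup_slot,  /* may be NULL when not reconstructing */
          .referenceSlotCount = 0,            /* no active references in this sketch */
          .pReferenceSlots = NULL,
       };
       /* Codec-specific picture info (e.g. VkVideoDecodeH264PictureInfoKHR)
        * must be chained through decode.pNext in real use. */
       vkCmdDecodeVideoKHR(cmd, &decode);
    }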
+ <type category="struct" name="VkPhysicalDeviceVideoMaintenance1FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_1_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>videoMaintenance1</name></member>
+ </type>
+ <type category="struct" name="VkVideoInlineQueryInfoKHR" structextends="VkVideoDecodeInfoKHR,VkVideoEncodeInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_INLINE_QUERY_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkQueryPool</type> <name>queryPool</name></member>
+ <member><type>uint32_t</type> <name>firstQuery</name></member>
+ <member><type>uint32_t</type> <name>queryCount</name></member>
</type>
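With videoMaintenance1 enabled, a result-status query can be attached inline instead of wrapping the decode in an explicit query scope. A fragment continuing the decode sketch above, where query_pool is an assumed VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR pool:

    VkVideoInlineQueryInfoKHR inline_query = {
       .sType = VK_STRUCTURE_TYPE_VIDEO_INLINE_QUERY_INFO_KHR,
       .queryPool = query_pool,
       .firstQuery = 0,
       .queryCount = 1,
    };
    decode.pNext = &inline_query;   /* 'decode' from the previous sketch */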
<comment>Video Decode Codec Standard specific structures</comment>
<type category="include" name="vk_video/vulkan_video_codec_h264std.h">#include "vk_video/vulkan_video_codec_h264std.h"</type>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264ProfileIdc"/>
- <type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264Level"/>
+ <type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264LevelIdc"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264ChromaFormatIdc"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264PocType"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264SpsFlags"/>
@@ -5632,7 +6751,7 @@ typedef void <name>CAMetalLayer</name>;
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264AspectRatioIdc"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264HrdParameters"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264SpsVuiFlags"/>
- <type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264WeightedBiPredIdc"/>
+ <type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264WeightedBipredIdc"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264PpsFlags"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264SliceType"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264CabacInitIdc"/>
@@ -5643,63 +6762,48 @@ typedef void <name>CAMetalLayer</name>;
<type category="include" name="vk_video/vulkan_video_codec_h264std_decode.h">#include "vk_video/vulkan_video_codec_h264std_decode.h"</type>
<type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264PictureInfo"/>
<type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264ReferenceInfo"/>
- <type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264Mvc"/>
<type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264PictureInfoFlags"/>
<type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264ReferenceInfoFlags"/>
- <type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264MvcElement"/>
- <type requires="vk_video/vulkan_video_codec_h264std_decode.h" name="StdVideoDecodeH264MvcElementFlags"/>
- <type category="struct" name="VkVideoDecodeH264ProfileEXT" structextends="VkVideoProfileKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>StdVideoH264ProfileIdc</type> <name>stdProfileIdc</name></member>
- <member><type>VkVideoDecodeH264PictureLayoutFlagsEXT</type> <name>pictureLayout</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH264CapabilitiesEXT" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>maxLevel</name></member>
- <member><type>VkOffset2D</type> <name>fieldOffsetGranularity</name></member>
- <member><type>VkExtensionProperties</type> <name>stdExtensionVersion</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH264SessionCreateInfoEXT" structextends="VkVideoSessionCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_CREATE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoDecodeH264CreateFlagsEXT</type> <name>flags</name></member>
- <member>const <type>VkExtensionProperties</type>* <name>pStdExtensionVersion</name></member>
+ <type category="struct" name="VkVideoDecodeH264ProfileInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoH264ProfileIdc</type> <name>stdProfileIdc</name></member>
+ <member optional="true"><type>VkVideoDecodeH264PictureLayoutFlagBitsKHR</type> <name>pictureLayout</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH264CapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoH264LevelIdc</type> <name>maxLevelIdc</name></member>
+ <member><type>VkOffset2D</type> <name>fieldOffsetGranularity</name></member>
</type>
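A sketch of how the H.264 decode profile chain is assembled and its codec-specific capabilities queried; the chosen profile IDC, chroma subsampling and bit depths are illustrative:

    #include <vulkan/vulkan.h>

    static VkResult query_h264_decode_caps(VkPhysicalDevice phys,
                                           VkVideoCapabilitiesKHR *caps,
                                           VkVideoDecodeH264CapabilitiesKHR *h264_caps)
    {
       VkVideoDecodeH264ProfileInfoKHR h264_profile = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_INFO_KHR,
          .stdProfileIdc = STD_VIDEO_H264_PROFILE_IDC_MAIN,
          .pictureLayout = VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_PROGRESSIVE_KHR,
       };
       VkVideoProfileInfoKHR profile = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_PROFILE_INFO_KHR,
          .pNext = &h264_profile,
          .videoCodecOperation = VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR,
          .chromaSubsampling = VK_VIDEO_CHROMA_SUBSAMPLING_420_BIT_KHR,
          .lumaBitDepth = VK_VIDEO_COMPONENT_BIT_DEPTH_8_BIT_KHR,
          .chromaBitDepth = VK_VIDEO_COMPONENT_BIT_DEPTH_8_BIT_KHR,
       };
       h264_caps->sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_KHR;
       h264_caps->pNext = NULL;
       caps->sType = VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR;
       caps->pNext = h264_caps;
       return vkGetPhysicalDeviceVideoCapabilitiesKHR(phys, &profile, caps);
    }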
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264SequenceParameterSet"/>
<type requires="vk_video/vulkan_video_codec_h264std.h" name="StdVideoH264PictureParameterSet"/>
- <type category="struct" name="VkVideoDecodeH264SessionParametersAddInfoEXT" structextends="VkVideoSessionParametersUpdateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>spsStdCount</name></member>
- <member len="spsStdCount" optional="true">const <type>StdVideoH264SequenceParameterSet</type>* <name>pSpsStd</name></member>
- <member><type>uint32_t</type> <name>ppsStdCount</name></member>
- <member len="ppsStdCount" optional="true">const <type>StdVideoH264PictureParameterSet</type>* <name>pPpsStd</name><comment>List of Picture Parameters associated with the spsStd, above</comment></member>
- </type>
- <type category="struct" name="VkVideoDecodeH264SessionParametersCreateInfoEXT" structextends="VkVideoSessionParametersCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>maxSpsStdCount</name></member>
- <member><type>uint32_t</type> <name>maxPpsStdCount</name></member>
- <member optional="true">const <type>VkVideoDecodeH264SessionParametersAddInfoEXT</type>* <name>pParametersAddInfo</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH264PictureInfoEXT" structextends="VkVideoDecodeInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
- <member>const <type>StdVideoDecodeH264PictureInfo</type>* <name>pStdPictureInfo</name></member>
- <member><type>uint32_t</type> <name>slicesCount</name></member>
- <member len="slicesCount">const <type>uint32_t</type>* <name>pSlicesDataOffsets</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH264DpbSlotInfoEXT" structextends="VkVideoReferenceSlotKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member>const <type>StdVideoDecodeH264ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH264MvcEXT" structextends="VkVideoDecodeH264PictureInfoEXT">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true" noautovalidity="true">const <type>void</type>*<name>pNext</name></member>
- <member>const <type>StdVideoDecodeH264Mvc</type>* <name>pStdMvc</name></member>
+ <type category="struct" name="VkVideoDecodeH264SessionParametersAddInfoKHR" structextends="VkVideoSessionParametersUpdateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdSPSCount</name></member>
+ <member len="stdSPSCount">const <type>StdVideoH264SequenceParameterSet</type>* <name>pStdSPSs</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdPPSCount</name></member>
+            <member len="stdPPSCount">const <type>StdVideoH264PictureParameterSet</type>* <name>pStdPPSs</name><comment>List of Picture Parameters associated with the SPSs above</comment></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH264SessionParametersCreateInfoKHR" structextends="VkVideoSessionParametersCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>maxStdSPSCount</name></member>
+ <member><type>uint32_t</type> <name>maxStdPPSCount</name></member>
+ <member optional="true">const <type>VkVideoDecodeH264SessionParametersAddInfoKHR</type>* <name>pParametersAddInfo</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH264PictureInfoKHR" structextends="VkVideoDecodeInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoDecodeH264PictureInfo</type>* <name>pStdPictureInfo</name></member>
+ <member><type>uint32_t</type> <name>sliceCount</name></member>
+ <member len="sliceCount">const <type>uint32_t</type>* <name>pSliceOffsets</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH264DpbSlotInfoKHR" structextends="VkVideoReferenceSlotInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoDecodeH264ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
</type>
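A sketch of creating H.264 decode session parameters pre-populated with one SPS and one PPS (the chain terminates in VkVideoSessionParametersCreateInfoKHR, defined further below in this file); the Std structs are assumed to have been parsed from the bitstream, and the max counts are illustrative:

    #include <vulkan/vulkan.h>

    static VkResult create_h264_params(VkDevice dev, VkVideoSessionKHR session,
                                       const StdVideoH264SequenceParameterSet *sps,
                                       const StdVideoH264PictureParameterSet *pps,
                                       VkVideoSessionParametersKHR *out)
    {
       VkVideoDecodeH264SessionParametersAddInfoKHR add = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR,
          .stdSPSCount = 1, .pStdSPSs = sps,
          .stdPPSCount = 1, .pStdPPSs = pps,
       };
       VkVideoDecodeH264SessionParametersCreateInfoKHR h264_ci = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR,
          .maxStdSPSCount = 4,
          .maxStdPPSCount = 8,
          .pParametersAddInfo = &add,
       };
       VkVideoSessionParametersCreateInfoKHR ci = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR,
          .pNext = &h264_ci,
          .videoSession = session,
       };
       return vkCreateVideoSessionParametersKHR(dev, &ci, NULL, out);
    }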
<type category="include" name="vk_video/vulkan_video_codec_h265std.h">#include "vk_video/vulkan_video_codec_h265std.h"</type>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265ProfileIdc"/>
@@ -5709,7 +6813,7 @@ typedef void <name>CAMetalLayer</name>;
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265DecPicBufMgr"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265HrdParameters"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265VpsFlags"/>
- <type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265Level"/>
+ <type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265LevelIdc"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265SpsFlags"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265ScalingLists"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265SequenceParameterSetVui"/>
@@ -5718,228 +6822,511 @@ typedef void <name>CAMetalLayer</name>;
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265SubLayerHrdParameters"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265HrdFlags"/>
<type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265SpsVuiFlags"/>
+ <type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265SliceType"/>
+ <type requires="vk_video/vulkan_video_codec_h265std.h" name="StdVideoH265PictureType"/>
<type category="include" name="vk_video/vulkan_video_codec_h265std_decode.h">#include "vk_video/vulkan_video_codec_h265std_decode.h"</type>
<type requires="vk_video/vulkan_video_codec_h265std_decode.h" name="StdVideoDecodeH265PictureInfo"/>
<type requires="vk_video/vulkan_video_codec_h265std_decode.h" name="StdVideoDecodeH265ReferenceInfo"/>
<type requires="vk_video/vulkan_video_codec_h265std_decode.h" name="StdVideoDecodeH265PictureInfoFlags"/>
<type requires="vk_video/vulkan_video_codec_h265std_decode.h" name="StdVideoDecodeH265ReferenceInfoFlags"/>
- <type category="struct" name="VkVideoDecodeH265ProfileEXT" structextends="VkVideoProfileKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>StdVideoH265ProfileIdc</type> <name>stdProfileIdc</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH265CapabilitiesEXT" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>maxLevel</name></member>
- <member><type>VkExtensionProperties</type> <name>stdExtensionVersion</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH265SessionCreateInfoEXT" structextends="VkVideoSessionCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_CREATE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoDecodeH265CreateFlagsEXT</type> <name>flags</name></member>
- <member>const <type>VkExtensionProperties</type>* <name>pStdExtensionVersion</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH265SessionParametersAddInfoEXT" structextends="VkVideoSessionParametersUpdateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>spsStdCount</name></member>
- <member len="spsStdCount" optional="true">const <type>StdVideoH265SequenceParameterSet</type>* <name>pSpsStd</name></member>
- <member><type>uint32_t</type> <name>ppsStdCount</name></member>
- <member len="ppsStdCount" optional="true">const <type>StdVideoH265PictureParameterSet</type>* <name>pPpsStd</name><comment>List of Picture Parameters associated with the spsStd, above</comment></member>
- </type>
- <type category="struct" name="VkVideoDecodeH265SessionParametersCreateInfoEXT" structextends="VkVideoSessionParametersCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>maxSpsStdCount</name></member>
- <member><type>uint32_t</type> <name>maxPpsStdCount</name></member>
- <member optional="true">const <type>VkVideoDecodeH265SessionParametersAddInfoEXT</type>* <name>pParametersAddInfo</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH265PictureInfoEXT" structextends="VkVideoDecodeInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>StdVideoDecodeH265PictureInfo</type>* <name>pStdPictureInfo</name></member>
- <member><type>uint32_t</type> <name>slicesCount</name></member>
- <member len="slicesCount">const <type>uint32_t</type>* <name>pSlicesDataOffsets</name></member>
- </type>
- <type category="struct" name="VkVideoDecodeH265DpbSlotInfoEXT" structextends="VkVideoReferenceSlotKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member>const <type>StdVideoDecodeH265ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
+ <type category="struct" name="VkVideoDecodeH265ProfileInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoH265ProfileIdc</type> <name>stdProfileIdc</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH265CapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoH265LevelIdc</type> <name>maxLevelIdc</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH265SessionParametersAddInfoKHR" structextends="VkVideoSessionParametersUpdateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdVPSCount</name></member>
+ <member len="stdVPSCount">const <type>StdVideoH265VideoParameterSet</type>* <name>pStdVPSs</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdSPSCount</name></member>
+ <member len="stdSPSCount">const <type>StdVideoH265SequenceParameterSet</type>* <name>pStdSPSs</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdPPSCount</name></member>
+            <member len="stdPPSCount">const <type>StdVideoH265PictureParameterSet</type>* <name>pStdPPSs</name><comment>List of Picture Parameters associated with the SPSs above</comment></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH265SessionParametersCreateInfoKHR" structextends="VkVideoSessionParametersCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>maxStdVPSCount</name></member>
+ <member><type>uint32_t</type> <name>maxStdSPSCount</name></member>
+ <member><type>uint32_t</type> <name>maxStdPPSCount</name></member>
+ <member optional="true">const <type>VkVideoDecodeH265SessionParametersAddInfoKHR</type>* <name>pParametersAddInfo</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH265PictureInfoKHR" structextends="VkVideoDecodeInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoDecodeH265PictureInfo</type>* <name>pStdPictureInfo</name></member>
+ <member><type>uint32_t</type> <name>sliceSegmentCount</name></member>
+ <member len="sliceSegmentCount">const <type>uint32_t</type>* <name>pSliceSegmentOffsets</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeH265DpbSlotInfoKHR" structextends="VkVideoReferenceSlotInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoDecodeH265ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
+ </type>
+ <type category="include" name="vk_video/vulkan_video_codec_av1std.h">#include "vk_video/vulkan_video_codec_av1std.h"</type>
+ <type requires="vk_video/vulkan_video_codec_av1std.h" name="StdVideoAV1Profile"/>
+ <type requires="vk_video/vulkan_video_codec_av1std.h" name="StdVideoAV1Level"/>
+ <type requires="vk_video/vulkan_video_codec_av1std.h" name="StdVideoAV1SequenceHeader"/>
+ <type category="include" name="vk_video/vulkan_video_codec_av1std_decode.h">#include "vk_video/vulkan_video_codec_av1std_decode.h"</type>
+ <type requires="vk_video/vulkan_video_codec_av1std_decode.h" name="StdVideoDecodeAV1PictureInfo"/>
+ <type requires="vk_video/vulkan_video_codec_av1std_decode.h" name="StdVideoDecodeAV1ReferenceInfo"/>
+ <type category="struct" name="VkVideoDecodeAV1ProfileInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PROFILE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoAV1Profile</type> <name>stdProfile</name></member>
+ <member><type>VkBool32</type> <name>filmGrainSupport</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeAV1CapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoAV1Level</type> <name>maxLevel</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeAV1SessionParametersCreateInfoKHR" structextends="VkVideoSessionParametersCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoAV1SequenceHeader</type>* <name>pStdSequenceHeader</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeAV1PictureInfoKHR" structextends="VkVideoDecodeInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PICTURE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoDecodeAV1PictureInfo</type>* <name>pStdPictureInfo</name></member>
+ <member><type>int32_t</type> <name>referenceNameSlotIndices</name>[<enum>VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR</enum>]</member>
+ <member><type>uint32_t</type> <name>frameHeaderOffset</name></member>
+ <member><type>uint32_t</type> <name>tileCount</name></member>
+ <member len="tileCount">const <type>uint32_t</type>* <name>pTileOffsets</name></member>
+ <member len="tileCount">const <type>uint32_t</type>* <name>pTileSizes</name></member>
+ </type>
+ <type category="struct" name="VkVideoDecodeAV1DpbSlotInfoKHR" structextends="VkVideoReferenceSlotInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_DPB_SLOT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoDecodeAV1ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
</type>
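AV1 decode differs from H.264/H.265 in that the whole sequence header is supplied when the session parameters object is created rather than added afterwards. A minimal sketch, assuming the StdVideoAV1SequenceHeader was parsed by the application:

    #include <vulkan/vulkan.h>

    static VkResult create_av1_params(VkDevice dev, VkVideoSessionKHR session,
                                      const StdVideoAV1SequenceHeader *seq_hdr,
                                      VkVideoSessionParametersKHR *out)
    {
       VkVideoDecodeAV1SessionParametersCreateInfoKHR av1_ci = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_SESSION_PARAMETERS_CREATE_INFO_KHR,
          .pStdSequenceHeader = seq_hdr,
       };
       VkVideoSessionParametersCreateInfoKHR ci = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR,
          .pNext = &av1_ci,
          .videoSession = session,
       };
       return vkCreateVideoSessionParametersKHR(dev, &ci, NULL, out);
    }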
<type category="struct" name="VkVideoSessionCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>queueFamilyIndex</name></member>
- <member optional="true"><type>VkVideoSessionCreateFlagsKHR</type> <name>flags</name></member>
- <member>const <type>VkVideoProfileKHR</type>* <name>pVideoProfile</name></member>
- <member><type>VkFormat</type> <name>pictureFormat</name></member>
- <member><type>VkExtent2D</type> <name>maxCodedExtent</name></member>
- <member><type>VkFormat</type> <name>referencePicturesFormat</name></member>
- <member><type>uint32_t</type> <name>maxReferencePicturesSlotsCount</name></member>
- <member><type>uint32_t</type> <name>maxReferencePicturesActiveCount</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>queueFamilyIndex</name></member>
+ <member optional="true"><type>VkVideoSessionCreateFlagsKHR</type> <name>flags</name></member>
+ <member>const <type>VkVideoProfileInfoKHR</type>* <name>pVideoProfile</name></member>
+ <member><type>VkFormat</type> <name>pictureFormat</name></member>
+ <member><type>VkExtent2D</type> <name>maxCodedExtent</name></member>
+ <member><type>VkFormat</type> <name>referencePictureFormat</name></member>
+ <member><type>uint32_t</type> <name>maxDpbSlots</name></member>
+ <member><type>uint32_t</type> <name>maxActiveReferencePictures</name></member>
+ <member>const <type>VkExtensionProperties</type>* <name>pStdHeaderVersion</name></member>
</type>
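A sketch of creating a decode session sized from previously queried capabilities; 'profile' is assumed to carry its codec-specific chain and 'caps' to come from vkGetPhysicalDeviceVideoCapabilitiesKHR:

    #include <vulkan/vulkan.h>

    static VkResult create_session(VkDevice dev, uint32_t decode_queue_family,
                                   const VkVideoProfileInfoKHR *profile,
                                   const VkVideoCapabilitiesKHR *caps,
                                   VkFormat picture_format, VkExtent2D coded_extent,
                                   VkVideoSessionKHR *out)
    {
       VkVideoSessionCreateInfoKHR ci = {
          .sType = VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR,
          .queueFamilyIndex = decode_queue_family,
          .pVideoProfile = profile,
          .pictureFormat = picture_format,
          .maxCodedExtent = coded_extent,
          .referencePictureFormat = picture_format,
          .maxDpbSlots = caps->maxDpbSlots,
          .maxActiveReferencePictures = caps->maxActiveReferencePictures,
          .pStdHeaderVersion = &caps->stdHeaderVersion,
       };
       return vkCreateVideoSessionKHR(dev, &ci, NULL, out);
    }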
<type category="struct" name="VkVideoSessionParametersCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoSessionParametersKHR</type> <name>videoSessionParametersTemplate</name></member>
- <member><type>VkVideoSessionKHR</type> <name>videoSession</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoSessionParametersCreateFlagsKHR</type> <name>flags</name></member>
+ <member optional="true"><type>VkVideoSessionParametersKHR</type> <name>videoSessionParametersTemplate</name></member>
+ <member><type>VkVideoSessionKHR</type> <name>videoSession</name></member>
</type>
<type category="struct" name="VkVideoSessionParametersUpdateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>updateSequenceCount</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>updateSequenceCount</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeSessionParametersGetInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_GET_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoSessionParametersKHR</type> <name>videoSessionParameters</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeSessionParametersFeedbackInfoKHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_FEEDBACK_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>hasOverrides</name></member>
</type>
<type category="struct" name="VkVideoBeginCodingInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkVideoBeginCodingFlagsKHR</type> <name>flags</name></member>
- <member><type>VkVideoCodingQualityPresetFlagsKHR</type> <name>codecQualityPreset</name></member>
- <member><type>VkVideoSessionKHR</type> <name>videoSession</name></member>
- <member optional="true"><type>VkVideoSessionParametersKHR</type> <name>videoSessionParameters</name></member>
- <member><type>uint32_t</type> <name>referenceSlotCount</name></member>
- <member len="referenceSlotCount">const <type>VkVideoReferenceSlotKHR</type>* <name>pReferenceSlots</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoBeginCodingFlagsKHR</type> <name>flags</name></member>
+ <member><type>VkVideoSessionKHR</type> <name>videoSession</name></member>
+ <member optional="true"><type>VkVideoSessionParametersKHR</type> <name>videoSessionParameters</name></member>
+ <member optional="true"><type>uint32_t</type> <name>referenceSlotCount</name></member>
+ <member len="referenceSlotCount">const <type>VkVideoReferenceSlotInfoKHR</type>* <name>pReferenceSlots</name></member>
</type>
<type category="struct" name="VkVideoEndCodingInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkVideoEndCodingFlagsKHR</type> <name>flags</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoEndCodingFlagsKHR</type> <name>flags</name></member>
</type>
<type category="struct" name="VkVideoCodingControlInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkVideoCodingControlFlagsKHR</type> <name>flags</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoCodingControlFlagsKHR</type> <name>flags</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeUsageInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_USAGE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoEncodeUsageFlagsKHR</type> <name>videoUsageHints</name></member>
+ <member optional="true"><type>VkVideoEncodeContentFlagsKHR</type> <name>videoContentHints</name></member>
+ <member optional="true"><type>VkVideoEncodeTuningModeKHR</type> <name>tuningMode</name></member>
</type>
<type category="struct" name="VkVideoEncodeInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member>const <type>void</type>* <name>pNext</name></member>
- <member optional="true"><type>VkVideoEncodeFlagsKHR</type> <name>flags</name></member>
- <member><type>uint32_t</type> <name>qualityLevel</name></member>
- <member><type>VkExtent2D</type> <name>codedExtent</name></member>
- <member><type>VkBuffer</type> <name>dstBitstreamBuffer</name></member>
- <member><type>VkDeviceSize</type> <name>dstBitstreamBufferOffset</name></member>
- <member><type>VkDeviceSize</type> <name>dstBitstreamBufferMaxRange</name></member>
- <member><type>VkVideoPictureResourceKHR</type> <name>srcPictureResource</name></member>
- <member>const <type>VkVideoReferenceSlotKHR</type>* <name>pSetupReferenceSlot</name></member>
- <member><type>uint32_t</type> <name>referenceSlotCount</name></member>
- <member len="referenceSlotCount">const <type>VkVideoReferenceSlotKHR</type>* <name>pReferenceSlots</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeRateControlInfoKHR" structextends="VkVideoCodingControlInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoEncodeRateControlFlagsKHR</type> <name>flags</name></member>
- <member><type>VkVideoEncodeRateControlModeFlagBitsKHR</type> <name>rateControlMode</name></member>
- <member><type>uint32_t</type> <name>averageBitrate</name></member>
- <member><type>uint16_t</type> <name>peakToAverageBitrateRatio</name></member>
- <member><type>uint16_t</type> <name>frameRateNumerator</name></member>
- <member><type>uint16_t</type> <name>frameRateDenominator</name></member>
- <member><type>uint32_t</type> <name>virtualBufferSizeInMs</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264CapabilitiesEXT" structextends="VkVideoCapabilitiesKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoEncodeH264CapabilityFlagsEXT</type> <name>flags</name></member>
- <member><type>VkVideoEncodeH264InputModeFlagsEXT</type> <name>inputModeFlags</name></member>
- <member><type>VkVideoEncodeH264OutputModeFlagsEXT</type> <name>outputModeFlags</name></member>
- <member><type>VkExtent2D</type> <name>minPictureSizeInMbs</name></member>
- <member><type>VkExtent2D</type> <name>maxPictureSizeInMbs</name></member>
- <member><type>VkExtent2D</type> <name>inputImageDataAlignment</name></member>
- <member><type>uint8_t</type> <name>maxNumL0ReferenceForP</name></member>
- <member><type>uint8_t</type> <name>maxNumL0ReferenceForB</name></member>
- <member><type>uint8_t</type> <name>maxNumL1Reference</name></member>
- <member><type>uint8_t</type> <name>qualityLevelCount</name></member>
- <member><type>VkExtensionProperties</type> <name>stdExtensionVersion</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264SessionCreateInfoEXT" structextends="VkVideoSessionCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkVideoEncodeH264CreateFlagsEXT</type> <name>flags</name></member>
- <member><type>VkExtent2D</type> <name>maxPictureSizeInMbs</name></member>
- <member>const <type>VkExtensionProperties</type>* <name>pStdExtensionVersion</name></member>
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoEncodeFlagsKHR</type> <name>flags</name></member>
+ <member><type>VkBuffer</type> <name>dstBuffer</name></member>
+ <member><type>VkDeviceSize</type> <name>dstBufferOffset</name></member>
+ <member><type>VkDeviceSize</type> <name>dstBufferRange</name></member>
+ <member><type>VkVideoPictureResourceInfoKHR</type> <name>srcPictureResource</name></member>
+ <member optional="true">const <type>VkVideoReferenceSlotInfoKHR</type>* <name>pSetupReferenceSlot</name></member>
+ <member optional="true"><type>uint32_t</type> <name>referenceSlotCount</name></member>
+ <member len="referenceSlotCount">const <type>VkVideoReferenceSlotInfoKHR</type>* <name>pReferenceSlots</name></member>
+ <member><type>uint32_t</type> <name>precedingExternallyEncodedBytes</name></member>
+ </type>
+ <type category="struct" name="VkQueryPoolVideoEncodeFeedbackCreateInfoKHR" structextends="VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoEncodeFeedbackFlagsKHR</type> <name>encodeFeedbackFlags</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeQualityLevelInfoKHR" structextends="VkVideoCodingControlInfoKHR,VkVideoSessionParametersCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>qualityLevel</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>VkVideoProfileInfoKHR</type>* <name>pVideoProfile</name></member>
+ <member><type>uint32_t</type> <name>qualityLevel</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeQualityLevelPropertiesKHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoEncodeRateControlModeFlagBitsKHR</type> <name>preferredRateControlMode</name></member>
+ <member><type>uint32_t</type> <name>preferredRateControlLayerCount</name></member>
+ </type>
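For orientation only, a hedged C sketch of how the two quality-level structures above pair up; the query command and the caller-provided encode profile are assumptions defined elsewhere in the registry and the application, not in this hunk:

#include <vulkan/vulkan.h>

/* Illustrative sketch, not part of the registry: ask the implementation for
 * its preferred rate-control settings at encode quality level 0. */
static VkVideoEncodeQualityLevelPropertiesKHR
query_quality_level(VkPhysicalDevice physical_device,
                    const VkVideoProfileInfoKHR *encode_profile)
{
   VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR quality_info = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR,
      .pVideoProfile = encode_profile,
      .qualityLevel = 0,
   };
   VkVideoEncodeQualityLevelPropertiesKHR props = {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_PROPERTIES_KHR,
   };
   /* Command declared elsewhere in the registry (VK_KHR_video_encode_queue). */
   vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR(physical_device,
                                                           &quality_info, &props);
   return props;
}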
+ <type category="struct" name="VkVideoEncodeRateControlInfoKHR" structextends="VkVideoCodingControlInfoKHR,VkVideoBeginCodingInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoEncodeRateControlFlagsKHR</type> <name>flags</name></member>
+ <member optional="true"><type>VkVideoEncodeRateControlModeFlagBitsKHR</type> <name>rateControlMode</name></member>
+ <member optional="true"><type>uint32_t</type> <name>layerCount</name></member>
+ <member len="layerCount">const <type>VkVideoEncodeRateControlLayerInfoKHR</type>* <name>pLayers</name></member>
+ <member><type>uint32_t</type> <name>virtualBufferSizeInMs</name></member>
+ <member><type>uint32_t</type> <name>initialVirtualBufferSizeInMs</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeRateControlLayerInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint64_t</type> <name>averageBitrate</name></member>
+ <member><type>uint64_t</type> <name>maxBitrate</name></member>
+ <member><type>uint32_t</type> <name>frameRateNumerator</name></member>
+ <member><type>uint32_t</type> <name>frameRateDenominator</name></member>
+ </type>
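As the structextends attributes above indicate, rate control is programmed by chaining VkVideoEncodeRateControlInfoKHR, with its per-layer array, into the coding-control or begin-coding structure. A hedged C sketch, assuming the VBR mode bit and the encode-rate-control coding-control flag defined elsewhere in the registry:

#include <vulkan/vulkan.h>

/* Illustrative sketch, not part of the registry: single-layer VBR rate
 * control chained into VkVideoCodingControlInfoKHR. */
static void
fill_rate_control(VkVideoEncodeRateControlLayerInfoKHR *layer,
                  VkVideoEncodeRateControlInfoKHR *rate_control,
                  VkVideoCodingControlInfoKHR *control)
{
   *layer = (VkVideoEncodeRateControlLayerInfoKHR) {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR,
      .averageBitrate = 5000000,       /* 5 Mbit/s */
      .maxBitrate = 8000000,
      .frameRateNumerator = 60,
      .frameRateDenominator = 1,
   };
   *rate_control = (VkVideoEncodeRateControlInfoKHR) {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR,
      .rateControlMode = VK_VIDEO_ENCODE_RATE_CONTROL_MODE_VBR_BIT_KHR,
      .layerCount = 1,
      .pLayers = layer,
      .virtualBufferSizeInMs = 1000,
      .initialVirtualBufferSizeInMs = 500,
   };
   *control = (VkVideoCodingControlInfoKHR) {
      .sType = VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR,
      .pNext = rate_control,
      .flags = VK_VIDEO_CODING_CONTROL_ENCODE_RATE_CONTROL_BIT_KHR,
   };
}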
+ <type category="struct" name="VkVideoEncodeCapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>VkVideoEncodeCapabilityFlagsKHR</type> <name>flags</name></member>
+ <member><type>VkVideoEncodeRateControlModeFlagsKHR</type> <name>rateControlModes</name></member>
+ <member><type>uint32_t</type> <name>maxRateControlLayers</name></member>
+ <member><type>uint64_t</type> <name>maxBitrate</name></member>
+ <member><type>uint32_t</type> <name>maxQualityLevels</name></member>
+ <member><type>VkExtent2D</type> <name>encodeInputPictureGranularity</name></member>
+ <member><type>VkVideoEncodeFeedbackFlagsKHR</type> <name>supportedEncodeFeedbackFlags</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264CapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>VkVideoEncodeH264CapabilityFlagsKHR</type> <name>flags</name></member>
+ <member><type>StdVideoH264LevelIdc</type> <name>maxLevelIdc</name></member>
+ <member><type>uint32_t</type> <name>maxSliceCount</name></member>
+ <member><type>uint32_t</type> <name>maxPPictureL0ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>maxBPictureL0ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>maxL1ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>maxTemporalLayerCount</name></member>
+ <member><type>VkBool32</type> <name>expectDyadicTemporalLayerPattern</name></member>
+ <member><type>int32_t</type> <name>minQp</name></member>
+ <member><type>int32_t</type> <name>maxQp</name></member>
+ <member><type>VkBool32</type> <name>prefersGopRemainingFrames</name></member>
+ <member><type>VkBool32</type> <name>requiresGopRemainingFrames</name></member>
+ <member noautovalidity="true"><type>VkVideoEncodeH264StdFlagsKHR</type> <name>stdSyntaxFlags</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264QualityLevelPropertiesKHR" returnedonly="true" structextends="VkVideoEncodeQualityLevelPropertiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_QUALITY_LEVEL_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoEncodeH264RateControlFlagsKHR</type> <name>preferredRateControlFlags</name></member>
+ <member><type>uint32_t</type> <name>preferredGopFrameCount</name></member>
+ <member><type>uint32_t</type> <name>preferredIdrPeriod</name></member>
+ <member><type>uint32_t</type> <name>preferredConsecutiveBFrameCount</name></member>
+ <member><type>uint32_t</type> <name>preferredTemporalLayerCount</name></member>
+ <member><type>VkVideoEncodeH264QpKHR</type> <name>preferredConstantQp</name></member>
+ <member><type>uint32_t</type> <name>preferredMaxL0ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>preferredMaxL1ReferenceCount</name></member>
+ <member><type>VkBool32</type> <name>preferredStdEntropyCodingModeFlag</name></member>
</type>
<type category="include" name="vk_video/vulkan_video_codec_h264std_encode.h">#include "vk_video/vulkan_video_codec_h264std_encode.h"</type>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264SliceHeader"/>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264PictureInfo"/>
+ <type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264ReferenceInfo"/>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264SliceHeaderFlags"/>
- <type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264RefMemMgmtCtrlOperations"/>
+ <type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264ReferenceListsInfo"/>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264PictureInfoFlags"/>
+ <type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264ReferenceInfoFlags"/>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264RefMgmtFlags"/>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264RefListModEntry"/>
<type requires="vk_video/vulkan_video_codec_h264std_encode.h" name="StdVideoEncodeH264RefPicMarkingEntry"/>
- <type category="struct" name="VkVideoEncodeH264SessionParametersAddInfoEXT" structextends="VkVideoSessionParametersUpdateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>spsStdCount</name></member>
- <member len="spsStdCount" optional="true">const <type>StdVideoH264SequenceParameterSet</type>* <name>pSpsStd</name></member>
- <member><type>uint32_t</type> <name>ppsStdCount</name></member>
- <member len="ppsStdCount" optional="true">const <type>StdVideoH264PictureParameterSet</type>* <name>pPpsStd</name><comment>List of Picture Parameters associated with the spsStd, above</comment></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264SessionParametersCreateInfoEXT" structextends="VkVideoSessionParametersCreateInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint32_t</type> <name>maxSpsStdCount</name></member>
- <member><type>uint32_t</type> <name>maxPpsStdCount</name></member>
- <member optional="true">const <type>VkVideoEncodeH264SessionParametersAddInfoEXT</type>* <name>pParametersAddInfo</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264DpbSlotInfoEXT">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>int8_t</type> <name>slotIndex</name></member>
- <member>const <type>StdVideoEncodeH264PictureInfo</type>* <name>pStdPictureInfo</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264VclFrameInfoEXT" structextends="VkVideoEncodeInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint8_t</type> <name>refDefaultFinalList0EntryCount</name></member>
- <member len="refDefaultFinalList0EntryCount">const <type>VkVideoEncodeH264DpbSlotInfoEXT</type>* <name>pRefDefaultFinalList0Entries</name></member>
- <member><type>uint8_t</type> <name>refDefaultFinalList1EntryCount</name></member>
- <member len="refDefaultFinalList1EntryCount">const <type>VkVideoEncodeH264DpbSlotInfoEXT</type>* <name>pRefDefaultFinalList1Entries</name></member>
- <member><type>uint32_t</type> <name>naluSliceEntryCount</name></member>
- <member len="naluSliceEntryCount">const <type>VkVideoEncodeH264NaluSliceEXT</type>* <name>pNaluSliceEntries</name></member>
- <member>const <type>VkVideoEncodeH264DpbSlotInfoEXT</type>* <name>pCurrentPictureInfo</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264EmitPictureParametersEXT" structextends="VkVideoEncodeInfoKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>uint8_t</type> <name>spsId</name></member>
- <member><type>VkBool32</type> <name>emitSpsEnable</name></member>
- <member><type>uint32_t</type> <name>ppsIdEntryCount</name></member>
- <member len="ppsIdEntryCount">const <type>uint8_t</type>* <name>ppsIdEntries</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264ProfileEXT" structextends="VkVideoProfileKHR">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>StdVideoH264ProfileIdc</type> <name>stdProfileIdc</name></member>
- </type>
- <type category="struct" name="VkVideoEncodeH264NaluSliceEXT">
- <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_EXT"><type>VkStructureType</type><name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member>const <type>StdVideoEncodeH264SliceHeader</type>* <name>pSliceHeaderStd</name></member>
- <member><type>uint32_t</type> <name>mbCount</name></member>
- <member><type>uint8_t</type> <name>refFinalList0EntryCount</name></member>
- <member len="refFinalList0EntryCount">const <type>VkVideoEncodeH264DpbSlotInfoEXT</type>* <name>pRefFinalList0Entries</name></member>
- <member><type>uint8_t</type> <name>refFinalList1EntryCount</name></member>
- <member len="refFinalList1EntryCount">const <type>VkVideoEncodeH264DpbSlotInfoEXT</type>* <name>pRefFinalList1Entries</name></member>
- <member><type>uint32_t</type> <name>precedingNaluBytes</name></member>
- <member><type>uint8_t</type> <name>minQp</name></member>
- <member><type>uint8_t</type> <name>maxQp</name></member>
+ <type category="struct" name="VkVideoEncodeH264SessionCreateInfoKHR" structextends="VkVideoSessionCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>useMaxLevelIdc</name></member>
+ <member><type>StdVideoH264LevelIdc</type> <name>maxLevelIdc</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264SessionParametersAddInfoKHR" structextends="VkVideoSessionParametersUpdateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdSPSCount</name></member>
+ <member len="stdSPSCount" optional="true">const <type>StdVideoH264SequenceParameterSet</type>* <name>pStdSPSs</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdPPSCount</name></member>
+ <member len="stdPPSCount" optional="true">const <type>StdVideoH264PictureParameterSet</type>* <name>pStdPPSs</name><comment>List of Picture Parameter Sets associated with the SPSs in pStdSPSs, above</comment></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264SessionParametersCreateInfoKHR" structextends="VkVideoSessionParametersCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>maxStdSPSCount</name></member>
+ <member><type>uint32_t</type> <name>maxStdPPSCount</name></member>
+ <member optional="true">const <type>VkVideoEncodeH264SessionParametersAddInfoKHR</type>* <name>pParametersAddInfo</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264SessionParametersGetInfoKHR" structextends="VkVideoEncodeSessionParametersGetInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_GET_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>writeStdSPS</name></member>
+ <member><type>VkBool32</type> <name>writeStdPPS</name></member>
+ <member><type>uint32_t</type> <name>stdSPSId</name></member>
+ <member><type>uint32_t</type> <name>stdPPSId</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264SessionParametersFeedbackInfoKHR" structextends="VkVideoEncodeSessionParametersFeedbackInfoKHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_FEEDBACK_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>hasStdSPSOverrides</name></member>
+ <member><type>VkBool32</type> <name>hasStdPPSOverrides</name></member>
+ </type>
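The H.264 get/feedback structures above extend the generic session-parameters retrieval pair. A hedged C sketch of sizing the encoder-generated SPS/PPS data; the retrieval command itself is an assumption declared elsewhere in the registry (VK_KHR_video_encode_queue):

#include <stddef.h>
#include <vulkan/vulkan.h>

/* Illustrative sketch, not part of the registry: query the size of the
 * encoded SPS/PPS data for an existing session-parameters object. */
static size_t
query_encoded_parameters_size(VkDevice device, VkVideoSessionParametersKHR params)
{
   VkVideoEncodeH264SessionParametersGetInfoKHR h264_get = {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_GET_INFO_KHR,
      .writeStdSPS = VK_TRUE,
      .writeStdPPS = VK_TRUE,
   };
   VkVideoEncodeSessionParametersGetInfoKHR get_info = {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_GET_INFO_KHR,
      .pNext = &h264_get,
      .videoSessionParameters = params,
   };
   VkVideoEncodeH264SessionParametersFeedbackInfoKHR h264_feedback = {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_FEEDBACK_INFO_KHR,
   };
   VkVideoEncodeSessionParametersFeedbackInfoKHR feedback = {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_FEEDBACK_INFO_KHR,
      .pNext = &h264_feedback,
   };
   size_t size = 0;
   vkGetEncodedVideoSessionParametersKHR(device, &get_info, &feedback, &size, NULL);
   return size;
}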
+ <type category="struct" name="VkVideoEncodeH264DpbSlotInfoKHR" structextends="VkVideoReferenceSlotInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoEncodeH264ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264PictureInfoKHR" structextends="VkVideoEncodeInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PICTURE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>naluSliceEntryCount</name></member>
+ <member len="naluSliceEntryCount">const <type>VkVideoEncodeH264NaluSliceInfoKHR</type>* <name>pNaluSliceEntries</name></member>
+ <member>const <type>StdVideoEncodeH264PictureInfo</type>* <name>pStdPictureInfo</name></member>
+ <member><type>VkBool32</type> <name>generatePrefixNalu</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264ProfileInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoH264ProfileIdc</type> <name>stdProfileIdc</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264NaluSliceInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>int32_t</type> <name>constantQp</name></member>
+ <member>const <type>StdVideoEncodeH264SliceHeader</type>* <name>pStdSliceHeader</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264RateControlInfoKHR" structextends="VkVideoCodingControlInfoKHR,VkVideoBeginCodingInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoEncodeH264RateControlFlagsKHR</type> <name>flags</name></member>
+ <member><type>uint32_t</type> <name>gopFrameCount</name></member>
+ <member><type>uint32_t</type> <name>idrPeriod</name></member>
+ <member><type>uint32_t</type> <name>consecutiveBFrameCount</name></member>
+ <member><type>uint32_t</type> <name>temporalLayerCount</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264QpKHR">
+ <member noautovalidity="true"><type>int32_t</type> <name>qpI</name></member>
+ <member noautovalidity="true"><type>int32_t</type> <name>qpP</name></member>
+ <member noautovalidity="true"><type>int32_t</type> <name>qpB</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264FrameSizeKHR">
+ <member noautovalidity="true"><type>uint32_t</type> <name>frameISize</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>framePSize</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>frameBSize</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264GopRemainingFrameInfoKHR" structextends="VkVideoBeginCodingInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_GOP_REMAINING_FRAME_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>useGopRemainingFrames</name></member>
+ <member><type>uint32_t</type> <name>gopRemainingI</name></member>
+ <member><type>uint32_t</type> <name>gopRemainingP</name></member>
+ <member><type>uint32_t</type> <name>gopRemainingB</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH264RateControlLayerInfoKHR" structextends="VkVideoEncodeRateControlLayerInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>useMinQp</name></member>
+ <member><type>VkVideoEncodeH264QpKHR</type> <name>minQp</name></member>
+ <member><type>VkBool32</type> <name>useMaxQp</name></member>
+ <member><type>VkVideoEncodeH264QpKHR</type> <name>maxQp</name></member>
+ <member><type>VkBool32</type> <name>useMaxFrameSize</name></member>
+ <member><type>VkVideoEncodeH264FrameSizeKHR</type> <name>maxFrameSize</name></member>
+ </type>
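A hedged C sketch of the codec-specific layer: per the structextends relationships above, the H.264 GOP parameters extend the generic rate-control info and the QP clamp extends the generic per-layer info.

#include <vulkan/vulkan.h>

/* Illustrative sketch, not part of the registry: H.264 GOP structure plus a
 * per-layer QP clamp, meant to sit on the pNext chains of the generic
 * VkVideoEncodeRateControlInfoKHR / ...LayerInfoKHR structures shown earlier. */
static void
fill_h264_rate_control(VkVideoEncodeH264RateControlInfoKHR *h264_rc,
                       VkVideoEncodeH264RateControlLayerInfoKHR *h264_layer)
{
   *h264_rc = (VkVideoEncodeH264RateControlInfoKHR) {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_KHR,
      .gopFrameCount = 16,
      .idrPeriod = 16,
      .consecutiveBFrameCount = 2,
      .temporalLayerCount = 1,
   };
   *h264_layer = (VkVideoEncodeH264RateControlLayerInfoKHR) {
      .sType = VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_KHR,
      .useMinQp = VK_TRUE,
      .minQp = { .qpI = 18, .qpP = 20, .qpB = 22 },
      .useMaxQp = VK_TRUE,
      .maxQp = { .qpI = 42, .qpP = 44, .qpB = 46 },
   };
}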
+ <type category="struct" name="VkVideoEncodeH265CapabilitiesKHR" returnedonly="true" structextends="VkVideoCapabilitiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_CAPABILITIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>VkVideoEncodeH265CapabilityFlagsKHR</type> <name>flags</name></member>
+ <member><type>StdVideoH265LevelIdc</type> <name>maxLevelIdc</name></member>
+ <member><type>uint32_t</type> <name>maxSliceSegmentCount</name></member>
+ <member><type>VkExtent2D</type> <name>maxTiles</name></member>
+ <member><type>VkVideoEncodeH265CtbSizeFlagsKHR</type> <name>ctbSizes</name></member>
+ <member><type>VkVideoEncodeH265TransformBlockSizeFlagsKHR</type> <name>transformBlockSizes</name></member>
+ <member><type>uint32_t</type> <name>maxPPictureL0ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>maxBPictureL0ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>maxL1ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>maxSubLayerCount</name></member>
+ <member><type>VkBool32</type> <name>expectDyadicTemporalSubLayerPattern</name></member>
+ <member><type>int32_t</type> <name>minQp</name></member>
+ <member><type>int32_t</type> <name>maxQp</name></member>
+ <member><type>VkBool32</type> <name>prefersGopRemainingFrames</name></member>
+ <member><type>VkBool32</type> <name>requiresGopRemainingFrames</name></member>
+ <member noautovalidity="true"><type>VkVideoEncodeH265StdFlagsKHR</type> <name>stdSyntaxFlags</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265QualityLevelPropertiesKHR" returnedonly="true" structextends="VkVideoEncodeQualityLevelPropertiesKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_QUALITY_LEVEL_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkVideoEncodeH265RateControlFlagsKHR</type> <name>preferredRateControlFlags</name></member>
+ <member><type>uint32_t</type> <name>preferredGopFrameCount</name></member>
+ <member><type>uint32_t</type> <name>preferredIdrPeriod</name></member>
+ <member><type>uint32_t</type> <name>preferredConsecutiveBFrameCount</name></member>
+ <member><type>uint32_t</type> <name>preferredSubLayerCount</name></member>
+ <member><type>VkVideoEncodeH265QpKHR</type> <name>preferredConstantQp</name></member>
+ <member><type>uint32_t</type> <name>preferredMaxL0ReferenceCount</name></member>
+ <member><type>uint32_t</type> <name>preferredMaxL1ReferenceCount</name></member>
+ </type>
+ <type category="include" name="vk_video/vulkan_video_codec_h265std_encode.h">#include "vk_video/vulkan_video_codec_h265std_encode.h"</type>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265PictureInfoFlags"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265PictureInfo"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265SliceSegmentHeader"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265ReferenceInfo"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265ReferenceListsInfo"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265SliceSegmentHeaderFlags"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265ReferenceInfoFlags"/>
+ <type requires="vk_video/vulkan_video_codec_h265std_encode.h" name="StdVideoEncodeH265ReferenceModificationFlags"/>
+ <type category="struct" name="VkVideoEncodeH265SessionCreateInfoKHR" structextends="VkVideoSessionCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>useMaxLevelIdc</name></member>
+ <member><type>StdVideoH265LevelIdc</type> <name>maxLevelIdc</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265SessionParametersAddInfoKHR" structextends="VkVideoSessionParametersUpdateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdVPSCount</name></member>
+ <member len="stdVPSCount" optional="true">const <type>StdVideoH265VideoParameterSet</type>* <name>pStdVPSs</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdSPSCount</name></member>
+ <member len="stdSPSCount" optional="true">const <type>StdVideoH265SequenceParameterSet</type>* <name>pStdSPSs</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stdPPSCount</name></member>
+ <member len="stdPPSCount" optional="true">const <type>StdVideoH265PictureParameterSet</type>* <name>pStdPPSs</name><comment>List of Picture Parameter Sets associated with the SPSs in pStdSPSs, above</comment></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265SessionParametersCreateInfoKHR" structextends="VkVideoSessionParametersCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>maxStdVPSCount</name></member>
+ <member><type>uint32_t</type> <name>maxStdSPSCount</name></member>
+ <member><type>uint32_t</type> <name>maxStdPPSCount</name></member>
+ <member optional="true">const <type>VkVideoEncodeH265SessionParametersAddInfoKHR</type>* <name>pParametersAddInfo</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265SessionParametersGetInfoKHR" structextends="VkVideoEncodeSessionParametersGetInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_GET_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>writeStdVPS</name></member>
+ <member><type>VkBool32</type> <name>writeStdSPS</name></member>
+ <member><type>VkBool32</type> <name>writeStdPPS</name></member>
+ <member><type>uint32_t</type> <name>stdVPSId</name></member>
+ <member><type>uint32_t</type> <name>stdSPSId</name></member>
+ <member><type>uint32_t</type> <name>stdPPSId</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265SessionParametersFeedbackInfoKHR" structextends="VkVideoEncodeSessionParametersFeedbackInfoKHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_FEEDBACK_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>hasStdVPSOverrides</name></member>
+ <member><type>VkBool32</type> <name>hasStdSPSOverrides</name></member>
+ <member><type>VkBool32</type> <name>hasStdPPSOverrides</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265PictureInfoKHR" structextends="VkVideoEncodeInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PICTURE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>naluSliceSegmentEntryCount</name></member>
+ <member len="naluSliceSegmentEntryCount">const <type>VkVideoEncodeH265NaluSliceSegmentInfoKHR</type>* <name>pNaluSliceSegmentEntries</name></member>
+ <member>const <type>StdVideoEncodeH265PictureInfo</type>* <name>pStdPictureInfo</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265NaluSliceSegmentInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>int32_t</type> <name>constantQp</name></member>
+ <member>const <type>StdVideoEncodeH265SliceSegmentHeader</type>* <name>pStdSliceSegmentHeader</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265RateControlInfoKHR" structextends="VkVideoCodingControlInfoKHR,VkVideoBeginCodingInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkVideoEncodeH265RateControlFlagsKHR</type> <name>flags</name></member>
+ <member><type>uint32_t</type> <name>gopFrameCount</name></member>
+ <member><type>uint32_t</type> <name>idrPeriod</name></member>
+ <member><type>uint32_t</type> <name>consecutiveBFrameCount</name></member>
+ <member><type>uint32_t</type> <name>subLayerCount</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265QpKHR">
+ <member noautovalidity="true"><type>int32_t</type> <name>qpI</name></member>
+ <member noautovalidity="true"><type>int32_t</type> <name>qpP</name></member>
+ <member noautovalidity="true"><type>int32_t</type> <name>qpB</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265FrameSizeKHR">
+ <member noautovalidity="true"><type>uint32_t</type> <name>frameISize</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>framePSize</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>frameBSize</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265GopRemainingFrameInfoKHR" structextends="VkVideoBeginCodingInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_GOP_REMAINING_FRAME_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>useGopRemainingFrames</name></member>
+ <member><type>uint32_t</type> <name>gopRemainingI</name></member>
+ <member><type>uint32_t</type> <name>gopRemainingP</name></member>
+ <member><type>uint32_t</type> <name>gopRemainingB</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265RateControlLayerInfoKHR" structextends="VkVideoEncodeRateControlLayerInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>useMinQp</name></member>
+ <member><type>VkVideoEncodeH265QpKHR</type> <name>minQp</name></member>
+ <member><type>VkBool32</type> <name>useMaxQp</name></member>
+ <member><type>VkVideoEncodeH265QpKHR</type> <name>maxQp</name></member>
+ <member><type>VkBool32</type> <name>useMaxFrameSize</name></member>
+ <member><type>VkVideoEncodeH265FrameSizeKHR</type> <name>maxFrameSize</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265ProfileInfoKHR" structextends="VkVideoProfileInfoKHR,VkQueryPoolCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PROFILE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>StdVideoH265ProfileIdc</type> <name>stdProfileIdc</name></member>
+ </type>
+ <type category="struct" name="VkVideoEncodeH265DpbSlotInfoKHR" structextends="VkVideoReferenceSlotInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_DPB_SLOT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>StdVideoEncodeH265ReferenceInfo</type>* <name>pStdReferenceInfo</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceInheritedViewportScissorFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>inheritedViewportScissor2D</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>inheritedViewportScissor2D</name></member>
</type>
<type category="struct" name="VkCommandBufferInheritanceViewportScissorInfoNV" structextends="VkCommandBufferInheritanceInfo">
- <member values="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>viewportScissor2D</name></member>
- <member><type>uint32_t</type> <name>viewportDepthCount</name></member>
- <member noautovalidity="true">const <type>VkViewport</type>* <name>pViewportDepths</name></member>
+ <member values="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>viewportScissor2D</name></member>
+ <member><type>uint32_t</type> <name>viewportDepthCount</name></member>
+ <member noautovalidity="true">const <type>VkViewport</type>* <name>pViewportDepths</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
- <member optional="true"><type>void</type>* <name>pNext</name></member>
- <member><type>VkBool32</type> <name>ycbcr2plane444Formats</name></member>
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>ycbcr2plane444Formats</name></member>
</type>
<type category="struct" name="VkPhysicalDeviceProvokingVertexFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
@@ -5961,8 +7348,8 @@ typedef void <name>CAMetalLayer</name>;
<type category="struct" name="VkCuModuleCreateInfoNVX">
<member values="VK_STRUCTURE_TYPE_CU_MODULE_CREATE_INFO_NVX"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true">const <type>void</type>* <name>pNext</name></member>
- <member><type>size_t</type> <name>dataSize</name></member>
- <member>const <type>void</type>* <name>pData</name></member>
+ <member optional="true"><type>size_t</type> <name>dataSize</name></member>
+ <member len="dataSize">const <type>void</type>* <name>pData</name></member>
</type>
<type category="struct" name="VkCuFunctionCreateInfoNVX">
<member values="VK_STRUCTURE_TYPE_CU_FUNCTION_CREATE_INFO_NVX"><type>VkStructureType</type> <name>sType</name></member>
@@ -5981,18 +7368,136 @@ typedef void <name>CAMetalLayer</name>;
<member><type>uint32_t</type> <name>blockDimY</name></member>
<member><type>uint32_t</type> <name>blockDimZ</name></member>
<member><type>uint32_t</type> <name>sharedMemBytes</name></member>
- <member><type>size_t</type> <name>paramCount</name></member>
+ <member optional="true"><type>size_t</type> <name>paramCount</name></member>
<member len="paramCount">const <type>void</type>* const * <name>pParams</name></member>
- <member><type>size_t</type> <name>extraCount</name></member>
+ <member optional="true"><type>size_t</type> <name>extraCount</name></member>
<member len="extraCount">const <type>void</type>* const * <name>pExtras</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceDescriptorBufferFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>descriptorBuffer</name></member>
+ <member><type>VkBool32</type> <name>descriptorBufferCaptureReplay</name></member>
+ <member><type>VkBool32</type> <name>descriptorBufferImageLayoutIgnored</name></member>
+ <member><type>VkBool32</type> <name>descriptorBufferPushDescriptors</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDescriptorBufferPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>combinedImageSamplerDescriptorSingleArray</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>bufferlessPushDescriptors</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>allowSamplerImageViewPostSubmitCreation</name></member>
+ <member limittype="noauto"><type>VkDeviceSize</type> <name>descriptorBufferOffsetAlignment</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDescriptorBufferBindings</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxResourceDescriptorBufferBindings</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxSamplerDescriptorBufferBindings</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxEmbeddedImmutableSamplerBindings</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxEmbeddedImmutableSamplers</name></member>
+ <member limittype="noauto"><type>size_t</type> <name>bufferCaptureReplayDescriptorDataSize</name></member>
+ <member limittype="noauto"><type>size_t</type> <name>imageCaptureReplayDescriptorDataSize</name></member>
+ <member limittype="noauto"><type>size_t</type> <name>imageViewCaptureReplayDescriptorDataSize</name></member>
+ <member limittype="noauto"><type>size_t</type> <name>samplerCaptureReplayDescriptorDataSize</name></member>
+ <member limittype="noauto"><type>size_t</type> <name>accelerationStructureCaptureReplayDescriptorDataSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>samplerDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>combinedImageSamplerDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>sampledImageDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>storageImageDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>uniformTexelBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>robustUniformTexelBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>storageTexelBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>robustStorageTexelBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>uniformBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>robustUniformBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>storageBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>robustStorageBufferDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>inputAttachmentDescriptorSize</name></member>
+ <member limittype="max"><type>size_t</type> <name>accelerationStructureDescriptorSize</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>maxSamplerDescriptorBufferRange</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>maxResourceDescriptorBufferRange</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>samplerDescriptorBufferAddressSpaceSize</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>resourceDescriptorBufferAddressSpaceSize</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>descriptorBufferAddressSpaceSize</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDescriptorBufferDensityMapPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_DENSITY_MAP_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>size_t</type> <name>combinedImageSamplerDensityMapDescriptorSize</name></member>
+ </type>
+ <type category="struct" name="VkDescriptorAddressInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceAddress</type> <name>address</name></member>
+ <member><type>VkDeviceSize</type> <name>range</name></member>
+ <member><type>VkFormat</type> <name>format</name></member>
+ </type>
+ <type category="struct" name="VkDescriptorBufferBindingInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceAddress</type> <name>address</name></member>
+ <member optional="true" noautovalidity="true"><type>VkBufferUsageFlags</type> <name>usage</name></member>
+ </type>
+ <type category="struct" name="VkDescriptorBufferBindingPushDescriptorBufferHandleEXT" structextends="VkDescriptorBufferBindingInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_PUSH_DESCRIPTOR_BUFFER_HANDLE_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBuffer</type> <name>buffer</name></member>
+ </type>
+ <type category="union" name="VkDescriptorDataEXT">
+ <member selection="VK_DESCRIPTOR_TYPE_SAMPLER">const <type>VkSampler</type>* <name>pSampler</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER">const <type>VkDescriptorImageInfo</type>* <name>pCombinedImageSampler</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT">const <type>VkDescriptorImageInfo</type>* <name>pInputAttachmentImage</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE" optional="true">const <type>VkDescriptorImageInfo</type>* <name>pSampledImage</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_STORAGE_IMAGE" optional="true">const <type>VkDescriptorImageInfo</type>* <name>pStorageImage</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER" optional="true">const <type>VkDescriptorAddressInfoEXT</type>* <name>pUniformTexelBuffer</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER" optional="true">const <type>VkDescriptorAddressInfoEXT</type>* <name>pStorageTexelBuffer</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER" optional="true">const <type>VkDescriptorAddressInfoEXT</type>* <name>pUniformBuffer</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_STORAGE_BUFFER" optional="true">const <type>VkDescriptorAddressInfoEXT</type>* <name>pStorageBuffer</name></member>
+ <member selection="VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV"><type>VkDeviceAddress</type> <name>accelerationStructure</name></member>
+ </type>
+ <type category="struct" name="VkDescriptorGetInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDescriptorType</type> <name>type</name></member>
+ <member selector="type" noautovalidity="true"><type>VkDescriptorDataEXT</type> <name>data</name></member>
+ </type>
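A hedged C sketch of the descriptor-buffer write path described by the structures above; the vkGetDescriptorEXT command, the buffer device address, the per-type descriptor size, and the destination pointer are assumptions supplied by VK_EXT_descriptor_buffer and the application rather than by this hunk:

#include <vulkan/vulkan.h>

/* Illustrative sketch, not part of the registry: write one uniform-buffer
 * descriptor into host-visible descriptor-buffer memory. */
static void
write_ubo_descriptor(VkDevice device, VkDeviceAddress ubo_address,
                     size_t uniform_buffer_descriptor_size, /* taken from
                        VkPhysicalDeviceDescriptorBufferPropertiesEXT */
                     void *dst)
{
   VkDescriptorAddressInfoEXT addr_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT,
      .address = ubo_address,
      .range = 256,
      .format = VK_FORMAT_UNDEFINED,
   };
   VkDescriptorGetInfoEXT get_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT,
      .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
      .data.pUniformBuffer = &addr_info,
   };
   vkGetDescriptorEXT(device, &get_info, uniform_buffer_descriptor_size, dst);
}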
+ <type category="struct" name="VkBufferCaptureDescriptorDataInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBuffer</type> <name>buffer</name></member>
+ </type>
+ <type category="struct" name="VkImageCaptureDescriptorDataInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>image</name></member>
+ </type>
+ <type category="struct" name="VkImageViewCaptureDescriptorDataInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_VIEW_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageView</type> <name>imageView</name></member>
+ </type>
+ <type category="struct" name="VkSamplerCaptureDescriptorDataInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_SAMPLER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkSampler</type> <name>sampler</name></member>
+ </type>
+ <type category="struct" name="VkAccelerationStructureCaptureDescriptorDataInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkAccelerationStructureKHR</type> <name>accelerationStructure</name></member>
+ <member optional="true"><type>VkAccelerationStructureNV</type> <name>accelerationStructureNV</name></member>
+ </type>
+ <type category="struct" name="VkOpaqueCaptureDescriptorDataCreateInfoEXT" structextends="VkBufferCreateInfo,VkImageCreateInfo,VkImageViewCreateInfo,VkSamplerCreateInfo,VkAccelerationStructureCreateInfoKHR,VkAccelerationStructureCreateInfoNV">
+ <member values="VK_STRUCTURE_TYPE_OPAQUE_CAPTURE_DESCRIPTOR_DATA_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>void</type>* <name>opaqueCaptureDescriptorData</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderIntegerDotProductFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member><type>VkBool32</type> <name>shaderIntegerDotProduct</name></member>
</type>
- <type category="struct" name="VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR" structextends="VkPhysicalDeviceProperties2">
- <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <type category="struct" name="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR" alias="VkPhysicalDeviceShaderIntegerDotProductFeatures"/>
+ <type category="struct" name="VkPhysicalDeviceShaderIntegerDotProductProperties" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct8BitUnsignedAccelerated</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>integerDotProduct8BitSignedAccelerated</name></member>
@@ -6025,6 +7530,7 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating64BitSignedAccelerated</name></member>
<member limittype="bitmask"><type>VkBool32</type> <name>integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR" alias="VkPhysicalDeviceShaderIntegerDotProductProperties"/>
<type category="struct" name="VkPhysicalDeviceDrmPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true"><type>void</type>* <name>pNext</name></member>
@@ -6035,6 +7541,16 @@ typedef void <name>CAMetalLayer</name>;
<member limittype="noauto"><type>int64_t</type> <name>renderMajor</name></member>
<member limittype="noauto"><type>int64_t</type> <name>renderMinor</name></member>
</type>
+ <type category="struct" name="VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>fragmentShaderBarycentric</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>triStripVertexOrderIndependentOfProvokingVertex</name></member>
+ </type>
<type category="struct" name="VkPhysicalDeviceRayTracingMotionBlurFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
<member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
<member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
@@ -6108,7 +7624,1488 @@ typedef void <name>CAMetalLayer</name>;
<member><type>VkDeviceMemory</type> <name>memory</name></member>
<member><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></member>
</type>
+ <type category="struct" name="VkImportMemoryBufferCollectionFUCHSIA" structextends="VkMemoryAllocateInfo">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></member>
+ <member><type>uint32_t</type> <name>index</name></member>
+ </type>
+ <type category="struct" name="VkBufferCollectionImageCreateInfoFUCHSIA" structextends="VkImageCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></member>
+ <member><type>uint32_t</type> <name>index</name></member>
+ </type>
+ <type category="struct" name="VkBufferCollectionBufferCreateInfoFUCHSIA" structextends="VkBufferCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></member>
+ <member><type>uint32_t</type> <name>index</name></member>
+ </type>
+ <type category="struct" name="VkBufferCollectionCreateInfoFUCHSIA">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>zx_handle_t</type> <name>collectionToken</name></member>
+ </type>
+ <type category="struct" name="VkBufferCollectionPropertiesFUCHSIA" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>memoryTypeBits</name></member>
+ <member><type>uint32_t</type> <name>bufferCount</name></member>
+ <member><type>uint32_t</type> <name>createInfoIndex</name></member>
+ <member><type>uint64_t</type> <name>sysmemPixelFormat</name></member>
+ <member><type>VkFormatFeatureFlags</type> <name>formatFeatures</name></member>
+ <member><type>VkSysmemColorSpaceFUCHSIA</type> <name>sysmemColorSpaceIndex</name></member>
+ <member><type>VkComponentMapping</type> <name>samplerYcbcrConversionComponents</name></member>
+ <member><type>VkSamplerYcbcrModelConversion</type> <name>suggestedYcbcrModel</name></member>
+ <member><type>VkSamplerYcbcrRange</type> <name>suggestedYcbcrRange</name></member>
+ <member><type>VkChromaLocation</type> <name>suggestedXChromaOffset</name></member>
+ <member><type>VkChromaLocation</type> <name>suggestedYChromaOffset</name></member>
+ </type>
+ <type category="struct" name="VkBufferConstraintsInfoFUCHSIA">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBufferCreateInfo</type> <name>createInfo</name></member>
+ <member optional="true"><type>VkFormatFeatureFlags</type> <name>requiredFormatFeatures</name></member>
+ <member><type>VkBufferCollectionConstraintsInfoFUCHSIA</type> <name>bufferCollectionConstraints</name></member>
+ </type>
+ <type category="struct" name="VkSysmemColorSpaceFUCHSIA">
+ <member values="VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>colorSpace</name></member>
+ </type>
+ <type category="struct" name="VkImageFormatConstraintsInfoFUCHSIA">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageCreateInfo</type> <name>imageCreateInfo</name></member>
+ <member><type>VkFormatFeatureFlags</type> <name>requiredFormatFeatures</name></member>
+ <member optional="true"><type>VkImageFormatConstraintsFlagsFUCHSIA</type> <name>flags</name></member>
+ <member optional="true"><type>uint64_t</type> <name>sysmemPixelFormat</name></member>
+ <member><type>uint32_t</type> <name>colorSpaceCount</name></member>
+ <member len="colorSpaceCount">const <type>VkSysmemColorSpaceFUCHSIA</type>* <name>pColorSpaces</name></member>
+ </type>
+ <type category="struct" name="VkImageConstraintsInfoFUCHSIA">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>formatConstraintsCount</name></member>
+ <member len="formatConstraintsCount">const <type>VkImageFormatConstraintsInfoFUCHSIA</type>* <name>pFormatConstraints</name></member>
+ <member><type>VkBufferCollectionConstraintsInfoFUCHSIA</type> <name>bufferCollectionConstraints</name></member>
+ <member optional="true"><type>VkImageConstraintsInfoFlagsFUCHSIA</type> <name>flags</name></member>
+ </type>
+ <type category="struct" name="VkBufferCollectionConstraintsInfoFUCHSIA">
+ <member values="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>minBufferCount</name></member>
+ <member><type>uint32_t</type> <name>maxBufferCount</name></member>
+ <member><type>uint32_t</type> <name>minBufferCountForCamping</name></member>
+ <member><type>uint32_t</type> <name>minBufferCountForDedicatedSlack</name></member>
+ <member><type>uint32_t</type> <name>minBufferCountForSharedSlack</name></member>
+ </type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_CUDA_MODULE_NV"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkCudaModuleNV</name>)</type>
+ <type category="handle" parent="VkDevice" objtypeenum="VK_OBJECT_TYPE_CUDA_FUNCTION_NV"><type>VK_DEFINE_NON_DISPATCHABLE_HANDLE</type>(<name>VkCudaFunctionNV</name>)</type>
+ <type category="struct" name="VkCudaModuleCreateInfoNV">
+ <member values="VK_STRUCTURE_TYPE_CUDA_MODULE_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>size_t</type> <name>dataSize</name></member>
+ <member len="dataSize">const <type>void</type>* <name>pData</name></member>
+ </type>
+ <type category="struct" name="VkCudaFunctionCreateInfoNV">
+ <member values="VK_STRUCTURE_TYPE_CUDA_FUNCTION_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkCudaModuleNV</type> <name>module</name></member>
+ <member len="null-terminated">const <type>char</type>* <name>pName</name></member>
+ </type>
+ <type category="struct" name="VkCudaLaunchInfoNV">
+ <member values="VK_STRUCTURE_TYPE_CUDA_LAUNCH_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkCudaFunctionNV</type> <name>function</name></member>
+ <member><type>uint32_t</type> <name>gridDimX</name></member>
+ <member><type>uint32_t</type> <name>gridDimY</name></member>
+ <member><type>uint32_t</type> <name>gridDimZ</name></member>
+ <member><type>uint32_t</type> <name>blockDimX</name></member>
+ <member><type>uint32_t</type> <name>blockDimY</name></member>
+ <member><type>uint32_t</type> <name>blockDimZ</name></member>
+ <member><type>uint32_t</type> <name>sharedMemBytes</name></member>
+ <member optional="true"><type>size_t</type> <name>paramCount</name></member>
+ <member noautovalidity="true" len="paramCount">const <type>void</type>* const * <name>pParams</name></member>
+ <member optional="true"><type>size_t</type> <name>extraCount</name></member>
+ <member noautovalidity="true" len="extraCount">const <type>void</type>* const * <name>pExtras</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>formatRgba10x6WithoutYCbCrSampler</name></member>
+ </type>
+ <type category="struct" name="VkFormatProperties3" returnedonly="true" structextends="VkFormatProperties2">
+ <member values="VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true" limittype="bitmask"><type>VkFormatFeatureFlags2</type> <name>linearTilingFeatures</name></member>
+ <member optional="true" limittype="bitmask"><type>VkFormatFeatureFlags2</type> <name>optimalTilingFeatures</name></member>
+ <member optional="true" limittype="bitmask"><type>VkFormatFeatureFlags2</type> <name>bufferFeatures</name></member>
+ </type>
+ <type category="struct" name="VkFormatProperties3KHR" alias="VkFormatProperties3"/>
+ <type category="struct" name="VkDrmFormatModifierPropertiesList2EXT" returnedonly="true" structextends="VkFormatProperties2">
+ <member values="VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>drmFormatModifierCount</name></member>
+ <member optional="true" len="drmFormatModifierCount"><type>VkDrmFormatModifierProperties2EXT</type>* <name>pDrmFormatModifierProperties</name></member>
+ </type>
+ <type category="struct" name="VkDrmFormatModifierProperties2EXT" returnedonly="true">
+ <member><type>uint64_t</type> <name>drmFormatModifier</name></member>
+ <member><type>uint32_t</type> <name>drmFormatModifierPlaneCount</name></member>
+ <member><type>VkFormatFeatureFlags2</type> <name>drmFormatModifierTilingFeatures</name></member>
+ </type>
+ <type category="struct" name="VkAndroidHardwareBufferFormatProperties2ANDROID" structextends="VkAndroidHardwareBufferPropertiesANDROID" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_2_ANDROID"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkFormat</type> <name>format</name></member>
+ <member><type>uint64_t</type> <name>externalFormat</name></member>
+ <member><type>VkFormatFeatureFlags2</type> <name>formatFeatures</name></member>
+ <member><type>VkComponentMapping</type> <name>samplerYcbcrConversionComponents</name></member>
+ <member><type>VkSamplerYcbcrModelConversion</type> <name>suggestedYcbcrModel</name></member>
+ <member><type>VkSamplerYcbcrRange</type> <name>suggestedYcbcrRange</name></member>
+ <member><type>VkChromaLocation</type> <name>suggestedXChromaOffset</name></member>
+ <member><type>VkChromaLocation</type> <name>suggestedYChromaOffset</name></member>
+ </type>
+ <type category="struct" name="VkPipelineRenderingCreateInfo" structextends="VkGraphicsPipelineCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>viewMask</name></member>
+ <member optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member noautovalidity="true" len="colorAttachmentCount">const <type>VkFormat</type>* <name>pColorAttachmentFormats</name></member>
+ <member noautovalidity="true"><type>VkFormat</type> <name>depthAttachmentFormat</name></member>
+ <member noautovalidity="true"><type>VkFormat</type> <name>stencilAttachmentFormat</name></member>
+ </type>
+ <type category="struct" name="VkPipelineRenderingCreateInfoKHR" alias="VkPipelineRenderingCreateInfo"/>
+ <type category="struct" name="VkRenderingInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkRenderingFlags</type> <name>flags</name></member>
+ <member><type>VkRect2D</type> <name>renderArea</name></member>
+ <member><type>uint32_t</type> <name>layerCount</name></member>
+ <member><type>uint32_t</type> <name>viewMask</name></member>
+ <member optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member len="colorAttachmentCount">const <type>VkRenderingAttachmentInfo</type>* <name>pColorAttachments</name></member>
+ <member optional="true">const <type>VkRenderingAttachmentInfo</type>* <name>pDepthAttachment</name></member>
+ <member optional="true">const <type>VkRenderingAttachmentInfo</type>* <name>pStencilAttachment</name></member>
+ </type>
+ <type category="struct" name="VkRenderingInfoKHR" alias="VkRenderingInfo"/>
+ <type category="struct" name="VkRenderingAttachmentInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkImageView</type> <name>imageView</name></member>
+ <member><type>VkImageLayout</type> <name>imageLayout</name></member>
+ <member optional="true"><type>VkResolveModeFlagBits</type> <name>resolveMode</name></member>
+ <member optional="true"><type>VkImageView</type> <name>resolveImageView</name></member>
+ <member><type>VkImageLayout</type> <name>resolveImageLayout</name></member>
+ <member><type>VkAttachmentLoadOp</type> <name>loadOp</name></member>
+ <member><type>VkAttachmentStoreOp</type> <name>storeOp</name></member>
+ <member><type>VkClearValue</type> <name>clearValue</name></member>
+ </type>
+ <type category="struct" name="VkRenderingAttachmentInfoKHR" alias="VkRenderingAttachmentInfo"/>
+ <type category="struct" name="VkRenderingFragmentShadingRateAttachmentInfoKHR" structextends="VkRenderingInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkImageView</type> <name>imageView</name></member>
+ <member><type>VkImageLayout</type> <name>imageLayout</name></member>
+ <member><type>VkExtent2D</type> <name>shadingRateAttachmentTexelSize</name></member>
+ </type>
+ <type category="struct" name="VkRenderingFragmentDensityMapAttachmentInfoEXT" structextends="VkRenderingInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageView</type> <name>imageView</name></member>
+ <member><type>VkImageLayout</type> <name>imageLayout</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDynamicRenderingFeatures" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>dynamicRendering</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDynamicRenderingFeaturesKHR" alias="VkPhysicalDeviceDynamicRenderingFeatures"/>
+ <type category="struct" name="VkCommandBufferInheritanceRenderingInfo" structextends="VkCommandBufferInheritanceInfo">
+ <member values="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkRenderingFlags</type> <name>flags</name></member>
+ <member><type>uint32_t</type> <name>viewMask</name></member>
+ <member api="vulkan" optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member api="vulkansc"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member len="colorAttachmentCount">const <type>VkFormat</type>* <name>pColorAttachmentFormats</name></member>
+ <member><type>VkFormat</type> <name>depthAttachmentFormat</name></member>
+ <member><type>VkFormat</type> <name>stencilAttachmentFormat</name></member>
+ <member optional="true"><type>VkSampleCountFlagBits</type> <name>rasterizationSamples</name></member>
+ </type>
+ <type category="struct" name="VkCommandBufferInheritanceRenderingInfoKHR" alias="VkCommandBufferInheritanceRenderingInfo"/>
+ <type category="struct" name="VkAttachmentSampleCountInfoAMD" structextends="VkCommandBufferInheritanceInfo,VkGraphicsPipelineCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member noautovalidity="true" len="colorAttachmentCount">const <type>VkSampleCountFlagBits</type>* <name>pColorAttachmentSamples</name></member>
+ <member noautovalidity="true" optional="true"><type>VkSampleCountFlagBits</type> <name>depthStencilAttachmentSamples</name></member>
+ </type>
+ <type category="struct" name="VkAttachmentSampleCountInfoNV" alias="VkAttachmentSampleCountInfoAMD"/>
+ <type category="struct" name="VkMultiviewPerViewAttributesInfoNVX" structextends="VkCommandBufferInheritanceInfo,VkGraphicsPipelineCreateInfo,VkRenderingInfo">
+ <member values="VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_ATTRIBUTES_INFO_NVX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>perViewAttributes</name></member>
+ <member><type>VkBool32</type> <name>perViewAttributesPositionXOnly</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageViewMinLodFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>minLod</name></member>
+ </type>
+ <type category="struct" name="VkImageViewMinLodCreateInfoEXT" structextends="VkImageViewCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>float</type> <name>minLod</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>rasterizationOrderColorAttachmentAccess</name></member>
+ <member><type>VkBool32</type> <name>rasterizationOrderDepthAttachmentAccess</name></member>
+ <member><type>VkBool32</type> <name>rasterizationOrderStencilAttachmentAccess</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM" alias="VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT"/>
+ <type category="struct" name="VkPhysicalDeviceLinearColorAttachmentFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>linearColorAttachment</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>graphicsPipelineLibrary</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>graphicsPipelineLibraryFastLinking</name></member>
+ <member limittype="bitmask"><type>VkBool32</type> <name>graphicsPipelineLibraryIndependentInterpolationDecoration</name></member>
+ </type>
+ <type category="struct" name="VkGraphicsPipelineLibraryCreateInfoEXT" structextends="VkGraphicsPipelineCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkGraphicsPipelineLibraryFlagsEXT</type> <name>flags</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>descriptorSetHostMapping</name></member>
+ </type>
+ <type category="struct" name="VkDescriptorSetBindingReferenceVALVE">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDescriptorSetLayout</type> <name>descriptorSetLayout</name></member>
+ <member><type>uint32_t</type> <name>binding</name></member>
+ </type>
+ <type category="struct" name="VkDescriptorSetLayoutHostMappingInfoVALVE">
+ <member values="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>size_t</type> <name>descriptorOffset</name></member>
+ <member><type>uint32_t</type> <name>descriptorSize</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceNestedCommandBufferFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>nestedCommandBuffer</name></member>
+ <member><type>VkBool32</type> <name>nestedCommandBufferRendering</name></member>
+ <member><type>VkBool32</type> <name>nestedCommandBufferSimultaneousUse</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceNestedCommandBufferPropertiesEXT" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxCommandBufferNestingLevel</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderModuleIdentifier</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="noauto"><type>uint8_t</type> <name>shaderModuleIdentifierAlgorithmUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ </type>
+ <type category="struct" name="VkPipelineShaderStageModuleIdentifierCreateInfoEXT" structextends="VkPipelineShaderStageCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>identifierSize</name></member>
+ <member len="identifierSize">const <type>uint8_t</type>* <name>pIdentifier</name></member>
+ </type>
+ <type category="struct" name="VkShaderModuleIdentifierEXT" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>uint32_t</type> <name>identifierSize</name></member>
+ <member len="identifierSize"><type>uint8_t</type> <name>identifier</name>[<enum>VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT</enum>]</member>
+ </type>
+ <type category="struct" name="VkImageCompressionControlEXT" structextends="VkImageCreateInfo,VkSwapchainCreateInfoKHR,VkPhysicalDeviceImageFormatInfo2">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>VkImageCompressionFlagsEXT</type> <name>flags</name></member>
+ <member optional="true"><type>uint32_t</type> <name>compressionControlPlaneCount</name></member>
+ <member noautovalidity="true" len="compressionControlPlaneCount"><type>VkImageCompressionFixedRateFlagsEXT</type>* <name>pFixedRateFlags</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageCompressionControlFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>imageCompressionControl</name></member>
+ </type>
+ <type category="struct" name="VkImageCompressionPropertiesEXT" structextends="VkImageFormatProperties2,VkSurfaceFormat2KHR,VkSubresourceLayout2KHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageCompressionFlagsEXT</type> <name>imageCompressionFlags</name></member>
+ <member><type>VkImageCompressionFixedRateFlagsEXT</type> <name>imageCompressionFixedRateFlags</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>imageCompressionControlSwapchain</name></member>
+ </type>
+ <type category="struct" name="VkImageSubresource2KHR">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageSubresource</type> <name>imageSubresource</name></member>
+ </type>
+ <type category="struct" name="VkImageSubresource2EXT" alias="VkImageSubresource2KHR"/>
+ <type category="struct" name="VkSubresourceLayout2KHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkSubresourceLayout</type> <name>subresourceLayout</name></member>
+ </type>
+ <type category="struct" name="VkSubresourceLayout2EXT" alias="VkSubresourceLayout2KHR"/>
+ <type category="struct" name="VkRenderPassCreationControlEXT" structextends="VkRenderPassCreateInfo2,VkSubpassDescription2">
+ <member values="VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>disallowMerging</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassCreationFeedbackInfoEXT" returnedonly="true">
+ <member><type>uint32_t</type> <name>postMergeSubpassCount</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassCreationFeedbackCreateInfoEXT" structextends="VkRenderPassCreateInfo2">
+ <member values="VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkRenderPassCreationFeedbackInfoEXT</type>* <name>pRenderPassFeedback</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassSubpassFeedbackInfoEXT" returnedonly="true">
+ <member><type>VkSubpassMergeStatusEXT</type> <name>subpassMergeStatus</name></member>
+ <member len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]</member>
+ <member><type>uint32_t</type> <name>postMergeIndex</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassSubpassFeedbackCreateInfoEXT" structextends="VkSubpassDescription2">
+ <member values="VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkRenderPassSubpassFeedbackInfoEXT</type>* <name>pSubpassFeedback</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>subpassMergeFeedback</name></member>
+ </type>
+ <type category="struct" name="VkMicromapBuildInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_MICROMAP_BUILD_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkMicromapTypeEXT</type> <name>type</name></member>
+ <member optional="true"><type>VkBuildMicromapFlagsEXT</type> <name>flags</name></member>
+ <member noautovalidity="true"><type>VkBuildMicromapModeEXT</type> <name>mode</name></member>
+ <member optional="true" noautovalidity="true"><type>VkMicromapEXT</type> <name>dstMicromap</name></member>
+ <member optional="true"><type>uint32_t</type> <name>usageCountsCount</name></member>
+ <member len="usageCountsCount" optional="true">const <type>VkMicromapUsageEXT</type>* <name>pUsageCounts</name></member>
+ <member len="usageCountsCount,1" optional="true,false">const <type>VkMicromapUsageEXT</type>* const* <name>ppUsageCounts</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>data</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressKHR</type> <name>scratchData</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>triangleArray</name></member>
+ <member><type>VkDeviceSize</type> <name>triangleArrayStride</name></member>
+ </type>
+ <type category="struct" name="VkMicromapCreateInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_MICROMAP_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkMicromapCreateFlagsEXT</type> <name>createFlags</name></member>
+ <member><type>VkBuffer</type> <name>buffer</name></member>
+ <member><type>VkDeviceSize</type> <name>offset</name><comment>Specified in bytes</comment></member>
+ <member><type>VkDeviceSize</type> <name>size</name></member>
+ <member><type>VkMicromapTypeEXT</type> <name>type</name></member>
+ <member optional="true"><type>VkDeviceAddress</type> <name>deviceAddress</name></member>
+ </type>
+ <type category="struct" name="VkMicromapVersionInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_MICROMAP_VERSION_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member len="latexmath:[2 \times \mathtt{VK\_UUID\_SIZE}]" altlen="2*VK_UUID_SIZE">const <type>uint8_t</type>* <name>pVersionData</name></member>
+ </type>
+ <type category="struct" name="VkCopyMicromapInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_COPY_MICROMAP_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkMicromapEXT</type> <name>src</name></member>
+ <member><type>VkMicromapEXT</type> <name>dst</name></member>
+ <member><type>VkCopyMicromapModeEXT</type> <name>mode</name></member>
+ </type>
+ <type category="struct" name="VkCopyMicromapToMemoryInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_COPY_MICROMAP_TO_MEMORY_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkMicromapEXT</type> <name>src</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressKHR</type> <name>dst</name></member>
+ <member><type>VkCopyMicromapModeEXT</type> <name>mode</name></member>
+ </type>
+ <type category="struct" name="VkCopyMemoryToMicromapInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_COPY_MEMORY_TO_MICROMAP_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>src</name></member>
+ <member><type>VkMicromapEXT</type> <name>dst</name></member>
+ <member><type>VkCopyMicromapModeEXT</type> <name>mode</name></member>
+ </type>
+ <type category="struct" name="VkMicromapBuildSizesInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_MICROMAP_BUILD_SIZES_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>micromapSize</name></member>
+ <member><type>VkDeviceSize</type> <name>buildScratchSize</name></member>
+ <member><type>VkBool32</type> <name>discardable</name></member>
+ </type>
+ <type category="struct" name="VkMicromapUsageEXT">
+ <member><type>uint32_t</type> <name>count</name></member>
+ <member><type>uint32_t</type> <name>subdivisionLevel</name></member>
+ <member><type>uint32_t</type> <name>format</name><comment>Interpretation depends on parent type</comment></member>
+ </type>
+ <type category="struct" name="VkMicromapTriangleEXT">
+ <member><type>uint32_t</type> <name>dataOffset</name><comment>Specified in bytes</comment></member>
+ <member><type>uint16_t</type> <name>subdivisionLevel</name></member>
+ <member><type>uint16_t</type> <name>format</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceOpacityMicromapFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>micromap</name></member>
+ <member><type>VkBool32</type> <name>micromapCaptureReplay</name></member>
+ <member><type>VkBool32</type> <name>micromapHostCommands</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceOpacityMicromapPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxOpacity2StateSubdivisionLevel</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxOpacity4StateSubdivisionLevel</name></member>
+ </type>
+ <type category="struct" name="VkAccelerationStructureTrianglesOpacityMicromapEXT" structextends="VkAccelerationStructureGeometryTrianglesDataKHR">
+ <member values="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkIndexType</type> <name>indexType</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>indexBuffer</name></member>
+ <member><type>VkDeviceSize</type> <name>indexStride</name></member>
+ <member><type>uint32_t</type> <name>baseTriangle</name></member>
+ <member optional="true"><type>uint32_t</type> <name>usageCountsCount</name></member>
+ <member len="usageCountsCount" optional="true">const <type>VkMicromapUsageEXT</type>* <name>pUsageCounts</name></member>
+ <member len="usageCountsCount,1" optional="true,false">const <type>VkMicromapUsageEXT</type>* const* <name>ppUsageCounts</name></member>
+ <member optional="true"><type>VkMicromapEXT</type> <name>micromap</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDisplacementMicromapFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>displacementMicromap</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDisplacementMicromapPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxDisplacementMicromapSubdivisionLevel</name></member>
+ </type>
+ <type category="struct" name="VkAccelerationStructureTrianglesDisplacementMicromapNV" structextends="VkAccelerationStructureGeometryTrianglesDataKHR">
+ <member values="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+
+ <member><type>VkFormat</type> <name>displacementBiasAndScaleFormat</name></member>
+ <member><type>VkFormat</type> <name>displacementVectorFormat</name></member>
+
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>displacementBiasAndScaleBuffer</name></member>
+ <member><type>VkDeviceSize</type> <name>displacementBiasAndScaleStride</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>displacementVectorBuffer</name></member>
+ <member><type>VkDeviceSize</type> <name>displacementVectorStride</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>displacedMicromapPrimitiveFlags</name></member>
+ <member><type>VkDeviceSize</type> <name>displacedMicromapPrimitiveFlagsStride</name></member>
+ <member><type>VkIndexType</type> <name>indexType</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstKHR</type> <name>indexBuffer</name></member>
+ <member><type>VkDeviceSize</type> <name>indexStride</name></member>
+
+ <member><type>uint32_t</type> <name>baseTriangle</name></member>
+
+ <member optional="true"><type>uint32_t</type> <name>usageCountsCount</name></member>
+ <member len="usageCountsCount" optional="true">const <type>VkMicromapUsageEXT</type>* <name>pUsageCounts</name></member>
+ <member len="usageCountsCount,1" optional="true,false">const <type>VkMicromapUsageEXT</type>* const* <name>ppUsageCounts</name></member>
+
+ <member optional="true"><type>VkMicromapEXT</type> <name>micromap</name></member>
+ </type>
+ <type category="struct" name="VkPipelinePropertiesIdentifierEXT" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_PROPERTIES_IDENTIFIER_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint8_t</type> <name>pipelineIdentifier</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ </type>
+ <type category="struct" name="VkPhysicalDevicePipelinePropertiesFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>pipelinePropertiesIdentifier</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderEarlyAndLateFragmentTests</name></member>
+ </type>
+ <type category="struct" name="VkExternalMemoryAcquireUnmodifiedEXT" structextends="VkBufferMemoryBarrier,VkBufferMemoryBarrier2,VkImageMemoryBarrier,VkImageMemoryBarrier2">
+ <member values="VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>acquireUnmodifiedMemory</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalObjectCreateInfoEXT" structextends="VkInstanceCreateInfo,VkMemoryAllocateInfo,VkImageCreateInfo,VkImageViewCreateInfo,VkBufferViewCreateInfo,VkSemaphoreCreateInfo,VkEventCreateInfo" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECT_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkExportMetalObjectTypeFlagBitsEXT</type> <name>exportObjectType</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalObjectsInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalDeviceInfoEXT" structextends="VkExportMetalObjectsInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>MTLDevice_id</type> <name>mtlDevice</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalCommandQueueInfoEXT" structextends="VkExportMetalObjectsInfoEXT" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkQueue</type> <name>queue</name></member>
+ <member><type>MTLCommandQueue_id</type> <name>mtlCommandQueue</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalBufferInfoEXT" structextends="VkExportMetalObjectsInfoEXT" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceMemory</type> <name>memory</name></member>
+ <member><type>MTLBuffer_id</type> <name>mtlBuffer</name></member>
+ </type>
+ <type category="struct" name="VkImportMetalBufferInfoEXT" structextends="VkMemoryAllocateInfo" allowduplicate="false">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_METAL_BUFFER_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>MTLBuffer_id</type> <name>mtlBuffer</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalTextureInfoEXT" structextends="VkExportMetalObjectsInfoEXT" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkImage</type> <name>image</name></member>
+ <member optional="true"><type>VkImageView</type> <name>imageView</name></member>
+ <member optional="true"><type>VkBufferView</type> <name>bufferView</name></member>
+ <member><type>VkImageAspectFlagBits</type> <name>plane</name></member>
+ <member><type>MTLTexture_id</type> <name>mtlTexture</name></member>
+ </type>
+ <type category="struct" name="VkImportMetalTextureInfoEXT" structextends="VkImageCreateInfo" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_METAL_TEXTURE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImageAspectFlagBits</type> <name>plane</name></member>
+ <member><type>MTLTexture_id</type> <name>mtlTexture</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalIOSurfaceInfoEXT" structextends="VkExportMetalObjectsInfoEXT" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkImage</type> <name>image</name></member>
+ <member><type>IOSurfaceRef</type> <name>ioSurface</name></member>
+ </type>
+ <type category="struct" name="VkImportMetalIOSurfaceInfoEXT" structextends="VkImageCreateInfo" allowduplicate="false">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_METAL_IO_SURFACE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>IOSurfaceRef</type> <name>ioSurface</name></member>
+ </type>
+ <type category="struct" name="VkExportMetalSharedEventInfoEXT" structextends="VkExportMetalObjectsInfoEXT" allowduplicate="true">
+ <member values="VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkSemaphore</type> <name>semaphore</name></member>
+ <member optional="true"><type>VkEvent</type> <name>event</name></member>
+ <member><type>MTLSharedEvent_id</type> <name>mtlSharedEvent</name></member>
+ </type>
+ <type category="struct" name="VkImportMetalSharedEventInfoEXT" structextends="VkSemaphoreCreateInfo,VkEventCreateInfo" allowduplicate="false">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>MTLSharedEvent_id</type> <name>mtlSharedEvent</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>nonSeamlessCubeMap</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDevicePipelineRobustnessFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>pipelineRobustness</name></member>
+ </type>
+ <type category="struct" name="VkPipelineRobustnessCreateInfoEXT" structextends="VkGraphicsPipelineCreateInfo,VkComputePipelineCreateInfo,VkPipelineShaderStageCreateInfo,VkRayTracingPipelineCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkPipelineRobustnessBufferBehaviorEXT</type> <name>storageBuffers</name></member>
+ <member><type>VkPipelineRobustnessBufferBehaviorEXT</type> <name>uniformBuffers</name></member>
+ <member><type>VkPipelineRobustnessBufferBehaviorEXT</type> <name>vertexInputs</name></member>
+ <member><type>VkPipelineRobustnessImageBehaviorEXT</type> <name>images</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDevicePipelineRobustnessPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="exact"><type>VkPipelineRobustnessBufferBehaviorEXT</type> <name>defaultRobustnessStorageBuffers</name></member>
+ <member limittype="exact"><type>VkPipelineRobustnessBufferBehaviorEXT</type> <name>defaultRobustnessUniformBuffers</name></member>
+ <member limittype="exact"><type>VkPipelineRobustnessBufferBehaviorEXT</type> <name>defaultRobustnessVertexInputs</name></member>
+ <member limittype="exact"><type>VkPipelineRobustnessImageBehaviorEXT</type> <name>defaultRobustnessImages</name></member>
+ </type>
+ <type category="struct" name="VkImageViewSampleWeightCreateInfoQCOM" structextends="VkImageViewCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkOffset2D</type> <name>filterCenter</name></member>
+ <member><type>VkExtent2D</type> <name>filterSize</name></member>
+ <member><type>uint32_t</type> <name>numPhases</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageProcessingFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>textureSampleWeighted</name></member>
+ <member><type>VkBool32</type> <name>textureBoxFilter</name></member>
+ <member><type>VkBool32</type> <name>textureBlockMatch</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageProcessingPropertiesQCOM" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max" optional="true"><type>uint32_t</type> <name>maxWeightFilterPhases</name></member>
+ <member limittype="max" optional="true"><type>VkExtent2D</type> <name>maxWeightFilterDimension</name></member>
+ <member limittype="max" optional="true"><type>VkExtent2D</type> <name>maxBlockMatchRegion</name></member>
+ <member limittype="max" optional="true"><type>VkExtent2D</type> <name>maxBoxFilterBlockSize</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceTilePropertiesFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>tileProperties</name></member>
+ </type>
+ <type category="struct" name="VkTilePropertiesQCOM">
+ <member values="VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkExtent3D</type> <name>tileSize</name></member>
+ <member><type>VkExtent2D</type> <name>apronSize</name></member>
+ <member><type>VkOffset2D</type> <name>origin</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceAmigoProfilingFeaturesSEC" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>amigoProfiling</name></member>
+ </type>
+ <type category="struct" name="VkAmigoProfilingSubmitInfoSEC" structextends="VkSubmitInfo">
+ <member values="VK_STRUCTURE_TYPE_AMIGO_PROFILING_SUBMIT_INFO_SEC"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint64_t</type> <name>firstDrawTimestamp</name></member>
+ <member><type>uint64_t</type> <name>swapBufferTimestamp</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>attachmentFeedbackLoopLayout</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDepthClampZeroOneFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>depthClampZeroOne</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceAddressBindingReportFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ADDRESS_BINDING_REPORT_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>reportAddressBinding</name></member>
+ </type>
+ <type category="struct" name="VkDeviceAddressBindingCallbackDataEXT" structextends="VkDebugUtilsMessengerCallbackDataEXT">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_ADDRESS_BINDING_CALLBACK_DATA_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkDeviceAddressBindingFlagsEXT</type> <name>flags</name></member>
+ <member><type>VkDeviceAddress</type> <name>baseAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name></member>
+ <member><type>VkDeviceAddressBindingTypeEXT</type> <name>bindingType</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceOpticalFlowFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>opticalFlow</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceOpticalFlowPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkOpticalFlowGridSizeFlagsNV</type> <name>supportedOutputGridSizes</name></member>
+ <member limittype="bitmask"><type>VkOpticalFlowGridSizeFlagsNV</type> <name>supportedHintGridSizes</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>hintSupported</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>costSupported</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>bidirectionalFlowSupported</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>globalFlowSupported</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>minWidth</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>minHeight</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>maxWidth</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>maxHeight</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>maxNumRegionsOfInterest</name></member>
+ </type>
+ <type category="struct" name="VkOpticalFlowImageFormatInfoNV" structextends="VkPhysicalDeviceImageFormatInfo2,VkImageCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkOpticalFlowUsageFlagsNV</type> <name>usage</name></member>
+ </type>
+ <type category="struct" name="VkOpticalFlowImageFormatPropertiesNV" returnedonly="true" >
+ <member values="VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkFormat</type> <name>format</name></member>
+ </type>
+ <type category="struct" name="VkOpticalFlowSessionCreateInfoNV">
+ <member values="VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>width</name></member>
+ <member><type>uint32_t</type> <name>height</name></member>
+ <member><type>VkFormat</type> <name>imageFormat</name></member>
+ <member><type>VkFormat</type> <name>flowVectorFormat</name></member>
+ <member optional="true"><type>VkFormat</type> <name>costFormat</name></member>
+ <member><type>VkOpticalFlowGridSizeFlagsNV</type> <name>outputGridSize</name></member>
+ <member optional="true"><type>VkOpticalFlowGridSizeFlagsNV</type> <name>hintGridSize</name></member>
+ <member optional="true"><type>VkOpticalFlowPerformanceLevelNV</type> <name>performanceLevel</name></member>
+ <member optional="true"><type>VkOpticalFlowSessionCreateFlagsNV</type> <name>flags</name></member>
+ </type>
+ <type category="struct" name="VkOpticalFlowSessionCreatePrivateDataInfoNV" structextends="VkOpticalFlowSessionCreateInfoNV"><comment>NV internal use only</comment>
+ <member values="VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>id</name></member>
+ <member><type>uint32_t</type> <name>size</name></member>
+ <member>const <type>void</type>* <name>pPrivateData</name></member>
+ </type>
+ <type category="struct" name="VkOpticalFlowExecuteInfoNV">
+ <member values="VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkOpticalFlowExecuteFlagsNV</type> <name>flags</name></member>
+ <member optional="true"><type>uint32_t</type> <name>regionCount</name></member>
+ <member len="regionCount">const <type>VkRect2D</type>* <name>pRegions</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceFaultFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>deviceFault</name></member>
+ <member><type>VkBool32</type> <name>deviceFaultVendorBinary</name></member>
+ </type>
+ <type category="struct" name="VkDeviceFaultAddressInfoEXT">
+ <member><type>VkDeviceFaultAddressTypeEXT</type> <name>addressType</name></member>
+ <member><type>VkDeviceAddress</type> <name>reportedAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>addressPrecision</name></member>
+ </type>
+ <type category="struct" name="VkDeviceFaultVendorInfoEXT">
+ <member noautovalidity="true" len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]<comment>Free-form description of the fault</comment></member>
+ <member><type>uint64_t</type> <name>vendorFaultCode</name></member>
+ <member><type>uint64_t</type> <name>vendorFaultData</name></member>
+ </type>
+ <type category="struct" name="VkDeviceFaultCountsEXT">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>addressInfoCount</name></member>
+ <member optional="true"><type>uint32_t</type> <name>vendorInfoCount</name></member>
+ <member optional="true"><type>VkDeviceSize</type> <name>vendorBinarySize</name><comment>Specified in bytes</comment></member>
+ </type>
+ <type category="struct" name="VkDeviceFaultInfoEXT" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true" len="null-terminated"><type>char</type> <name>description</name>[<enum>VK_MAX_DESCRIPTION_SIZE</enum>]<comment>Free-form description of the fault</comment></member>
+ <member optional="true"><type>VkDeviceFaultAddressInfoEXT</type>* <name>pAddressInfos</name></member>
+ <member optional="true"><type>VkDeviceFaultVendorInfoEXT</type>* <name>pVendorInfos</name></member>
+ <member optional="true"><type>void</type>* <name>pVendorBinaryData</name></member>
+ </type>
+ <type category="struct" name="VkDeviceFaultVendorBinaryHeaderVersionOneEXT">
+ <comment>The fields in this structure are non-normative since structure packing is implementation-defined in C. The specification defines the normative layout.</comment>
+ <member><type>uint32_t</type> <name>headerSize</name></member>
+ <member><type>VkDeviceFaultVendorBinaryHeaderVersionEXT</type> <name>headerVersion</name></member>
+ <member><type>uint32_t</type> <name>vendorID</name></member>
+ <member><type>uint32_t</type> <name>deviceID</name></member>
+ <member><type>uint32_t</type> <name>driverVersion</name></member>
+ <member><type>uint8_t</type> <name>pipelineCacheUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member><type>uint32_t</type> <name>applicationNameOffset</name></member>
+ <member><type>uint32_t</type> <name>applicationVersion</name></member>
+ <member><type>uint32_t</type> <name>engineNameOffset</name></member>
+ <member><type>uint32_t</type> <name>engineVersion</name></member>
+ <member><type>uint32_t</type> <name>apiVersion</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>pipelineLibraryGroupHandles</name></member>
+ </type>
+ <type category="struct" name="VkDepthBiasInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_DEPTH_BIAS_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>float</type> <name>depthBiasConstantFactor</name></member>
+ <member><type>float</type> <name>depthBiasClamp</name></member>
+ <member><type>float</type> <name>depthBiasSlopeFactor</name></member>
+ </type>
+ <type category="struct" name="VkDepthBiasRepresentationInfoEXT" structextends="VkDepthBiasInfoEXT,VkPipelineRasterizationStateCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DEPTH_BIAS_REPRESENTATION_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDepthBiasRepresentationEXT</type> <name>depthBiasRepresentation</name></member>
+ <member><type>VkBool32</type> <name>depthBiasExact</name></member>
+ </type>
+ <type category="struct" name="VkDecompressMemoryRegionNV">
+ <member><type>VkDeviceAddress</type> <name>srcAddress</name></member>
+ <member><type>VkDeviceAddress</type> <name>dstAddress</name></member>
+ <member><type>VkDeviceSize</type> <name>compressedSize</name><comment>Specified in bytes</comment></member>
+ <member><type>VkDeviceSize</type> <name>decompressedSize</name><comment>Specified in bytes</comment></member>
+ <member><type>VkMemoryDecompressionMethodFlagsNV</type> <name>decompressionMethod</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderCoreBuiltinsPropertiesARM" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>uint64_t</type> <name>shaderCoreMask</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>shaderCoreCount</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>shaderWarpsPerCore</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderCoreBuiltins</name></member>
+ </type>
+ <type category="struct" name="VkFrameBoundaryEXT" structextends="VkSubmitInfo,VkSubmitInfo2,VkPresentInfoKHR,VkBindSparseInfo">
+ <member values="VK_STRUCTURE_TYPE_FRAME_BOUNDARY_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkFrameBoundaryFlagsEXT</type> <name>flags</name></member>
+ <member><type>uint64_t</type> <name>frameID</name></member>
+ <member optional="true"><type>uint32_t</type> <name>imageCount</name></member>
+ <member optional="true" len="imageCount">const <type>VkImage</type>* <name>pImages</name></member>
+ <member optional="true"><type>uint32_t</type> <name>bufferCount</name></member>
+ <member optional="true" len="bufferCount">const <type>VkBuffer</type>* <name>pBuffers</name></member>
+ <member optional="true"><type>uint64_t</type> <name>tagName</name></member>
+ <member optional="true"><type>size_t</type> <name>tagSize</name></member>
+ <member optional="true" len="tagSize">const <type>void</type>* <name>pTag</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceFrameBoundaryFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAME_BOUNDARY_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>frameBoundary</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>dynamicRenderingUnusedAttachments</name></member>
+ </type>
+ <type category="struct" name="VkSurfacePresentModeEXT" structextends="VkPhysicalDeviceSurfaceInfo2KHR">
+ <member values="VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkPresentModeKHR</type> <name>presentMode</name></member>
+ </type>
+ <type category="struct" name="VkSurfacePresentScalingCapabilitiesEXT" structextends="VkSurfaceCapabilities2KHR">
+ <member values="VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkPresentScalingFlagsEXT</type> <name>supportedPresentScaling</name></member>
+ <member optional="true"><type>VkPresentGravityFlagsEXT</type> <name>supportedPresentGravityX</name></member>
+ <member optional="true"><type>VkPresentGravityFlagsEXT</type> <name>supportedPresentGravityY</name></member>
+ <member optional="true"><type>VkExtent2D</type> <name>minScaledImageExtent</name><comment>Supported minimum image width and height for the surface when scaling is used</comment></member>
+ <member optional="true"><type>VkExtent2D</type> <name>maxScaledImageExtent</name><comment>Supported maximum image width and height for the surface when scaling is used</comment></member>
+ </type>
+ <type category="struct" name="VkSurfacePresentModeCompatibilityEXT" structextends="VkSurfaceCapabilities2KHR">
+ <member values="VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>presentModeCount</name></member>
+ <member optional="true" len="presentModeCount"><type>VkPresentModeKHR</type>* <name>pPresentModes</name><comment>Output list of present modes compatible with the one specified in VkSurfacePresentModeEXT</comment></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>swapchainMaintenance1</name></member>
+ </type>
+ <type category="struct" name="VkSwapchainPresentFenceInfoEXT" structextends="VkPresentInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>swapchainCount</name><comment>Copy of VkPresentInfoKHR::swapchainCount</comment></member>
+ <member len="swapchainCount">const <type>VkFence</type>* <name>pFences</name><comment>Fence to signal for each swapchain</comment></member>
+ </type>
+ <type category="struct" name="VkSwapchainPresentModesCreateInfoEXT" structextends="VkSwapchainCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODES_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>presentModeCount</name><comment>Length of the pPresentModes array</comment></member>
+ <member len="presentModeCount">const <type>VkPresentModeKHR</type>* <name>pPresentModes</name><comment>Presentation modes which will be usable with this swapchain</comment></member>
+ </type>
+ <type category="struct" name="VkSwapchainPresentModeInfoEXT" structextends="VkPresentInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>swapchainCount</name><comment>Copy of VkPresentInfoKHR::swapchainCount</comment></member>
+ <member len="swapchainCount">const <type>VkPresentModeKHR</type>* <name>pPresentModes</name><comment>Presentation mode for each swapchain</comment></member>
+ </type>
+ <type category="struct" name="VkSwapchainPresentScalingCreateInfoEXT" structextends="VkSwapchainCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_SCALING_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkPresentScalingFlagsEXT</type> <name>scalingBehavior</name></member>
+ <member optional="true"><type>VkPresentGravityFlagsEXT</type> <name>presentGravityX</name></member>
+ <member optional="true"><type>VkPresentGravityFlagsEXT</type> <name>presentGravityY</name></member>
+ </type>
+ <type category="struct" name="VkReleaseSwapchainImagesInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_RELEASE_SWAPCHAIN_IMAGES_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member externsync="true"><type>VkSwapchainKHR</type> <name>swapchain</name><comment>Swapchain for which images are being released</comment></member>
+ <member><type>uint32_t</type> <name>imageIndexCount</name><comment>Number of indices to release</comment></member>
+ <member len="imageIndexCount">const <type>uint32_t</type>* <name>pImageIndices</name><comment>Indices of which presentable images to release</comment></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDepthBiasControlFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_BIAS_CONTROL_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>depthBiasControl</name></member>
+ <member><type>VkBool32</type> <name>leastRepresentableValueForceUnormRepresentation</name></member>
+ <member><type>VkBool32</type> <name>floatRepresentation</name></member>
+ <member><type>VkBool32</type> <name>depthBiasExact</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>rayTracingInvocationReorder</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRayTracingInvocationReorderPropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="noauto"><type>VkRayTracingInvocationReorderModeNV</type> <name>rayTracingInvocationReorderReorderingHint</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>extendedSparseAddressSpace</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExtendedSparseAddressSpacePropertiesNV" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>VkDeviceSize</type> <name>extendedSparseAddressSpaceSize</name><comment>Total address space available for extended sparse allocations (bytes)</comment></member>
+ <member limittype="bitmask"><type>VkImageUsageFlags</type> <name>extendedSparseImageUsageFlags</name><comment>Bitfield of which image usages are supported for extended sparse allocations</comment></member>
+ <member limittype="bitmask"><type>VkBufferUsageFlags</type> <name>extendedSparseBufferUsageFlags</name><comment>Bitfield of which buffer usages are supported for extended sparse allocations</comment></member>
+ </type>
+ <type category="struct" name="VkDirectDriverLoadingInfoLUNARG">
+ <member values="VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDirectDriverLoadingFlagsLUNARG</type> <name>flags</name></member>
+ <member noautovalidity="true"><type>PFN_vkGetInstanceProcAddrLUNARG</type> <name>pfnGetInstanceProcAddr</name></member>
+ </type>
+ <type category="struct" name="VkDirectDriverLoadingListLUNARG" structextends="VkInstanceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDirectDriverLoadingModeLUNARG</type> <name>mode</name></member>
+ <member><type>uint32_t</type> <name>driverCount</name></member>
+ <member len="driverCount">const <type>VkDirectDriverLoadingInfoLUNARG</type>* <name>pDrivers</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_VIEWPORTS_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>multiviewPerViewViewports</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>rayTracingPositionFetch</name></member>
+ </type>
+ <type category="struct" name="VkDeviceImageSubresourceInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member>const <type>VkImageCreateInfo</type>* <name>pCreateInfo</name></member>
+ <member>const <type>VkImageSubresource2KHR</type>* <name>pSubresource</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderCorePropertiesARM" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>pixelRate</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>texelRate</name></member>
+ <member limittype="exact"><type>uint32_t</type> <name>fmaRate</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>multiviewPerViewRenderAreas</name></member>
+ </type>
+ <type category="struct" name="VkMultiviewPerViewRenderAreasRenderPassBeginInfoQCOM" structextends="VkRenderPassBeginInfo,VkRenderingInfo">
+ <member values="VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>perViewRenderAreaCount</name></member>
+ <member len="perViewRenderAreaCount">const <type>VkRect2D</type>* <name>pPerViewRenderAreas</name></member>
+ </type>
+ <type category="struct" name="VkQueryLowLatencySupportNV" structextends="VkSemaphoreCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_QUERY_LOW_LATENCY_SUPPORT_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>void</type>* <name>pQueriedLowLatencyData</name></member>
+ </type>
+ <type category="struct" name="VkMemoryMapInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkMemoryMapFlags</type> <name>flags</name></member>
+ <member externsync="true"><type>VkDeviceMemory</type> <name>memory</name></member>
+ <member><type>VkDeviceSize</type> <name>offset</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name></member>
+ </type>
+ <type category="struct" name="VkMemoryUnmapInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkMemoryUnmapFlagsKHR</type> <name>flags</name></member>
+ <member externsync="true"><type>VkDeviceMemory</type> <name>memory</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderObjectFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderObject</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderObjectPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="noauto"><type>uint8_t</type> <name>shaderBinaryUUID</name>[<enum>VK_UUID_SIZE</enum>]</member>
+ <member limittype="noauto"><type>uint32_t</type> <name>shaderBinaryVersion</name></member>
+ </type>
+ <type category="struct" name="VkShaderCreateInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkShaderCreateFlagsEXT</type> <name>flags</name></member>
+ <member><type>VkShaderStageFlagBits</type> <name>stage</name></member>
+ <member optional="true"><type>VkShaderStageFlags</type> <name>nextStage</name></member>
+ <member><type>VkShaderCodeTypeEXT</type> <name>codeType</name></member>
+ <member><type>size_t</type> <name>codeSize</name></member>
+ <member len="codeSize">const <type>void</type>* <name>pCode</name></member>
+ <member optional="true" len="null-terminated">const <type>char</type>* <name>pName</name></member>
+ <member optional="true"><type>uint32_t</type> <name>setLayoutCount</name></member>
+ <member optional="true" len="setLayoutCount">const <type>VkDescriptorSetLayout</type>* <name>pSetLayouts</name></member>
+ <member optional="true"><type>uint32_t</type> <name>pushConstantRangeCount</name></member>
+ <member optional="true" len="pushConstantRangeCount">const <type>VkPushConstantRange</type>* <name>pPushConstantRanges</name></member>
+ <member optional="true">const <type>VkSpecializationInfo</type>* <name>pSpecializationInfo</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderTileImageFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderTileImageColorReadAccess</name></member>
+ <member><type>VkBool32</type> <name>shaderTileImageDepthReadAccess</name></member>
+ <member><type>VkBool32</type> <name>shaderTileImageStencilReadAccess</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderTileImagePropertiesEXT" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>shaderTileImageCoherentReadAccelerated</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>shaderTileImageReadSampleFromPixelRateInvocation</name></member>
+ <member limittype="noauto"><type>VkBool32</type> <name>shaderTileImageReadFromHelperInvocation</name></member>
+ </type>
+ <type category="struct" name="VkImportScreenBufferInfoQNX" structextends="VkMemoryAllocateInfo">
+ <member values="VK_STRUCTURE_TYPE_IMPORT_SCREEN_BUFFER_INFO_QNX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true">struct <type>_screen_buffer</type>* <name>buffer</name></member>
+ </type>
+ <type category="struct" name="VkScreenBufferPropertiesQNX" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_SCREEN_BUFFER_PROPERTIES_QNX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>allocationSize</name></member>
+ <member><type>uint32_t</type> <name>memoryTypeBits</name></member>
+ </type>
+ <type category="struct" name="VkScreenBufferFormatPropertiesQNX" structextends="VkScreenBufferPropertiesQNX" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_SCREEN_BUFFER_FORMAT_PROPERTIES_QNX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkFormat</type> <name>format</name></member>
+ <member><type>uint64_t</type> <name>externalFormat</name></member>
+ <member><type>uint64_t</type> <name>screenUsage</name></member>
+ <member><type>VkFormatFeatureFlags</type> <name>formatFeatures</name></member>
+ <member><type>VkComponentMapping</type> <name>samplerYcbcrConversionComponents</name></member>
+ <member><type>VkSamplerYcbcrModelConversion</type> <name>suggestedYcbcrModel</name></member>
+ <member><type>VkSamplerYcbcrRange</type> <name>suggestedYcbcrRange</name></member>
+ <member><type>VkChromaLocation</type> <name>suggestedXChromaOffset</name></member>
+ <member><type>VkChromaLocation</type> <name>suggestedYChromaOffset</name></member>
+ </type>
+ <type category="struct" name="VkExternalFormatQNX" structextends="VkImageCreateInfo,VkSamplerYcbcrConversionCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_QNX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint64_t</type> <name>externalFormat</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>screenBufferImport</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceCooperativeMatrixFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>cooperativeMatrix</name></member>
+ <member><type>VkBool32</type> <name>cooperativeMatrixRobustBufferAccess</name></member>
+ </type>
+ <type category="struct" name="VkCooperativeMatrixPropertiesKHR" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>MSize</name></member>
+ <member><type>uint32_t</type> <name>NSize</name></member>
+ <member><type>uint32_t</type> <name>KSize</name></member>
+ <member><type>VkComponentTypeKHR</type> <name>AType</name></member>
+ <member><type>VkComponentTypeKHR</type> <name>BType</name></member>
+ <member><type>VkComponentTypeKHR</type> <name>CType</name></member>
+ <member><type>VkComponentTypeKHR</type> <name>ResultType</name></member>
+ <member><type>VkBool32</type> <name>saturatingAccumulation</name></member>
+ <member><type>VkScopeKHR</type> <name>scope</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceCooperativeMatrixPropertiesKHR" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="bitmask"><type>VkShaderStageFlags</type> <name>cooperativeMatrixSupportedStages</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderEnqueuePropertiesAMDX" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_PROPERTIES_AMDX"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxExecutionGraphDepth</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxExecutionGraphShaderOutputNodes</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxExecutionGraphShaderPayloadSize</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>maxExecutionGraphShaderPayloadCount</name></member>
+ <member limittype="noauto"><type>uint32_t</type> <name>executionGraphDispatchAddressAlignment</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderEnqueueFeaturesAMDX" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_FEATURES_AMDX"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderEnqueue</name></member>
+ </type>
+ <type category="struct" name="VkExecutionGraphPipelineCreateInfoAMDX">
+ <member values="VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_CREATE_INFO_AMDX"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true" optional="true"><type>VkPipelineCreateFlags</type> <name>flags</name></member>
+ <member optional="true"><type>uint32_t</type> <name>stageCount</name></member>
+ <member optional="true" len="stageCount">const <type>VkPipelineShaderStageCreateInfo</type>* <name>pStages</name></member>
+ <member optional="true">const <type>VkPipelineLibraryCreateInfoKHR</type>* <name>pLibraryInfo</name></member>
+ <member><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member noautovalidity="true" optional="true"><type>VkPipeline</type> <name>basePipelineHandle</name></member>
+ <member><type>int32_t</type> <name>basePipelineIndex</name></member>
+ </type>
+ <type category="struct" name="VkPipelineShaderStageNodeCreateInfoAMDX" structextends="VkPipelineShaderStageCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NODE_CREATE_INFO_AMDX"> <type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true" len="null-terminated">const <type>char</type>* <name>pName</name></member>
+ <member><type>uint32_t</type> <name>index</name></member>
+ </type>
+ <type category="struct" name="VkExecutionGraphPipelineScratchSizeAMDX">
+ <member values="VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_SCRATCH_SIZE_AMDX"><type>VkStructureType</type> <name>sType</name></member>
+ <member noautovalidity="true" optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkDeviceSize</type> <name>size</name></member>
+ </type>
+ <type category="struct" name="VkDispatchGraphInfoAMDX">
+ <member><type>uint32_t</type> <name>nodeIndex</name></member>
+ <member optional="true"><type>uint32_t</type> <name>payloadCount</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstAMDX</type> <name>payloads</name></member>
+ <member><type>uint64_t</type> <name>payloadStride</name></member>
+ </type>
+ <type category="struct" name="VkDispatchGraphCountInfoAMDX">
+ <member optional="true"><type>uint32_t</type> <name>count</name></member>
+ <member noautovalidity="true"><type>VkDeviceOrHostAddressConstAMDX</type> <name>infos</name></member>
+ <member><type>uint64_t</type> <name>stride</name></member>
+ </type>
+ <type category="struct" name="VkBindMemoryStatusKHR" structextends="VkBindBufferMemoryInfo,VkBindImageMemoryInfo">
+ <member values="VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkResult</type>* <name>pResult</name></member>
+ </type>
+ <type category="struct" name="VkBindDescriptorSetsInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkShaderStageFlags</type> <name>stageFlags</name></member>
+ <member optional="true"><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member optional="true"><type>uint32_t</type> <name>firstSet</name></member>
+ <member><type>uint32_t</type> <name>descriptorSetCount</name></member>
+ <member len="descriptorSetCount">const <type>VkDescriptorSet</type>* <name>pDescriptorSets</name></member>
+ <member optional="true"><type>uint32_t</type> <name>dynamicOffsetCount</name></member>
+ <member optional="true,true" len="dynamicOffsetCount">const <type>uint32_t</type>* <name>pDynamicOffsets</name></member>
+ </type>
+ <type category="struct" name="VkPushConstantsInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member><type>VkShaderStageFlags</type> <name>stageFlags</name></member>
+ <member optional="true"><type>uint32_t</type> <name>offset</name></member>
+ <member><type>uint32_t</type> <name>size</name></member>
+ <member len="size">const <type>void</type>* <name>pValues</name></member>
+ </type>
+ <type category="struct" name="VkPushDescriptorSetInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkShaderStageFlags</type> <name>stageFlags</name></member>
+ <member optional="true"><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member optional="true"><type>uint32_t</type> <name>set</name></member>
+ <member><type>uint32_t</type> <name>descriptorWriteCount</name></member>
+ <member len="descriptorWriteCount">const <type>VkWriteDescriptorSet</type>* <name>pDescriptorWrites</name></member>
+ </type>
+ <type category="struct" name="VkPushDescriptorSetWithTemplateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkDescriptorUpdateTemplate</type> <name>descriptorUpdateTemplate</name></member>
+ <member optional="true"><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member optional="true"><type>uint32_t</type> <name>set</name></member>
+ <member>const <type>void</type>* <name>pData</name></member>
+ </type>
+ <type category="struct" name="VkSetDescriptorBufferOffsetsInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkShaderStageFlags</type> <name>stageFlags</name></member>
+ <member optional="true"><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member optional="true"><type>uint32_t</type> <name>firstSet</name></member>
+ <member><type>uint32_t</type> <name>setCount</name></member>
+ <member len="setCount">const <type>uint32_t</type>* <name>pBufferIndices</name></member>
+ <member len="setCount">const <type>VkDeviceSize</type>* <name>pOffsets</name></member>
+ </type>
+ <type category="struct" name="VkBindDescriptorBufferEmbeddedSamplersInfoEXT">
+ <member values="VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkShaderStageFlags</type> <name>stageFlags</name></member>
+ <member optional="true"><type>VkPipelineLayout</type> <name>layout</name></member>
+ <member optional="true"><type>uint32_t</type> <name>set</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceCubicClampFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>cubicRangeClamp</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceYcbcrDegammaFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_DEGAMMA_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>ycbcrDegamma</name></member>
+ </type>
+ <type category="struct" name="VkSamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM" structextends="VkSamplerYcbcrConversionCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_YCBCR_DEGAMMA_CREATE_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>enableYDegamma</name></member>
+ <member><type>VkBool32</type> <name>enableCbCrDegamma</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceCubicWeightsFeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_WEIGHTS_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>selectableCubicWeights</name></member>
+ </type>
+ <type category="struct" name="VkSamplerCubicWeightsCreateInfoQCOM" structextends="VkSamplerCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_SAMPLER_CUBIC_WEIGHTS_CREATE_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkCubicFilterWeightsQCOM</type> <name>cubicWeights</name></member>
+ </type>
+ <type category="struct" name="VkBlitImageCubicWeightsInfoQCOM" structextends="VkBlitImageInfo2">
+ <member values="VK_STRUCTURE_TYPE_BLIT_IMAGE_CUBIC_WEIGHTS_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkCubicFilterWeightsQCOM</type> <name>cubicWeights</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageProcessing2FeaturesQCOM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_FEATURES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>textureBlockMatch2</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceImageProcessing2PropertiesQCOM" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_PROPERTIES_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max" optional="true"><type>VkExtent2D</type> <name>maxBlockMatchWindow</name></member>
+ </type>
+ <type category="struct" name="VkSamplerBlockMatchWindowCreateInfoQCOM" structextends="VkSamplerCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_SAMPLER_BLOCK_MATCH_WINDOW_CREATE_INFO_QCOM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkExtent2D</type> <name>windowExtent</name></member>
+ <member><type>VkBlockMatchWindowCompareModeQCOM</type> <name>windowCompareMode</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>descriptorPoolOverallocation</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceLayeredDriverPropertiesMSFT" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkLayeredDriverUnderlyingApiMSFT</type> <name>underlyingAPI</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDevicePerStageDescriptorSetFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PER_STAGE_DESCRIPTOR_SET_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>perStageDescriptorSet</name></member>
+ <member><type>VkBool32</type> <name>dynamicPipelineLayout</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalFormatResolveFeaturesANDROID" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>externalFormatResolve</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceExternalFormatResolvePropertiesANDROID" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_PROPERTIES_ANDROID"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="not"><type>VkBool32</type> <name>nullColorAttachmentWithExternalFormatResolve</name></member>
+ <member limittype="noauto"><type>VkChromaLocation</type> <name>externalFormatResolveChromaOffsetX</name></member>
+ <member limittype="noauto"><type>VkChromaLocation</type> <name>externalFormatResolveChromaOffsetY</name></member>
+ </type>
+ <type category="struct" name="VkAndroidHardwareBufferFormatResolvePropertiesANDROID" structextends="VkAndroidHardwareBufferPropertiesANDROID" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_RESOLVE_PROPERTIES_ANDROID"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkFormat</type> <name>colorAttachmentFormat</name></member>
+ </type>
+ <type category="struct" name="VkLatencySleepModeInfoNV">
+ <member values="VK_STRUCTURE_TYPE_LATENCY_SLEEP_MODE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>lowLatencyMode</name></member>
+ <member><type>VkBool32</type> <name>lowLatencyBoost</name></member>
+ <member><type>uint32_t</type> <name>minimumIntervalUs</name></member>
+ </type>
+ <type category="struct" name="VkLatencySleepInfoNV">
+ <member values="VK_STRUCTURE_TYPE_LATENCY_SLEEP_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkSemaphore</type> <name>signalSemaphore</name></member>
+ <member><type>uint64_t</type> <name>value</name></member>
+ </type>
+ <type category="struct" name="VkSetLatencyMarkerInfoNV">
+ <member values="VK_STRUCTURE_TYPE_SET_LATENCY_MARKER_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint64_t</type> <name>presentID</name></member>
+ <member><type>VkLatencyMarkerNV</type> <name>marker</name></member>
+ </type>
+ <type category="struct" name="VkGetLatencyMarkerInfoNV">
+ <member values="VK_STRUCTURE_TYPE_GET_LATENCY_MARKER_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>timingCount</name></member>
+ <member optional="true" len="timingCount"><type>VkLatencyTimingsFrameReportNV</type>* <name>pTimings</name></member>
+ </type>
+ <type category="struct" name="VkLatencyTimingsFrameReportNV" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_LATENCY_TIMINGS_FRAME_REPORT_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint64_t</type> <name>presentID</name></member>
+ <member><type>uint64_t</type> <name>inputSampleTimeUs</name></member>
+ <member><type>uint64_t</type> <name>simStartTimeUs</name></member>
+ <member><type>uint64_t</type> <name>simEndTimeUs</name></member>
+ <member><type>uint64_t</type> <name>renderSubmitStartTimeUs</name></member>
+ <member><type>uint64_t</type> <name>renderSubmitEndTimeUs</name></member>
+ <member><type>uint64_t</type> <name>presentStartTimeUs</name></member>
+ <member><type>uint64_t</type> <name>presentEndTimeUs</name></member>
+ <member><type>uint64_t</type> <name>driverStartTimeUs</name></member>
+ <member><type>uint64_t</type> <name>driverEndTimeUs</name></member>
+ <member><type>uint64_t</type> <name>osRenderQueueStartTimeUs</name></member>
+ <member><type>uint64_t</type> <name>osRenderQueueEndTimeUs</name></member>
+ <member><type>uint64_t</type> <name>gpuRenderStartTimeUs</name></member>
+ <member><type>uint64_t</type> <name>gpuRenderEndTimeUs</name></member>
+ </type>
+ <type category="struct" name="VkOutOfBandQueueTypeInfoNV">
+ <member values="VK_STRUCTURE_TYPE_OUT_OF_BAND_QUEUE_TYPE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkOutOfBandQueueTypeNV</type> <name>queueType</name></member>
+ </type>
+ <type category="struct" name="VkLatencySubmissionPresentIdNV" structextends="VkSubmitInfo,VkSubmitInfo2">
+ <member values="VK_STRUCTURE_TYPE_LATENCY_SUBMISSION_PRESENT_ID_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint64_t</type> <name>presentID</name></member>
+ </type>
+ <type category="struct" name="VkSwapchainLatencyCreateInfoNV" structextends="VkSwapchainCreateInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_SWAPCHAIN_LATENCY_CREATE_INFO_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>VkBool32</type> <name>latencyModeEnable</name></member>
+ </type>
+ <type category="struct" name="VkLatencySurfaceCapabilitiesNV" structextends="VkSurfaceCapabilities2KHR">
+ <member values="VK_STRUCTURE_TYPE_LATENCY_SURFACE_CAPABILITIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>presentModeCount</name></member>
+ <member optional="true" len="presentModeCount"><type>VkPresentModeKHR</type>* <name>pPresentModes</name></member>
+ </type>
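Editor's note: the VK_NV_low_latency2 structures added above are plain input structs distinguished only by their new sType values. As a non-normative sketch (the vkSetLatencySleepModeNV / vkSetLatencyMarkerNV entry points and the VkLatencyMarkerNV values are declared elsewhere in the registry, and device, swapchain and presentId are assumed application state), enabling low-latency mode and marking the start of simulation for a frame might look like:

    #include <vulkan/vulkan.h>

    static void
    enable_low_latency_and_mark_frame(VkDevice device, VkSwapchainKHR swapchain,
                                      uint64_t presentId)
    {
       /* Turn on low-latency mode for this swapchain; no boost and no
        * minimum frame interval in this sketch. */
       VkLatencySleepModeInfoNV sleep_mode = {
          .sType = VK_STRUCTURE_TYPE_LATENCY_SLEEP_MODE_INFO_NV,
          .lowLatencyMode = VK_TRUE,
          .lowLatencyBoost = VK_FALSE,
          .minimumIntervalUs = 0,
       };
       vkSetLatencySleepModeNV(device, swapchain, &sleep_mode);

       /* Tag the start of CPU-side simulation for the frame that will be
        * presented with presentId. */
       VkSetLatencyMarkerInfoNV marker = {
          .sType = VK_STRUCTURE_TYPE_SET_LATENCY_MARKER_INFO_NV,
          .presentID = presentId,
          .marker = VK_LATENCY_MARKER_SIMULATION_START_NV,
       };
       vkSetLatencyMarkerNV(device, swapchain, &marker);
    }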
+ <type category="struct" name="VkPhysicalDeviceCudaKernelLaunchFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>cudaKernelLaunchFeatures</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceCudaKernelLaunchPropertiesNV" structextends="VkPhysicalDeviceProperties2" returnedonly="true">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_PROPERTIES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="max"><type>uint32_t</type> <name>computeCapabilityMinor</name></member>
+ <member limittype="min"><type>uint32_t</type> <name>computeCapabilityMajor</name></member>
+ </type>
+ <type category="struct" name="VkDeviceQueueShaderCoreControlCreateInfoARM" structextends="VkDeviceQueueCreateInfo,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>shaderCoreCount</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceSchedulingControlsFeaturesARM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>schedulingControls</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceSchedulingControlsPropertiesARM" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkPhysicalDeviceSchedulingControlsFlagsARM</type> <name>schedulingControlsFlags</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RELAXED_LINE_RASTERIZATION_FEATURES_IMG"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>relaxedLineRasterization</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRenderPassStripedFeaturesARM" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>renderPassStriped</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceRenderPassStripedPropertiesARM" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_PROPERTIES_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkExtent2D</type> <name>renderPassStripeGranularity</name></member>
+ <member><type>uint32_t</type> <name>maxRenderPassStripes</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassStripeInfoARM">
+ <member values="VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>VkRect2D</type> <name>stripeArea</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassStripeBeginInfoARM" structextends="VkRenderingInfo,VkRenderPassBeginInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>stripeInfoCount</name></member>
+ <member len="stripeInfoCount">const <type>VkRenderPassStripeInfoARM</type>* <name>pStripeInfos</name></member>
+ </type>
+ <type category="struct" name="VkRenderPassStripeSubmitInfoARM" structextends="VkCommandBufferSubmitInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_SUBMIT_INFO_ARM"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member><type>uint32_t</type> <name>stripeSemaphoreInfoCount</name></member>
+ <member len="stripeSemaphoreInfoCount">const <type>VkSemaphoreSubmitInfo</type>* <name>pStripeSemaphoreInfos</name></member>
+ </type>
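Editor's note: VkRenderPassStripeBeginInfoARM chains into VkRenderingInfo (or VkRenderPassBeginInfo) to split the render area into stripes, while VkRenderPassStripeSubmitInfoARM attaches per-stripe semaphores at submit time. A minimal sketch using only core vkCmdBeginRendering, assuming a 1024x1024 render area whose stripes satisfy renderPassStripeGranularity and with attachment setup omitted:

    #include <vulkan/vulkan.h>

    static void
    begin_striped_rendering(VkCommandBuffer cmd)
    {
       /* Split the render area into two horizontal stripes. */
       VkRenderPassStripeInfoARM stripes[2] = {
          {
             .sType = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM,
             .stripeArea = { .offset = { 0, 0 },   .extent = { 1024, 512 } },
          },
          {
             .sType = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM,
             .stripeArea = { .offset = { 0, 512 }, .extent = { 1024, 512 } },
          },
       };
       VkRenderPassStripeBeginInfoARM stripe_begin = {
          .sType = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM,
          .stripeInfoCount = 2,
          .pStripeInfos = stripes,
       };
       VkRenderingInfo rendering_info = {
          .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
          .pNext = &stripe_begin,
          .renderArea = { .offset = { 0, 0 }, .extent = { 1024, 1024 } },
          .layerCount = 1,
          /* color/depth attachments omitted from this sketch */
       };
       vkCmdBeginRendering(cmd, &rendering_info);
    }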
+ <type category="struct" name="VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderMaximalReconvergence</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderSubgroupRotate</name></member>
+ <member><type>VkBool32</type> <name>shaderSubgroupRotateClustered</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderExpectAssumeFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderExpectAssume</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderFloatControls2FeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderFloatControls2</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>dynamicRenderingLocalRead</name></member>
+ </type>
+ <type category="struct" name="VkRenderingAttachmentLocationInfoKHR" structextends="VkGraphicsPipelineCreateInfo,VkCommandBufferInheritanceInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member noautovalidity="true" len="colorAttachmentCount">const <type>uint32_t</type>* <name>pColorAttachmentLocations</name></member>
+ </type>
+ <type category="struct" name="VkRenderingInputAttachmentIndexInfoKHR" structextends="VkGraphicsPipelineCreateInfo,VkCommandBufferInheritanceInfo">
+ <member values="VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member optional="true"><type>uint32_t</type> <name>colorAttachmentCount</name></member>
+ <member len="colorAttachmentCount" optional="true">const <type>uint32_t</type>* <name>pColorAttachmentInputIndices</name></member>
+ <member optional="true">const <type>uint32_t</type>* <name>pDepthInputAttachmentIndex</name></member>
+ <member optional="true">const <type>uint32_t</type>* <name>pStencilInputAttachmentIndex</name></member>
+ </type>
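Editor's note: VkRenderingAttachmentLocationInfoKHR and VkRenderingInputAttachmentIndexInfoKHR are consumed both as pNext extensions of pipeline/inheritance info and by the vkCmdSetRenderingAttachmentLocationsKHR / vkCmdSetRenderingInputAttachmentIndicesKHR commands that VK_KHR_dynamic_rendering_local_read declares elsewhere in the registry. A hedged sketch that remaps two color attachments inside a dynamic rendering instance (the swap of locations 0 and 1 is purely illustrative):

    #include <vulkan/vulkan.h>

    static void
    remap_color_attachments(VkCommandBuffer cmd)
    {
       /* Fragment output location 0 now targets attachment 1 and vice versa
        * for subsequent draws in this rendering instance. */
       const uint32_t locations[2] = { 1, 0 };
       VkRenderingAttachmentLocationInfoKHR location_info = {
          .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR,
          .colorAttachmentCount = 2,
          .pColorAttachmentLocations = locations,
       };
       vkCmdSetRenderingAttachmentLocationsKHR(cmd, &location_info);
    }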
+ <type category="struct" name="VkPhysicalDeviceShaderQuadControlFeaturesKHR" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true" noautovalidity="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderQuadControl</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>shaderFloat16VectorAtomics</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMapMemoryPlacedFeaturesEXT" structextends="VkPhysicalDeviceFeatures2,VkDeviceCreateInfo">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member><type>VkBool32</type> <name>memoryMapPlaced</name></member>
+ <member><type>VkBool32</type> <name>memoryMapRangePlaced</name></member>
+ <member><type>VkBool32</type> <name>memoryUnmapReserve</name></member>
+ </type>
+ <type category="struct" name="VkPhysicalDeviceMapMemoryPlacedPropertiesEXT" returnedonly="true" structextends="VkPhysicalDeviceProperties2">
+ <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_PROPERTIES_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true"><type>void</type>* <name>pNext</name></member>
+ <member limittype="min,pot"><type>VkDeviceSize</type> <name>minPlacedMemoryMapAlignment</name></member>
+ </type>
+ <type category="struct" name="VkMemoryMapPlacedInfoEXT" structextends="VkMemoryMapInfoKHR">
+ <member values="VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT"><type>VkStructureType</type> <name>sType</name></member>
+ <member optional="true">const <type>void</type>* <name>pNext</name></member>
+ <member noautovalidity="true"><type>void</type>* <name>pPlacedAddress</name></member>
+ </type>
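Editor's note: VkMemoryMapPlacedInfoEXT extends VkMemoryMapInfoKHR, so placed mapping goes through vkMapMemory2KHR with the VK_MEMORY_MAP_PLACED_BIT_EXT flag (the VK_KHR_map_memory2 struct, command and flag bit are defined outside this hunk). A sketch, assuming reserved_addr is a previously reserved virtual-address range that satisfies minPlacedMemoryMapAlignment from the properties struct above:

    #include <vulkan/vulkan.h>

    static VkResult
    map_placed(VkDevice device, VkDeviceMemory memory,
               void *reserved_addr, void **out_ptr)
    {
       /* Ask the implementation to map the allocation exactly at
        * reserved_addr instead of choosing an address itself. */
       VkMemoryMapPlacedInfoEXT placed = {
          .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT,
          .pPlacedAddress = reserved_addr,
       };
       VkMemoryMapInfoKHR map_info = {
          .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
          .pNext = &placed,
          .flags = VK_MEMORY_MAP_PLACED_BIT_EXT,
          .memory = memory,
          .offset = 0,
          .size = VK_WHOLE_SIZE,
       };
       return vkMapMemory2KHR(device, &map_info, out_ptr);
    }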
</types>
+
+
<comment>Vulkan enumerant (token) definitions</comment>
<enums name="API Constants" comment="Vulkan hardcoded constants - not an enumerated type, part of the header boilerplate">
@@ -6123,6 +9120,7 @@ typedef void <name>CAMetalLayer</name>;
<enum type="float" value="1000.0F" name="VK_LOD_CLAMP_NONE"/>
<enum type="uint32_t" value="(~0U)" name="VK_REMAINING_MIP_LEVELS"/>
<enum type="uint32_t" value="(~0U)" name="VK_REMAINING_ARRAY_LAYERS"/>
+ <enum type="uint32_t" value="(~0U)" name="VK_REMAINING_3D_SLICES_EXT"/>
<enum type="uint64_t" value="(~0ULL)" name="VK_WHOLE_SIZE"/>
<enum type="uint32_t" value="(~0U)" name="VK_ATTACHMENT_UNUSED"/>
<enum type="uint32_t" value="1" name="VK_TRUE"/>
@@ -6140,7 +9138,11 @@ typedef void <name>CAMetalLayer</name>;
<enum name="VK_MAX_DRIVER_INFO_SIZE_KHR" alias="VK_MAX_DRIVER_INFO_SIZE"/>
<enum type="uint32_t" value="(~0U)" name="VK_SHADER_UNUSED_KHR"/>
<enum name="VK_SHADER_UNUSED_NV" alias="VK_SHADER_UNUSED_KHR"/>
- <enum type="uint32_t" value="16" name="VK_MAX_GLOBAL_PRIORITY_SIZE_EXT"/>
+ <enum type="uint32_t" value="16" name="VK_MAX_GLOBAL_PRIORITY_SIZE_KHR"/>
+ <enum name="VK_MAX_GLOBAL_PRIORITY_SIZE_EXT" alias="VK_MAX_GLOBAL_PRIORITY_SIZE_KHR"/>
+ <enum type="uint32_t" value="32" name="VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT"/>
+ <enum type="uint32_t" value="7" name="VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR"/>
+ <enum type="uint32_t" value="(~0U)" name="VK_SHADER_INDEX_UNUSED_AMDX"/>
</enums>
<comment>
@@ -6627,7 +9629,7 @@ typedef void <name>CAMetalLayer</name>;
<comment>Error codes (negative values)</comment>
<enum value="-1" name="VK_ERROR_OUT_OF_HOST_MEMORY" comment="A host memory allocation has failed"/>
<enum value="-2" name="VK_ERROR_OUT_OF_DEVICE_MEMORY" comment="A device memory allocation has failed"/>
- <enum value="-3" name="VK_ERROR_INITIALIZATION_FAILED" comment="Initialization of a object has failed"/>
+ <enum value="-3" name="VK_ERROR_INITIALIZATION_FAILED" comment="Initialization of an object has failed"/>
<enum value="-4" name="VK_ERROR_DEVICE_LOST" comment="The logical device has been lost. See &lt;&lt;devsandqueues-lost-device&gt;&gt;"/>
<enum value="-5" name="VK_ERROR_MEMORY_MAP_FAILED" comment="Mapping of a memory object has failed"/>
<enum value="-6" name="VK_ERROR_LAYER_NOT_PRESENT" comment="Layer specified does not exist"/>
@@ -6682,6 +9684,14 @@ typedef void <name>CAMetalLayer</name>;
<enum value="24" name="VK_OBJECT_TYPE_FRAMEBUFFER"/>
<enum value="25" name="VK_OBJECT_TYPE_COMMAND_POOL"/>
</enums>
+ <enums name="VkRayTracingInvocationReorderModeNV" type="enum">
+ <enum value="0" name="VK_RAY_TRACING_INVOCATION_REORDER_MODE_NONE_NV"/>
+ <enum value="1" name="VK_RAY_TRACING_INVOCATION_REORDER_MODE_REORDER_NV"/>
+ </enums>
+ <enums name="VkDirectDriverLoadingModeLUNARG" type="enum">
+ <enum value="0" name="VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG"/>
+ <enum value="1" name="VK_DIRECT_DRIVER_LOADING_MODE_INCLUSIVE_LUNARG"/>
+ </enums>
<comment>Flags</comment>
<enums name="VkQueueFlagBits" type="bitmask">
@@ -6740,6 +9750,17 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="7" name="VK_BUFFER_USAGE_VERTEX_BUFFER_BIT" comment="Can be used as source of fixed-function vertex fetch (VBO)"/>
<enum bitpos="8" name="VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT" comment="Can be the source of indirect parameters (e.g. indirect buffer, parameter buffer)"/>
</enums>
+ <enums name="VkBufferUsageFlagBits2KHR" type="bitmask" bitwidth="64">
+ <enum bitpos="0" name="VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT_KHR"/>
+ <enum bitpos="1" name="VK_BUFFER_USAGE_2_TRANSFER_DST_BIT_KHR"/>
+ <enum bitpos="2" name="VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR"/>
+ <enum bitpos="3" name="VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR"/>
+ <enum bitpos="4" name="VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR"/>
+ <enum bitpos="5" name="VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR"/>
+ <enum bitpos="6" name="VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT_KHR"/>
+ <enum bitpos="7" name="VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT_KHR"/>
+ <enum bitpos="8" name="VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR"/>
+ </enums>
<enums name="VkBufferCreateFlagBits" type="bitmask">
<enum bitpos="0" name="VK_BUFFER_CREATE_SPARSE_BINDING_BIT" comment="Buffer should support sparse backing"/>
<enum bitpos="1" name="VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT" comment="Buffer should support sparse backing with partial residency"/>
@@ -6776,11 +9797,16 @@ typedef void <name>CAMetalLayer</name>;
</enums>
<enums name="VkSamplerCreateFlagBits" type="bitmask">
</enums>
- <enums name="VkPipelineCreateFlagBits" type="bitmask" comment="Note that the gap at bitpos 10 is unused, and can be reserved">
+ <enums name="VkPipelineCreateFlagBits" type="bitmask">
<enum bitpos="0" name="VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT"/>
<enum bitpos="1" name="VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT"/>
<enum bitpos="2" name="VK_PIPELINE_CREATE_DERIVATIVE_BIT"/>
</enums>
+ <enums name="VkPipelineCreateFlagBits2KHR" type="bitmask" bitwidth="64">
+ <enum bitpos="0" name="VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR"/>
+ <enum bitpos="1" name="VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR"/>
+ <enum bitpos="2" name="VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR"/>
+ </enums>
<enums name="VkPipelineShaderStageCreateFlagBits" type="bitmask">
</enums>
<enums name="VkColorComponentFlagBits" type="bitmask">
@@ -6836,6 +9862,8 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="9" name="VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT" comment="Optional"/>
<enum bitpos="10" name="VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT" comment="Optional"/>
</enums>
+ <enums name="VkMemoryMapFlagBits" type="bitmask">
+ </enums>
<enums name="VkImageAspectFlagBits" type="bitmask">
<enum bitpos="0" name="VK_IMAGE_ASPECT_COLOR_BIT"/>
<enum bitpos="1" name="VK_IMAGE_ASPECT_DEPTH_BIT"/>
@@ -6895,7 +9923,7 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="0" name="VK_STENCIL_FACE_FRONT_BIT" comment="Front face"/>
<enum bitpos="1" name="VK_STENCIL_FACE_BACK_BIT" comment="Back face"/>
<enum value="0x00000003" name="VK_STENCIL_FACE_FRONT_AND_BACK" comment="Front and back faces"/>
- <enum name="VK_STENCIL_FRONT_AND_BACK" alias="VK_STENCIL_FACE_FRONT_AND_BACK" comment="Alias for backwards compatibility"/>
+ <enum api="vulkan" name="VK_STENCIL_FRONT_AND_BACK" alias="VK_STENCIL_FACE_FRONT_AND_BACK" deprecated="aliased"/>
</enums>
<enums name="VkDescriptorPoolCreateFlagBits" type="bitmask">
<enum bitpos="0" name="VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT" comment="Descriptor sets may be freed individually"/>
@@ -6920,7 +9948,7 @@ typedef void <name>CAMetalLayer</name>;
</enums>
<enums name="VkColorSpaceKHR" type="enum">
<enum value="0" name="VK_COLOR_SPACE_SRGB_NONLINEAR_KHR"/>
- <enum name="VK_COLORSPACE_SRGB_NONLINEAR_KHR" alias="VK_COLOR_SPACE_SRGB_NONLINEAR_KHR" comment="Backwards-compatible alias containing a typo"/>
+ <enum api="vulkan" name="VK_COLORSPACE_SRGB_NONLINEAR_KHR" alias="VK_COLOR_SPACE_SRGB_NONLINEAR_KHR" deprecated="aliased"/>
</enums>
<enums name="VkDisplayPlaneAlphaFlagBitsKHR" type="bitmask">
<enum bitpos="0" name="VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR"/>
@@ -6948,11 +9976,11 @@ typedef void <name>CAMetalLayer</name>;
<enums name="VkSwapchainImageUsageFlagBitsANDROID" type="bitmask">
<enum bitpos="0" name="VK_SWAPCHAIN_IMAGE_USAGE_SHARED_BIT_ANDROID"/>
</enums>
- <enums name="VkTimeDomainEXT" type="enum">
- <enum value="0" name="VK_TIME_DOMAIN_DEVICE_EXT"/>
- <enum value="1" name="VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT"/>
- <enum value="2" name="VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT"/>
- <enum value="3" name="VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT"/>
+ <enums name="VkTimeDomainKHR" type="enum">
+ <enum value="0" name="VK_TIME_DOMAIN_DEVICE_KHR"/>
+ <enum value="1" name="VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR"/>
+ <enum value="2" name="VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_KHR"/>
+ <enum value="3" name="VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_KHR"/>
</enums>
<enums name="VkDebugReportFlagBitsEXT" type="bitmask">
<enum bitpos="0" name="VK_DEBUG_REPORT_INFORMATION_BIT_EXT"/>
@@ -6991,13 +10019,15 @@ typedef void <name>CAMetalLayer</name>;
<enum value="26" name="VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT"/>
<enum value="27" name="VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT"/>
<enum value="28" name="VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT"/>
- <enum name="VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT" comment="Backwards-compatible alias containing a typo"/>
+ <enum name="VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT" deprecated="aliased"/>
<enum value="29" name="VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT"/>
<enum value="30" name="VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT"/>
- <!--<enum value="31" name="VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT" comment="Removed NVX_device_generated_commands"/>-->
- <!--<enum value="32" name="VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT" comment="Removed NVX_device_generated_commands"/>-->
+ <comment>NVX_device_generated_commands formerly used these enum values, but that extension has been removed
+ value 31 / name VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT
+ value 32 / name VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT
+ </comment>
<enum value="33" name="VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT"/>
- <enum name="VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT" comment="Backwards-compatible alias containing a typo"/>
+ <enum name="VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT" deprecated="aliased"/>
</enums>
<enums name="VkDeviceMemoryReportEventTypeEXT" type="enum">
<enum value="0" name="VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT"/>
@@ -7042,6 +10072,16 @@ typedef void <name>CAMetalLayer</name>;
<enum value="6" name="VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT"/>
<enum value="7" name="VK_VALIDATION_FEATURE_DISABLE_SHADER_VALIDATION_CACHE_EXT"/>
</enums>
+ <enums name="VkLayerSettingTypeEXT" type="enum">
+ <enum value="0" name="VK_LAYER_SETTING_TYPE_BOOL32_EXT"/>
+ <enum value="1" name="VK_LAYER_SETTING_TYPE_INT32_EXT"/>
+ <enum value="2" name="VK_LAYER_SETTING_TYPE_INT64_EXT"/>
+ <enum value="3" name="VK_LAYER_SETTING_TYPE_UINT32_EXT"/>
+ <enum value="4" name="VK_LAYER_SETTING_TYPE_UINT64_EXT"/>
+ <enum value="5" name="VK_LAYER_SETTING_TYPE_FLOAT32_EXT"/>
+ <enum value="6" name="VK_LAYER_SETTING_TYPE_FLOAT64_EXT"/>
+ <enum value="7" name="VK_LAYER_SETTING_TYPE_STRING_EXT"/>
+ </enums>
<enums name="VkSubgroupFeatureFlagBits" type="bitmask">
<enum bitpos="0" name="VK_SUBGROUP_FEATURE_BASIC_BIT" comment="Basic subgroup operations"/>
<enum bitpos="1" name="VK_SUBGROUP_FEATURE_VOTE_BIT" comment="Vote subgroup operations"/>
@@ -7070,7 +10110,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="6" name="VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV"/>
<enum value="7" name="VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV"/>
</enums>
- <enums name="VkPrivateDataSlotCreateFlagBitsEXT" type="bitmask">
+ <enums name="VkPrivateDataSlotCreateFlagBits" type="bitmask">
</enums>
<enums name="VkDescriptorSetLayoutCreateFlagBits" type="bitmask">
</enums>
@@ -7118,7 +10158,7 @@ typedef void <name>CAMetalLayer</name>;
</enums>
<enums name="VkSurfaceCounterFlagBitsEXT" type="bitmask">
<enum bitpos="0" name="VK_SURFACE_COUNTER_VBLANK_BIT_EXT"/>
- <enum name="VK_SURFACE_COUNTER_VBLANK_EXT" alias="VK_SURFACE_COUNTER_VBLANK_BIT_EXT" comment="Backwards-compatible alias containing a typo"/>
+ <enum name="VK_SURFACE_COUNTER_VBLANK_EXT" alias="VK_SURFACE_COUNTER_VBLANK_BIT_EXT" deprecated="aliased"/>
</enums>
<enums name="VkDisplayPowerStateEXT" type="enum">
<enum value="0" name="VK_DISPLAY_POWER_STATE_OFF_EXT"/>
@@ -7215,11 +10255,15 @@ typedef void <name>CAMetalLayer</name>;
<enum value="1" name="VK_SHADER_INFO_TYPE_BINARY_AMD"/>
<enum value="2" name="VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD"/>
</enums>
- <enums name="VkQueueGlobalPriorityEXT" type="enum">
- <enum value="128" name="VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT"/>
- <enum value="256" name="VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT"/>
- <enum value="512" name="VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT"/>
- <enum value="1024" name="VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT"/>
+ <enums name="VkQueueGlobalPriorityKHR" type="enum">
+ <enum value="128" name="VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR"/>
+ <enum value="256" name="VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR"/>
+ <enum value="512" name="VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR"/>
+ <enum value="1024" name="VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR"/>
+ <enum name="VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT" alias="VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR"/>
+ <enum name="VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT" alias="VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR"/>
+ <enum name="VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT" alias="VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR"/>
+ <enum name="VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT" alias="VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR"/>
</enums>
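Editor's note: the EXT global-priority values above now alias their KHR forms; the priority itself is still requested per queue via VkDeviceQueueGlobalPriorityCreateInfoKHR (defined elsewhere in the registry) chained into VkDeviceQueueCreateInfo. A brief sketch, with queue_family_index and queue_priority as assumed application inputs:

    #include <vulkan/vulkan.h>

    static void
    fill_realtime_queue_create_info(uint32_t queue_family_index,
                                    const float *queue_priority,
                                    VkDeviceQueueCreateInfo *out)
    {
       /* Request a realtime global priority for this queue; drivers may
        * reject the request at device creation time. */
       static const VkDeviceQueueGlobalPriorityCreateInfoKHR global_priority = {
          .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR,
          .globalPriority = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR,
       };
       *out = (VkDeviceQueueCreateInfo) {
          .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
          .pNext = &global_priority,
          .queueFamilyIndex = queue_family_index,
          .queueCount = 1,
          .pQueuePriorities = queue_priority,
       };
    }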
<enums name="VkDebugUtilsMessageSeverityFlagBitsEXT" type="bitmask">
<enum bitpos="0" name="VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT"/>
@@ -7253,7 +10297,8 @@ typedef void <name>CAMetalLayer</name>;
<enum value="0x10004" name="VK_VENDOR_ID_CODEPLAY" comment="Codeplay Software Ltd. vendor ID"/>
<enum value="0x10005" name="VK_VENDOR_ID_MESA" comment="Mesa vendor ID"/>
<enum value="0x10006" name="VK_VENDOR_ID_POCL" comment="PoCL vendor ID"/>
- <unused start="0x10007" comment="This is the next unused available Khronos vendor ID"/>
+ <enum value="0x10007" name="VK_VENDOR_ID_MOBILEYE" comment="Mobileye vendor ID"/>
+ <unused start="0x10008" comment="This is the next unused available Khronos vendor ID"/>
</enums>
<enums name="VkDriverId" type="enum">
<comment>Driver IDs are now represented as enums instead of the old
@@ -7276,6 +10321,15 @@ typedef void <name>CAMetalLayer</name>;
<enum value="15" name="VK_DRIVER_ID_COREAVI_PROPRIETARY" comment="Core Avionics &amp; Industrial Inc."/>
<enum value="16" name="VK_DRIVER_ID_JUICE_PROPRIETARY" comment="Juice Technologies, Inc."/>
<enum value="17" name="VK_DRIVER_ID_VERISILICON_PROPRIETARY" comment="Verisilicon, Inc."/>
+ <enum value="18" name="VK_DRIVER_ID_MESA_TURNIP" comment="Mesa open source project"/>
+ <enum value="19" name="VK_DRIVER_ID_MESA_V3DV" comment="Mesa open source project"/>
+ <enum value="20" name="VK_DRIVER_ID_MESA_PANVK" comment="Mesa open source project"/>
+ <enum value="21" name="VK_DRIVER_ID_SAMSUNG_PROPRIETARY" comment="Samsung Electronics Co., Ltd."/>
+ <enum value="22" name="VK_DRIVER_ID_MESA_VENUS" comment="Mesa open source project"/>
+ <enum value="23" name="VK_DRIVER_ID_MESA_DOZEN" comment="Mesa open source project"/>
+ <enum value="24" name="VK_DRIVER_ID_MESA_NVK" comment="Mesa open source project"/>
+ <enum value="25" name="VK_DRIVER_ID_IMAGINATION_OPEN_SOURCE_MESA" comment="Imagination Technologies"/>
+ <enum value="26" name="VK_DRIVER_ID_MESA_AGXV" comment="Mesa open source project"/>
</enums>
<enums name="VkConditionalRenderingFlagBitsEXT" type="bitmask">
<enum bitpos="0" name="VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT"/>
@@ -7380,34 +10434,19 @@ typedef void <name>CAMetalLayer</name>;
</enums>
<enums name="VkFramebufferCreateFlagBits" type="bitmask">
</enums>
- <enums name="VkScopeNV" type="enum">
- <enum value="1" name="VK_SCOPE_DEVICE_NV"/>
- <enum value="2" name="VK_SCOPE_WORKGROUP_NV"/>
- <enum value="3" name="VK_SCOPE_SUBGROUP_NV"/>
- <enum value="5" name="VK_SCOPE_QUEUE_FAMILY_NV"/>
- </enums>
- <enums name="VkComponentTypeNV" type="enum">
- <enum value="0" name="VK_COMPONENT_TYPE_FLOAT16_NV"/>
- <enum value="1" name="VK_COMPONENT_TYPE_FLOAT32_NV"/>
- <enum value="2" name="VK_COMPONENT_TYPE_FLOAT64_NV"/>
- <enum value="3" name="VK_COMPONENT_TYPE_SINT8_NV"/>
- <enum value="4" name="VK_COMPONENT_TYPE_SINT16_NV"/>
- <enum value="5" name="VK_COMPONENT_TYPE_SINT32_NV"/>
- <enum value="6" name="VK_COMPONENT_TYPE_SINT64_NV"/>
- <enum value="7" name="VK_COMPONENT_TYPE_UINT8_NV"/>
- <enum value="8" name="VK_COMPONENT_TYPE_UINT16_NV"/>
- <enum value="9" name="VK_COMPONENT_TYPE_UINT32_NV"/>
- <enum value="10" name="VK_COMPONENT_TYPE_UINT64_NV"/>
- </enums>
<enums name="VkDeviceDiagnosticsConfigFlagBitsNV" type="bitmask">
<enum bitpos="0" name="VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV"/>
<enum bitpos="1" name="VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV"/>
<enum bitpos="2" name="VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV"/>
+ <enum bitpos="3" name="VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_ERROR_REPORTING_BIT_NV"/>
</enums>
- <enums name="VkPipelineCreationFeedbackFlagBitsEXT" type="bitmask">
- <enum bitpos="0" name="VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT"/>
- <enum bitpos="1" name="VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT"/>
- <enum bitpos="2" name="VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT"/>
+ <enums name="VkPipelineCreationFeedbackFlagBits" type="bitmask">
+ <enum bitpos="0" name="VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT"/>
+ <enum name="VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT" alias="VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT"/>
+ <enum bitpos="1" name="VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT"/>
+ <enum name="VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT" alias="VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT"/>
+ <enum bitpos="2" name="VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT"/>
+ <enum name="VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT" alias="VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT"/>
</enums>
<enums name="VkFullScreenExclusiveEXT" type="enum">
<enum value="0" name="VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT"/>
@@ -7419,9 +10458,12 @@ typedef void <name>CAMetalLayer</name>;
<enum value="0" name="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR"/>
<enum value="1" name="VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR"/>
<enum value="2" name="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR"/>
- <enum name="VK_QUERY_SCOPE_COMMAND_BUFFER_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR"/>
- <enum name="VK_QUERY_SCOPE_RENDER_PASS_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR"/>
- <enum name="VK_QUERY_SCOPE_COMMAND_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR"/>
+ <enum name="VK_QUERY_SCOPE_COMMAND_BUFFER_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR" deprecated="aliased"/>
+ <enum name="VK_QUERY_SCOPE_RENDER_PASS_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR" deprecated="aliased"/>
+ <enum name="VK_QUERY_SCOPE_COMMAND_KHR" alias="VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR" deprecated="aliased"/>
+ </enums>
+ <enums name="VkMemoryDecompressionMethodFlagBitsNV" type="bitmask" bitwidth="64">
+ <enum bitpos="0" name="VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_NV"/>
</enums>
<enums name="VkPerformanceCounterUnitKHR" type="enum">
<enum value="0" name="VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR"/>
@@ -7446,14 +10488,16 @@ typedef void <name>CAMetalLayer</name>;
</enums>
<enums name="VkPerformanceCounterDescriptionFlagBitsKHR" type="bitmask">
<enum bitpos="0" name="VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR"/>
- <enum name="VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR" alias="VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR" comment="Backwards-compatible alias containing a typo"/>
+ <enum name="VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR" alias="VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR" deprecated="aliased"/>
<enum bitpos="1" name="VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR"/>
- <enum name="VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR" alias="VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR" comment="Backwards-compatible alias containing a typo"/>
+ <enum name="VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR" alias="VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR" deprecated="aliased"/>
</enums>
<enums name="VkAcquireProfilingLockFlagBitsKHR" type="bitmask">
</enums>
<enums name="VkShaderCorePropertiesFlagBitsAMD" type="bitmask">
</enums>
+ <enums name="VkRefreshObjectFlagBitsKHR" type="bitmask">
+ </enums>
<enums name="VkPerformanceConfigurationTypeINTEL" type="enum">
<enum value="0" name="VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL"/>
</enums>
@@ -7486,22 +10530,52 @@ typedef void <name>CAMetalLayer</name>;
<enum value="2" name="VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR"/>
<enum value="3" name="VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR"/>
</enums>
- <enums name="VkLineRasterizationModeEXT" type="enum">
- <enum value="0" name="VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT"/>
- <enum value="1" name="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT"/>
- <enum value="2" name="VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT"/>
- <enum value="3" name="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT"/>
+ <enums name="VkLineRasterizationModeKHR" type="enum">
+ <enum value="0" name="VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR"/>
+ <enum name="VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT" alias="VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR"/>
+ <enum value="1" name="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR"/>
+ <enum name="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT" alias="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR"/>
+ <enum value="2" name="VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR"/>
+ <enum name="VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT" alias="VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR"/>
+ <enum value="3" name="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR"/>
+ <enum name="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT" alias="VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR"/>
</enums>
<enums name="VkShaderModuleCreateFlagBits" type="bitmask">
</enums>
<enums name="VkPipelineCompilerControlFlagBitsAMD" type="bitmask">
</enums>
- <enums name="VkToolPurposeFlagBitsEXT" type="bitmask">
- <enum bitpos="0" name="VK_TOOL_PURPOSE_VALIDATION_BIT_EXT"/>
- <enum bitpos="1" name="VK_TOOL_PURPOSE_PROFILING_BIT_EXT"/>
- <enum bitpos="2" name="VK_TOOL_PURPOSE_TRACING_BIT_EXT"/>
- <enum bitpos="3" name="VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT"/>
- <enum bitpos="4" name="VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT"/>
+ <enums name="VkFaultLevel" type="enum">
+ <enum value="0" name="VK_FAULT_LEVEL_UNASSIGNED"/>
+ <enum value="1" name="VK_FAULT_LEVEL_CRITICAL"/>
+ <enum value="2" name="VK_FAULT_LEVEL_RECOVERABLE"/>
+ <enum value="3" name="VK_FAULT_LEVEL_WARNING"/>
+ </enums>
+ <enums name="VkFaultType" type="enum">
+ <enum value="0" name="VK_FAULT_TYPE_INVALID"/>
+ <enum value="1" name="VK_FAULT_TYPE_UNASSIGNED"/>
+ <enum value="2" name="VK_FAULT_TYPE_IMPLEMENTATION"/>
+ <enum value="3" name="VK_FAULT_TYPE_SYSTEM"/>
+ <enum value="4" name="VK_FAULT_TYPE_PHYSICAL_DEVICE"/>
+ <enum value="5" name="VK_FAULT_TYPE_COMMAND_BUFFER_FULL"/>
+ <enum value="6" name="VK_FAULT_TYPE_INVALID_API_USAGE"/>
+ </enums>
+ <enums name="VkFaultQueryBehavior" type="enum">
+ <enum value="0" name="VK_FAULT_QUERY_BEHAVIOR_GET_AND_CLEAR_ALL_FAULTS"/>
+ </enums>
+ <enums name="VkToolPurposeFlagBits" type="bitmask">
+ <enum bitpos="0" name="VK_TOOL_PURPOSE_VALIDATION_BIT"/>
+ <enum name="VK_TOOL_PURPOSE_VALIDATION_BIT_EXT" alias="VK_TOOL_PURPOSE_VALIDATION_BIT"/>
+ <enum bitpos="1" name="VK_TOOL_PURPOSE_PROFILING_BIT"/>
+ <enum name="VK_TOOL_PURPOSE_PROFILING_BIT_EXT" alias="VK_TOOL_PURPOSE_PROFILING_BIT"/>
+ <enum bitpos="2" name="VK_TOOL_PURPOSE_TRACING_BIT"/>
+ <enum name="VK_TOOL_PURPOSE_TRACING_BIT_EXT" alias="VK_TOOL_PURPOSE_TRACING_BIT"/>
+ <enum bitpos="3" name="VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT"/>
+ <enum name="VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT" alias="VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT"/>
+ <enum bitpos="4" name="VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT"/>
+ <enum name="VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT" alias="VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT"/>
+ </enums>
+ <enums name="VkPipelineMatchControl" type="enum">
+ <enum value="0" name="VK_PIPELINE_MATCH_CONTROL_APPLICATION_UUID_EXACT_MATCH"/>
</enums>
<enums name="VkFragmentShadingRateCombinerOpKHR" type="enum">
<enum value="0" name="VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR"/>
@@ -7528,81 +10602,190 @@ typedef void <name>CAMetalLayer</name>;
<enum value="0" name="VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV"/>
<enum value="1" name="VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV"/>
</enums>
- <enums name="VkAccessFlagBits2KHR" type="bitmask" bitwidth="64">
- <enum value="0" name="VK_ACCESS_2_NONE_KHR"/>
- <enum bitpos="0" name="VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR"/>
- <enum bitpos="1" name="VK_ACCESS_2_INDEX_READ_BIT_KHR"/>
- <enum bitpos="2" name="VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR"/>
- <enum bitpos="3" name="VK_ACCESS_2_UNIFORM_READ_BIT_KHR"/>
- <enum bitpos="4" name="VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR"/>
- <enum bitpos="5" name="VK_ACCESS_2_SHADER_READ_BIT_KHR"/>
- <enum bitpos="6" name="VK_ACCESS_2_SHADER_WRITE_BIT_KHR"/>
- <enum bitpos="7" name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR"/>
- <enum bitpos="8" name="VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR"/>
- <enum bitpos="9" name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR"/>
- <enum bitpos="10" name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR"/>
- <enum bitpos="11" name="VK_ACCESS_2_TRANSFER_READ_BIT_KHR"/>
- <enum bitpos="12" name="VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR"/>
- <enum bitpos="13" name="VK_ACCESS_2_HOST_READ_BIT_KHR"/>
- <enum bitpos="14" name="VK_ACCESS_2_HOST_WRITE_BIT_KHR"/>
- <enum bitpos="15" name="VK_ACCESS_2_MEMORY_READ_BIT_KHR"/>
- <enum bitpos="16" name="VK_ACCESS_2_MEMORY_WRITE_BIT_KHR"/>
- <!-- bitpos 17-31 are specified by extensions to the original VkAccessFlagBits enum -->
- <enum bitpos="32" name="VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR"/>
- <enum bitpos="33" name="VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR"/>
- <enum bitpos="34" name="VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR"/>
- </enums>
- <enums name="VkPipelineStageFlagBits2KHR" type="bitmask" bitwidth="64">
- <enum value="0" name="VK_PIPELINE_STAGE_2_NONE_KHR"/>
- <enum bitpos="0" name="VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR"/>
- <enum bitpos="1" name="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR"/>
- <enum bitpos="2" name="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR"/>
- <enum bitpos="3" name="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR"/>
- <enum bitpos="4" name="VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR"/>
- <enum bitpos="5" name="VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR"/>
- <enum bitpos="6" name="VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR"/>
- <enum bitpos="7" name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR"/>
- <enum bitpos="8" name="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR"/>
- <enum bitpos="9" name="VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR"/>
- <enum bitpos="10" name="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR"/>
- <enum bitpos="11" name="VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR"/>
- <enum bitpos="12" name="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR"/>
- <enum name="VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR"/>
- <enum bitpos="13" name="VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR"/>
- <enum bitpos="14" name="VK_PIPELINE_STAGE_2_HOST_BIT_KHR"/>
- <enum bitpos="15" name="VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR"/>
- <enum bitpos="16" name="VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR"/>
- <!-- bitpos 17-31 are specified by extensions to the original VkPipelineStageFlagBits enum -->
- <enum bitpos="32" name="VK_PIPELINE_STAGE_2_COPY_BIT_KHR"/>
- <enum bitpos="33" name="VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR"/>
- <enum bitpos="34" name="VK_PIPELINE_STAGE_2_BLIT_BIT_KHR"/>
- <enum bitpos="35" name="VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR"/>
- <enum bitpos="36" name="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR"/>
- <enum bitpos="37" name="VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR"/>
- <enum bitpos="38" name="VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR"/>
- </enums>
- <enums name="VkSubmitFlagBitsKHR" type="bitmask">
- <enum bitpos="0" name="VK_SUBMIT_PROTECTED_BIT_KHR"/>
+ <enums name="VkSubpassMergeStatusEXT" type="enum">
+ <enum value="0" name="VK_SUBPASS_MERGE_STATUS_MERGED_EXT"/>
+ <enum value="1" name="VK_SUBPASS_MERGE_STATUS_DISALLOWED_EXT"/>
+ <enum value="2" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SIDE_EFFECTS_EXT"/>
+ <enum value="3" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SAMPLES_MISMATCH_EXT"/>
+ <enum value="4" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_VIEWS_MISMATCH_EXT"/>
+ <enum value="5" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_ALIASING_EXT"/>
+ <enum value="6" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPENDENCIES_EXT"/>
+ <enum value="7" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INCOMPATIBLE_INPUT_ATTACHMENT_EXT"/>
+ <enum value="8" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_TOO_MANY_ATTACHMENTS_EXT"/>
+ <enum value="9" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INSUFFICIENT_STORAGE_EXT"/>
+ <enum value="10" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPTH_STENCIL_COUNT_EXT"/>
+ <enum value="11" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_RESOLVE_ATTACHMENT_REUSE_EXT"/>
+ <enum value="12" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SINGLE_SUBPASS_EXT"/>
+ <enum value="13" name="VK_SUBPASS_MERGE_STATUS_NOT_MERGED_UNSPECIFIED_EXT"/>
+ </enums>
+ <enums name="VkAccessFlagBits2" type="bitmask" bitwidth="64">
+ <enum value="0" name="VK_ACCESS_2_NONE"/>
+ <enum name="VK_ACCESS_2_NONE_KHR" alias="VK_ACCESS_2_NONE"/>
+ <enum bitpos="0" name="VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT"/>
+ <enum name="VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR" alias="VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT"/>
+ <enum bitpos="1" name="VK_ACCESS_2_INDEX_READ_BIT"/>
+ <enum name="VK_ACCESS_2_INDEX_READ_BIT_KHR" alias="VK_ACCESS_2_INDEX_READ_BIT"/>
+ <enum bitpos="2" name="VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT"/>
+ <enum name="VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR" alias="VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT"/>
+ <enum bitpos="3" name="VK_ACCESS_2_UNIFORM_READ_BIT"/>
+ <enum name="VK_ACCESS_2_UNIFORM_READ_BIT_KHR" alias="VK_ACCESS_2_UNIFORM_READ_BIT"/>
+ <enum bitpos="4" name="VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT"/>
+ <enum name="VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR" alias="VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT"/>
+ <enum bitpos="5" name="VK_ACCESS_2_SHADER_READ_BIT"/>
+ <enum name="VK_ACCESS_2_SHADER_READ_BIT_KHR" alias="VK_ACCESS_2_SHADER_READ_BIT"/>
+ <enum bitpos="6" name="VK_ACCESS_2_SHADER_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_SHADER_WRITE_BIT_KHR" alias="VK_ACCESS_2_SHADER_WRITE_BIT"/>
+ <enum bitpos="7" name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT"/>
+ <enum name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR" alias="VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT"/>
+ <enum bitpos="8" name="VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR" alias="VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT"/>
+ <enum bitpos="9" name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT"/>
+ <enum name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR" alias="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT"/>
+ <enum bitpos="10" name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR" alias="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT"/>
+ <enum bitpos="11" name="VK_ACCESS_2_TRANSFER_READ_BIT"/>
+ <enum name="VK_ACCESS_2_TRANSFER_READ_BIT_KHR" alias="VK_ACCESS_2_TRANSFER_READ_BIT"/>
+ <enum bitpos="12" name="VK_ACCESS_2_TRANSFER_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR" alias="VK_ACCESS_2_TRANSFER_WRITE_BIT"/>
+ <enum bitpos="13" name="VK_ACCESS_2_HOST_READ_BIT"/>
+ <enum name="VK_ACCESS_2_HOST_READ_BIT_KHR" alias="VK_ACCESS_2_HOST_READ_BIT"/>
+ <enum bitpos="14" name="VK_ACCESS_2_HOST_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_HOST_WRITE_BIT_KHR" alias="VK_ACCESS_2_HOST_WRITE_BIT"/>
+ <enum bitpos="15" name="VK_ACCESS_2_MEMORY_READ_BIT"/>
+ <enum name="VK_ACCESS_2_MEMORY_READ_BIT_KHR" alias="VK_ACCESS_2_MEMORY_READ_BIT"/>
+ <enum bitpos="16" name="VK_ACCESS_2_MEMORY_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_MEMORY_WRITE_BIT_KHR" alias="VK_ACCESS_2_MEMORY_WRITE_BIT"/>
+ <comment>bitpos 17-31 are specified by extensions to the original VkAccessFlagBits enum</comment>
+ <enum bitpos="32" name="VK_ACCESS_2_SHADER_SAMPLED_READ_BIT"/>
+ <enum name="VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR" alias="VK_ACCESS_2_SHADER_SAMPLED_READ_BIT"/>
+ <enum bitpos="33" name="VK_ACCESS_2_SHADER_STORAGE_READ_BIT"/>
+ <enum name="VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR" alias="VK_ACCESS_2_SHADER_STORAGE_READ_BIT"/>
+ <enum bitpos="34" name="VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT"/>
+ <enum name="VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR" alias="VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT"/>
+ </enums>
+ <enums name="VkPipelineStageFlagBits2" type="bitmask" bitwidth="64">
+ <enum value="0" name="VK_PIPELINE_STAGE_2_NONE"/>
+ <enum name="VK_PIPELINE_STAGE_2_NONE_KHR" alias="VK_PIPELINE_STAGE_2_NONE"/>
+ <enum bitpos="0" name="VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR" alias="VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT"/>
+ <enum bitpos="1" name="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR" alias="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT"/>
+ <enum bitpos="2" name="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR" alias="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT"/>
+ <enum bitpos="3" name="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT"/>
+ <enum bitpos="4" name="VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT"/>
+ <enum bitpos="5" name="VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT"/>
+ <enum bitpos="6" name="VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT"/>
+ <enum bitpos="7" name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT"/>
+ <enum bitpos="8" name="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR" alias="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT"/>
+ <enum bitpos="9" name="VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR" alias="VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT"/>
+ <enum bitpos="10" name="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR" alias="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT"/>
+ <enum bitpos="11" name="VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT"/>
+ <enum bitpos="12" name="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_TRANSFER_BIT" alias="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR"/>
+ <enum name="VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR" alias="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT"/>
+ <enum bitpos="13" name="VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR" alias="VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT"/>
+ <enum bitpos="14" name="VK_PIPELINE_STAGE_2_HOST_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_HOST_BIT_KHR" alias="VK_PIPELINE_STAGE_2_HOST_BIT"/>
+ <enum bitpos="15" name="VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR" alias="VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT"/>
+ <enum bitpos="16" name="VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR" alias="VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT"/>
+ <comment>bitpos 17-31 are specified by extensions to the original VkPipelineStageFlagBits enum</comment>
+ <enum bitpos="32" name="VK_PIPELINE_STAGE_2_COPY_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_COPY_BIT_KHR" alias="VK_PIPELINE_STAGE_2_COPY_BIT"/>
+ <enum bitpos="33" name="VK_PIPELINE_STAGE_2_RESOLVE_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR" alias="VK_PIPELINE_STAGE_2_RESOLVE_BIT"/>
+ <enum bitpos="34" name="VK_PIPELINE_STAGE_2_BLIT_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_BLIT_BIT_KHR" alias="VK_PIPELINE_STAGE_2_BLIT_BIT"/>
+ <enum bitpos="35" name="VK_PIPELINE_STAGE_2_CLEAR_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR" alias="VK_PIPELINE_STAGE_2_CLEAR_BIT"/>
+ <enum bitpos="36" name="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR" alias="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT"/>
+ <enum bitpos="37" name="VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR" alias="VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT"/>
+ <enum bitpos="38" name="VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT"/>
+ <enum name="VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR" alias="VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT"/>
+ </enums>
+ <enums name="VkSubmitFlagBits" type="bitmask">
+ <enum bitpos="0" name="VK_SUBMIT_PROTECTED_BIT"/>
+ <enum name="VK_SUBMIT_PROTECTED_BIT_KHR" alias="VK_SUBMIT_PROTECTED_BIT"/>
</enums>
<enums name="VkEventCreateFlagBits" type="bitmask">
</enums>
<enums name="VkPipelineLayoutCreateFlagBits" type="bitmask">
</enums>
+ <enums name="VkSciSyncClientTypeNV" type="enum">
+ <enum value="0" name="VK_SCI_SYNC_CLIENT_TYPE_SIGNALER_NV"/>
+ <enum value="1" name="VK_SCI_SYNC_CLIENT_TYPE_WAITER_NV"/>
+ <enum value="2" name="VK_SCI_SYNC_CLIENT_TYPE_SIGNALER_WAITER_NV"/>
+ </enums>
+ <enums name="VkSciSyncPrimitiveTypeNV" type="enum">
+ <enum value="0" name="VK_SCI_SYNC_PRIMITIVE_TYPE_FENCE_NV"/>
+ <enum value="1" name="VK_SCI_SYNC_PRIMITIVE_TYPE_SEMAPHORE_NV"/>
+ </enums>
<enums name="VkProvokingVertexModeEXT" type="enum">
<enum value="0" name="VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT"/>
<enum value="1" name="VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT"/>
</enums>
+ <enums name="VkPipelineCacheValidationVersion" type="enum">
+ <enum value="1" name="VK_PIPELINE_CACHE_VALIDATION_VERSION_SAFETY_CRITICAL_ONE"/>
+ </enums>
<enums name="VkAccelerationStructureMotionInstanceTypeNV" type="enum">
<enum value="0" name="VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_STATIC_NV"/>
<enum value="1" name="VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MATRIX_MOTION_NV"/>
<enum value="2" name="VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_SRT_MOTION_NV"/>
</enums>
+ <enums name="VkPipelineColorBlendStateCreateFlagBits" type="bitmask">
+ </enums>
+ <enums name="VkPipelineDepthStencilStateCreateFlagBits" type="bitmask">
+ </enums>
+ <enums name="VkGraphicsPipelineLibraryFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT"/>
+ <enum bitpos="1" name="VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT"/>
+ <enum bitpos="2" name="VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT"/>
+ <enum bitpos="3" name="VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT"/>
+ </enums>
+ <enums name="VkDeviceAddressBindingFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_DEVICE_ADDRESS_BINDING_INTERNAL_OBJECT_BIT_EXT"/>
+ </enums>
+ <enums name="VkDeviceAddressBindingTypeEXT" type="enum">
+ <enum value="0" name="VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT"/>
+ <enum value="1" name="VK_DEVICE_ADDRESS_BINDING_TYPE_UNBIND_EXT"/>
+ </enums>
+ <enums name="VkFrameBoundaryFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_FRAME_BOUNDARY_FRAME_END_BIT_EXT"/>
+ </enums>
+ <enums name="VkPresentScalingFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_PRESENT_SCALING_ONE_TO_ONE_BIT_EXT"/>
+ <enum bitpos="1" name="VK_PRESENT_SCALING_ASPECT_RATIO_STRETCH_BIT_EXT"/>
+ <enum bitpos="2" name="VK_PRESENT_SCALING_STRETCH_BIT_EXT"/>
+ </enums>
+ <enums name="VkPresentGravityFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_PRESENT_GRAVITY_MIN_BIT_EXT"/>
+ <enum bitpos="1" name="VK_PRESENT_GRAVITY_MAX_BIT_EXT"/>
+ <enum bitpos="2" name="VK_PRESENT_GRAVITY_CENTERED_BIT_EXT"/>
+ </enums>
+ <enums name="VkPhysicalDeviceSchedulingControlsFlagBitsARM" type="bitmask" bitwidth="64">
+ <enum bitpos="0" name="VK_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_SHADER_CORE_COUNT_ARM"/>
+ </enums>
<enums name="VkVideoCodecOperationFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_CODEC_OPERATION_INVALID_BIT_KHR"/>
+ <enum value="0" name="VK_VIDEO_CODEC_OPERATION_NONE_KHR"/>
</enums>
<enums name="VkVideoChromaSubsamplingFlagBitsKHR" type="bitmask" comment="Vulkan video chroma subsampling definitions">
- <enum value="0" name="VK_VIDEO_CHROMA_SUBSAMPLING_INVALID_BIT_KHR"/>
+ <enum value="0" name="VK_VIDEO_CHROMA_SUBSAMPLING_INVALID_KHR"/>
<enum bitpos="0" name="VK_VIDEO_CHROMA_SUBSAMPLING_MONOCHROME_BIT_KHR"/>
<enum bitpos="1" name="VK_VIDEO_CHROMA_SUBSAMPLING_420_BIT_KHR"/>
<enum bitpos="2" name="VK_VIDEO_CHROMA_SUBSAMPLING_422_BIT_KHR"/>
@@ -7619,22 +10802,14 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="1" name="VK_VIDEO_CAPABILITY_SEPARATE_REFERENCE_IMAGES_BIT_KHR"/>
</enums>
<enums name="VkVideoSessionCreateFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_SESSION_CREATE_DEFAULT_KHR"/>
<enum bitpos="0" name="VK_VIDEO_SESSION_CREATE_PROTECTED_CONTENT_BIT_KHR"/>
</enums>
- <enums name="VkVideoCodingQualityPresetFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_CODING_QUALITY_PRESET_DEFAULT_BIT_KHR"/>
- <enum bitpos="0" name="VK_VIDEO_CODING_QUALITY_PRESET_NORMAL_BIT_KHR"/>
- <enum bitpos="1" name="VK_VIDEO_CODING_QUALITY_PRESET_POWER_BIT_KHR"/>
- <enum bitpos="2" name="VK_VIDEO_CODING_QUALITY_PRESET_QUALITY_BIT_KHR"/>
- </enums>
- <enums name="VkVideoDecodeH264PictureLayoutFlagBitsEXT" type="bitmask">
- <enum value="0" name="VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_PROGRESSIVE_EXT"/>
- <enum bitpos="0" name="VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_INTERLEAVED_LINES_BIT_EXT"/>
- <enum bitpos="1" name="VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_SEPARATE_PLANES_BIT_EXT"/>
+ <enums name="VkVideoDecodeH264PictureLayoutFlagBitsKHR" type="bitmask">
+ <enum value="0" name="VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_PROGRESSIVE_KHR"/>
+ <enum bitpos="0" name="VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_INTERLEAVED_LINES_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_SEPARATE_PLANES_BIT_KHR"/>
</enums>
<enums name="VkVideoCodingControlFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_CODING_CONTROL_DEFAULT_KHR"/>
<enum bitpos="0" name="VK_VIDEO_CODING_CONTROL_RESET_BIT_KHR"/>
</enums>
<enums name="VkQueryResultStatusKHR" type="enum">
@@ -7642,49 +10817,430 @@ typedef void <name>CAMetalLayer</name>;
<enum value="0" name="VK_QUERY_RESULT_STATUS_NOT_READY_KHR"/>
<enum value="1" name="VK_QUERY_RESULT_STATUS_COMPLETE_KHR"/>
</enums>
- <enums name="VkVideoDecodeFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_DECODE_DEFAULT_KHR"/>
- <enum bitpos="0" name="VK_VIDEO_DECODE_RESERVED_0_BIT_KHR"/>
+ <enums name="VkVideoDecodeUsageFlagBitsKHR" type="bitmask">
+ <enum value="0" name="VK_VIDEO_DECODE_USAGE_DEFAULT_KHR"/>
+ <enum bitpos="0" name="VK_VIDEO_DECODE_USAGE_TRANSCODING_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_DECODE_USAGE_OFFLINE_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_DECODE_USAGE_STREAMING_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoDecodeCapabilityFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_DECODE_CAPABILITY_DPB_AND_OUTPUT_COINCIDE_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_DECODE_CAPABILITY_DPB_AND_OUTPUT_DISTINCT_BIT_KHR"/>
</enums>
<enums name="VkVideoEncodeFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_ENCODE_DEFAULT_KHR"/>
- <enum bitpos="0" name="VK_VIDEO_ENCODE_RESERVED_0_BIT_KHR"/>
</enums>
- <enums name="VkVideoEncodeRateControlFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_ENCODE_RATE_CONTROL_DEFAULT_KHR"/>
- <enum bitpos="0" name="VK_VIDEO_ENCODE_RATE_CONTROL_RESET_BIT_KHR"/>
+ <enums name="VkVideoEncodeUsageFlagBitsKHR" type="bitmask">
+ <enum value="0" name="VK_VIDEO_ENCODE_USAGE_DEFAULT_KHR"/>
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_USAGE_TRANSCODING_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_USAGE_STREAMING_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_USAGE_RECORDING_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_USAGE_CONFERENCING_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeContentFlagBitsKHR" type="bitmask">
+ <enum value="0" name="VK_VIDEO_ENCODE_CONTENT_DEFAULT_KHR"/>
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_CONTENT_CAMERA_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_CONTENT_DESKTOP_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_CONTENT_RENDERED_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeTuningModeKHR" type="enum">
+ <enum value="0" name="VK_VIDEO_ENCODE_TUNING_MODE_DEFAULT_KHR"/>
+ <enum value="1" name="VK_VIDEO_ENCODE_TUNING_MODE_HIGH_QUALITY_KHR"/>
+ <enum value="2" name="VK_VIDEO_ENCODE_TUNING_MODE_LOW_LATENCY_KHR"/>
+ <enum value="3" name="VK_VIDEO_ENCODE_TUNING_MODE_ULTRA_LOW_LATENCY_KHR"/>
+ <enum value="4" name="VK_VIDEO_ENCODE_TUNING_MODE_LOSSLESS_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeCapabilityFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_CAPABILITY_PRECEDING_EXTERNALLY_ENCODED_BYTES_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_CAPABILITY_INSUFFICIENT_BITSTREAM_BUFFER_RANGE_DETECTION_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeFeedbackFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BUFFER_OFFSET_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BYTES_WRITTEN_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_HAS_OVERRIDES_BIT_KHR"/>
</enums>
<enums name="VkVideoEncodeRateControlModeFlagBitsKHR" type="bitmask">
- <enum value="0" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_NONE_BIT_KHR"/>
- <enum value="1" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_CBR_BIT_KHR"/>
- <enum value="2" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_VBR_BIT_KHR"/>
- </enums>
- <enums name="VkVideoEncodeH264CapabilityFlagBitsEXT" type="bitmask">
- <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_CAPABILITY_CABAC_BIT_EXT"/>
- <enum bitpos="1" name="VK_VIDEO_ENCODE_H264_CAPABILITY_CAVLC_BIT_EXT"/>
- <enum bitpos="2" name="VK_VIDEO_ENCODE_H264_CAPABILITY_WEIGHTED_BI_PRED_IMPLICIT_BIT_EXT"/>
- <enum bitpos="3" name="VK_VIDEO_ENCODE_H264_CAPABILITY_TRANSFORM_8X8_BIT_EXT"/>
- <enum bitpos="4" name="VK_VIDEO_ENCODE_H264_CAPABILITY_CHROMA_QP_OFFSET_BIT_EXT"/>
- <enum bitpos="5" name="VK_VIDEO_ENCODE_H264_CAPABILITY_SECOND_CHROMA_QP_OFFSET_BIT_EXT"/>
- <enum bitpos="6" name="VK_VIDEO_ENCODE_H264_CAPABILITY_DEBLOCKING_FILTER_DISABLED_BIT_EXT"/>
- <enum bitpos="7" name="VK_VIDEO_ENCODE_H264_CAPABILITY_DEBLOCKING_FILTER_ENABLED_BIT_EXT"/>
- <enum bitpos="8" name="VK_VIDEO_ENCODE_H264_CAPABILITY_DEBLOCKING_FILTER_PARTIAL_BIT_EXT"/>
- <enum bitpos="9" name="VK_VIDEO_ENCODE_H264_CAPABILITY_MULTIPLE_SLICE_PER_FRAME_BIT_EXT"/>
- <enum bitpos="10" name="VK_VIDEO_ENCODE_H264_CAPABILITY_EVENLY_DISTRIBUTED_SLICE_SIZE_BIT_EXT"/>
- </enums>
- <enums name="VkVideoEncodeH264InputModeFlagBitsEXT" type="bitmask">
- <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_INPUT_MODE_FRAME_BIT_EXT"/>
- <enum bitpos="1" name="VK_VIDEO_ENCODE_H264_INPUT_MODE_SLICE_BIT_EXT"/>
- <enum bitpos="2" name="VK_VIDEO_ENCODE_H264_INPUT_MODE_NON_VCL_BIT_EXT"/>
- </enums>
- <enums name="VkVideoEncodeH264OutputModeFlagBitsEXT" type="bitmask">
- <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_OUTPUT_MODE_FRAME_BIT_EXT"/>
- <enum bitpos="1" name="VK_VIDEO_ENCODE_H264_OUTPUT_MODE_SLICE_BIT_EXT"/>
- <enum bitpos="2" name="VK_VIDEO_ENCODE_H264_OUTPUT_MODE_NON_VCL_BIT_EXT"/>
- </enums>
- <enums name="VkVideoEncodeH264CreateFlagBitsEXT" type="bitmask">
- <enum value="0" name="VK_VIDEO_ENCODE_H264_CREATE_DEFAULT_EXT"/>
- <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_CREATE_RESERVED_0_BIT_EXT"/>
+ <enum value="0" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_DEFAULT_KHR"/>
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_DISABLED_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_CBR_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_RATE_CONTROL_MODE_VBR_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH264CapabilityFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_CAPABILITY_HRD_COMPLIANCE_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H264_CAPABILITY_PREDICTION_WEIGHT_TABLE_GENERATED_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H264_CAPABILITY_ROW_UNALIGNED_SLICE_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H264_CAPABILITY_DIFFERENT_SLICE_TYPE_BIT_KHR"/>
+ <enum bitpos="4" name="VK_VIDEO_ENCODE_H264_CAPABILITY_B_FRAME_IN_L0_LIST_BIT_KHR"/>
+ <enum bitpos="5" name="VK_VIDEO_ENCODE_H264_CAPABILITY_B_FRAME_IN_L1_LIST_BIT_KHR"/>
+ <enum bitpos="6" name="VK_VIDEO_ENCODE_H264_CAPABILITY_PER_PICTURE_TYPE_MIN_MAX_QP_BIT_KHR"/>
+ <enum bitpos="7" name="VK_VIDEO_ENCODE_H264_CAPABILITY_PER_SLICE_CONSTANT_QP_BIT_KHR"/>
+ <enum bitpos="8" name="VK_VIDEO_ENCODE_H264_CAPABILITY_GENERATE_PREFIX_NALU_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH264StdFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_STD_SEPARATE_COLOR_PLANE_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H264_STD_QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H264_STD_SCALING_MATRIX_PRESENT_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H264_STD_CHROMA_QP_INDEX_OFFSET_BIT_KHR"/>
+ <enum bitpos="4" name="VK_VIDEO_ENCODE_H264_STD_SECOND_CHROMA_QP_INDEX_OFFSET_BIT_KHR"/>
+ <enum bitpos="5" name="VK_VIDEO_ENCODE_H264_STD_PIC_INIT_QP_MINUS26_BIT_KHR"/>
+ <enum bitpos="6" name="VK_VIDEO_ENCODE_H264_STD_WEIGHTED_PRED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="7" name="VK_VIDEO_ENCODE_H264_STD_WEIGHTED_BIPRED_IDC_EXPLICIT_BIT_KHR"/>
+ <enum bitpos="8" name="VK_VIDEO_ENCODE_H264_STD_WEIGHTED_BIPRED_IDC_IMPLICIT_BIT_KHR"/>
+ <enum bitpos="9" name="VK_VIDEO_ENCODE_H264_STD_TRANSFORM_8X8_MODE_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="10" name="VK_VIDEO_ENCODE_H264_STD_DIRECT_SPATIAL_MV_PRED_FLAG_UNSET_BIT_KHR"/>
+ <enum bitpos="11" name="VK_VIDEO_ENCODE_H264_STD_ENTROPY_CODING_MODE_FLAG_UNSET_BIT_KHR"/>
+ <enum bitpos="12" name="VK_VIDEO_ENCODE_H264_STD_ENTROPY_CODING_MODE_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="13" name="VK_VIDEO_ENCODE_H264_STD_DIRECT_8X8_INFERENCE_FLAG_UNSET_BIT_KHR"/>
+ <enum bitpos="14" name="VK_VIDEO_ENCODE_H264_STD_CONSTRAINED_INTRA_PRED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="15" name="VK_VIDEO_ENCODE_H264_STD_DEBLOCKING_FILTER_DISABLED_BIT_KHR"/>
+ <enum bitpos="16" name="VK_VIDEO_ENCODE_H264_STD_DEBLOCKING_FILTER_ENABLED_BIT_KHR"/>
+ <enum bitpos="17" name="VK_VIDEO_ENCODE_H264_STD_DEBLOCKING_FILTER_PARTIAL_BIT_KHR"/>
+ <enum bitpos="19" name="VK_VIDEO_ENCODE_H264_STD_SLICE_QP_DELTA_BIT_KHR"/>
+ <enum bitpos="20" name="VK_VIDEO_ENCODE_H264_STD_DIFFERENT_SLICE_QP_DELTA_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH264RateControlFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H264_RATE_CONTROL_ATTEMPT_HRD_COMPLIANCE_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H264_RATE_CONTROL_REGULAR_GOP_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H264_RATE_CONTROL_REFERENCE_PATTERN_FLAT_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H264_RATE_CONTROL_REFERENCE_PATTERN_DYADIC_BIT_KHR"/>
+ <enum bitpos="4" name="VK_VIDEO_ENCODE_H264_RATE_CONTROL_TEMPORAL_LAYER_PATTERN_DYADIC_BIT_KHR"/>
+ </enums>
+ <enums name="VkHostImageCopyFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_HOST_IMAGE_COPY_MEMCPY_EXT"/>
+ </enums>
+ <enums name="VkImageFormatConstraintsFlagBitsFUCHSIA" type="bitmask">
+ </enums>
+ <enums name="VkImageConstraintsInfoFlagBitsFUCHSIA" type="bitmask">
+ <enum bitpos="0" name="VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA"/>
+ <enum bitpos="1" name="VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA"/>
+ <enum bitpos="2" name="VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA"/>
+ <enum bitpos="3" name="VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA"/>
+ <enum bitpos="4" name="VK_IMAGE_CONSTRAINTS_INFO_PROTECTED_OPTIONAL_FUCHSIA"/>
+ </enums>
+ <enums name="VkFormatFeatureFlagBits2" type="bitmask" bitwidth="64">
+ <enum bitpos="0" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT"/>
+ <enum bitpos="1" name="VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR" alias="VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT"/>
+ <enum bitpos="2" name="VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR" alias="VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT"/>
+ <enum bitpos="3" name="VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR" alias="VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT"/>
+ <enum bitpos="4" name="VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR" alias="VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT"/>
+ <enum bitpos="5" name="VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR" alias="VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT"/>
+ <enum bitpos="6" name="VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR" alias="VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT"/>
+ <enum bitpos="7" name="VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR" alias="VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT"/>
+ <enum bitpos="8" name="VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR" alias="VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT"/>
+ <enum bitpos="9" name="VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR" alias="VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT"/>
+ <enum bitpos="10" name="VK_FORMAT_FEATURE_2_BLIT_SRC_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR" alias="VK_FORMAT_FEATURE_2_BLIT_SRC_BIT"/>
+ <enum bitpos="11" name="VK_FORMAT_FEATURE_2_BLIT_DST_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR" alias="VK_FORMAT_FEATURE_2_BLIT_DST_BIT"/>
+ <enum bitpos="12" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT"/>
+ <enum bitpos="13" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT"/>
+ <enum bitpos="14" name="VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR" alias="VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT"/>
+ <enum bitpos="15" name="VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR" alias="VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT"/>
+ <enum bitpos="16" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT"/>
+ <enum bitpos="17" name="VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR" alias="VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT"/>
+ <enum bitpos="18" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT"/>
+ <enum bitpos="19" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT"/>
+ <enum bitpos="20" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT"/>
+ <enum bitpos="21" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT"/>
+ <enum bitpos="22" name="VK_FORMAT_FEATURE_2_DISJOINT_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR" alias="VK_FORMAT_FEATURE_2_DISJOINT_BIT"/>
+ <enum bitpos="23" name="VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR" alias="VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT"/>
+ <enum bitpos="31" name="VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR" alias="VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT"/>
+ <enum bitpos="32" name="VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR" alias="VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT"/>
+ <enum bitpos="33" name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT"/>
+ <enum name="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR" alias="VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT"/>
+ </enums>
+ <enums name="VkRenderingFlagBits" type="bitmask">
+ <enum bitpos="0" name="VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT"/>
+ <enum name="VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR" alias="VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT"/>
+ <enum bitpos="1" name="VK_RENDERING_SUSPENDING_BIT"/>
+ <enum name="VK_RENDERING_SUSPENDING_BIT_KHR" alias="VK_RENDERING_SUSPENDING_BIT"/>
+ <enum bitpos="2" name="VK_RENDERING_RESUMING_BIT"/>
+ <enum name="VK_RENDERING_RESUMING_BIT_KHR" alias="VK_RENDERING_RESUMING_BIT"/>
+ </enums>
+ <enums name="VkVideoEncodeH265CapabilityFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H265_CAPABILITY_HRD_COMPLIANCE_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H265_CAPABILITY_PREDICTION_WEIGHT_TABLE_GENERATED_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H265_CAPABILITY_ROW_UNALIGNED_SLICE_SEGMENT_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H265_CAPABILITY_DIFFERENT_SLICE_SEGMENT_TYPE_BIT_KHR"/>
+ <enum bitpos="4" name="VK_VIDEO_ENCODE_H265_CAPABILITY_B_FRAME_IN_L0_LIST_BIT_KHR"/>
+ <enum bitpos="5" name="VK_VIDEO_ENCODE_H265_CAPABILITY_B_FRAME_IN_L1_LIST_BIT_KHR"/>
+ <enum bitpos="6" name="VK_VIDEO_ENCODE_H265_CAPABILITY_PER_PICTURE_TYPE_MIN_MAX_QP_BIT_KHR"/>
+ <enum bitpos="7" name="VK_VIDEO_ENCODE_H265_CAPABILITY_PER_SLICE_SEGMENT_CONSTANT_QP_BIT_KHR"/>
+ <enum bitpos="8" name="VK_VIDEO_ENCODE_H265_CAPABILITY_MULTIPLE_TILES_PER_SLICE_SEGMENT_BIT_KHR"/>
+ <enum bitpos="9" name="VK_VIDEO_ENCODE_H265_CAPABILITY_MULTIPLE_SLICE_SEGMENTS_PER_TILE_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH265StdFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H265_STD_SEPARATE_COLOR_PLANE_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H265_STD_SAMPLE_ADAPTIVE_OFFSET_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H265_STD_SCALING_LIST_DATA_PRESENT_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H265_STD_PCM_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="4" name="VK_VIDEO_ENCODE_H265_STD_SPS_TEMPORAL_MVP_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="5" name="VK_VIDEO_ENCODE_H265_STD_INIT_QP_MINUS26_BIT_KHR"/>
+ <enum bitpos="6" name="VK_VIDEO_ENCODE_H265_STD_WEIGHTED_PRED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="7" name="VK_VIDEO_ENCODE_H265_STD_WEIGHTED_BIPRED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="8" name="VK_VIDEO_ENCODE_H265_STD_LOG2_PARALLEL_MERGE_LEVEL_MINUS2_BIT_KHR"/>
+ <enum bitpos="9" name="VK_VIDEO_ENCODE_H265_STD_SIGN_DATA_HIDING_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="10" name="VK_VIDEO_ENCODE_H265_STD_TRANSFORM_SKIP_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="11" name="VK_VIDEO_ENCODE_H265_STD_TRANSFORM_SKIP_ENABLED_FLAG_UNSET_BIT_KHR"/>
+ <enum bitpos="12" name="VK_VIDEO_ENCODE_H265_STD_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="13" name="VK_VIDEO_ENCODE_H265_STD_TRANSQUANT_BYPASS_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="14" name="VK_VIDEO_ENCODE_H265_STD_CONSTRAINED_INTRA_PRED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="15" name="VK_VIDEO_ENCODE_H265_STD_ENTROPY_CODING_SYNC_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="16" name="VK_VIDEO_ENCODE_H265_STD_DEBLOCKING_FILTER_OVERRIDE_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="17" name="VK_VIDEO_ENCODE_H265_STD_DEPENDENT_SLICE_SEGMENTS_ENABLED_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="18" name="VK_VIDEO_ENCODE_H265_STD_DEPENDENT_SLICE_SEGMENT_FLAG_SET_BIT_KHR"/>
+ <enum bitpos="19" name="VK_VIDEO_ENCODE_H265_STD_SLICE_QP_DELTA_BIT_KHR"/>
+ <enum bitpos="20" name="VK_VIDEO_ENCODE_H265_STD_DIFFERENT_SLICE_QP_DELTA_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH265RateControlFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H265_RATE_CONTROL_ATTEMPT_HRD_COMPLIANCE_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H265_RATE_CONTROL_REGULAR_GOP_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H265_RATE_CONTROL_REFERENCE_PATTERN_FLAT_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H265_RATE_CONTROL_REFERENCE_PATTERN_DYADIC_BIT_KHR"/>
+ <enum bitpos="4" name="VK_VIDEO_ENCODE_H265_RATE_CONTROL_TEMPORAL_SUB_LAYER_PATTERN_DYADIC_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH265CtbSizeFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H265_CTB_SIZE_16_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H265_CTB_SIZE_32_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H265_CTB_SIZE_64_BIT_KHR"/>
+ </enums>
+ <enums name="VkVideoEncodeH265TransformBlockSizeFlagBitsKHR" type="bitmask">
+ <enum bitpos="0" name="VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_4_BIT_KHR"/>
+ <enum bitpos="1" name="VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_8_BIT_KHR"/>
+ <enum bitpos="2" name="VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_16_BIT_KHR"/>
+ <enum bitpos="3" name="VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_32_BIT_KHR"/>
+ </enums>
+ <enums name="VkExportMetalObjectTypeFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_EXPORT_METAL_OBJECT_TYPE_METAL_DEVICE_BIT_EXT"/>
+ <enum bitpos="1" name="VK_EXPORT_METAL_OBJECT_TYPE_METAL_COMMAND_QUEUE_BIT_EXT"/>
+ <enum bitpos="2" name="VK_EXPORT_METAL_OBJECT_TYPE_METAL_BUFFER_BIT_EXT"/>
+ <enum bitpos="3" name="VK_EXPORT_METAL_OBJECT_TYPE_METAL_TEXTURE_BIT_EXT"/>
+ <enum bitpos="4" name="VK_EXPORT_METAL_OBJECT_TYPE_METAL_IOSURFACE_BIT_EXT"/>
+ <enum bitpos="5" name="VK_EXPORT_METAL_OBJECT_TYPE_METAL_SHARED_EVENT_BIT_EXT"/>
+ </enums>
+ <enums name="VkInstanceCreateFlagBits" type="bitmask">
+ </enums>
+ <enums name="VkImageCompressionFlagBitsEXT" type="bitmask">
+ <enum value="0" name="VK_IMAGE_COMPRESSION_DEFAULT_EXT"/>
+ <enum bitpos="0" name="VK_IMAGE_COMPRESSION_FIXED_RATE_DEFAULT_EXT"/>
+ <enum bitpos="1" name="VK_IMAGE_COMPRESSION_FIXED_RATE_EXPLICIT_EXT"/>
+ <enum bitpos="2" name="VK_IMAGE_COMPRESSION_DISABLED_EXT"/>
+ </enums>
+ <enums name="VkImageCompressionFixedRateFlagBitsEXT" type="bitmask">
+ <enum value="0" name="VK_IMAGE_COMPRESSION_FIXED_RATE_NONE_EXT"/>
+ <enum bitpos="0" name="VK_IMAGE_COMPRESSION_FIXED_RATE_1BPC_BIT_EXT"/>
+ <enum bitpos="1" name="VK_IMAGE_COMPRESSION_FIXED_RATE_2BPC_BIT_EXT"/>
+ <enum bitpos="2" name="VK_IMAGE_COMPRESSION_FIXED_RATE_3BPC_BIT_EXT"/>
+ <enum bitpos="3" name="VK_IMAGE_COMPRESSION_FIXED_RATE_4BPC_BIT_EXT"/>
+ <enum bitpos="4" name="VK_IMAGE_COMPRESSION_FIXED_RATE_5BPC_BIT_EXT"/>
+ <enum bitpos="5" name="VK_IMAGE_COMPRESSION_FIXED_RATE_6BPC_BIT_EXT"/>
+ <enum bitpos="6" name="VK_IMAGE_COMPRESSION_FIXED_RATE_7BPC_BIT_EXT"/>
+ <enum bitpos="7" name="VK_IMAGE_COMPRESSION_FIXED_RATE_8BPC_BIT_EXT"/>
+ <enum bitpos="8" name="VK_IMAGE_COMPRESSION_FIXED_RATE_9BPC_BIT_EXT"/>
+ <enum bitpos="9" name="VK_IMAGE_COMPRESSION_FIXED_RATE_10BPC_BIT_EXT"/>
+ <enum bitpos="10" name="VK_IMAGE_COMPRESSION_FIXED_RATE_11BPC_BIT_EXT"/>
+ <enum bitpos="11" name="VK_IMAGE_COMPRESSION_FIXED_RATE_12BPC_BIT_EXT"/>
+ <enum bitpos="12" name="VK_IMAGE_COMPRESSION_FIXED_RATE_13BPC_BIT_EXT"/>
+ <enum bitpos="13" name="VK_IMAGE_COMPRESSION_FIXED_RATE_14BPC_BIT_EXT"/>
+ <enum bitpos="14" name="VK_IMAGE_COMPRESSION_FIXED_RATE_15BPC_BIT_EXT"/>
+ <enum bitpos="15" name="VK_IMAGE_COMPRESSION_FIXED_RATE_16BPC_BIT_EXT"/>
+ <enum bitpos="16" name="VK_IMAGE_COMPRESSION_FIXED_RATE_17BPC_BIT_EXT"/>
+ <enum bitpos="17" name="VK_IMAGE_COMPRESSION_FIXED_RATE_18BPC_BIT_EXT"/>
+ <enum bitpos="18" name="VK_IMAGE_COMPRESSION_FIXED_RATE_19BPC_BIT_EXT"/>
+ <enum bitpos="19" name="VK_IMAGE_COMPRESSION_FIXED_RATE_20BPC_BIT_EXT"/>
+ <enum bitpos="20" name="VK_IMAGE_COMPRESSION_FIXED_RATE_21BPC_BIT_EXT"/>
+ <enum bitpos="21" name="VK_IMAGE_COMPRESSION_FIXED_RATE_22BPC_BIT_EXT"/>
+ <enum bitpos="22" name="VK_IMAGE_COMPRESSION_FIXED_RATE_23BPC_BIT_EXT"/>
+ <enum bitpos="23" name="VK_IMAGE_COMPRESSION_FIXED_RATE_24BPC_BIT_EXT"/>
+ </enums>
+ <enums name="VkPipelineRobustnessBufferBehaviorEXT" type="enum">
+ <enum value="0" name="VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT" />
+ <enum value="1" name="VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT" />
+ <enum value="2" name="VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT" />
+ <enum value="3" name="VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT" />
+ </enums>
+ <enums name="VkPipelineRobustnessImageBehaviorEXT" type="enum">
+ <enum value="0" name="VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT" />
+ <enum value="1" name="VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT" />
+ <enum value="2" name="VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT" />
+ <enum value="3" name="VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT" />
+ </enums>
+ <enums name="VkOpticalFlowGridSizeFlagBitsNV" type="bitmask">
+ <enum value="0" name="VK_OPTICAL_FLOW_GRID_SIZE_UNKNOWN_NV"/>
+ <enum bitpos="0" name="VK_OPTICAL_FLOW_GRID_SIZE_1X1_BIT_NV"/>
+ <enum bitpos="1" name="VK_OPTICAL_FLOW_GRID_SIZE_2X2_BIT_NV"/>
+ <enum bitpos="2" name="VK_OPTICAL_FLOW_GRID_SIZE_4X4_BIT_NV"/>
+ <enum bitpos="3" name="VK_OPTICAL_FLOW_GRID_SIZE_8X8_BIT_NV"/>
+ </enums>
+ <enums name="VkOpticalFlowUsageFlagBitsNV" type="bitmask">
+ <enum value="0" name="VK_OPTICAL_FLOW_USAGE_UNKNOWN_NV"/>
+ <enum bitpos="0" name="VK_OPTICAL_FLOW_USAGE_INPUT_BIT_NV"/>
+ <enum bitpos="1" name="VK_OPTICAL_FLOW_USAGE_OUTPUT_BIT_NV"/>
+ <enum bitpos="2" name="VK_OPTICAL_FLOW_USAGE_HINT_BIT_NV"/>
+ <enum bitpos="3" name="VK_OPTICAL_FLOW_USAGE_COST_BIT_NV"/>
+ <enum bitpos="4" name="VK_OPTICAL_FLOW_USAGE_GLOBAL_FLOW_BIT_NV"/>
+ </enums>
+ <enums name="VkOpticalFlowPerformanceLevelNV" type="enum">
+ <enum value="0" name="VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_UNKNOWN_NV"/>
+ <enum value="1" name="VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_SLOW_NV"/>
+ <enum value="2" name="VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_MEDIUM_NV"/>
+ <enum value="3" name="VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_FAST_NV"/>
+ </enums>
+ <enums name="VkOpticalFlowSessionBindingPointNV" type="enum">
+ <enum value="0" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_UNKNOWN_NV"/>
+ <enum value="1" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_INPUT_NV"/>
+ <enum value="2" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_REFERENCE_NV"/>
+ <enum value="3" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_HINT_NV"/>
+ <enum value="4" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_FLOW_VECTOR_NV"/>
+ <enum value="5" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_BACKWARD_FLOW_VECTOR_NV"/>
+ <enum value="6" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_COST_NV"/>
+ <enum value="7" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_BACKWARD_COST_NV"/>
+ <enum value="8" name="VK_OPTICAL_FLOW_SESSION_BINDING_POINT_GLOBAL_FLOW_NV"/>
+ </enums>
+ <enums name="VkOpticalFlowSessionCreateFlagBitsNV" type="bitmask">
+ <enum bitpos="0" name="VK_OPTICAL_FLOW_SESSION_CREATE_ENABLE_HINT_BIT_NV"/>
+ <enum bitpos="1" name="VK_OPTICAL_FLOW_SESSION_CREATE_ENABLE_COST_BIT_NV"/>
+ <enum bitpos="2" name="VK_OPTICAL_FLOW_SESSION_CREATE_ENABLE_GLOBAL_FLOW_BIT_NV"/>
+ <enum bitpos="3" name="VK_OPTICAL_FLOW_SESSION_CREATE_ALLOW_REGIONS_BIT_NV"/>
+ <enum bitpos="4" name="VK_OPTICAL_FLOW_SESSION_CREATE_BOTH_DIRECTIONS_BIT_NV"/>
+ </enums>
+ <enums name="VkOpticalFlowExecuteFlagBitsNV" type="bitmask">
+ <enum bitpos="0" name="VK_OPTICAL_FLOW_EXECUTE_DISABLE_TEMPORAL_HINTS_BIT_NV"/>
+ </enums>
+ <enums name="VkMicromapTypeEXT" type="enum">
+ <enum value="0" name="VK_MICROMAP_TYPE_OPACITY_MICROMAP_EXT"/>
+ </enums>
+ <enums name="VkBuildMicromapFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_BUILD_MICROMAP_PREFER_FAST_TRACE_BIT_EXT"/>
+ <enum bitpos="1" name="VK_BUILD_MICROMAP_PREFER_FAST_BUILD_BIT_EXT"/>
+ <enum bitpos="2" name="VK_BUILD_MICROMAP_ALLOW_COMPACTION_BIT_EXT"/>
+ </enums>
+ <enums name="VkMicromapCreateFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_MICROMAP_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT"/>
+ </enums>
+ <enums name="VkCopyMicromapModeEXT" type="enum">
+ <enum value="0" name="VK_COPY_MICROMAP_MODE_CLONE_EXT"/>
+ <enum value="1" name="VK_COPY_MICROMAP_MODE_SERIALIZE_EXT"/>
+ <enum value="2" name="VK_COPY_MICROMAP_MODE_DESERIALIZE_EXT"/>
+ <enum value="3" name="VK_COPY_MICROMAP_MODE_COMPACT_EXT"/>
+ </enums>
+ <enums name="VkBuildMicromapModeEXT" type="enum">
+ <enum value="0" name="VK_BUILD_MICROMAP_MODE_BUILD_EXT"/>
+ </enums>
+ <enums name="VkOpacityMicromapFormatEXT" type="enum">
+ <enum value="1" name="VK_OPACITY_MICROMAP_FORMAT_2_STATE_EXT"/>
+ <enum value="2" name="VK_OPACITY_MICROMAP_FORMAT_4_STATE_EXT"/>
+ </enums>
+ <enums name="VkOpacityMicromapSpecialIndexEXT" type="enum">
+ <enum value="-1" name="VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT"/>
+ <enum value="-2" name="VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT"/>
+ <enum value="-3" name="VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_TRANSPARENT_EXT"/>
+ <enum value="-4" name="VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT"/>
+ </enums>
+ <enums name="VkDepthBiasRepresentationEXT" type="enum">
+ <enum value="0" name="VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORMAT_EXT"/>
+ <enum value="1" name="VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORCE_UNORM_EXT"/>
+ <enum value="2" name="VK_DEPTH_BIAS_REPRESENTATION_FLOAT_EXT"/>
+ </enums>
+ <enums name="VkDeviceFaultAddressTypeEXT" type="enum">
+ <enum value="0" name="VK_DEVICE_FAULT_ADDRESS_TYPE_NONE_EXT" comment="Currently unused"/>
+ <enum value="1" name="VK_DEVICE_FAULT_ADDRESS_TYPE_READ_INVALID_EXT"/>
+ <enum value="2" name="VK_DEVICE_FAULT_ADDRESS_TYPE_WRITE_INVALID_EXT"/>
+ <enum value="3" name="VK_DEVICE_FAULT_ADDRESS_TYPE_EXECUTE_INVALID_EXT"/>
+ <enum value="4" name="VK_DEVICE_FAULT_ADDRESS_TYPE_INSTRUCTION_POINTER_UNKNOWN_EXT"/>
+ <enum value="5" name="VK_DEVICE_FAULT_ADDRESS_TYPE_INSTRUCTION_POINTER_INVALID_EXT"/>
+ <enum value="6" name="VK_DEVICE_FAULT_ADDRESS_TYPE_INSTRUCTION_POINTER_FAULT_EXT"/>
+ </enums>
+ <enums name="VkDeviceFaultVendorBinaryHeaderVersionEXT" type="enum">
+ <enum value="1" name="VK_DEVICE_FAULT_VENDOR_BINARY_HEADER_VERSION_ONE_EXT"/>
+ </enums>
+ <enums name="VkDisplacementMicromapFormatNV" type="enum">
+ <enum value="1" name="VK_DISPLACEMENT_MICROMAP_FORMAT_64_TRIANGLES_64_BYTES_NV"/>
+ <enum value="2" name="VK_DISPLACEMENT_MICROMAP_FORMAT_256_TRIANGLES_128_BYTES_NV"/>
+ <enum value="3" name="VK_DISPLACEMENT_MICROMAP_FORMAT_1024_TRIANGLES_128_BYTES_NV"/>
+ </enums>
+ <enums name="VkShaderCreateFlagBitsEXT" type="bitmask">
+ <enum bitpos="0" name="VK_SHADER_CREATE_LINK_STAGE_BIT_EXT"/>
+ </enums>
+ <enums name="VkShaderCodeTypeEXT" type="enum">
+ <enum value="0" name="VK_SHADER_CODE_TYPE_BINARY_EXT"/>
+ <enum value="1" name="VK_SHADER_CODE_TYPE_SPIRV_EXT"/>
+ </enums>
+ <enums name="VkScopeKHR" type="enum">
+ <enum value="1" name="VK_SCOPE_DEVICE_KHR"/>
+ <enum value="2" name="VK_SCOPE_WORKGROUP_KHR"/>
+ <enum value="3" name="VK_SCOPE_SUBGROUP_KHR"/>
+ <enum value="5" name="VK_SCOPE_QUEUE_FAMILY_KHR"/>
+ </enums>
+ <enums name="VkComponentTypeKHR" type="enum">
+ <enum value="0" name="VK_COMPONENT_TYPE_FLOAT16_KHR"/>
+ <enum value="1" name="VK_COMPONENT_TYPE_FLOAT32_KHR"/>
+ <enum value="2" name="VK_COMPONENT_TYPE_FLOAT64_KHR"/>
+ <enum value="3" name="VK_COMPONENT_TYPE_SINT8_KHR"/>
+ <enum value="4" name="VK_COMPONENT_TYPE_SINT16_KHR"/>
+ <enum value="5" name="VK_COMPONENT_TYPE_SINT32_KHR"/>
+ <enum value="6" name="VK_COMPONENT_TYPE_SINT64_KHR"/>
+ <enum value="7" name="VK_COMPONENT_TYPE_UINT8_KHR"/>
+ <enum value="8" name="VK_COMPONENT_TYPE_UINT16_KHR"/>
+ <enum value="9" name="VK_COMPONENT_TYPE_UINT32_KHR"/>
+ <enum value="10" name="VK_COMPONENT_TYPE_UINT64_KHR"/>
+ </enums>
+ <enums name="VkCubicFilterWeightsQCOM" type="enum">
+ <enum value="0" name="VK_CUBIC_FILTER_WEIGHTS_CATMULL_ROM_QCOM"/>
+ <enum value="1" name="VK_CUBIC_FILTER_WEIGHTS_ZERO_TANGENT_CARDINAL_QCOM"/>
+ <enum value="2" name="VK_CUBIC_FILTER_WEIGHTS_B_SPLINE_QCOM"/>
+ <enum value="3" name="VK_CUBIC_FILTER_WEIGHTS_MITCHELL_NETRAVALI_QCOM"/>
+ </enums>
+ <enums name="VkBlockMatchWindowCompareModeQCOM" type="enum">
+ <enum value="0" name="VK_BLOCK_MATCH_WINDOW_COMPARE_MODE_MIN_QCOM"/>
+ <enum value="1" name="VK_BLOCK_MATCH_WINDOW_COMPARE_MODE_MAX_QCOM"/>
+ </enums>
+ <enums name="VkLayeredDriverUnderlyingApiMSFT" type="enum">
+ <enum value="0" name="VK_LAYERED_DRIVER_UNDERLYING_API_NONE_MSFT"/>
+ <enum value="1" name="VK_LAYERED_DRIVER_UNDERLYING_API_D3D12_MSFT"/>
+ </enums>
+ <enums name="VkLatencyMarkerNV" type="enum">
+ <enum value="0" name="VK_LATENCY_MARKER_SIMULATION_START_NV"/>
+ <enum value="1" name="VK_LATENCY_MARKER_SIMULATION_END_NV"/>
+ <enum value="2" name="VK_LATENCY_MARKER_RENDERSUBMIT_START_NV"/>
+ <enum value="3" name="VK_LATENCY_MARKER_RENDERSUBMIT_END_NV"/>
+ <enum value="4" name="VK_LATENCY_MARKER_PRESENT_START_NV"/>
+ <enum value="5" name="VK_LATENCY_MARKER_PRESENT_END_NV"/>
+ <enum value="6" name="VK_LATENCY_MARKER_INPUT_SAMPLE_NV"/>
+ <enum value="7" name="VK_LATENCY_MARKER_TRIGGER_FLASH_NV"/>
+ <enum value="8" name="VK_LATENCY_MARKER_OUT_OF_BAND_RENDERSUBMIT_START_NV"/>
+ <enum value="9" name="VK_LATENCY_MARKER_OUT_OF_BAND_RENDERSUBMIT_END_NV"/>
+ <enum value="10" name="VK_LATENCY_MARKER_OUT_OF_BAND_PRESENT_START_NV"/>
+ <enum value="11" name="VK_LATENCY_MARKER_OUT_OF_BAND_PRESENT_END_NV"/>
+ </enums>
+ <enums name="VkOutOfBandQueueTypeNV" type="enum">
+ <enum value="0" name="VK_OUT_OF_BAND_QUEUE_TYPE_RENDER_NV"/>
+ <enum value="1" name="VK_OUT_OF_BAND_QUEUE_TYPE_PRESENT_NV"/>
+ </enums>
+ <enums name="VkMemoryUnmapFlagBitsKHR" type="bitmask">
</enums>
<commands comment="Vulkan command definitions">
@@ -7755,7 +11311,14 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>VkImageCreateFlags</type> <name>flags</name></param>
<param><type>VkImageFormatProperties</type>* <name>pImageFormatProperties</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_EXTENSION_NOT_PRESENT,VK_ERROR_FEATURE_NOT_PRESENT,VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_DEVICE_LOST">
+ <command api="vulkan" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_EXTENSION_NOT_PRESENT,VK_ERROR_FEATURE_NOT_PRESENT,VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_DEVICE_LOST">
+ <proto><type>VkResult</type> <name>vkCreateDevice</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param>const <type>VkDeviceCreateInfo</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkDevice</type>* <name>pDevice</name></param>
+ </command>
+ <command api="vulkansc" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_EXTENSION_NOT_PRESENT,VK_ERROR_FEATURE_NOT_PRESENT,VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_DEVICE_LOST,VK_ERROR_INVALID_PIPELINE_CACHE_DATA">
<proto><type>VkResult</type> <name>vkCreateDevice</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param>const <type>VkDeviceCreateInfo</type>* <name>pCreateInfo</name></param>
@@ -7767,7 +11330,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" externsync="true"><type>VkDevice</type> <name>device</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<implicitexternsyncparams>
- <param>all sname:VkQueue objects received from pname:device</param>
+ <param>all sname:VkQueue objects created from pname:device</param>
</implicitexternsyncparams>
</command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
@@ -7785,12 +11348,19 @@ typedef void <name>CAMetalLayer</name>;
<param optional="false,true"><type>uint32_t</type>* <name>pPropertyCount</name></param>
<param optional="true" len="pPropertyCount"><type>VkExtensionProperties</type>* <name>pProperties</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <command api="vulkan" successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkEnumerateDeviceLayerProperties</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pPropertyCount</name></param>
<param optional="true" len="pPropertyCount"><type>VkLayerProperties</type>* <name>pProperties</name></param>
</command>
+ <command api="vulkansc" successcodes="VK_SUCCESS">
+ <proto><type>VkResult</type> <name>vkEnumerateDeviceLayerProperties</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pPropertyCount</name></param>
+ <param optional="true" len="pPropertyCount"><type>VkLayerProperties</type>* <name>pProperties</name></param>
+ </command>
+
<command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_LAYER_NOT_PRESENT">
<proto><type>VkResult</type> <name>vkEnumerateDeviceExtensionProperties</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
@@ -7916,7 +11486,7 @@ typedef void <name>CAMetalLayer</name>;
<proto><type>VkResult</type> <name>vkQueueBindSparse</name></proto>
<param externsync="true"><type>VkQueue</type> <name>queue</name></param>
<param optional="true"><type>uint32_t</type> <name>bindInfoCount</name></param>
- <param len="bindInfoCount" externsync="pBindInfo[].pBufferBinds[].buffer,pBindInfo[].pImageOpaqueBinds[].image,pBindInfo[].pImageBinds[].image">const <type>VkBindSparseInfo</type>* <name>pBindInfo</name></param>
+ <param len="bindInfoCount">const <type>VkBindSparseInfo</type>* <name>pBindInfo</name></param>
<param optional="true" externsync="true"><type>VkFence</type> <name>fence</name></param>
</command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
@@ -8050,7 +11620,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" externsync="true"><type>VkBufferView</type> <name>bufferView</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_COMPRESSION_EXHAUSTED_EXT,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR">
<proto><type>VkResult</type> <name>vkCreateImage</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkImageCreateInfo</type>* <name>pCreateInfo</name></param>
@@ -8070,7 +11640,7 @@ typedef void <name>CAMetalLayer</name>;
<param>const <type>VkImageSubresource</type>* <name>pSubresource</name></param>
<param><type>VkSubresourceLayout</type>* <name>pLayout</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR">
<proto><type>VkResult</type> <name>vkCreateImageView</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkImageViewCreateInfo</type>* <name>pCreateInfo</name></param>
@@ -8096,7 +11666,14 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" externsync="true"><type>VkShaderModule</type> <name>shaderModule</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <command api="vulkan" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreatePipelineCache</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkPipelineCacheCreateInfo</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkPipelineCache</type>* <name>pPipelineCache</name></param>
+ </command>
+ <command api="vulkansc" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_PIPELINE_CACHE_DATA">
<proto><type>VkResult</type> <name>vkCreatePipelineCache</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkPipelineCacheCreateInfo</type>* <name>pCreateInfo</name></param>
@@ -8123,7 +11700,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>srcCacheCount</name></param>
<param len="srcCacheCount">const <type>VkPipelineCache</type>* <name>pSrcCaches</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_SHADER_NV">
+ <command api="vulkan" successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_SHADER_NV">
<proto><type>VkResult</type> <name>vkCreateGraphicsPipelines</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param optional="true"><type>VkPipelineCache</type> <name>pipelineCache</name></param>
@@ -8132,7 +11709,16 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_SHADER_NV">
+ <command api="vulkansc" successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_NO_PIPELINE_MATCH,VK_ERROR_OUT_OF_POOL_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateGraphicsPipelines</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkPipelineCache</type> <name>pipelineCache</name></param>
+ <param><type>uint32_t</type> <name>createInfoCount</name></param>
+ <param len="createInfoCount">const <type>VkGraphicsPipelineCreateInfo</type>* <name>pCreateInfos</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
+ </command>
+ <command api="vulkan" successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_SHADER_NV">
<proto><type>VkResult</type> <name>vkCreateComputePipelines</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param optional="true"><type>VkPipelineCache</type> <name>pipelineCache</name></param>
@@ -8141,11 +11727,20 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_SURFACE_LOST_KHR">
+ <command api="vulkansc" successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_NO_PIPELINE_MATCH,VK_ERROR_OUT_OF_POOL_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateComputePipelines</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkPipelineCache</type> <name>pipelineCache</name></param>
+ <param><type>uint32_t</type> <name>createInfoCount</name></param>
+ <param len="createInfoCount">const <type>VkComputePipelineCreateInfo</type>* <name>pCreateInfos</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_SURFACE_LOST_KHR">
<proto><type>VkResult</type> <name>vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkRenderPass</type> <name>renderpass</name></param>
- <param><type>VkExtent2D</type>* <name>pMaxWorkgroupSize</name></param>
+ <param len="1"><type>VkExtent2D</type>* <name>pMaxWorkgroupSize</name></param>
</command>
<command>
<proto><type>void</type> <name>vkDestroyPipeline</name></proto>
@@ -8166,7 +11761,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" externsync="true"><type>VkPipelineLayout</type> <name>pipelineLayout</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR">
<proto><type>VkResult</type> <name>vkCreateSampler</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkSamplerCreateInfo</type>* <name>pCreateInfo</name></param>
@@ -8231,9 +11826,9 @@ typedef void <name>CAMetalLayer</name>;
<proto><type>void</type> <name>vkUpdateDescriptorSets</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param optional="true"><type>uint32_t</type> <name>descriptorWriteCount</name></param>
- <param len="descriptorWriteCount" externsync="pDescriptorWrites[].dstSet">const <type>VkWriteDescriptorSet</type>* <name>pDescriptorWrites</name></param>
+ <param len="descriptorWriteCount">const <type>VkWriteDescriptorSet</type>* <name>pDescriptorWrites</name></param>
<param optional="true"><type>uint32_t</type> <name>descriptorCopyCount</name></param>
- <param len="descriptorCopyCount" externsync="pDescriptorCopies[].dstSet">const <type>VkCopyDescriptorSet</type>* <name>pDescriptorCopies</name></param>
+ <param len="descriptorCopyCount">const <type>VkCopyDescriptorSet</type>* <name>pDescriptorCopies</name></param>
</command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkCreateFramebuffer</name></proto>
@@ -8267,6 +11862,12 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkRenderPass</type> <name>renderPass</name></param>
<param><type>VkExtent2D</type>* <name>pGranularity</name></param>
</command>
+ <command>
+ <proto><type>void</type> <name>vkGetRenderingAreaGranularityKHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkRenderingAreaInfoKHR</type>* <name>pRenderingAreaInfo</name></param>
+ <param><type>VkExtent2D</type>* <name>pGranularity</name></param>
+ </command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkCreateCommandPool</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -8307,7 +11908,7 @@ typedef void <name>CAMetalLayer</name>;
<param>the sname:VkCommandPool that pname:commandBuffer was allocated from</param>
</implicitexternsyncparams>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR">
<proto><type>VkResult</type> <name>vkEndCommandBuffer</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<implicitexternsyncparams>
@@ -8322,86 +11923,91 @@ typedef void <name>CAMetalLayer</name>;
<param>the sname:VkCommandPool that pname:commandBuffer was allocated from</param>
</implicitexternsyncparams>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindPipeline</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
<param><type>VkPipeline</type> <name>pipeline</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetAttachmentFeedbackLoopEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param optional="true"><type>VkImageAspectFlags</type> <name>aspectMask</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetViewport</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstViewport</name></param>
<param><type>uint32_t</type> <name>viewportCount</name></param>
<param len="viewportCount">const <type>VkViewport</type>* <name>pViewports</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetScissor</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstScissor</name></param>
<param><type>uint32_t</type> <name>scissorCount</name></param>
<param len="scissorCount">const <type>VkRect2D</type>* <name>pScissors</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetLineWidth</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>float</type> <name>lineWidth</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetDepthBias</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>float</type> <name>depthBiasConstantFactor</name></param>
<param><type>float</type> <name>depthBiasClamp</name></param>
<param><type>float</type> <name>depthBiasSlopeFactor</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetBlendConstants</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>float</type> <name>blendConstants</name>[4]</param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetDepthBounds</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>float</type> <name>minDepthBounds</name></param>
<param><type>float</type> <name>maxDepthBounds</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetStencilCompareMask</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkStencilFaceFlags</type> <name>faceMask</name></param>
<param><type>uint32_t</type> <name>compareMask</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetStencilWriteMask</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkStencilFaceFlags</type> <name>faceMask</name></param>
<param><type>uint32_t</type> <name>writeMask</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetStencilReference</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkStencilFaceFlags</type> <name>faceMask</name></param>
<param><type>uint32_t</type> <name>reference</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindDescriptorSets</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
<param><type>VkPipelineLayout</type> <name>layout</name></param>
<param><type>uint32_t</type> <name>firstSet</name></param>
<param><type>uint32_t</type> <name>descriptorSetCount</name></param>
- <param len="descriptorSetCount">const <type>VkDescriptorSet</type>* <name>pDescriptorSets</name></param>
+ <param len="descriptorSetCount" optional="false,true">const <type>VkDescriptorSet</type>* <name>pDescriptorSets</name></param>
<param optional="true"><type>uint32_t</type> <name>dynamicOffsetCount</name></param>
<param len="dynamicOffsetCount">const <type>uint32_t</type>* <name>pDynamicOffsets</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindIndexBuffer</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param><type>VkBuffer</type> <name>buffer</name></param>
+ <param optional="true"><type>VkBuffer</type> <name>buffer</name></param>
<param><type>VkDeviceSize</type> <name>offset</name></param>
<param><type>VkIndexType</type> <name>indexType</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindVertexBuffers</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstBinding</name></param>
@@ -8409,7 +12015,7 @@ typedef void <name>CAMetalLayer</name>;
<param len="bindingCount" optional="false,true">const <type>VkBuffer</type>* <name>pBuffers</name></param>
<param len="bindingCount">const <type>VkDeviceSize</type>* <name>pOffsets</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDraw</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>vertexCount</name></param>
@@ -8417,7 +12023,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>firstVertex</name></param>
<param><type>uint32_t</type> <name>firstInstance</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawIndexed</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>indexCount</name></param>
@@ -8426,26 +12032,26 @@ typedef void <name>CAMetalLayer</name>;
<param><type>int32_t</type> <name>vertexOffset</name></param>
<param><type>uint32_t</type> <name>firstInstance</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawMultiEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>uint32_t</type> <name>drawCount</name></param>
- <param noautovalidity="true" len="drawCount">const <type>VkMultiDrawInfoEXT</type>* <name>pVertexInfo</name></param>
+ <param noautovalidity="true" len="drawCount" stride="stride">const <type>VkMultiDrawInfoEXT</type>* <name>pVertexInfo</name></param>
<param><type>uint32_t</type> <name>instanceCount</name></param>
<param><type>uint32_t</type> <name>firstInstance</name></param>
<param><type>uint32_t</type> <name>stride</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawMultiIndexedEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>uint32_t</type> <name>drawCount</name></param>
- <param noautovalidity="true" len="drawCount">const <type>VkMultiDrawIndexedInfoEXT</type>* <name>pIndexInfo</name></param>
+ <param noautovalidity="true" len="drawCount" stride="stride">const <type>VkMultiDrawIndexedInfoEXT</type>* <name>pIndexInfo</name></param>
<param><type>uint32_t</type> <name>instanceCount</name></param>
<param><type>uint32_t</type> <name>firstInstance</name></param>
<param><type>uint32_t</type> <name>stride</name></param>
<param optional="true">const <type>int32_t</type>* <name>pVertexOffset</name></param>
</command>
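The multi-draw entries above gain a stride length attribute on pVertexInfo/pIndexInfo. Purely as an illustration (not part of the registry), the following minimal C sketch shows what that byte stride describes; the PerDraw wrapper struct and the helper name are hypothetical, and the VK_EXT_multi_draw entry point is assumed to have been resolved already.

/* Illustrative sketch only: the stride="stride" attribute added above records
 * that pVertexInfo is read with a caller-supplied byte stride, so each
 * VkMultiDrawInfoEXT may be embedded in a larger per-draw record. */
#include <vulkan/vulkan.h>

struct PerDraw {
    VkMultiDrawInfoEXT draw;    /* consumed by vkCmdDrawMultiEXT */
    uint32_t           userTag; /* extra per-draw data skipped via stride */
};

static void record_multi_draw(VkCommandBuffer cmd,
                              const struct PerDraw *draws, uint32_t drawCount)
{
    /* sizeof(struct PerDraw) is the byte distance between consecutive
     * VkMultiDrawInfoEXT entries, i.e. the stride parameter. */
    vkCmdDrawMultiEXT(cmd, drawCount, &draws[0].draw,
                      1 /* instanceCount */, 0 /* firstInstance */,
                      (uint32_t)sizeof(struct PerDraw));
}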
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawIndirect</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
@@ -8453,7 +12059,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>drawCount</name></param>
<param><type>uint32_t</type> <name>stride</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawIndexedIndirect</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
@@ -8461,24 +12067,43 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>drawCount</name></param>
<param><type>uint32_t</type> <name>stride</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDispatch</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>groupCountX</name></param>
<param><type>uint32_t</type> <name>groupCountY</name></param>
<param><type>uint32_t</type> <name>groupCountZ</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDispatchIndirect</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
<param><type>VkDeviceSize</type> <name>offset</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdSubpassShadingHUAWEI</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDrawClusterHUAWEI</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>groupCountX</name></param>
+ <param><type>uint32_t</type> <name>groupCountY</name></param>
+ <param><type>uint32_t</type> <name>groupCountZ</name></param>
+ </command>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDrawClusterIndirectHUAWEI</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBuffer</type> <name>buffer</name></param>
+ <param><type>VkDeviceSize</type> <name>offset</name></param>
+ </command>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdUpdatePipelineIndirectBufferNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
+ <param><type>VkPipeline</type> <name>pipeline</name></param>
+ </command>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyBuffer</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>srcBuffer</name></param>
@@ -8486,7 +12111,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>regionCount</name></param>
<param len="regionCount">const <type>VkBufferCopy</type>* <name>pRegions</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyImage</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkImage</type> <name>srcImage</name></param>
@@ -8496,7 +12121,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>regionCount</name></param>
<param len="regionCount">const <type>VkImageCopy</type>* <name>pRegions</name></param>
</command>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdBlitImage</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkImage</type> <name>srcImage</name></param>
@@ -8507,7 +12132,7 @@ typedef void <name>CAMetalLayer</name>;
<param len="regionCount">const <type>VkImageBlit</type>* <name>pRegions</name></param>
<param><type>VkFilter</type> <name>filter</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyBufferToImage</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>srcBuffer</name></param>
@@ -8516,7 +12141,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>regionCount</name></param>
<param len="regionCount">const <type>VkBufferImageCopy</type>* <name>pRegions</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyImageToBuffer</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkImage</type> <name>srcImage</name></param>
@@ -8525,7 +12150,24 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>regionCount</name></param>
<param len="regionCount">const <type>VkBufferImageCopy</type>* <name>pRegions</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyMemoryIndirectNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>copyBufferAddress</name></param>
+ <param><type>uint32_t</type> <name>copyCount</name></param>
+ <param><type>uint32_t</type> <name>stride</name></param>
+ </command>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyMemoryToImageIndirectNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>copyBufferAddress</name></param>
+ <param><type>uint32_t</type> <name>copyCount</name></param>
+ <param><type>uint32_t</type> <name>stride</name></param>
+ <param><type>VkImage</type> <name>dstImage</name></param>
+ <param><type>VkImageLayout</type> <name>dstImageLayout</name></param>
+ <param len="copyCount">const <type>VkImageSubresourceLayers</type>* <name>pImageSubresources</name></param>
+ </command>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdUpdateBuffer</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>dstBuffer</name></param>
@@ -8533,7 +12175,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDeviceSize</type> <name>dataSize</name></param>
<param len="dataSize">const <type>void</type>* <name>pData</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" comment="transfer support is only available when VK_KHR_maintenance1 is enabled, as documented in valid usage language in the specification">
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action" comment="transfer support is only available when VK_KHR_maintenance1 is enabled, as documented in valid usage language in the specification">
<proto><type>void</type> <name>vkCmdFillBuffer</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>dstBuffer</name></param>
@@ -8541,7 +12183,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDeviceSize</type> <name>size</name></param>
<param><type>uint32_t</type> <name>data</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdClearColorImage</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkImage</type> <name>image</name></param>
@@ -8550,7 +12192,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>rangeCount</name></param>
<param len="rangeCount">const <type>VkImageSubresourceRange</type>* <name>pRanges</name></param>
</command>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdClearDepthStencilImage</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkImage</type> <name>image</name></param>
@@ -8559,7 +12201,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>rangeCount</name></param>
<param len="rangeCount">const <type>VkImageSubresourceRange</type>* <name>pRanges</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdClearAttachments</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>attachmentCount</name></param>
@@ -8567,7 +12209,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>rectCount</name></param>
<param len="rectCount">const <type>VkClearRect</type>* <name>pRects</name></param>
</command>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdResolveImage</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkImage</type> <name>srcImage</name></param>
@@ -8577,19 +12219,19 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>regionCount</name></param>
<param len="regionCount">const <type>VkImageResolve</type>* <name>pRegions</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="outside" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
<proto><type>void</type> <name>vkCmdSetEvent</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkEvent</type> <name>event</name></param>
<param optional="true"><type>VkPipelineStageFlags</type> <name>stageMask</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="outside" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
<proto><type>void</type> <name>vkCmdResetEvent</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkEvent</type> <name>event</name></param>
<param optional="true"><type>VkPipelineStageFlags</type> <name>stageMask</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
<proto><type>void</type> <name>vkCmdWaitEvents</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>eventCount</name></param>
@@ -8603,7 +12245,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>uint32_t</type> <name>imageMemoryBarrierCount</name></param>
<param len="imageMemoryBarrierCount">const <type>VkImageMemoryBarrier</type>* <name>pImageMemoryBarriers</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
<proto><type>void</type> <name>vkCmdPipelineBarrier</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>VkPipelineStageFlags</type> <name>srcStageMask</name></param>
@@ -8616,43 +12258,43 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>uint32_t</type> <name>imageMemoryBarrierCount</name></param>
<param len="imageMemoryBarrierCount">const <type>VkImageMemoryBarrier</type>* <name>pImageMemoryBarriers</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdBeginQuery</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>query</name></param>
<param optional="true"><type>VkQueryControlFlags</type> <name>flags</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdEndQuery</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>query</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdBeginConditionalRenderingEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkConditionalRenderingBeginInfoEXT</type>* <name>pConditionalRenderingBegin</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdEndConditionalRenderingEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode,opticalflow" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdResetQueryPool</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>firstQuery</name></param>
<param><type>uint32_t</type> <name>queryCount</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute,decode,encode,opticalflow" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdWriteTimestamp</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPipelineStageFlagBits</type> <name>pipelineStage</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>query</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyQueryPoolResults</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
@@ -8663,7 +12305,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDeviceSize</type> <name>stride</name></param>
<param optional="true"><type>VkQueryResultFlags</type> <name>flags</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdPushConstants</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPipelineLayout</type> <name>layout</name></param>
@@ -8672,22 +12314,22 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>size</name></param>
<param len="size">const <type>void</type>* <name>pValues</name></param>
</command>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary" tasks="action,state,synchronization">
<proto><type>void</type> <name>vkCmdBeginRenderPass</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkRenderPassBeginInfo</type>* <name>pRenderPassBegin</name></param>
<param><type>VkSubpassContents</type> <name>contents</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary" tasks="action,state,synchronization">
<proto><type>void</type> <name>vkCmdNextSubpass</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkSubpassContents</type> <name>contents</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary" tasks="action,state,synchronization">
<proto><type>void</type> <name>vkCmdEndRenderPass</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary">
+ <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="indirection">
<proto><type>void</type> <name>vkCmdExecuteCommands</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>commandBufferCount</name></param>
@@ -8752,7 +12394,8 @@ typedef void <name>CAMetalLayer</name>;
<proto><type>VkResult</type> <name>vkCreateSharedSwapchainsKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>uint32_t</type> <name>swapchainCount</name></param>
- <param len="swapchainCount" externsync="pCreateInfos[].surface,pCreateInfos[].oldSwapchain">const <type>VkSwapchainCreateInfoKHR</type>* <name>pCreateInfos</name></param>
+ <param api="vulkan" len="swapchainCount" externsync="pCreateInfos[].surface,pCreateInfos[].oldSwapchain">const <type>VkSwapchainCreateInfoKHR</type>* <name>pCreateInfos</name></param>
+ <param api="vulkansc" len="swapchainCount" externsync="pCreateInfos[].surface">const <type>VkSwapchainCreateInfoKHR</type>* <name>pCreateInfos</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param len="swapchainCount"><type>VkSwapchainKHR</type>* <name>pSwapchains</name></param>
</command>
@@ -8778,21 +12421,22 @@ typedef void <name>CAMetalLayer</name>;
<command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_SURFACE_LOST_KHR">
<proto><type>VkResult</type> <name>vkGetPhysicalDeviceSurfaceFormatsKHR</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
- <param><type>VkSurfaceKHR</type> <name>surface</name></param>
+ <param optional="true"><type>VkSurfaceKHR</type> <name>surface</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pSurfaceFormatCount</name></param>
<param optional="true" len="pSurfaceFormatCount"><type>VkSurfaceFormatKHR</type>* <name>pSurfaceFormats</name></param>
</command>
<command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_SURFACE_LOST_KHR">
<proto><type>VkResult</type> <name>vkGetPhysicalDeviceSurfacePresentModesKHR</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
- <param><type>VkSurfaceKHR</type> <name>surface</name></param>
+ <param optional="true"><type>VkSurfaceKHR</type> <name>surface</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pPresentModeCount</name></param>
<param optional="true" len="pPresentModeCount"><type>VkPresentModeKHR</type>* <name>pPresentModes</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_DEVICE_LOST,VK_ERROR_SURFACE_LOST_KHR,VK_ERROR_NATIVE_WINDOW_IN_USE_KHR,VK_ERROR_INITIALIZATION_FAILED">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_DEVICE_LOST,VK_ERROR_SURFACE_LOST_KHR,VK_ERROR_NATIVE_WINDOW_IN_USE_KHR,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_COMPRESSION_EXHAUSTED_EXT">
<proto><type>VkResult</type> <name>vkCreateSwapchainKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param externsync="pCreateInfo-&gt;surface,pCreateInfo-&gt;oldSwapchain">const <type>VkSwapchainCreateInfoKHR</type>* <name>pCreateInfo</name></param>
+ <param api="vulkan" externsync="pCreateInfo-&gt;surface,pCreateInfo-&gt;oldSwapchain">const <type>VkSwapchainCreateInfoKHR</type>* <name>pCreateInfo</name></param>
+ <param api="vulkansc" externsync="pCreateInfo-&gt;surface">const <type>VkSwapchainCreateInfoKHR</type>* <name>pCreateInfo</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param><type>VkSwapchainKHR</type>* <name>pSwapchain</name></param>
</command>
@@ -8957,16 +12601,16 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDevice</type> <name>device</name></param>
<param externsync="pTagInfo-&gt;object">const <type>VkDebugMarkerObjectTagInfoEXT</type>* <name>pTagInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDebugMarkerBeginEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkDebugMarkerMarkerInfoEXT</type>* <name>pMarkerInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDebugMarkerEndEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDebugMarkerInsertEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkDebugMarkerMarkerInfoEXT</type>* <name>pMarkerInfo</name></param>
@@ -8989,18 +12633,18 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkExternalMemoryHandleTypeFlagsNV</type> <name>handleType</name></param>
<param><type>HANDLE</type>* <name>pHandle</name></param>
</command>
- <command queues="graphics,compute" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action,indirection">
<proto><type>void</type> <name>vkCmdExecuteGeneratedCommandsNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>isPreprocessed</name></param>
<param>const <type>VkGeneratedCommandsInfoNV</type>* <name>pGeneratedCommandsInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdPreprocessGeneratedCommandsNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkGeneratedCommandsInfoNV</type>* <name>pGeneratedCommandsInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindPipelineShaderGroupNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
@@ -9045,7 +12689,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkFormatProperties2</type>* <name>pFormatProperties</name></param>
</command>
<command name="vkGetPhysicalDeviceFormatProperties2KHR" alias="vkGetPhysicalDeviceFormatProperties2"/>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_FORMAT_NOT_SUPPORTED">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_FORMAT_NOT_SUPPORTED,VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR">
<proto><type>VkResult</type> <name>vkGetPhysicalDeviceImageFormatProperties2</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param>const <type>VkPhysicalDeviceImageFormatInfo2</type>* <name>pImageFormatInfo</name></param>
@@ -9073,7 +12717,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" len="pPropertyCount"><type>VkSparseImageFormatProperties2</type>* <name>pProperties</name></param>
</command>
<command name="vkGetPhysicalDeviceSparseImageFormatProperties2KHR" alias="vkGetPhysicalDeviceSparseImageFormatProperties2"/>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdPushDescriptorSetKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
@@ -9141,6 +12785,24 @@ typedef void <name>CAMetalLayer</name>;
<param>const <type>VkMemoryGetRemoteAddressInfoNV</type>* <name>pMemoryGetRemoteAddressInfo</name></param>
<param><type>VkRemoteAddressNV</type>* <name>pAddress</name></param>
</command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkGetMemorySciBufNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkMemoryGetSciBufInfoNV</type>* <name>pGetSciBufInfo</name></param>
+ <param><type>NvSciBufObj</type>* <name>pHandle</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_INVALID_EXTERNAL_HANDLE">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceExternalMemorySciBufPropertiesNV</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></param>
+ <param><type>NvSciBufObj</type> <name>handle</name></param>
+ <param><type>VkMemorySciBufPropertiesNV</type>* <name>pMemorySciBufProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceSciBufAttributesNV</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param><type>NvSciBufAttrList</type> <name>pAttributes</name></param>
+ </command>
<command>
<proto><type>void</type> <name>vkGetPhysicalDeviceExternalSemaphoreProperties</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
@@ -9210,6 +12872,58 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkImportFenceFdInfoKHR</type>* <name>pImportFenceFdInfo</name></param>
</command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_NOT_PERMITTED_EXT">
+ <proto><type>VkResult</type> <name>vkGetFenceSciSyncFenceNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkFenceGetSciSyncInfoNV</type>* <name>pGetSciSyncHandleInfo</name></param>
+ <param><type>void</type>* <name>pHandle</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_NOT_PERMITTED_EXT">
+ <proto><type>VkResult</type> <name>vkGetFenceSciSyncObjNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkFenceGetSciSyncInfoNV</type>* <name>pGetSciSyncHandleInfo</name></param>
+ <param><type>void</type>* <name>pHandle</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_NOT_PERMITTED_EXT">
+ <proto><type>VkResult</type> <name>vkImportFenceSciSyncFenceNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkImportFenceSciSyncInfoNV</type>* <name>pImportFenceSciSyncInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_NOT_PERMITTED_EXT">
+ <proto><type>VkResult</type> <name>vkImportFenceSciSyncObjNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkImportFenceSciSyncInfoNV</type>* <name>pImportFenceSciSyncInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_NOT_PERMITTED_EXT">
+ <proto><type>VkResult</type> <name>vkGetSemaphoreSciSyncObjNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkSemaphoreGetSciSyncInfoNV</type>* <name>pGetSciSyncInfo</name></param>
+ <param><type>void</type>* <name>pHandle</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_NOT_PERMITTED_EXT,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkImportSemaphoreSciSyncObjNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkImportSemaphoreSciSyncInfoNV</type>* <name>pImportSemaphoreSciSyncInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceSciSyncAttributesNV</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param>const <type>VkSciSyncAttributesInfoNV</type>* <name>pSciSyncAttributesInfo</name></param>
+ <param><type>NvSciSyncAttrList</type> <name>pAttributes</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateSemaphoreSciSyncPoolNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkSemaphoreSciSyncPoolCreateInfoNV</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkSemaphoreSciSyncPoolNV</type>* <name>pSemaphorePool</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroySemaphoreSciSyncPoolNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true" externsync="true"><type>VkSemaphoreSciSyncPoolNV</type> <name>semaphorePool</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
<command successcodes="VK_SUCCESS">
<proto><type>VkResult</type> <name>vkReleaseDisplayEXT</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
@@ -9303,7 +13017,7 @@ typedef void <name>CAMetalLayer</name>;
<param len="bindInfoCount">const <type>VkBindImageMemoryInfo</type>* <name>pBindInfos</name></param>
</command>
<command name="vkBindImageMemory2KHR" alias="vkBindImageMemory2"/>
- <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,transfer" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetDeviceMask</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>deviceMask</name></param>
@@ -9326,7 +13040,7 @@ typedef void <name>CAMetalLayer</name>;
<param>const <type>VkAcquireNextImageInfoKHR</type>* <name>pAcquireInfo</name></param>
<param><type>uint32_t</type>* <name>pImageIndex</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDispatchBase</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>baseGroupX</name></param>
@@ -9362,12 +13076,12 @@ typedef void <name>CAMetalLayer</name>;
<command>
<proto><type>void</type> <name>vkUpdateDescriptorSetWithTemplate</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param externsync="true"><type>VkDescriptorSet</type> <name>descriptorSet</name></param>
+ <param><type>VkDescriptorSet</type> <name>descriptorSet</name></param>
<param><type>VkDescriptorUpdateTemplate</type> <name>descriptorUpdateTemplate</name></param>
<param noautovalidity="true">const <type>void</type>* <name>pData</name></param>
</command>
<command name="vkUpdateDescriptorSetWithTemplateKHR" alias="vkUpdateDescriptorSetWithTemplate"/>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdPushDescriptorSetWithTemplateKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkDescriptorUpdateTemplate</type> <name>descriptorUpdateTemplate</name></param>
@@ -9421,21 +13135,31 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param><type>VkSurfaceKHR</type>* <name>pSurface</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetViewportWScalingNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstViewport</name></param>
<param><type>uint32_t</type> <name>viewportCount</name></param>
<param len="viewportCount">const <type>VkViewportWScalingNV</type>* <name>pViewportWScalings</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetDiscardRectangleEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstDiscardRectangle</name></param>
<param><type>uint32_t</type> <name>discardRectangleCount</name></param>
<param len="discardRectangleCount">const <type>VkRect2D</type>* <name>pDiscardRectangles</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDiscardRectangleEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>discardRectangleEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDiscardRectangleModeEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDiscardRectangleModeEXT</type> <name>discardRectangleMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetSampleLocationsEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkSampleLocationsInfoEXT</type>* <name>pSampleLocationsInfo</name></param>
@@ -9506,6 +13230,28 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" len="pSparseMemoryRequirementCount"><type>VkSparseImageMemoryRequirements2</type>* <name>pSparseMemoryRequirements</name></param>
</command>
<command name="vkGetImageSparseMemoryRequirements2KHR" alias="vkGetImageSparseMemoryRequirements2"/>
+ <command>
+ <proto><type>void</type> <name>vkGetDeviceBufferMemoryRequirements</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkDeviceBufferMemoryRequirements</type>* <name>pInfo</name></param>
+ <param><type>VkMemoryRequirements2</type>* <name>pMemoryRequirements</name></param>
+ </command>
+ <command name="vkGetDeviceBufferMemoryRequirementsKHR" alias="vkGetDeviceBufferMemoryRequirements"/>
+ <command>
+ <proto><type>void</type> <name>vkGetDeviceImageMemoryRequirements</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkDeviceImageMemoryRequirements</type>* <name>pInfo</name></param>
+ <param><type>VkMemoryRequirements2</type>* <name>pMemoryRequirements</name></param>
+ </command>
+ <command name="vkGetDeviceImageMemoryRequirementsKHR" alias="vkGetDeviceImageMemoryRequirements"/>
+ <command>
+ <proto><type>void</type> <name>vkGetDeviceImageSparseMemoryRequirements</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkDeviceImageMemoryRequirements</type>* <name>pInfo</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pSparseMemoryRequirementCount</name></param>
+ <param optional="true" len="pSparseMemoryRequirementCount"><type>VkSparseImageMemoryRequirements2</type>* <name>pSparseMemoryRequirements</name></param>
+ </command>
+ <command name="vkGetDeviceImageSparseMemoryRequirementsKHR" alias="vkGetDeviceImageSparseMemoryRequirements"/>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkCreateSamplerYcbcrConversion</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -9582,8 +13328,8 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkImage</type> <name>image</name></param>
<param><type>int</type> <name>nativeFenceFd</name></param>
- <param><type>VkSemaphore</type> <name>semaphore</name></param>
- <param><type>VkFence</type> <name>fence</name></param>
+ <param optional="true"><type>VkSemaphore</type> <name>semaphore</name></param>
+ <param optional="true"><type>VkFence</type> <name>fence</name></param>
</command>
<command>
<proto><type>VkResult</type> <name>vkQueueSignalReleaseImageANDROID</name></proto>
@@ -9609,19 +13355,21 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkBool32</type> <name>localDimmingEnable</name></param>
</command>
<command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
- <proto><type>VkResult</type> <name>vkGetPhysicalDeviceCalibrateableTimeDomainsEXT</name></proto>
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceCalibrateableTimeDomainsKHR</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pTimeDomainCount</name></param>
- <param optional="true" len="pTimeDomainCount"><type>VkTimeDomainEXT</type>* <name>pTimeDomains</name></param>
+ <param optional="true" len="pTimeDomainCount"><type>VkTimeDomainKHR</type>* <name>pTimeDomains</name></param>
</command>
+ <command name="vkGetPhysicalDeviceCalibrateableTimeDomainsEXT" alias="vkGetPhysicalDeviceCalibrateableTimeDomainsKHR"/>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
- <proto><type>VkResult</type> <name>vkGetCalibratedTimestampsEXT</name></proto>
+ <proto><type>VkResult</type> <name>vkGetCalibratedTimestampsKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>uint32_t</type> <name>timestampCount</name></param>
- <param len="timestampCount">const <type>VkCalibratedTimestampInfoEXT</type>* <name>pTimestampInfos</name></param>
+ <param len="timestampCount">const <type>VkCalibratedTimestampInfoKHR</type>* <name>pTimestampInfos</name></param>
<param len="timestampCount"><type>uint64_t</type>* <name>pTimestamps</name></param>
<param><type>uint64_t</type>* <name>pMaxDeviation</name></param>
</command>
+ <command name="vkGetCalibratedTimestampsEXT" alias="vkGetCalibratedTimestampsKHR"/>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkSetDebugUtilsObjectNameEXT</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -9646,16 +13394,16 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkQueue</type> <name>queue</name></param>
<param>const <type>VkDebugUtilsLabelEXT</type>* <name>pLabelInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdBeginDebugUtilsLabelEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkDebugUtilsLabelEXT</type>* <name>pLabelInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdEndDebugUtilsLabelEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdInsertDebugUtilsLabelEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkDebugUtilsLabelEXT</type>* <name>pLabelInfo</name></param>
@@ -9684,13 +13432,13 @@ typedef void <name>CAMetalLayer</name>;
<proto><type>VkResult</type> <name>vkGetMemoryHostPointerPropertiesEXT</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkExternalMemoryHandleTypeFlagBits</type> <name>handleType</name></param>
- <param optional="false">const <type>void</type>* <name>pHostPointer</name></param>
+ <param>const <type>void</type>* <name>pHostPointer</name></param>
<param><type>VkMemoryHostPointerPropertiesEXT</type>* <name>pMemoryHostPointerProperties</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdWriteBufferMarkerAMD</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param><type>VkPipelineStageFlagBits</type> <name>pipelineStage</name></param>
+ <param optional="true"><type>VkPipelineStageFlagBits</type> <name>pipelineStage</name></param>
<param><type>VkBuffer</type> <name>dstBuffer</name></param>
<param><type>VkDeviceSize</type> <name>dstOffset</name></param>
<param><type>uint32_t</type> <name>marker</name></param>
@@ -9703,21 +13451,21 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkRenderPass</type>* <name>pRenderPass</name></param>
</command>
<command name="vkCreateRenderPass2KHR" alias="vkCreateRenderPass2"/>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary" tasks="action,state,synchronization">
<proto><type>void</type> <name>vkCmdBeginRenderPass2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkRenderPassBeginInfo</type>* <name>pRenderPassBegin</name></param>
<param>const <type>VkSubpassBeginInfo</type>* <name>pSubpassBeginInfo</name></param>
</command>
<command name="vkCmdBeginRenderPass2KHR" alias="vkCmdBeginRenderPass2"/>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary" tasks="action,state,synchronization">
<proto><type>void</type> <name>vkCmdNextSubpass2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkSubpassBeginInfo</type>* <name>pSubpassBeginInfo</name></param>
<param>const <type>VkSubpassEndInfo</type>* <name>pSubpassEndInfo</name></param>
</command>
<command name="vkCmdNextSubpass2KHR" alias="vkCmdNextSubpass2"/>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary" tasks="action,state,synchronization">
<proto><type>void</type> <name>vkCmdEndRenderPass2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkSubpassEndInfo</type>* <name>pSubpassEndInfo</name></param>
@@ -9755,7 +13503,7 @@ typedef void <name>CAMetalLayer</name>;
<param>const <type>VkMemoryGetAndroidHardwareBufferInfoANDROID</type>* <name>pInfo</name></param>
<param>struct <type>AHardwareBuffer</type>** <name>pBuffer</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawIndirectCount</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
@@ -9767,7 +13515,7 @@ typedef void <name>CAMetalLayer</name>;
</command>
<command name="vkCmdDrawIndirectCountKHR" alias="vkCmdDrawIndirectCount"/>
<command name="vkCmdDrawIndirectCountAMD" alias="vkCmdDrawIndirectCount"/>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawIndexedIndirectCount</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
@@ -9779,7 +13527,7 @@ typedef void <name>CAMetalLayer</name>;
</command>
<command name="vkCmdDrawIndexedIndirectCountKHR" alias="vkCmdDrawIndexedIndirectCount"/>
<command name="vkCmdDrawIndexedIndirectCountAMD" alias="vkCmdDrawIndexedIndirectCount"/>
- <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdSetCheckpointNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param noautovalidity="true">const <type>void</type>* <name>pCheckpointMarker</name></param>
@@ -9790,7 +13538,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="false,true"><type>uint32_t</type>* <name>pCheckpointDataCount</name></param>
<param optional="true" len="pCheckpointDataCount"><type>VkCheckpointDataNV</type>* <name>pCheckpointData</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindTransformFeedbackBuffersEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstBinding</name></param>
@@ -9799,7 +13547,7 @@ typedef void <name>CAMetalLayer</name>;
<param len="bindingCount">const <type>VkDeviceSize</type>* <name>pOffsets</name></param>
<param optional="true" len="bindingCount" noautovalidity="true">const <type>VkDeviceSize</type>* <name>pSizes</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBeginTransformFeedbackEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstCounterBuffer</name></param>
@@ -9807,7 +13555,7 @@ typedef void <name>CAMetalLayer</name>;
<param noautovalidity="true" len="counterBufferCount">const <type>VkBuffer</type>* <name>pCounterBuffers</name></param>
<param optional="true" len="counterBufferCount">const <type>VkDeviceSize</type>* <name>pCounterBufferOffsets</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdEndTransformFeedbackEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstCounterBuffer</name></param>
@@ -9815,7 +13563,7 @@ typedef void <name>CAMetalLayer</name>;
<param noautovalidity="true" len="counterBufferCount">const <type>VkBuffer</type>* <name>pCounterBuffers</name></param>
<param optional="true" len="counterBufferCount">const <type>VkDeviceSize</type>* <name>pCounterBufferOffsets</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdBeginQueryIndexedEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
@@ -9823,14 +13571,14 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>VkQueryControlFlags</type> <name>flags</name></param>
<param><type>uint32_t</type> <name>index</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute,decode,encode" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action,state">
<proto><type>void</type> <name>vkCmdEndQueryIndexedEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>query</name></param>
<param><type>uint32_t</type> <name>index</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawIndirectByteCountEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>instanceCount</name></param>
@@ -9840,40 +13588,47 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>counterOffset</name></param>
<param><type>uint32_t</type> <name>vertexStride</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetExclusiveScissorNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstExclusiveScissor</name></param>
<param><type>uint32_t</type> <name>exclusiveScissorCount</name></param>
<param len="exclusiveScissorCount">const <type>VkRect2D</type>* <name>pExclusiveScissors</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetExclusiveScissorEnableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>firstExclusiveScissor</name></param>
+ <param><type>uint32_t</type> <name>exclusiveScissorCount</name></param>
+ <param len="exclusiveScissorCount">const <type>VkBool32</type>* <name>pExclusiveScissorEnables</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindShadingRateImageNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>VkImageView</type> <name>imageView</name></param>
<param><type>VkImageLayout</type> <name>imageLayout</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetViewportShadingRatePaletteNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstViewport</name></param>
<param><type>uint32_t</type> <name>viewportCount</name></param>
<param len="viewportCount">const <type>VkShadingRatePaletteNV</type>* <name>pShadingRatePalettes</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetCoarseSampleOrderNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkCoarseSampleOrderTypeNV</type> <name>sampleOrderType</name></param>
<param optional="true"><type>uint32_t</type> <name>customSampleOrderCount</name></param>
<param len="customSampleOrderCount">const <type>VkCoarseSampleOrderCustomNV</type>* <name>pCustomSampleOrders</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawMeshTasksNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>taskCount</name></param>
<param><type>uint32_t</type> <name>firstTask</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawMeshTasksIndirectNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
@@ -9881,7 +13636,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>drawCount</name></param>
<param><type>uint32_t</type> <name>stride</name></param>
</command>
- <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdDrawMeshTasksIndirectCountNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>buffer</name></param>
@@ -9891,6 +13646,31 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>maxDrawCount</name></param>
<param><type>uint32_t</type> <name>stride</name></param>
</command>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDrawMeshTasksEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>groupCountX</name></param>
+ <param><type>uint32_t</type> <name>groupCountY</name></param>
+ <param><type>uint32_t</type> <name>groupCountZ</name></param>
+ </command>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDrawMeshTasksIndirectEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBuffer</type> <name>buffer</name></param>
+ <param><type>VkDeviceSize</type> <name>offset</name></param>
+ <param><type>uint32_t</type> <name>drawCount</name></param>
+ <param><type>uint32_t</type> <name>stride</name></param>
+ </command>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDrawMeshTasksIndirectCountEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBuffer</type> <name>buffer</name></param>
+ <param><type>VkDeviceSize</type> <name>offset</name></param>
+ <param><type>VkBuffer</type> <name>countBuffer</name></param>
+ <param><type>VkDeviceSize</type> <name>countBufferOffset</name></param>
+ <param><type>uint32_t</type> <name>maxDrawCount</name></param>
+ <param><type>uint32_t</type> <name>stride</name></param>
+ </command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkCompileDeferredNV</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -9904,7 +13684,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param><type>VkAccelerationStructureNV</type>* <name>pAccelerationStructure</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdBindInvocationMaskHUAWEI</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>VkImageView</type> <name>imageView</name></param>
@@ -9934,14 +13714,14 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>bindInfoCount</name></param>
<param len="bindInfoCount">const <type>VkBindAccelerationStructureMemoryInfoNV</type>* <name>pBindInfos</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyAccelerationStructureNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkAccelerationStructureNV</type> <name>dst</name></param>
<param><type>VkAccelerationStructureNV</type> <name>src</name></param>
<param><type>VkCopyAccelerationStructureModeKHR</type> <name>mode</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyAccelerationStructureKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkCopyAccelerationStructureInfoKHR</type>* <name>pInfo</name></param>
@@ -9952,7 +13732,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
<param>const <type>VkCopyAccelerationStructureInfoKHR</type>* <name>pInfo</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyAccelerationStructureToMemoryKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkCopyAccelerationStructureToMemoryInfoKHR</type>* <name>pInfo</name></param>
@@ -9963,7 +13743,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
<param>const <type>VkCopyAccelerationStructureToMemoryInfoKHR</type>* <name>pInfo</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCopyMemoryToAccelerationStructureKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkCopyMemoryToAccelerationStructureInfoKHR</type>* <name>pInfo</name></param>
@@ -9974,7 +13754,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
<param>const <type>VkCopyMemoryToAccelerationStructureInfoKHR</type>* <name>pInfo</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdWriteAccelerationStructuresPropertiesKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>accelerationStructureCount</name></param>
@@ -9983,7 +13763,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>firstQuery</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdWriteAccelerationStructuresPropertiesNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>accelerationStructureCount</name></param>
@@ -9992,7 +13772,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>firstQuery</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdBuildAccelerationStructureNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkAccelerationStructureInfoNV</type>* <name>pInfo</name></param>
@@ -10014,7 +13794,7 @@ typedef void <name>CAMetalLayer</name>;
<param len="dataSize"><type>void</type>* <name>pData</name></param>
<param><type>size_t</type> <name>stride</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdTraceRaysKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkStridedDeviceAddressRegionKHR</type>* <name>pRaygenShaderBindingTable</name></param>
@@ -10025,7 +13805,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>height</name></param>
<param><type>uint32_t</type> <name>depth</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdTraceRaysNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBuffer</type> <name>raygenShaderBindingTableBuffer</name></param>
@@ -10069,7 +13849,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>size_t</type> <name>dataSize</name></param>
<param len="dataSize"><type>void</type>* <name>pData</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_SHADER_NV">
+ <command api="vulkan" successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_SHADER_NV">
<proto><type>VkResult</type> <name>vkCreateRayTracingPipelinesNV</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param optional="true"><type>VkPipelineCache</type> <name>pipelineCache</name></param>
@@ -10078,7 +13858,16 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS">
+ <command api="vulkansc" successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_NO_PIPELINE_MATCH,VK_ERROR_OUT_OF_POOL_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateRayTracingPipelinesNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkPipelineCache</type> <name>pipelineCache</name></param>
+ <param><type>uint32_t</type> <name>createInfoCount</name></param>
+ <param len="createInfoCount">const <type>VkRayTracingPipelineCreateInfoNV</type>* <name>pCreateInfos</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
+ </command>
+ <command api="vulkan" successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS">
<proto><type>VkResult</type> <name>vkCreateRayTracingPipelinesKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
@@ -10088,13 +13877,23 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
</command>
+ <command api="vulkansc" successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS,VK_ERROR_NO_PIPELINE_MATCH,VK_ERROR_OUT_OF_POOL_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateRayTracingPipelinesKHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
+ <param><type>VkPipelineCache</type> <name>pipelineCache</name></param>
+ <param><type>uint32_t</type> <name>createInfoCount</name></param>
+ <param len="createInfoCount">const <type>VkRayTracingPipelineCreateInfoKHR</type>* <name>pCreateInfos</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
+ </command>
<command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkGetPhysicalDeviceCooperativeMatrixPropertiesNV</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pPropertyCount</name></param>
<param optional="true" len="pPropertyCount"><type>VkCooperativeMatrixPropertiesNV</type>* <name>pProperties</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdTraceRaysIndirectKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkStridedDeviceAddressRegionKHR</type>* <name>pRaygenShaderBindingTable</name></param>
@@ -10103,6 +13902,11 @@ typedef void <name>CAMetalLayer</name>;
<param>const <type>VkStridedDeviceAddressRegionKHR</type>* <name>pCallableShaderBindingTable</name></param>
<param><type>VkDeviceAddress</type> <name>indirectDeviceAddress</name></param>
</command>
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdTraceRaysIndirect2KHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>indirectDeviceAddress</name></param>
+ </command>
<command>
<proto><type>void</type> <name>vkGetDeviceAccelerationStructureCompatibilityKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -10116,7 +13920,7 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>group</name></param>
<param><type>VkShaderGroupShaderKHR</type> <name>groupShader</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetRayTracingPipelineStackSizeKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>pipelineStackSize</name></param>
@@ -10219,17 +14023,17 @@ typedef void <name>CAMetalLayer</name>;
<proto><type>void</type> <name>vkUninitializePerformanceApiINTEL</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
</command>
- <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_OUT_OF_HOST_MEMORY" tasks="action,state">
<proto><type>VkResult</type> <name>vkCmdSetPerformanceMarkerINTEL</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkPerformanceMarkerInfoINTEL</type>* <name>pMarkerInfo</name></param>
</command>
- <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_OUT_OF_HOST_MEMORY" tasks="action,state">
<proto><type>VkResult</type> <name>vkCmdSetPerformanceStreamMarkerINTEL</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkPerformanceStreamMarkerInfoINTEL</type>* <name>pMarkerInfo</name></param>
</command>
- <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <command queues="graphics,compute,transfer" renderpass="both" cmdbufferlevel="primary,secondary" successcodes="VK_SUCCESS" errorcodes="VK_ERROR_TOO_MANY_OBJECTS,VK_ERROR_OUT_OF_HOST_MEMORY" tasks="state">
<proto><type>VkResult</type> <name>vkCmdSetPerformanceOverrideINTEL</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkPerformanceOverrideInfoINTEL</type>* <name>pOverrideInfo</name></param>
@@ -10283,18 +14087,28 @@ typedef void <name>CAMetalLayer</name>;
<param optional="false,true"><type>uint32_t</type>* <name>pInternalRepresentationCount</name></param>
<param optional="true" len="pInternalRepresentationCount"><type>VkPipelineExecutableInternalRepresentationKHR</type>* <name>pInternalRepresentations</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetLineStippleEXT</name></proto>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetLineStippleKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>lineStippleFactor</name></param>
<param><type>uint16_t</type> <name>lineStipplePattern</name></param>
</command>
+ <command name="vkCmdSetLineStippleEXT" alias="vkCmdSetLineStippleKHR"/>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetFaultData</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkFaultQueryBehavior</type> <name>faultQueryBehavior</name></param>
+ <param><type>VkBool32</type>* <name>pUnrecordedFaults</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pFaultCount</name></param>
+ <param optional="true" len="pFaultCount"><type>VkFaultData</type>* <name>pFaults</name></param>
+ </command>
<command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
- <proto><type>VkResult</type> <name>vkGetPhysicalDeviceToolPropertiesEXT</name></proto>
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceToolProperties</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pToolCount</name></param>
- <param optional="true" len="pToolCount"><type>VkPhysicalDeviceToolPropertiesEXT</type>* <name>pToolProperties</name></param>
+ <param optional="true" len="pToolCount"><type>VkPhysicalDeviceToolProperties</type>* <name>pToolProperties</name></param>
</command>
+ <command name="vkGetPhysicalDeviceToolPropertiesEXT" alias="vkGetPhysicalDeviceToolProperties"/>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR">
<proto><type>VkResult</type> <name>vkCreateAccelerationStructureKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -10302,14 +14116,14 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param><type>VkAccelerationStructureKHR</type>* <name>pAccelerationStructure</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdBuildAccelerationStructuresKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>infoCount</name></param>
<param len="infoCount">const <type>VkAccelerationStructureBuildGeometryInfoKHR</type>* <name>pInfos</name></param>
<param len="infoCount">const <type>VkAccelerationStructureBuildRangeInfoKHR</type>* const* <name>ppBuildRangeInfos</name></param>
</command>
- <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary">
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdBuildAccelerationStructuresIndirectKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>infoCount</name></param>
@@ -10358,35 +14172,59 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkDeferredOperationKHR</type> <name>operation</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetCullModeEXT</name></proto>
+ <command>
+ <proto><type>void</type> <name>vkGetPipelineIndirectMemoryRequirementsNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkComputePipelineCreateInfo</type>* <name>pCreateInfo</name></param>
+ <param><type>VkMemoryRequirements2</type>* <name>pMemoryRequirements</name></param>
+ </command>
+ <command>
+ <proto><type>VkDeviceAddress</type> <name>vkGetPipelineIndirectDeviceAddressNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkPipelineIndirectDeviceAddressInfoNV</type>* <name>pInfo</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCullMode</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>VkCullModeFlags</type> <name>cullMode</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetFrontFaceEXT</name></proto>
+ <command name="vkCmdSetCullModeEXT" alias="vkCmdSetCullMode"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetFrontFace</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkFrontFace</type> <name>frontFace</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetPrimitiveTopologyEXT</name></proto>
+ <command name="vkCmdSetFrontFaceEXT" alias="vkCmdSetFrontFace"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetPrimitiveTopology</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkPrimitiveTopology</type> <name>primitiveTopology</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetViewportWithCountEXT</name></proto>
+ <command name="vkCmdSetPrimitiveTopologyEXT" alias="vkCmdSetPrimitiveTopology"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetViewportWithCount</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>viewportCount</name></param>
<param len="viewportCount">const <type>VkViewport</type>* <name>pViewports</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetScissorWithCountEXT</name></proto>
+ <command name="vkCmdSetViewportWithCountEXT" alias="vkCmdSetViewportWithCount"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetScissorWithCount</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>scissorCount</name></param>
<param len="scissorCount">const <type>VkRect2D</type>* <name>pScissors</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdBindVertexBuffers2EXT</name></proto>
+ <command name="vkCmdSetScissorWithCountEXT" alias="vkCmdSetScissorWithCount"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindIndexBuffer2KHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param optional="true"><type>VkBuffer</type> <name>buffer</name></param>
+ <param><type>VkDeviceSize</type> <name>offset</name></param>
+ <param><type>VkDeviceSize</type> <name>size</name></param>
+ <param><type>VkIndexType</type> <name>indexType</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindVertexBuffers2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>firstBinding</name></param>
<param><type>uint32_t</type> <name>bindingCount</name></param>
@@ -10395,33 +14233,39 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" len="bindingCount">const <type>VkDeviceSize</type>* <name>pSizes</name></param>
<param optional="true" len="bindingCount">const <type>VkDeviceSize</type>* <name>pStrides</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetDepthTestEnableEXT</name></proto>
+ <command name="vkCmdBindVertexBuffers2EXT" alias="vkCmdBindVertexBuffers2"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthTestEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>depthTestEnable</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetDepthWriteEnableEXT</name></proto>
+ <command name="vkCmdSetDepthTestEnableEXT" alias="vkCmdSetDepthTestEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthWriteEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>depthWriteEnable</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetDepthCompareOpEXT</name></proto>
+ <command name="vkCmdSetDepthWriteEnableEXT" alias="vkCmdSetDepthWriteEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthCompareOp</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkCompareOp</type> <name>depthCompareOp</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetDepthBoundsTestEnableEXT</name></proto>
+ <command name="vkCmdSetDepthCompareOpEXT" alias="vkCmdSetDepthCompareOp"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthBoundsTestEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>depthBoundsTestEnable</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetStencilTestEnableEXT</name></proto>
+ <command name="vkCmdSetDepthBoundsTestEnableEXT" alias="vkCmdSetDepthBoundsTestEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetStencilTestEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>stencilTestEnable</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetStencilOpEXT</name></proto>
+ <command name="vkCmdSetStencilTestEnableEXT" alias="vkCmdSetStencilTestEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetStencilOp</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkStencilFaceFlags</type> <name>faceMask</name></param>
<param><type>VkStencilOp</type> <name>failOp</name></param>
@@ -10429,91 +14273,283 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkStencilOp</type> <name>depthFailOp</name></param>
<param><type>VkCompareOp</type> <name>compareOp</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command name="vkCmdSetStencilOpEXT" alias="vkCmdSetStencilOp"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetPatchControlPointsEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>patchControlPoints</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetRasterizerDiscardEnableEXT</name></proto>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetRasterizerDiscardEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>rasterizerDiscardEnable</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetDepthBiasEnableEXT</name></proto>
+ <command name="vkCmdSetRasterizerDiscardEnableEXT" alias="vkCmdSetRasterizerDiscardEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthBiasEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>depthBiasEnable</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command name="vkCmdSetDepthBiasEnableEXT" alias="vkCmdSetDepthBiasEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetLogicOpEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkLogicOp</type> <name>logicOp</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetPrimitiveRestartEnableEXT</name></proto>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetPrimitiveRestartEnable</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkBool32</type> <name>primitiveRestartEnable</name></param>
</command>
+ <command name="vkCmdSetPrimitiveRestartEnableEXT" alias="vkCmdSetPrimitiveRestartEnable"/>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetTessellationDomainOriginEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkTessellationDomainOrigin</type> <name>domainOrigin</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthClampEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>depthClampEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetPolygonModeEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkPolygonMode</type> <name>polygonMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetRasterizationSamplesEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkSampleCountFlagBits</type> <name>rasterizationSamples</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetSampleMaskEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkSampleCountFlagBits</type> <name>samples</name></param>
+ <param len="latexmath:[\lceil{\mathit{samples} \over 32}\rceil]" altlen="(samples + 31) / 32">const <type>VkSampleMask</type>* <name>pSampleMask</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetAlphaToCoverageEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>alphaToCoverageEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetAlphaToOneEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>alphaToOneEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetLogicOpEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>logicOpEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetColorBlendEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>firstAttachment</name></param>
+ <param><type>uint32_t</type> <name>attachmentCount</name></param>
+ <param len="attachmentCount">const <type>VkBool32</type>* <name>pColorBlendEnables</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetColorBlendEquationEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>firstAttachment</name></param>
+ <param><type>uint32_t</type> <name>attachmentCount</name></param>
+ <param len="attachmentCount">const <type>VkColorBlendEquationEXT</type>* <name>pColorBlendEquations</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetColorWriteMaskEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>firstAttachment</name></param>
+ <param><type>uint32_t</type> <name>attachmentCount</name></param>
+ <param len="attachmentCount" optional="false,true">const <type>VkColorComponentFlags</type>* <name>pColorWriteMasks</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetRasterizationStreamEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>rasterizationStream</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetConservativeRasterizationModeEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkConservativeRasterizationModeEXT</type> <name>conservativeRasterizationMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetExtraPrimitiveOverestimationSizeEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>float</type> <name>extraPrimitiveOverestimationSize</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthClipEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>depthClipEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetSampleLocationsEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>sampleLocationsEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetColorBlendAdvancedEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>firstAttachment</name></param>
+ <param><type>uint32_t</type> <name>attachmentCount</name></param>
+ <param len="attachmentCount">const <type>VkColorBlendAdvancedEXT</type>* <name>pColorBlendAdvanced</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetProvokingVertexModeEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkProvokingVertexModeEXT</type> <name>provokingVertexMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetLineRasterizationModeEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkLineRasterizationModeEXT</type> <name>lineRasterizationMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetLineStippleEnableEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>stippledLineEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthClipNegativeOneToOneEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>negativeOneToOne</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetViewportWScalingEnableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>viewportWScalingEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetViewportSwizzleNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>firstViewport</name></param>
+ <param><type>uint32_t</type> <name>viewportCount</name></param>
+ <param len="viewportCount">const <type>VkViewportSwizzleNV</type>* <name>pViewportSwizzles</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCoverageToColorEnableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>coverageToColorEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCoverageToColorLocationNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>coverageToColorLocation</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCoverageModulationModeNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkCoverageModulationModeNV</type> <name>coverageModulationMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCoverageModulationTableEnableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>coverageModulationTableEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCoverageModulationTableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>coverageModulationTableCount</name></param>
+ <param len="coverageModulationTableCount">const <type>float</type>* <name>pCoverageModulationTable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetShadingRateImageEnableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>shadingRateImageEnable</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetCoverageReductionModeNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkCoverageReductionModeNV</type> <name>coverageReductionMode</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetRepresentativeFragmentTestEnableNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkBool32</type> <name>representativeFragmentTestEnable</name></param>
+ </command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
- <proto><type>VkResult</type> <name>vkCreatePrivateDataSlotEXT</name></proto>
+ <proto><type>VkResult</type> <name>vkCreatePrivateDataSlot</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param>const <type>VkPrivateDataSlotCreateInfoEXT</type>* <name>pCreateInfo</name></param>
+ <param>const <type>VkPrivateDataSlotCreateInfo</type>* <name>pCreateInfo</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
- <param><type>VkPrivateDataSlotEXT</type>* <name>pPrivateDataSlot</name></param>
+ <param><type>VkPrivateDataSlot</type>* <name>pPrivateDataSlot</name></param>
</command>
+ <command name="vkCreatePrivateDataSlotEXT" alias="vkCreatePrivateDataSlot"/>
<command>
- <proto><type>void</type> <name>vkDestroyPrivateDataSlotEXT</name></proto>
+ <proto><type>void</type> <name>vkDestroyPrivateDataSlot</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param optional="true" externsync="true"><type>VkPrivateDataSlotEXT</type> <name>privateDataSlot</name></param>
+ <param optional="true" externsync="true"><type>VkPrivateDataSlot</type> <name>privateDataSlot</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
+ <command name="vkDestroyPrivateDataSlotEXT" alias="vkDestroyPrivateDataSlot"/>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
- <proto><type>VkResult</type> <name>vkSetPrivateDataEXT</name></proto>
+ <proto><type>VkResult</type> <name>vkSetPrivateData</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkObjectType</type> <name>objectType</name></param>
<param objecttype="objectType"><type>uint64_t</type> <name>objectHandle</name></param>
- <param><type>VkPrivateDataSlotEXT</type> <name>privateDataSlot</name></param>
+ <param><type>VkPrivateDataSlot</type> <name>privateDataSlot</name></param>
<param><type>uint64_t</type> <name>data</name></param>
</command>
+ <command name="vkSetPrivateDataEXT" alias="vkSetPrivateData"/>
<command>
- <proto><type>void</type> <name>vkGetPrivateDataEXT</name></proto>
+ <proto><type>void</type> <name>vkGetPrivateData</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkObjectType</type> <name>objectType</name></param>
<param objecttype="objectType"><type>uint64_t</type> <name>objectHandle</name></param>
- <param><type>VkPrivateDataSlotEXT</type> <name>privateDataSlot</name></param>
+ <param><type>VkPrivateDataSlot</type> <name>privateDataSlot</name></param>
<param><type>uint64_t</type>* <name>pData</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdCopyBuffer2KHR</name></proto>
+ <command name="vkGetPrivateDataEXT" alias="vkGetPrivateData"/>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyBuffer2</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkCopyBufferInfo2</type>* <name>pCopyBufferInfo</name></param>
+ </command>
+ <command name="vkCmdCopyBuffer2KHR" alias="vkCmdCopyBuffer2"/>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyImage2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkCopyBufferInfo2KHR</type>* <name>pCopyBufferInfo</name></param>
+ <param>const <type>VkCopyImageInfo2</type>* <name>pCopyImageInfo</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdCopyImage2KHR</name></proto>
+ <command name="vkCmdCopyImage2KHR" alias="vkCmdCopyImage2"/>
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdBlitImage2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkCopyImageInfo2KHR</type>* <name>pCopyImageInfo</name></param>
+ <param>const <type>VkBlitImageInfo2</type>* <name>pBlitImageInfo</name></param>
</command>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdBlitImage2KHR</name></proto>
+ <command name="vkCmdBlitImage2KHR" alias="vkCmdBlitImage2"/>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyBufferToImage2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkBlitImageInfo2KHR</type>* <name>pBlitImageInfo</name></param>
+ <param>const <type>VkCopyBufferToImageInfo2</type>* <name>pCopyBufferToImageInfo</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdCopyBufferToImage2KHR</name></proto>
+ <command name="vkCmdCopyBufferToImage2KHR" alias="vkCmdCopyBufferToImage2"/>
+ <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyImageToBuffer2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkCopyBufferToImageInfo2KHR</type>* <name>pCopyBufferToImageInfo</name></param>
+ <param>const <type>VkCopyImageToBufferInfo2</type>* <name>pCopyImageToBufferInfo</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdCopyImageToBuffer2KHR</name></proto>
+ <command name="vkCmdCopyImageToBuffer2KHR" alias="vkCmdCopyImageToBuffer2"/>
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdResolveImage2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkCopyImageToBufferInfo2KHR</type>* <name>pCopyImageToBufferInfo</name></param>
+ <param>const <type>VkResolveImageInfo2</type>* <name>pResolveImageInfo</name></param>
</command>
- <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdResolveImage2KHR</name></proto>
+ <command name="vkCmdResolveImage2KHR" alias="vkCmdResolveImage2"/>
+ <command queues="graphics,compute,transfer" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdRefreshObjectsKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkResolveImageInfo2KHR</type>* <name>pResolveImageInfo</name></param>
+ <param>const <type>VkRefreshObjectListKHR</type>* <name>pRefreshObjects</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceRefreshableObjectTypesKHR</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pRefreshableObjectTypeCount</name></param>
+ <param optional="true" len="pRefreshableObjectTypeCount"><type>VkObjectType</type>* <name>pRefreshableObjectTypes</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetFragmentShadingRateKHR</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkExtent2D</type>* <name>pFragmentSize</name></param>
@@ -10525,7 +14561,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="false,true"><type>uint32_t</type>* <name>pFragmentShadingRateCount</name></param>
<param optional="true" len="pFragmentShadingRateCount"><type>VkPhysicalDeviceFragmentShadingRateKHR</type>* <name>pFragmentShadingRates</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetFragmentShadingRateEnumNV</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkFragmentShadingRateNV</type> <name>shadingRate</name></param>
@@ -10539,7 +14575,7 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true" len="pBuildInfo-&gt;geometryCount">const <type>uint32_t</type>* <name>pMaxPrimitiveCounts</name></param>
<param><type>VkAccelerationStructureBuildSizesInfoKHR</type>* <name>pSizeInfo</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetVertexInputEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param optional="true"><type>uint32_t</type> <name>vertexBindingDescriptionCount</name></param>
@@ -10547,54 +14583,60 @@ typedef void <name>CAMetalLayer</name>;
<param optional="true"><type>uint32_t</type> <name>vertexAttributeDescriptionCount</name></param>
<param len="vertexAttributeDescriptionCount">const <type>VkVertexInputAttributeDescription2EXT</type>* <name>pVertexAttributeDescriptions</name></param>
</command>
- <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
<proto><type>void</type> <name>vkCmdSetColorWriteEnableEXT</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>attachmentCount</name></param>
<param len="attachmentCount">const <type>VkBool32</type>* <name>pColorWriteEnables</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdSetEvent2KHR</name></proto>
+ <command queues="graphics,compute,decode,encode" renderpass="outside" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
+ <proto><type>void</type> <name>vkCmdSetEvent2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkEvent</type> <name>event</name></param>
- <param>const <type>VkDependencyInfoKHR</type>* <name>pDependencyInfo</name></param>
+ <param>const <type>VkDependencyInfo</type>* <name>pDependencyInfo</name></param>
</command>
- <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdResetEvent2KHR</name></proto>
+ <command name="vkCmdSetEvent2KHR" alias="vkCmdSetEvent2"/>
+ <command queues="graphics,compute,decode,encode" renderpass="outside" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
+ <proto><type>void</type> <name>vkCmdResetEvent2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>VkEvent</type> <name>event</name></param>
- <param><type>VkPipelineStageFlags2KHR</type> <name>stageMask</name></param>
+ <param optional="true"><type>VkPipelineStageFlags2</type> <name>stageMask</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdWaitEvents2KHR</name></proto>
+ <command name="vkCmdResetEvent2KHR" alias="vkCmdResetEvent2"/>
+ <command queues="graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
+ <proto><type>void</type> <name>vkCmdWaitEvents2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param><type>uint32_t</type> <name>eventCount</name></param>
<param len="eventCount">const <type>VkEvent</type>* <name>pEvents</name></param>
- <param len="eventCount">const <type>VkDependencyInfoKHR</type>* <name>pDependencyInfos</name></param>
+ <param len="eventCount">const <type>VkDependencyInfo</type>* <name>pDependencyInfos</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdPipelineBarrier2KHR</name></proto>
+ <command name="vkCmdWaitEvents2KHR" alias="vkCmdWaitEvents2"/>
+ <command queues="transfer,graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="synchronization">
+ <proto><type>void</type> <name>vkCmdPipelineBarrier2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkDependencyInfoKHR</type>* <name>pDependencyInfo</name></param>
+ <param>const <type>VkDependencyInfo</type>* <name>pDependencyInfo</name></param>
</command>
+ <command name="vkCmdPipelineBarrier2KHR" alias="vkCmdPipelineBarrier2"/>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_DEVICE_LOST">
- <proto><type>VkResult</type> <name>vkQueueSubmit2KHR</name></proto>
+ <proto><type>VkResult</type> <name>vkQueueSubmit2</name></proto>
<param externsync="true"><type>VkQueue</type> <name>queue</name></param>
<param optional="true"><type>uint32_t</type> <name>submitCount</name></param>
- <param len="submitCount">const <type>VkSubmitInfo2KHR</type>* <name>pSubmits</name></param>
+ <param len="submitCount">const <type>VkSubmitInfo2</type>* <name>pSubmits</name></param>
<param optional="true" externsync="true"><type>VkFence</type> <name>fence</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
- <proto><type>void</type> <name>vkCmdWriteTimestamp2KHR</name></proto>
+ <command name="vkQueueSubmit2KHR" alias="vkQueueSubmit2"/>
+ <command queues="transfer,graphics,compute,decode,encode" renderpass="both" videocoding="both" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdWriteTimestamp2</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param><type>VkPipelineStageFlags2KHR</type> <name>stage</name></param>
+ <param optional="true"><type>VkPipelineStageFlags2</type> <name>stage</name></param>
<param><type>VkQueryPool</type> <name>queryPool</name></param>
<param><type>uint32_t</type> <name>query</name></param>
</command>
- <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command name="vkCmdWriteTimestamp2KHR" alias="vkCmdWriteTimestamp2"/>
+ <command queues="transfer,graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdWriteBufferMarker2AMD</name></proto>
<param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param><type>VkPipelineStageFlags2KHR</type> <name>stage</name></param>
+ <param optional="true"><type>VkPipelineStageFlags2</type> <name>stage</name></param>
<param><type>VkBuffer</type> <name>dstBuffer</name></param>
<param><type>VkDeviceSize</type> <name>dstOffset</name></param>
<param><type>uint32_t</type> <name>marker</name></param>
@@ -10605,20 +14647,54 @@ typedef void <name>CAMetalLayer</name>;
<param optional="false,true"><type>uint32_t</type>* <name>pCheckpointDataCount</name></param>
<param optional="true" len="pCheckpointDataCount"><type>VkCheckpointData2NV</type>* <name>pCheckpointData</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_EXTENSION_NOT_PRESENT,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_FEATURE_NOT_PRESENT,VK_ERROR_FORMAT_NOT_SUPPORTED">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_MEMORY_MAP_FAILED">
+ <proto><type>VkResult</type> <name>vkCopyMemoryToImageEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkCopyMemoryToImageInfoEXT</type>* <name>pCopyMemoryToImageInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_MEMORY_MAP_FAILED">
+ <proto><type>VkResult</type> <name>vkCopyImageToMemoryEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkCopyImageToMemoryInfoEXT</type>* <name>pCopyImageToMemoryInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_MEMORY_MAP_FAILED">
+ <proto><type>VkResult</type> <name>vkCopyImageToImageEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkCopyImageToImageInfoEXT</type>* <name>pCopyImageToImageInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_MEMORY_MAP_FAILED">
+ <proto><type>VkResult</type> <name>vkTransitionImageLayoutEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>uint32_t</type> <name>transitionCount</name></param>
+ <param len="transitionCount">const <type>VkHostImageLayoutTransitionInfoEXT</type>* <name>pTransitions</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetCommandPoolMemoryConsumption</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param externsync="true"><type>VkCommandPool</type> <name>commandPool</name></param>
+ <param optional="true" externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkCommandPoolMemoryConsumption</type>* <name>pConsumption</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR">
<proto><type>VkResult</type> <name>vkGetPhysicalDeviceVideoCapabilitiesKHR</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
- <param>const <type>VkVideoProfileKHR</type>* <name>pVideoProfile</name></param>
+ <param>const <type>VkVideoProfileInfoKHR</type>* <name>pVideoProfile</name></param>
<param><type>VkVideoCapabilitiesKHR</type>* <name>pCapabilities</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_EXTENSION_NOT_PRESENT,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_FORMAT_NOT_SUPPORTED">
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR">
<proto><type>VkResult</type> <name>vkGetPhysicalDeviceVideoFormatPropertiesKHR</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
<param>const <type>VkPhysicalDeviceVideoFormatInfoKHR</type>* <name>pVideoFormatInfo</name></param>
<param optional="false,true"><type>uint32_t</type>* <name>pVideoFormatPropertyCount</name></param>
<param optional="true" len="pVideoFormatPropertyCount"><type>VkVideoFormatPropertiesKHR</type>* <name>pVideoFormatProperties</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_INCOMPATIBLE_DRIVER,VK_ERROR_FEATURE_NOT_PRESENT">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR,VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param>const <type>VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR</type>* <name>pQualityLevelInfo</name></param>
+ <param><type>VkVideoEncodeQualityLevelPropertiesKHR</type>* <name>pQualityLevelProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR,VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR">
<proto><type>VkResult</type> <name>vkCreateVideoSessionKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkVideoSessionCreateInfoKHR</type>* <name>pCreateInfo</name></param>
@@ -10628,67 +14704,88 @@ typedef void <name>CAMetalLayer</name>;
<command>
<proto><type>void</type> <name>vkDestroyVideoSessionKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param><type>VkVideoSessionKHR</type> <name>videoSession</name></param>
+ <param optional="true" externsync="true"><type>VkVideoSessionKHR</type> <name>videoSession</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_TOO_MANY_OBJECTS">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR">
<proto><type>VkResult</type> <name>vkCreateVideoSessionParametersKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param>const <type>VkVideoSessionParametersCreateInfoKHR</type>* <name>pCreateInfo</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
<param><type>VkVideoSessionParametersKHR</type>* <name>pVideoSessionParameters</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_TOO_MANY_OBJECTS">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR">
<proto><type>VkResult</type> <name>vkUpdateVideoSessionParametersKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkVideoSessionParametersKHR</type> <name>videoSessionParameters</name></param>
<param>const <type>VkVideoSessionParametersUpdateInfoKHR</type>* <name>pUpdateInfo</name></param>
</command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetEncodedVideoSessionParametersKHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkVideoEncodeSessionParametersGetInfoKHR</type>* <name>pVideoSessionParametersInfo</name></param>
+ <param optional="true"><type>VkVideoEncodeSessionParametersFeedbackInfoKHR</type>* <name>pFeedbackInfo</name></param>
+ <param optional="false,true"><type>size_t</type>* <name>pDataSize</name></param>
+ <param optional="true" len="pDataSize"><type>void</type>* <name>pData</name></param>
+ </command>
<command>
<proto><type>void</type> <name>vkDestroyVideoSessionParametersKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param><type>VkVideoSessionParametersKHR</type> <name>videoSessionParameters</name></param>
+ <param optional="true" externsync="true"><type>VkVideoSessionParametersKHR</type> <name>videoSessionParameters</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_INITIALIZATION_FAILED">
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE">
<proto><type>VkResult</type> <name>vkGetVideoSessionMemoryRequirementsKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param><type>VkVideoSessionKHR</type> <name>videoSession</name></param>
- <param optional="false,true"><type>uint32_t</type>* <name>pVideoSessionMemoryRequirementsCount</name></param>
- <param optional="true" len="pVideoSessionMemoryRequirementsCount"><type>VkVideoGetMemoryPropertiesKHR</type>* <name>pVideoSessionMemoryRequirements</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pMemoryRequirementsCount</name></param>
+ <param optional="true" len="pMemoryRequirementsCount"><type>VkVideoSessionMemoryRequirementsKHR</type>* <name>pMemoryRequirements</name></param>
</command>
- <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
<proto><type>VkResult</type> <name>vkBindVideoSessionMemoryKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
- <param><type>VkVideoSessionKHR</type> <name>videoSession</name></param>
- <param><type>uint32_t</type> <name>videoSessionBindMemoryCount</name></param>
- <param len="videoSessionBindMemoryCount">const <type>VkVideoBindMemoryKHR</type>* <name>pVideoSessionBindMemories</name></param>
+ <param externsync="true"><type>VkVideoSessionKHR</type> <name>videoSession</name></param>
+ <param><type>uint32_t</type> <name>bindSessionMemoryInfoCount</name></param>
+ <param len="bindSessionMemoryInfoCount">const <type>VkBindVideoSessionMemoryInfoKHR</type>* <name>pBindSessionMemoryInfos</name></param>
</command>
- <command queues="decode" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="decode" renderpass="outside" videocoding="inside" cmdbufferlevel="primary" tasks="action">
<proto><type>void</type> <name>vkCmdDecodeVideoKHR</name></proto>
- <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
- <param>const <type>VkVideoDecodeInfoKHR</type>* <name>pFrameInfo</name></param>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkVideoDecodeInfoKHR</type>* <name>pDecodeInfo</name></param>
</command>
- <command queues="decode,encode" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="decode,encode" renderpass="outside" videocoding="outside" cmdbufferlevel="primary" tasks="action,state">
<proto><type>void</type> <name>vkCmdBeginVideoCodingKHR</name></proto>
- <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkVideoBeginCodingInfoKHR</type>* <name>pBeginInfo</name></param>
</command>
- <command queues="decode,encode" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="decode,encode" renderpass="outside" videocoding="inside" cmdbufferlevel="primary" tasks="action">
<proto><type>void</type> <name>vkCmdControlVideoCodingKHR</name></proto>
- <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkVideoCodingControlInfoKHR</type>* <name>pCodingControlInfo</name></param>
</command>
- <command queues="decode,encode" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="decode,encode" renderpass="outside" videocoding="inside" cmdbufferlevel="primary" tasks="action,state">
<proto><type>void</type> <name>vkCmdEndVideoCodingKHR</name></proto>
- <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkVideoEndCodingInfoKHR</type>* <name>pEndCodingInfo</name></param>
</command>
- <command queues="encode" renderpass="outside" cmdbufferlevel="primary">
+ <command queues="encode" renderpass="outside" videocoding="inside" cmdbufferlevel="primary" tasks="action">
<proto><type>void</type> <name>vkCmdEncodeVideoKHR</name></proto>
- <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkVideoEncodeInfoKHR</type>* <name>pEncodeInfo</name></param>
</command>
+ <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDecompressMemoryNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>decompressRegionCount</name></param>
+ <param len="decompressRegionCount">const <type>VkDecompressMemoryRegionNV</type>* <name>pDecompressMemoryRegions</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdDecompressMemoryIndirectCountNV</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>indirectCommandsAddress</name></param>
+ <param><type>VkDeviceAddress</type> <name>indirectCommandsCountAddress</name></param>
+ <param><type>uint32_t</type> <name>stride</name></param>
+ </command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
<proto><type>VkResult</type> <name>vkCreateCuModuleNVX</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
@@ -10715,11 +14812,90 @@ typedef void <name>CAMetalLayer</name>;
<param><type>VkCuFunctionNVX</type> <name>function</name></param>
<param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
</command>
- <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary">
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
<proto><type>void</type> <name>vkCmdCuLaunchKernelNVX</name></proto>
<param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
<param>const <type>VkCuLaunchInfoNVX</type>* <name>pLaunchInfo</name></param>
</command>
+ <command>
+ <proto><type>void</type> <name>vkGetDescriptorSetLayoutSizeEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkDescriptorSetLayout</type> <name>layout</name></param>
+ <param><type>VkDeviceSize</type>* <name>pLayoutSizeInBytes</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetDescriptorSetLayoutBindingOffsetEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkDescriptorSetLayout</type> <name>layout</name></param>
+ <param><type>uint32_t</type> <name>binding</name></param>
+ <param><type>VkDeviceSize</type>* <name>pOffset</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetDescriptorEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkDescriptorGetInfoEXT</type>* <name>pDescriptorInfo</name></param>
+ <param><type>size_t</type> <name>dataSize</name></param>
+ <param len="dataSize"><type>void</type>* <name>pDescriptor</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindDescriptorBuffersEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>bufferCount</name></param>
+ <param len="bufferCount">const <type>VkDescriptorBufferBindingInfoEXT</type>* <name>pBindingInfos</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDescriptorBufferOffsetsEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
+ <param><type>VkPipelineLayout</type> <name>layout</name></param>
+ <param><type>uint32_t</type> <name>firstSet</name></param>
+ <param><type>uint32_t</type> <name>setCount</name></param>
+ <param len="setCount">const <type>uint32_t</type>* <name>pBufferIndices</name></param>
+ <param len="setCount">const <type>VkDeviceSize</type>* <name>pOffsets</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindDescriptorBufferEmbeddedSamplersEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkPipelineBindPoint</type> <name>pipelineBindPoint</name></param>
+ <param><type>VkPipelineLayout</type> <name>layout</name></param>
+ <param><type>uint32_t</type> <name>set</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetBufferOpaqueCaptureDescriptorDataEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkBufferCaptureDescriptorDataInfoEXT</type>* <name>pInfo</name></param>
+ <param><type>void</type>* <name>pData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetImageOpaqueCaptureDescriptorDataEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkImageCaptureDescriptorDataInfoEXT</type>* <name>pInfo</name></param>
+ <param><type>void</type>* <name>pData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetImageViewOpaqueCaptureDescriptorDataEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkImageViewCaptureDescriptorDataInfoEXT</type>* <name>pInfo</name></param>
+ <param><type>void</type>* <name>pData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetSamplerOpaqueCaptureDescriptorDataEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkSamplerCaptureDescriptorDataInfoEXT</type>* <name>pInfo</name></param>
+ <param><type>void</type>* <name>pData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkAccelerationStructureCaptureDescriptorDataInfoEXT</type>* <name>pInfo</name></param>
+ <param><type>void</type>* <name>pData</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkSetDeviceMemoryPriorityEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkDeviceMemory</type> <name>memory</name></param>
+ <param><type>float</type> <name>priority</name></param>
+ </command>
<command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED">
<proto><type>VkResult</type> <name>vkAcquireDrmDisplayEXT</name></proto>
<param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
@@ -10733,16 +14909,465 @@ typedef void <name>CAMetalLayer</name>;
<param><type>uint32_t</type> <name>connectorId</name></param>
<param><type>VkDisplayKHR</type>* <name>display</name></param>
</command>
- <command successcodes="VK_SUCCESS,VK_TIMEOUT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_DEVICE_LOST">
+ <command successcodes="VK_SUCCESS,VK_TIMEOUT,VK_SUBOPTIMAL_KHR" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_DEVICE_LOST,VK_ERROR_OUT_OF_DATE_KHR,VK_ERROR_SURFACE_LOST_KHR,VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT">
<proto><type>VkResult</type> <name>vkWaitForPresentKHR</name></proto>
<param><type>VkDevice</type> <name>device</name></param>
<param externsync="true"><type>VkSwapchainKHR</type> <name>swapchain</name></param>
<param><type>uint64_t</type> <name>presentId</name></param>
<param><type>uint64_t</type> <name>timeout</name></param>
</command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INVALID_EXTERNAL_HANDLE,VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkCreateBufferCollectionFUCHSIA</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkBufferCollectionCreateInfoFUCHSIA</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkBufferCollectionFUCHSIA</type>* <name>pCollection</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_FORMAT_NOT_SUPPORTED">
+ <proto><type>VkResult</type> <name>vkSetBufferCollectionBufferConstraintsFUCHSIA</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></param>
+ <param>const <type>VkBufferConstraintsInfoFUCHSIA</type>* <name>pBufferConstraintsInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_FORMAT_NOT_SUPPORTED">
+ <proto><type>VkResult</type> <name>vkSetBufferCollectionImageConstraintsFUCHSIA</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></param>
+ <param>const <type>VkImageConstraintsInfoFUCHSIA</type>* <name>pImageConstraintsInfo</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroyBufferCollectionFUCHSIA</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkGetBufferCollectionPropertiesFUCHSIA</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkBufferCollectionFUCHSIA</type> <name>collection</name></param>
+ <param><type>VkBufferCollectionPropertiesFUCHSIA</type>* <name>pProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateCudaModuleNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkCudaModuleCreateInfoNV</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkCudaModuleNV</type>* <name>pModule</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkGetCudaModuleCacheNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkCudaModuleNV</type> <name>module</name></param>
+ <param optional="false,true"><type>size_t</type>* <name>pCacheSize</name></param>
+ <param optional="true" len="pCacheSize"><type>void</type>* <name>pCacheData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateCudaFunctionNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkCudaFunctionCreateInfoNV</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkCudaFunctionNV</type>* <name>pFunction</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroyCudaModuleNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkCudaModuleNV</type> <name>module</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroyCudaFunctionNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkCudaFunctionNV</type> <name>function</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCudaLaunchKernelNV</name></proto>
+ <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkCudaLaunchInfoNV</type>* <name>pLaunchInfo</name></param>
+ </command>
+ <command queues="graphics" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action,state">
+ <proto><type>void</type> <name>vkCmdBeginRendering</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkRenderingInfo</type>* <name>pRenderingInfo</name></param>
+ </command>
+ <command name="vkCmdBeginRenderingKHR" alias="vkCmdBeginRendering"/>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="action,state">
+ <proto><type>void</type> <name>vkCmdEndRendering</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ </command>
+
+ <command name="vkCmdEndRenderingKHR" alias="vkCmdEndRendering"/>
+ <command>
+ <proto><type>void</type> <name>vkGetDescriptorSetLayoutHostMappingInfoVALVE</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkDescriptorSetBindingReferenceVALVE</type>* <name>pBindingReference</name></param>
+ <param><type>VkDescriptorSetLayoutHostMappingInfoVALVE</type>* <name>pHostMapping</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetDescriptorSetHostMappingVALVE</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkDescriptorSet</type> <name>descriptorSet</name></param>
+ <param><type>void</type>** <name>ppData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR">
+ <proto><type>VkResult</type> <name>vkCreateMicromapEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkMicromapCreateInfoEXT</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkMicromapEXT</type>* <name>pMicromap</name></param>
+ </command>
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdBuildMicromapsEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>infoCount</name></param>
+ <param len="infoCount">const <type>VkMicromapBuildInfoEXT</type>* <name>pInfos</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkBuildMicromapsEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
+ <param><type>uint32_t</type> <name>infoCount</name></param>
+ <param len="infoCount">const <type>VkMicromapBuildInfoEXT</type>* <name>pInfos</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroyMicromapEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true" externsync="true"><type>VkMicromapEXT</type> <name>micromap</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyMicromapEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkCopyMicromapInfoEXT</type>* <name>pInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkCopyMicromapEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
+ <param>const <type>VkCopyMicromapInfoEXT</type>* <name>pInfo</name></param>
+ </command>
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyMicromapToMemoryEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkCopyMicromapToMemoryInfoEXT</type>* <name>pInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkCopyMicromapToMemoryEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
+ <param>const <type>VkCopyMicromapToMemoryInfoEXT</type>* <name>pInfo</name></param>
+ </command>
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdCopyMemoryToMicromapEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkCopyMemoryToMicromapInfoEXT</type>* <name>pInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_OPERATION_DEFERRED_KHR,VK_OPERATION_NOT_DEFERRED_KHR" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkCopyMemoryToMicromapEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true"><type>VkDeferredOperationKHR</type> <name>deferredOperation</name></param>
+ <param>const <type>VkCopyMemoryToMicromapInfoEXT</type>* <name>pInfo</name></param>
+ </command>
+ <command queues="compute" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdWriteMicromapsPropertiesEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>micromapCount</name></param>
+ <param len="micromapCount">const <type>VkMicromapEXT</type>* <name>pMicromaps</name></param>
+ <param><type>VkQueryType</type> <name>queryType</name></param>
+ <param><type>VkQueryPool</type> <name>queryPool</name></param>
+ <param><type>uint32_t</type> <name>firstQuery</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkWriteMicromapsPropertiesEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>uint32_t</type> <name>micromapCount</name></param>
+ <param len="micromapCount">const <type>VkMicromapEXT</type>* <name>pMicromaps</name></param>
+ <param><type>VkQueryType</type> <name>queryType</name></param>
+ <param><type>size_t</type> <name>dataSize</name></param>
+ <param len="dataSize"><type>void</type>* <name>pData</name></param>
+ <param><type>size_t</type> <name>stride</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetDeviceMicromapCompatibilityEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkMicromapVersionInfoEXT</type>* <name>pVersionInfo</name></param>
+ <param><type>VkAccelerationStructureCompatibilityKHR</type>* <name>pCompatibility</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetMicromapBuildSizesEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkAccelerationStructureBuildTypeKHR</type> <name>buildType</name></param>
+ <param>const <type>VkMicromapBuildInfoEXT</type>* <name>pBuildInfo</name></param>
+ <param><type>VkMicromapBuildSizesInfoEXT</type>* <name>pSizeInfo</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetShaderModuleIdentifierEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkShaderModule</type> <name>shaderModule</name></param>
+ <param><type>VkShaderModuleIdentifierEXT</type>* <name>pIdentifier</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetShaderModuleCreateInfoIdentifierEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkShaderModuleCreateInfo</type>* <name>pCreateInfo</name></param>
+ <param><type>VkShaderModuleIdentifierEXT</type>* <name>pIdentifier</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetImageSubresourceLayout2KHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkImage</type> <name>image</name></param>
+ <param>const <type>VkImageSubresource2KHR</type>* <name>pSubresource</name></param>
+ <param><type>VkSubresourceLayout2KHR</type>* <name>pLayout</name></param>
+ </command>
+ <command name="vkGetImageSubresourceLayout2EXT" alias="vkGetImageSubresourceLayout2KHR"/>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetPipelinePropertiesEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkPipelineInfoEXT</type>* <name>pPipelineInfo</name></param>
+ <param noautovalidity="true" validstructs="VkPipelinePropertiesIdentifierEXT"><type>VkBaseOutStructure</type>* <name>pPipelineProperties</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkExportMetalObjectsEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkExportMetalObjectsInfoEXT</type>* <name>pMetalObjectsInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE">
+ <proto><type>VkResult</type> <name>vkGetFramebufferTilePropertiesQCOM</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkFramebuffer</type> <name>framebuffer</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pPropertiesCount</name></param>
+ <param optional="true" len="pPropertiesCount"><type>VkTilePropertiesQCOM</type>* <name>pProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS">
+ <proto><type>VkResult</type> <name>vkGetDynamicRenderingTilePropertiesQCOM</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkRenderingInfo</type>* <name>pRenderingInfo</name></param>
+ <param><type>VkTilePropertiesQCOM</type>* <name>pProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_EXTENSION_NOT_PRESENT,VK_ERROR_INITIALIZATION_FAILED,VK_ERROR_FORMAT_NOT_SUPPORTED">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceOpticalFlowImageFormatsNV</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param>const <type>VkOpticalFlowImageFormatInfoNV</type>* <name>pOpticalFlowImageFormatInfo</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pFormatCount</name></param>
+ <param optional="true" len="pFormatCount"><type>VkOpticalFlowImageFormatPropertiesNV</type>* <name>pImageFormatProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkCreateOpticalFlowSessionNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkOpticalFlowSessionCreateInfoNV</type>* <name>pCreateInfo</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param><type>VkOpticalFlowSessionNV</type>* <name>pSession</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroyOpticalFlowSessionNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkOpticalFlowSessionNV</type> <name>session</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkBindOpticalFlowSessionImageNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkOpticalFlowSessionNV</type> <name>session</name></param>
+ <param><type>VkOpticalFlowSessionBindingPointNV</type> <name>bindingPoint</name></param>
+ <param optional="true"><type>VkImageView</type> <name>view</name></param>
+ <param><type>VkImageLayout</type> <name>layout</name></param>
+ </command>
+ <command queues="opticalflow" renderpass="outside" cmdbufferlevel="primary,secondary" tasks="action">
+ <proto><type>void</type> <name>vkCmdOpticalFlowExecuteNV</name></proto>
+ <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkOpticalFlowSessionNV</type> <name>session</name></param>
+ <param>const <type>VkOpticalFlowExecuteInfoNV</type>* <name>pExecuteInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetDeviceFaultInfoEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkDeviceFaultCountsEXT</type>* <name>pFaultCounts</name></param>
+ <param optional="true"><type>VkDeviceFaultInfoEXT</type>* <name>pFaultInfo</name></param>
+ </command>
+ <command queues="graphics" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDepthBias2EXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkDepthBiasInfoEXT</type>* <name>pDepthBiasInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_SURFACE_LOST_KHR">
+ <proto><type>VkResult</type> <name>vkReleaseSwapchainImagesEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkReleaseSwapchainImagesInfoEXT</type>* <name>pReleaseInfo</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetDeviceImageSubresourceLayoutKHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkDeviceImageSubresourceInfoKHR</type>* <name>pInfo</name></param>
+ <param><type>VkSubresourceLayout2KHR</type>* <name>pLayout</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_MEMORY_MAP_FAILED">
+ <proto><type>VkResult</type> <name>vkMapMemory2KHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkMemoryMapInfoKHR</type>* <name>pMemoryMapInfo</name></param>
+ <param optional="false,true"><type>void</type>** <name>ppData</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_MEMORY_MAP_FAILED">
+ <proto><type>VkResult</type> <name>vkUnmapMemory2KHR</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const <type>VkMemoryUnmapInfoKHR</type>* <name>pMemoryUnmapInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPATIBLE_SHADER_BINARY_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY,VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkCreateShadersEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>uint32_t</type> <name>createInfoCount</name></param>
+ <param len="createInfoCount">const <type>VkShaderCreateInfoEXT</type>* <name>pCreateInfos</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param len="createInfoCount"><type>VkShaderEXT</type>* <name>pShaders</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkDestroyShaderEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true" externsync="true"><type>VkShaderEXT</type> <name>shader</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetShaderBinaryDataEXT</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkShaderEXT</type> <name>shader</name></param>
+ <param optional="false,true"><type>size_t</type>* <name>pDataSize</name></param>
+ <param optional="true" len="pDataSize"><type>void</type>* <name>pData</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindShadersEXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>uint32_t</type> <name>stageCount</name></param>
+ <param len="stageCount">const <type>VkShaderStageFlagBits</type>* <name>pStages</name></param>
+ <param optional="true,true" len="stageCount">const <type>VkShaderEXT</type>* <name>pShaders</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR">
+ <proto><type>VkResult</type> <name>vkGetScreenBufferPropertiesQNX</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param>const struct <type>_screen_buffer</type>* <name>buffer</name></param>
+ <param><type>VkScreenBufferPropertiesQNX</type>* <name>pProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_INCOMPLETE" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR</name></proto>
+ <param><type>VkPhysicalDevice</type> <name>physicalDevice</name></param>
+ <param optional="false,true"><type>uint32_t</type>* <name>pPropertyCount</name></param>
+ <param optional="true" len="pPropertyCount"><type>VkCooperativeMatrixPropertiesKHR</type>* <name>pProperties</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetExecutionGraphPipelineScratchSizeAMDX</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkPipeline</type> <name>executionGraph</name></param>
+ <param><type>VkExecutionGraphPipelineScratchSizeAMDX</type>* <name>pSizeInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY">
+ <proto><type>VkResult</type> <name>vkGetExecutionGraphPipelineNodeIndexAMDX</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkPipeline</type> <name>executionGraph</name></param>
+ <param>const <type>VkPipelineShaderStageNodeCreateInfoAMDX</type>* <name>pNodeInfo</name></param>
+ <param><type>uint32_t</type>* <name>pNodeIndex</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS,VK_PIPELINE_COMPILE_REQUIRED_EXT" errorcodes="VK_ERROR_OUT_OF_HOST_MEMORY,VK_ERROR_OUT_OF_DEVICE_MEMORY">
+ <proto><type>VkResult</type> <name>vkCreateExecutionGraphPipelinesAMDX</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param optional="true"><type>VkPipelineCache</type> <name>pipelineCache</name></param>
+ <param><type>uint32_t</type> <name>createInfoCount</name></param>
+ <param len="createInfoCount">const <type>VkExecutionGraphPipelineCreateInfoAMDX</type>* <name>pCreateInfos</name></param>
+ <param optional="true">const <type>VkAllocationCallbacks</type>* <name>pAllocator</name></param>
+ <param len="createInfoCount"><type>VkPipeline</type>* <name>pPipelines</name></param>
+ </command>
+ <command queues="graphics,compute" tasks="action" renderpass="outside" cmdbufferlevel="primary">
+ <proto><type>void</type> <name>vkCmdInitializeGraphScratchMemoryAMDX</name></proto>
+ <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>scratch</name></param>
+ </command>
+ <command queues="graphics,compute" tasks="action" renderpass="outside" cmdbufferlevel="primary">
+ <proto><type>void</type> <name>vkCmdDispatchGraphAMDX</name></proto>
+ <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>scratch</name></param>
+ <param>const <type>VkDispatchGraphCountInfoAMDX</type>* <name>pCountInfo</name></param>
+ </command>
+ <command queues="graphics,compute" tasks="action" renderpass="outside" cmdbufferlevel="primary">
+ <proto><type>void</type> <name>vkCmdDispatchGraphIndirectAMDX</name></proto>
+ <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>scratch</name></param>
+ <param>const <type>VkDispatchGraphCountInfoAMDX</type>* <name>pCountInfo</name></param>
+ </command>
+ <command queues="graphics,compute" tasks="action" renderpass="outside" cmdbufferlevel="primary">
+ <proto><type>void</type> <name>vkCmdDispatchGraphIndirectCountAMDX</name></proto>
+ <param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param><type>VkDeviceAddress</type> <name>scratch</name></param>
+ <param><type>VkDeviceAddress</type> <name>countInfo</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindDescriptorSets2KHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkBindDescriptorSetsInfoKHR</type>* <name>pBindDescriptorSetsInfo</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdPushConstants2KHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkPushConstantsInfoKHR</type>* <name>pPushConstantsInfo</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdPushDescriptorSet2KHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkPushDescriptorSetInfoKHR</type>* <name>pPushDescriptorSetInfo</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdPushDescriptorSetWithTemplate2KHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkPushDescriptorSetWithTemplateInfoKHR</type>* <name>pPushDescriptorSetWithTemplateInfo</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetDescriptorBufferOffsets2EXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkSetDescriptorBufferOffsetsInfoEXT</type>* <name>pSetDescriptorBufferOffsetsInfo</name></param>
+ </command>
+ <command queues="graphics,compute" renderpass="both" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdBindDescriptorBufferEmbeddedSamplers2EXT</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkBindDescriptorBufferEmbeddedSamplersInfoEXT</type>* <name>pBindDescriptorBufferEmbeddedSamplersInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_INITIALIZATION_FAILED">
+ <proto><type>VkResult</type> <name>vkSetLatencySleepModeNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkSwapchainKHR</type> <name>swapchain</name></param>
+ <param>const <type>VkLatencySleepModeInfoNV</type>* <name>pSleepModeInfo</name></param>
+ </command>
+ <command successcodes="VK_SUCCESS" errorcodes="VK_ERROR_UNKNOWN">
+ <proto><type>VkResult</type> <name>vkLatencySleepNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkSwapchainKHR</type> <name>swapchain</name></param>
+ <param>const <type>VkLatencySleepInfoNV</type>* <name>pSleepInfo</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkSetLatencyMarkerNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkSwapchainKHR</type> <name>swapchain</name></param>
+ <param>const <type>VkSetLatencyMarkerInfoNV</type>* <name>pLatencyMarkerInfo</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkGetLatencyTimingsNV</name></proto>
+ <param><type>VkDevice</type> <name>device</name></param>
+ <param><type>VkSwapchainKHR</type> <name>swapchain</name></param>
+ <param><type>VkGetLatencyMarkerInfoNV</type>* <name>pLatencyMarkerInfo</name></param>
+ </command>
+ <command>
+ <proto><type>void</type> <name>vkQueueNotifyOutOfBandNV</name></proto>
+ <param><type>VkQueue</type> <name>queue</name></param>
+ <param>const <type>VkOutOfBandQueueTypeInfoNV</type>* <name>pQueueTypeInfo</name></param>
+ </command>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetRenderingAttachmentLocationsKHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkRenderingAttachmentLocationInfoKHR</type>* <name>pLocationInfo</name></param>
+ </command>
+ <command queues="graphics" renderpass="inside" cmdbufferlevel="primary,secondary" tasks="state">
+ <proto><type>void</type> <name>vkCmdSetRenderingInputAttachmentIndicesKHR</name></proto>
+ <param externsync="true"><type>VkCommandBuffer</type> <name>commandBuffer</name></param>
+ <param>const <type>VkRenderingInputAttachmentIndexInfoKHR</type>* <name>pLocationInfo</name></param>
+ </command>
</commands>
- <feature api="vulkan" name="VK_VERSION_1_0" number="1.0" comment="Vulkan core API interface definitions">
+ <feature api="vulkan,vulkansc" name="VK_VERSION_1_0" number="1.0" comment="Vulkan core API interface definitions">
<require comment="Header boilerplate">
<type name="vk_platform"/>
<type name="VK_DEFINE_HANDLE"/>
@@ -10763,6 +15388,24 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkResult"/>
<type name="VkStructureType"/>
</require>
+ <require comment="API constants">
+ <enum name="VK_ATTACHMENT_UNUSED"/>
+ <enum name="VK_FALSE"/>
+ <enum name="VK_LOD_CLAMP_NONE"/>
+ <enum name="VK_QUEUE_FAMILY_IGNORED"/>
+ <enum name="VK_REMAINING_ARRAY_LAYERS"/>
+ <enum name="VK_REMAINING_MIP_LEVELS"/>
+ <enum name="VK_SUBPASS_EXTERNAL"/>
+ <enum name="VK_TRUE"/>
+ <enum name="VK_WHOLE_SIZE"/>
+ <enum name="VK_MAX_MEMORY_TYPES"/>
+ <enum name="VK_MAX_PHYSICAL_DEVICE_NAME_SIZE"/>
+ <enum name="VK_UUID_SIZE"/>
+ <enum name="VK_MAX_EXTENSION_NAME_SIZE"/>
+ <enum name="VK_MAX_DESCRIPTION_SIZE"/>
+ <enum name="VK_MAX_MEMORY_HEAPS"/>
+ <type name="VkPipelineCacheHeaderVersion"/>
+ </require>
<require comment="These types are part of the API, though not directly used in API commands or data structures">
<type name="VkBaseInStructure"/>
<type name="VkBaseOutStructure"/>
@@ -10791,18 +15434,6 @@ typedef void <name>CAMetalLayer</name>;
<type name="VK_API_VERSION_MINOR"/>
<type name="VK_API_VERSION_PATCH"/>
</require>
- <require comment="API constants">
- <enum name="VK_ATTACHMENT_UNUSED"/>
- <enum name="VK_FALSE"/>
- <enum name="VK_LOD_CLAMP_NONE"/>
- <enum name="VK_QUEUE_FAMILY_IGNORED"/>
- <enum name="VK_REMAINING_ARRAY_LAYERS"/>
- <enum name="VK_REMAINING_MIP_LEVELS"/>
- <enum name="VK_SUBPASS_EXTERNAL"/>
- <enum name="VK_TRUE"/>
- <enum name="VK_WHOLE_SIZE"/>
- <type name="VkPipelineCacheHeaderVersion"/>
- </require>
<require comment="Device initialization">
<type name="PFN_vkAllocationFunction"/>
<type name="PFN_vkFreeFunction"/>
@@ -10824,7 +15455,8 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkImageUsageFlagBits"/>
<type name="VkImageUsageFlags"/>
<type name="VkInstance"/>
- <type name="VkInstanceCreateFlags" comment="Will add VkInstanceCreateFlagBits when bits are defined in the future"/>
+ <type name="VkInstanceCreateFlags"/>
+ <type name="VkInstanceCreateFlagBits"/>
<type name="VkInstanceCreateInfo"/>
<type name="VkInternalAllocationType"/>
<type name="VkMemoryHeap"/>
@@ -10890,6 +15522,7 @@ typedef void <name>CAMetalLayer</name>;
<require comment="Memory commands">
<type name="VkMappedMemoryRange"/>
<type name="VkMemoryAllocateInfo"/>
+ <type name="VkMemoryMapFlagBits"/>
<type name="VkMemoryMapFlags"/>
<command name="vkAllocateMemory"/>
<command name="vkFreeMemory"/>
@@ -11249,7 +15882,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdExecuteCommands"/>
</require>
</feature>
- <feature api="vulkan" name="VK_VERSION_1_1" number="1.1" comment="Vulkan 1.1 core API interface definitions.">
+ <feature api="vulkan,vulkansc" name="VK_VERSION_1_1" number="1.1" comment="Vulkan 1.1 core API interface definitions.">
<require>
<type name="VK_API_VERSION_1_1"/>
</require>
@@ -11403,11 +16036,11 @@ typedef void <name>CAMetalLayer</name>;
</require>
<require comment="Promoted from VK_KHR_variable_pointers">
<enum extends="VkStructureType" extnumber="121" offset="0" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES"/>
- <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES"/>
+ <enum api="vulkan" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES"/>
<type name="VkPhysicalDeviceVariablePointerFeatures"/>
<type name="VkPhysicalDeviceVariablePointersFeatures"/>
</require>
- <require comment="Originally based on VK_KHR_protected_memory (extension 146), which was never published; thus the mystifying large value= numbers below. These are not aliased since they weren't actually promoted from an extension.">
+ <require comment="Originally based on VK_KHR_protected_memory (extension 146), which was never published; thus the mystifying large value= numbers below. These are not aliased since they were not actually promoted from an extension.">
<enum extends="VkStructureType" extnumber="146" offset="0" name="VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO"/>
<enum extends="VkStructureType" extnumber="146" offset="1" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES"/>
<enum extends="VkStructureType" extnumber="146" offset="2" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES"/>
@@ -11576,12 +16209,12 @@ typedef void <name>CAMetalLayer</name>;
</require>
<require comment="Promoted from VK_KHR_shader_draw_parameters, with a feature support query added">
<enum extends="VkStructureType" extnumber="64" offset="0" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES"/>
- <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES"/>
+ <enum api="vulkan" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES"/>
<type name="VkPhysicalDeviceShaderDrawParameterFeatures"/>
<type name="VkPhysicalDeviceShaderDrawParametersFeatures"/>
</require>
</feature>
- <feature api="vulkan" name="VK_VERSION_1_2" number="1.2" comment="Vulkan 1.2 core API interface definitions.">
+ <feature api="vulkan,vulkansc" name="VK_VERSION_1_2" number="1.2" comment="Vulkan 1.2 core API interface definitions.">
<require>
<type name="VK_API_VERSION_1_2"/>
</require>
@@ -11776,9 +16409,407 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetDeviceMemoryOpaqueCaptureAddress"/>
</require>
</feature>
+ <feature api="vulkan,vulkansc" name="VK_VERSION_1_3" number="1.3" comment="Vulkan 1.3 core API interface definitions.">
+ <require>
+ <type name="VK_API_VERSION_1_3"/>
+ </require>
+ <require>
+ <type name="VkFlags64"/>
+ </require>
+ <require>
+ <enum extends="VkStructureType" value="53" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES"/>
+ <enum extends="VkStructureType" value="54" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES"/>
+ <type name="VkPhysicalDeviceVulkan13Features"/>
+ <type name="VkPhysicalDeviceVulkan13Properties"/>
+ </require>
+ <require comment="Promoted from VK_EXT_pipeline_creation_feedback (extension 193)">
+ <enum offset="0" extends="VkStructureType" extnumber="193" name="VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO"/>
+ <type name="VkPipelineCreationFeedbackFlagBits"/>
+ <type name="VkPipelineCreationFeedbackFlags"/>
+ <type name="VkPipelineCreationFeedbackCreateInfo"/>
+ <type name="VkPipelineCreationFeedback"/>
+ </require>
+ <require comment="Promoted from VK_KHR_shader_terminate_invocation (extension 216)">
+ <enum offset="0" extends="VkStructureType" extnumber="216" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES"/>
+ <type name="VkPhysicalDeviceShaderTerminateInvocationFeatures"/>
+ </require>
+ <require comment="Promoted from VK_EXT_tooling_info (extension 246)">
+ <enum offset="0" extends="VkStructureType" extnumber="246" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES"/>
+ <type name="VkToolPurposeFlagBits"/>
+ <type name="VkToolPurposeFlags"/>
+ <type name="VkPhysicalDeviceToolProperties"/>
+ <command name="vkGetPhysicalDeviceToolProperties"/>
+ </require>
+ <require comment="Promoted from VK_EXT_shader_demote_to_helper_invocation (extension 277)">
+ <enum offset="0" extends="VkStructureType" extnumber="277" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES"/>
+ <type name="VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures"/>
+ </require>
+ <require comment="Promoted from VK_KHR_shader_non_semantic_info (extension 294)">
+ </require>
+ <require comment="Promoted from VK_EXT_private_data (extension 296)">
+ <enum offset="0" extends="VkStructureType" extnumber="296" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES"/>
+ <enum offset="1" extends="VkStructureType" extnumber="296" name="VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO"/>
+ <enum offset="2" extends="VkStructureType" extnumber="296" name="VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO"/>
+ <enum offset="0" extends="VkObjectType" extnumber="296" name="VK_OBJECT_TYPE_PRIVATE_DATA_SLOT"/>
+ <type name="VkPhysicalDevicePrivateDataFeatures"/>
+ <type name="VkDevicePrivateDataCreateInfo"/>
+ <type name="VkPrivateDataSlotCreateInfo"/>
+ <type name="VkPrivateDataSlot"/>
+ <type name="VkPrivateDataSlotCreateFlags" comment="Will add VkPrivateDataSlotCreateFlagBits when bits are defined in the future"/>
+ <command name="vkCreatePrivateDataSlot"/>
+ <command name="vkDestroyPrivateDataSlot"/>
+ <command name="vkSetPrivateData"/>
+ <command name="vkGetPrivateData"/>
+ </require>
+ <require comment="Promoted from VK_EXT_pipeline_creation_cache_control (extension 298)">
+ <enum offset="0" extends="VkStructureType" extnumber="298" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES"/>
+ <type name="VkPhysicalDevicePipelineCreationCacheControlFeatures"/>
+ <enum bitpos="8" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT"/>
+ <enum bitpos="9" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT"/>
+ <enum offset="0" extends="VkResult" extnumber="298" name="VK_PIPELINE_COMPILE_REQUIRED"/>
+ <enum bitpos="0" extends="VkPipelineCacheCreateFlagBits" name="VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT"/>
+ </require>
+ <require comment="Promoted from VK_KHR_synchronization2 (extension 315)">
+ <enum offset="0" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_MEMORY_BARRIER_2"/>
+ <enum offset="1" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2"/>
+ <enum offset="2" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2"/>
+ <enum offset="3" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_DEPENDENCY_INFO"/>
+ <enum offset="4" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_SUBMIT_INFO_2"/>
+ <enum offset="5" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO"/>
+ <enum offset="6" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO"/>
+ <enum offset="7" extends="VkStructureType" extnumber="315" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES"/>
+ <enum bitpos="0" extends="VkEventCreateFlagBits" name="VK_EVENT_CREATE_DEVICE_ONLY_BIT"/>
+ <enum offset="0" extends="VkImageLayout" extnumber="315" name="VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL"/>
+ <enum offset="1" extends="VkImageLayout" extnumber="315" name="VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL"/>
+ <enum value="0" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_NONE"/>
+ <enum value="0" extends="VkAccessFlagBits" name="VK_ACCESS_NONE"/>
+ <type name="VkPipelineStageFlags2"/>
+ <type name="VkPipelineStageFlagBits2"/>
+ <type name="VkAccessFlags2"/>
+ <type name="VkAccessFlagBits2"/>
+ <type name="VkMemoryBarrier2"/>
+ <type name="VkBufferMemoryBarrier2"/>
+ <type name="VkImageMemoryBarrier2"/>
+ <type name="VkDependencyInfo"/>
+ <type name="VkSubmitInfo2"/>
+ <type name="VkSemaphoreSubmitInfo"/>
+ <type name="VkCommandBufferSubmitInfo"/>
+ <type name="VkSubmitFlagBits"/>
+ <type name="VkSubmitFlags"/>
+ <type name="VkPhysicalDeviceSynchronization2Features"/>
+ <command name="vkCmdSetEvent2"/>
+ <command name="vkCmdResetEvent2"/>
+ <command name="vkCmdWaitEvents2"/>
+ <command name="vkCmdPipelineBarrier2"/>
+ <command name="vkCmdWriteTimestamp2"/>
+ <command name="vkQueueSubmit2"/>
+ </require>
+ <require comment="Promoted from VK_KHR_zero_initialize_workgroup_memory (extension 326)">
+ <enum offset="0" extends="VkStructureType" extnumber="326" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES"/>
+ <type name="VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures"/>
+ </require>
+ <require comment="Promoted from VK_EXT_image_robustness (extension 336)">
+ <enum offset="0" extends="VkStructureType" extnumber="336" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES"/>
+ <type name="VkPhysicalDeviceImageRobustnessFeatures"/>
+ </require>
+ <require comment="Promoted from VK_KHR_copy_commands2 (extension 338)">
+ <enum offset="0" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2"/>
+ <enum offset="1" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2"/>
+ <enum offset="2" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2"/>
+ <enum offset="3" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2"/>
+ <enum offset="4" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2"/>
+ <enum offset="5" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2"/>
+ <enum offset="6" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_BUFFER_COPY_2"/>
+ <enum offset="7" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_IMAGE_COPY_2"/>
+ <enum offset="8" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_IMAGE_BLIT_2"/>
+ <enum offset="9" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2"/>
+ <enum offset="10" extends="VkStructureType" extnumber="338" name="VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2"/>
+ <type name="VkCopyBufferInfo2"/>
+ <type name="VkCopyImageInfo2"/>
+ <type name="VkCopyBufferToImageInfo2"/>
+ <type name="VkCopyImageToBufferInfo2"/>
+ <type name="VkBlitImageInfo2"/>
+ <type name="VkResolveImageInfo2"/>
+ <type name="VkBufferCopy2"/>
+ <type name="VkImageCopy2"/>
+ <type name="VkImageBlit2"/>
+ <type name="VkBufferImageCopy2"/>
+ <type name="VkImageResolve2"/>
+ <command name="vkCmdCopyBuffer2"/>
+ <command name="vkCmdCopyImage2"/>
+ <command name="vkCmdCopyBufferToImage2"/>
+ <command name="vkCmdCopyImageToBuffer2"/>
+ <command name="vkCmdBlitImage2"/>
+ <command name="vkCmdResolveImage2"/>
+ </require>
+ <require comment="Promoted from VK_EXT_subgroup_size_control (STDPROMOTE/PROPLIMCHANGE) (extension 226)">
+ <type name="VkPhysicalDeviceSubgroupSizeControlFeatures"/>
+ <type name="VkPhysicalDeviceSubgroupSizeControlProperties"/>
+ <type name="VkPipelineShaderStageRequiredSubgroupSizeCreateInfo"/>
+ <enum offset="0" extends="VkStructureType" extnumber="226" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES"/>
+ <enum offset="1" extends="VkStructureType" extnumber="226" name="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO"/>
+ <enum offset="2" extends="VkStructureType" extnumber="226" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES"/>
+ <enum bitpos="0" extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT"/>
+ <enum bitpos="1" extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT"/>
+ </require>
+ <require comment="Promoted from VK_EXT_inline_uniform_block (STDPROMOTE/PROPLIMCHANGE) (extension 139)">
+ <enum offset="0" extends="VkDescriptorType" extnumber="139" name="VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK"/>
+ <enum offset="0" extends="VkStructureType" extnumber="139" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES"/>
+ <enum offset="1" extends="VkStructureType" extnumber="139" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES"/>
+ <enum offset="2" extends="VkStructureType" extnumber="139" name="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK"/>
+ <enum offset="3" extends="VkStructureType" extnumber="139" name="VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO"/>
+ <type name="VkPhysicalDeviceInlineUniformBlockFeatures"/>
+ <type name="VkPhysicalDeviceInlineUniformBlockProperties"/>
+ <type name="VkWriteDescriptorSetInlineUniformBlock"/>
+ <type name="VkDescriptorPoolInlineUniformBlockCreateInfo"/>
+ </require>
+ <require comment="Promoted from VK_EXT_ycbcr_2plane_444_formats (does not promote the Feature struct, just the formats) (extension 331)">
+ <enum offset="0" extends="VkFormat" extnumber="331" name="VK_FORMAT_G8_B8R8_2PLANE_444_UNORM"/>
+ <enum offset="1" extends="VkFormat" extnumber="331" name="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16"/>
+ <enum offset="2" extends="VkFormat" extnumber="331" name="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16"/>
+ <enum offset="3" extends="VkFormat" extnumber="331" name="VK_FORMAT_G16_B16R16_2PLANE_444_UNORM"/>
+ </require>
+ <require comment="Promoted from VK_EXT_4444_formats (does not promote the Feature struct, just the formats) (extension 341)">
+ <enum offset="0" extends="VkFormat" extnumber="341" name="VK_FORMAT_A4R4G4B4_UNORM_PACK16"/>
+ <enum offset="1" extends="VkFormat" extnumber="341" name="VK_FORMAT_A4B4G4R4_UNORM_PACK16"/>
+ </require>
+ <require comment="Promoted from VK_EXT_texture_compression_astc_hdr (Feature struct is promoted, but becomes optional) (extension 67)">
+ <enum offset="0" extends="VkStructureType" extnumber="67" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES"/>
+ <type name="VkPhysicalDeviceTextureCompressionASTCHDRFeatures"/>
+ <enum offset="0" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK"/>
+ <enum offset="1" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK"/>
+ <enum offset="2" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK"/>
+ <enum offset="3" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK"/>
+ <enum offset="4" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK"/>
+ <enum offset="5" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK"/>
+ <enum offset="6" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK"/>
+ <enum offset="7" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK"/>
+ <enum offset="8" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK"/>
+ <enum offset="9" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK"/>
+ <enum offset="10" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK"/>
+ <enum offset="11" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK"/>
+ <enum offset="12" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK"/>
+ <enum offset="13" extends="VkFormat" extnumber="67" name="VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK"/>
+ </require>
+ <require comment="Promoted from VK_KHR_dynamic_rendering (extension 45)">
+ <command name="vkCmdBeginRendering"/>
+ <command name="vkCmdEndRendering"/>
+ <enum offset="0" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_RENDERING_INFO"/>
+ <enum offset="1" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO"/>
+ <enum offset="2" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO"/>
+ <enum offset="3" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES"/>
+ <enum offset="4" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO"/>
+ <enum offset="0" extends="VkAttachmentStoreOp" extnumber="302" name="VK_ATTACHMENT_STORE_OP_NONE"/>
+ <type name="VkRenderingInfo"/>
+ <type name="VkRenderingAttachmentInfo"/>
+ <type name="VkPipelineRenderingCreateInfo"/>
+ <type name="VkPhysicalDeviceDynamicRenderingFeatures"/>
+ <type name="VkCommandBufferInheritanceRenderingInfo"/>
+ <type name="VkRenderingFlags"/>
+ <type name="VkRenderingFlagBits"/>
+ </require>
+ <require comment="Promoted from VK_EXT_extended_dynamic_state (Feature struct is not promoted) (extension 268)">
+ <enum offset="0" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_CULL_MODE"/>
+ <enum offset="1" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_FRONT_FACE"/>
+ <enum offset="2" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY"/>
+ <enum offset="3" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT"/>
+ <enum offset="4" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT"/>
+ <enum offset="5" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE"/>
+ <enum offset="6" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE"/>
+ <enum offset="7" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE"/>
+ <enum offset="8" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_DEPTH_COMPARE_OP"/>
+ <enum offset="9" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE"/>
+ <enum offset="10" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE"/>
+ <enum offset="11" extends="VkDynamicState" extnumber="268" name="VK_DYNAMIC_STATE_STENCIL_OP"/>
+ <command name="vkCmdSetCullMode"/>
+ <command name="vkCmdSetFrontFace"/>
+ <command name="vkCmdSetPrimitiveTopology"/>
+ <command name="vkCmdSetViewportWithCount"/>
+ <command name="vkCmdSetScissorWithCount"/>
+ <command name="vkCmdBindVertexBuffers2"/>
+ <command name="vkCmdSetDepthTestEnable"/>
+ <command name="vkCmdSetDepthWriteEnable"/>
+ <command name="vkCmdSetDepthCompareOp"/>
+ <command name="vkCmdSetDepthBoundsTestEnable"/>
+ <command name="vkCmdSetStencilTestEnable"/>
+ <command name="vkCmdSetStencilOp"/>
+ </require>
+ <require comment="Promoted from VK_KHR_shader_integer_dot_product (extension 281)">
+ <enum offset="0" extends="VkStructureType" extnumber="281" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES"/>
+ <enum offset="1" extends="VkStructureType" extnumber="281" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES"/>
+ <type name="VkPhysicalDeviceShaderIntegerDotProductFeatures"/>
+ <type name="VkPhysicalDeviceShaderIntegerDotProductProperties"/>
+ </require>
+ <require comment="Promoted from VK_EXT_texel_buffer_alignment (extension 282)">
+ <enum offset="1" extends="VkStructureType" extnumber="282" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES"/>
+ <type name="VkPhysicalDeviceTexelBufferAlignmentProperties"/>
+ </require>
+ <require comment="Promoted from VK_KHR_format_feature_flags2 (extension 361)">
+ <enum offset="0" extends="VkStructureType" extnumber="361" name="VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3"/>
+ <type name="VkFormatFeatureFlags2"/>
+ <type name="VkFormatFeatureFlagBits2"/>
+ <type name="VkFormatProperties3"/>
+ </require>
+ <require comment="Promoted from VK_EXT_extended_dynamic_state2 (Feature struct and optional state are not promoted) (extension 378)">
+ <enum offset="1" extends="VkDynamicState" extnumber="378" name="VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE"/>
+ <enum offset="2" extends="VkDynamicState" extnumber="378" name="VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE"/>
+ <enum offset="4" extends="VkDynamicState" extnumber="378" name="VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE"/>
+ <command name="vkCmdSetRasterizerDiscardEnable"/>
+ <command name="vkCmdSetDepthBiasEnable"/>
+ <command name="vkCmdSetPrimitiveRestartEnable"/>
+ </require>
+ <require comment="Promoted from VK_KHR_maintenance4 (extension 414)">
+ <enum offset="0" extends="VkStructureType" extnumber="414" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES"/>
+ <enum offset="1" extends="VkStructureType" extnumber="414" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES"/>
+ <enum offset="2" extends="VkStructureType" extnumber="414" name="VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS"/>
+ <enum offset="3" extends="VkStructureType" extnumber="414" name="VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS"/>
+ <enum value="0" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_NONE"/>
+ <type name="VkPhysicalDeviceMaintenance4Features"/>
+ <type name="VkPhysicalDeviceMaintenance4Properties"/>
+ <type name="VkDeviceBufferMemoryRequirements"/>
+ <type name="VkDeviceImageMemoryRequirements"/>
+ <command name="vkGetDeviceBufferMemoryRequirements"/>
+ <command name="vkGetDeviceImageMemoryRequirements"/>
+ <command name="vkGetDeviceImageSparseMemoryRequirements"/>
+ </require>
+ </feature>
+
+ <feature api="vulkansc" name="VKSC_VERSION_1_0" number="1.0" comment="Vulkan SC core API interface definitions">
+ <require>
+ <type name="VKSC_API_VARIANT"/>
+ <type name="VKSC_API_VERSION_1_0"/>
+ <type name="VkPhysicalDeviceVulkanSC10Features"/>
+ <type name="VkPhysicalDeviceVulkanSC10Properties"/>
+ <enum offset="0" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_SC_1_0_FEATURES"/>
+ <enum offset="1" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_SC_1_0_PROPERTIES"/>
+ <enum offset="1" extnumber="12" extends="VkResult" dir="-" name="VK_ERROR_VALIDATION_FAILED"/>
+ </require>
+ <require comment="static memory functionality">
+ <type name="VkDeviceObjectReservationCreateInfo"/>
+ <type name="VkCommandPoolMemoryReservationCreateInfo"/>
+ <type name="VkCommandPoolMemoryConsumption"/>
+ <type name="VkPipelinePoolSize"/>
+ <command name="vkGetCommandPoolMemoryConsumption"/>
+ <enum offset="2" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_OBJECT_RESERVATION_CREATE_INFO"/>
+ <enum offset="3" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMMAND_POOL_MEMORY_RESERVATION_CREATE_INFO"/>
+ <enum offset="4" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMMAND_POOL_MEMORY_CONSUMPTION"/>
+ <enum offset="5" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_POOL_SIZE"/>
+ </require>
+ <require comment="fault handling functionality">
+ <enum offset="7" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_FAULT_DATA"/>
+ <enum offset="8" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_FAULT_CALLBACK_INFO"/>
+ <type name="VkFaultData"/>
+ <type name="VkFaultCallbackInfo"/>
+ <type name="VkFaultLevel"/>
+ <type name="VkFaultType"/>
+ <type name="VkFaultQueryBehavior"/>
+ <type name="PFN_vkFaultCallbackFunction"/>
+ <command name="vkGetFaultData"/>
+ </require>
+ <require comment="pipeline offline create info">
+ <enum offset="10" extnumber="299" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_OFFLINE_CREATE_INFO"/>
+ <type name="VkPipelineOfflineCreateInfo"/>
+ <type name="VkPipelineMatchControl"/>
+ </require>
+ <require comment="pipeline cache functionality">
+ <enum offset="0" extnumber="299" extends="VkResult" dir="-" name="VK_ERROR_INVALID_PIPELINE_CACHE_DATA"/>
+ <enum offset="1" extnumber="299" extends="VkResult" dir="-" name="VK_ERROR_NO_PIPELINE_MATCH"/>
+ <enum bitpos="1" extends="VkPipelineCacheCreateFlagBits" name="VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT"/>
+ <enum bitpos="2" extends="VkPipelineCacheCreateFlagBits" name="VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT"/>
+ <type name="VkPipelineCacheCreateFlagBits" comment="This should be picked up from the extends= attributes above"/>
+ </require>
+ <require comment="seu safe memory functionality">
+ <enum bitpos="2" extends="VkMemoryHeapFlagBits" name="VK_MEMORY_HEAP_SEU_SAFE_BIT"/>
+ </require>
+ <require comment="pipeline cache header - These types are part of the API, though not all directly used in API commands or data structures">
+ <enum offset="1" extnumber="299" extends="VkPipelineCacheHeaderVersion" name="VK_PIPELINE_CACHE_HEADER_VERSION_SAFETY_CRITICAL_ONE"/>
+ <type name="VkPipelineCacheValidationVersion"/>
+ <type name="VkPipelineCacheStageValidationIndexEntry"/>
+ <type name="VkPipelineCacheSafetyCriticalIndexEntry"/>
+ <type name="VkPipelineCacheHeaderVersionSafetyCriticalOne"/>
+ </require>
+
+ <remove comment="SC 1.0 removes some features from Vulkan 1.0/1.1/1.2">
+ <enum name="VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO"/>
+ <!--enum name="VK_OBJECT_TYPE_SHADER_MODULE" comment="leave this present for compatibility"/-->
+ <enum name="VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT"/>
+ <enum name="VK_PIPELINE_CREATE_DERIVATIVE_BIT"/>
+ <enum name="VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT"/>
+
+ <!-- Remove Vulkan and deprecated macros -->
+ <type name="VK_API_VERSION"/>
+ <type name="VK_MAKE_VERSION"/>
+ <type name="VK_VERSION_MAJOR"/>
+ <type name="VK_VERSION_MINOR"/>
+ <type name="VK_VERSION_PATCH"/>
+
+ <!--type name="VkShaderModule" comment="leave this present for compatibility"/-->
+ <type name="VkShaderModuleCreateFlags"/>
+ <type name="VkShaderModuleCreateFlagBits"/>
+ <type name="VkShaderModuleCreateInfo"/>
+ <type name="VkCommandPoolTrimFlags"/>
+ <command name="vkCreateShaderModule"/>
+ <command name="vkDestroyShaderModule"/>
+ <command name="vkMergePipelineCaches"/>
+ <command name="vkGetPipelineCacheData"/>
+ <command name="vkTrimCommandPool"/>
+ <command name="vkDestroyCommandPool"/>
+ <command name="vkDestroyDescriptorPool"/>
+ <command name="vkDestroyQueryPool"/>
+ <command name="vkDestroySwapchainKHR"/>
+ <command name="vkFreeMemory"/>
+
+ <!-- Descriptor update templates are unsupported -->
+ <enum name="VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO"/>
+ <enum name="VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE"/>
+ <command name="vkCreateDescriptorUpdateTemplate"/>
+ <command name="vkDestroyDescriptorUpdateTemplate"/>
+ <command name="vkUpdateDescriptorSetWithTemplate"/>
+ <type name="VkDescriptorUpdateTemplate"/>
+ <type name="VkDescriptorUpdateTemplateCreateFlags"/>
+ <type name="VkDescriptorUpdateTemplateType"/>
+ <type name="VkDescriptorUpdateTemplateEntry"/>
+ <type name="VkDescriptorUpdateTemplateCreateInfo"/>
+ <enum name="VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET"/>
+
+ <!-- Sparse resources are unsupported -->
+ <enum name="VK_QUEUE_SPARSE_BINDING_BIT"/>
+ <!--type name="VkPhysicalDeviceSparseProperties" comment="needed for VkPhysicalDeviceProperties"/-->
+ <type name="VkSparseImageFormatProperties"/>
+ <type name="VkSparseImageFormatFlagBits"/>
+ <type name="VkSparseImageFormatFlags"/>
+ <command name="vkGetPhysicalDeviceSparseImageFormatProperties"/>
+ <command name="vkGetPhysicalDeviceSparseImageFormatProperties2"/>
+ <type name="VkPhysicalDeviceSparseImageFormatInfo2"/>
+ <enum name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2"/>
+ <type name="VkSparseImageFormatProperties2"/>
+ <enum name="VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2"/>
+ <type name="VkSparseImageMemoryRequirements"/>
+ <command name="vkGetImageSparseMemoryRequirements"/>
+ <command name="vkGetImageSparseMemoryRequirements2"/>
+ <type name="VkImageSparseMemoryRequirementsInfo2"/>
+ <enum name="VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2"/>
+ <type name="VkSparseImageMemoryRequirements2"/>
+ <enum name="VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2"/>
+ <type name="VkSparseMemoryBind"/>
+ <type name="VkSparseMemoryBindFlagBits"/>
+ <type name="VkSparseMemoryBindFlags"/>
+ <type name="VkSparseBufferMemoryBindInfo"/>
+ <type name="VkSparseImageOpaqueMemoryBindInfo"/>
+ <type name="VkSparseImageMemoryBindInfo"/>
+ <type name="VkSparseImageMemoryBind"/>
+ <command name="vkQueueBindSparse"/>
+ <type name="VkBindSparseInfo"/>
+ <enum name="VK_STRUCTURE_TYPE_BIND_SPARSE_INFO"/>
+ <type name="VkDeviceGroupBindSparseInfo"/>
+ <enum name="VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO"/>
+
+ <command name="vkDestroySemaphoreSciSyncPoolNV"/>
+ </remove>
+ </feature>
<extensions comment="Vulkan extension interface definitions">
- <extension name="VK_KHR_surface" number="1" type="instance" author="KHR" contact="James Jones @cubanismo,Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_surface" number="1" type="instance" author="KHR" contact="James Jones @cubanismo,Ian Elliott @ianelliottus" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="25" name="VK_KHR_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_surface&quot;" name="VK_KHR_SURFACE_EXTENSION_NAME"/>
@@ -11800,7 +16831,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceSurfacePresentModesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_swapchain" number="2" type="device" requires="VK_KHR_surface" author="KHR" contact="James Jones @cubanismo,Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_swapchain" number="2" type="device" depends="VK_KHR_surface" author="KHR" contact="James Jones @cubanismo,Ian Elliott @ianelliottus" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="70" name="VK_KHR_SWAPCHAIN_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_swapchain&quot;" name="VK_KHR_SWAPCHAIN_EXTENSION_NAME"/>
@@ -11821,7 +16852,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkAcquireNextImageKHR"/>
<command name="vkQueuePresentKHR"/>
</require>
- <require feature="VK_VERSION_1_1">
+ <require depends="VK_VERSION_1_1">
<comment>This duplicates definitions in VK_KHR_device_group below</comment>
<enum extends="VkStructureType" extnumber="61" offset="7" name="VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR"/>
<enum extends="VkStructureType" extnumber="61" offset="8" name="VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR"/>
@@ -11845,7 +16876,7 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="1" extends="VkSwapchainCreateFlagBitsKHR" name="VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR" comment="Swapchain is protected"/>
</require>
</extension>
- <extension name="VK_KHR_display" number="3" type="instance" requires="VK_KHR_surface" author="KHR" contact="James Jones @cubanismo,Norbert Nopper @FslNopper" supported="vulkan">
+ <extension name="VK_KHR_display" number="3" type="instance" depends="VK_KHR_surface" author="KHR" contact="James Jones @cubanismo,Norbert Nopper @FslNopper" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="23" name="VK_KHR_DISPLAY_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_display&quot;" name="VK_KHR_DISPLAY_EXTENSION_NAME"/>
@@ -11876,7 +16907,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateDisplayPlaneSurfaceKHR"/>
</require>
</extension>
- <extension name="VK_KHR_display_swapchain" number="4" type="device" requires="VK_KHR_swapchain,VK_KHR_display" author="KHR" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_KHR_display_swapchain" number="4" type="device" depends="VK_KHR_swapchain+VK_KHR_display" author="KHR" contact="James Jones @cubanismo" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="10" name="VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_display_swapchain&quot;" name="VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME"/>
@@ -11886,7 +16917,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateSharedSwapchainsKHR"/>
</require>
</extension>
- <extension name="VK_KHR_xlib_surface" number="5" type="instance" requires="VK_KHR_surface" platform="xlib" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_xlib_surface" number="5" type="instance" depends="VK_KHR_surface" platform="xlib" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan" ratified="vulkan">
<require>
<enum value="6" name="VK_KHR_XLIB_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_xlib_surface&quot;" name="VK_KHR_XLIB_SURFACE_EXTENSION_NAME"/>
@@ -11897,7 +16928,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceXlibPresentationSupportKHR"/>
</require>
</extension>
- <extension name="VK_KHR_xcb_surface" number="6" type="instance" requires="VK_KHR_surface" platform="xcb" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_xcb_surface" number="6" type="instance" depends="VK_KHR_surface" platform="xcb" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan" ratified="vulkan">
<require>
<enum value="6" name="VK_KHR_XCB_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_xcb_surface&quot;" name="VK_KHR_XCB_SURFACE_EXTENSION_NAME"/>
@@ -11908,7 +16939,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceXcbPresentationSupportKHR"/>
</require>
</extension>
- <extension name="VK_KHR_wayland_surface" number="7" type="instance" requires="VK_KHR_surface" platform="wayland" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_wayland_surface" number="7" type="instance" depends="VK_KHR_surface" platform="wayland" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan" ratified="vulkan">
<require>
<enum value="6" name="VK_KHR_WAYLAND_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_wayland_surface&quot;" name="VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME"/>
@@ -11919,13 +16950,13 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceWaylandPresentationSupportKHR"/>
</require>
</extension>
- <extension name="VK_KHR_mir_surface" number="8" type="instance" requires="VK_KHR_surface" author="KHR" supported="disabled" comment="Extension permanently disabled. Extension number should not be reused">
+ <extension name="VK_KHR_mir_surface" number="8" type="instance" depends="VK_KHR_surface" author="KHR" supported="disabled" comment="Extension permanently disabled. Extension number should not be reused" ratified="vulkan">
<require>
<enum value="4" name="VK_KHR_MIR_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_mir_surface&quot;" name="VK_KHR_MIR_SURFACE_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_android_surface" number="9" type="instance" requires="VK_KHR_surface" platform="android" author="KHR" contact="Jesse Hall @critsec" supported="vulkan">
+ <extension name="VK_KHR_android_surface" number="9" type="instance" depends="VK_KHR_surface" platform="android" author="KHR" contact="Jesse Hall @critsec" supported="vulkan" ratified="vulkan">
<require>
<enum value="6" name="VK_KHR_ANDROID_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_android_surface&quot;" name="VK_KHR_ANDROID_SURFACE_EXTENSION_NAME"/>
@@ -11936,7 +16967,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateAndroidSurfaceKHR"/>
</require>
</extension>
- <extension name="VK_KHR_win32_surface" number="10" type="instance" requires="VK_KHR_surface" platform="win32" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_win32_surface" number="10" type="instance" depends="VK_KHR_surface" platform="win32" author="KHR" contact="Jesse Hall @critsec,Ian Elliott @ianelliottus" supported="vulkan" ratified="vulkan">
<require>
<enum value="6" name="VK_KHR_WIN32_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_win32_surface&quot;" name="VK_KHR_WIN32_SURFACE_EXTENSION_NAME"/>
@@ -11949,11 +16980,11 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_ANDROID_native_buffer" number="11" type="device" author="ANDROID" platform="android" contact="Jesse Hall @critsec" supported="disabled">
<require>
- <comment>VK_ANDROID_native_buffer is used between the Android Vulkan loader and drivers to implement the WSI extensions. It isn't exposed to applications and uses types that aren't part of Android's stable public API, so it is left disabled to keep it out of the standard Vulkan headers.</comment>
+ <comment>VK_ANDROID_native_buffer is used between the Android Vulkan loader and drivers to implement the WSI extensions. It is not exposed to applications and uses types that are not part of Android's stable public API, so it is left disabled to keep it out of the standard Vulkan headers.</comment>
<enum value="8" name="VK_ANDROID_NATIVE_BUFFER_SPEC_VERSION"/>
<enum value="11" name="VK_ANDROID_NATIVE_BUFFER_NUMBER"/>
- <enum value="&quot;VK_ANDROID_native_buffer&quot;" name="VK_ANDROID_NATIVE_BUFFER_NAME"/>
- <enum name="VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME" alias="VK_ANDROID_NATIVE_BUFFER_NAME"/>
+ <enum value="&quot;VK_ANDROID_native_buffer&quot;" name="VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME"/>
+ <enum name="VK_ANDROID_NATIVE_BUFFER_NAME" alias="VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID"/>
<enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID"/>
@@ -11974,8 +17005,9 @@ typedef void <name>CAMetalLayer</name>;
<enum value="10" name="VK_EXT_DEBUG_REPORT_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_debug_report&quot;" name="VK_EXT_DEBUG_REPORT_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT"/>
- <enum alias="VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT" comment="Backwards-compatible alias containing a typo"/>
- <enum offset="1" extends="VkResult" dir="-" name="VK_ERROR_VALIDATION_FAILED_EXT"/>
+ <enum alias="VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT" deprecated="aliased"/>
+ <enum api="vulkan" offset="1" extends="VkResult" dir="-" name="VK_ERROR_VALIDATION_FAILED_EXT"/>
+ <enum api="vulkansc" extends="VkResult" name="VK_ERROR_VALIDATION_FAILED_EXT" alias="VK_ERROR_VALIDATION_FAILED"/>
<enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT"/>
<type name="VkDebugReportCallbackEXT"/>
<type name="PFN_vkDebugReportCallbackEXT"/>
@@ -11987,7 +17019,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkDestroyDebugReportCallbackEXT"/>
<command name="vkDebugReportMessageEXT"/>
</require>
- <require feature="VK_VERSION_1_1">
+ <require depends="VK_VERSION_1_1">
<comment>This duplicates definitions in other extensions, below</comment>
<enum extends="VkDebugReportObjectTypeEXT" extnumber="157" offset="0" name="VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT"/>
<enum extends="VkDebugReportObjectTypeEXT" extnumber="86" offset="0" name="VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT"/>
@@ -12000,26 +17032,26 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="0" extends="VkResult" dir="-" name="VK_ERROR_INVALID_SHADER_NV"/>
</require>
</extension>
- <extension name="VK_EXT_depth_range_unrestricted" type="device" number="14" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_depth_range_unrestricted" type="device" number="14" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_depth_range_unrestricted&quot;" name="VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_sampler_mirror_clamp_to_edge" type="device" number="15" author="KHR" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_sampler_mirror_clamp_to_edge" type="device" number="15" author="KHR" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="3" name="VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_sampler_mirror_clamp_to_edge&quot;" name="VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME"/>
<enum value="4" extends="VkSamplerAddressMode" name="VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE" comment="Note that this defines what was previously a core enum, and so uses the 'value' attribute rather than 'offset', and does not have a suffix. This is a special case, and should not be repeated"/>
- <enum extends="VkSamplerAddressMode" name="VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR" alias="VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE" comment="Alias introduced for consistency with extension suffixing rules"/>
+ <enum extends="VkSamplerAddressMode" name="VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR" alias="VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE" deprecated="aliased" comment="Introduced for consistency with extension suffixing rules"/>
</require>
</extension>
<extension name="VK_IMG_filter_cubic" number="16" type="device" author="IMG" contact="Tobias Hector @tobski" supported="vulkan">
<require>
<enum value="1" name="VK_IMG_FILTER_CUBIC_SPEC_VERSION"/>
<enum value="&quot;VK_IMG_filter_cubic&quot;" name="VK_IMG_FILTER_CUBIC_EXTENSION_NAME"/>
- <enum offset="0" extends="VkFilter" name="VK_FILTER_CUBIC_IMG"/>
- <enum bitpos="13" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG" comment="Format can be filtered with VK_FILTER_CUBIC_IMG when being sampled"/>
+ <enum extends="VkFilter" name="VK_FILTER_CUBIC_IMG" alias="VK_FILTER_CUBIC_EXT"/>
+ <enum extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG" alias="VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT" comment="Format can be filtered with VK_FILTER_CUBIC_IMG when being sampled"/>
</require>
</extension>
<extension name="VK_AMD_extension_17" number="17" author="AMD" contact="Daniel Rakos @drakos-amd" supported="disabled">
@@ -12061,7 +17093,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_shader_explicit_vertex_parameter&quot;" name="VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_debug_marker" number="23" type="device" requires="VK_EXT_debug_report" author="Baldur Karlsson" contact="Baldur Karlsson @baldurk" specialuse="debugging" supported="vulkan" promotedto="VK_EXT_debug_utils">
+ <extension name="VK_EXT_debug_marker" number="23" type="device" depends="VK_EXT_debug_report" author="Baldur Karlsson" contact="Baldur Karlsson @baldurk" specialuse="debugging" supported="vulkan" promotedto="VK_EXT_debug_utils">
<require>
<enum value="4" name="VK_EXT_DEBUG_MARKER_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_debug_marker&quot;" name="VK_EXT_DEBUG_MARKER_EXTENSION_NAME"/>
@@ -12079,32 +17111,40 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdDebugMarkerInsertEXT"/>
</require>
</extension>
- <extension name="VK_KHR_video_queue" number="24" type="device" requires="VK_KHR_get_physical_device_properties2,VK_KHR_sampler_ycbcr_conversion" author="KHR" contact="Tony Zlatinski @tzlatinski" provisional="true" platform="provisional" supported="vulkan">
+ <extension name="VK_KHR_video_queue" number="24" type="device" depends="VK_VERSION_1_1+VK_KHR_synchronization2" author="KHR" contact="Tony Zlatinski @tzlatinski" supported="vulkan" ratified="vulkan">
<require>
- <enum value="2" name="VK_KHR_VIDEO_QUEUE_SPEC_VERSION"/>
+ <enum value="8" name="VK_KHR_VIDEO_QUEUE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_video_queue&quot;" name="VK_KHR_VIDEO_QUEUE_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_PROFILE_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_GET_MEMORY_PROPERTIES_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_BIND_MEMORY_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="11" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="12" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_QUEUE_FAMILY_PROPERTIES_2_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="13" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_PROFILES_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="14" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="15" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_PROFILE_INFO_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_MEMORY_REQUIREMENTS_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_VIDEO_SESSION_MEMORY_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR"/>
+ <enum offset="11" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_INFO_KHR"/>
+ <enum offset="12" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUEUE_FAMILY_VIDEO_PROPERTIES_KHR"/>
+ <enum offset="13" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_PROFILE_LIST_INFO_KHR"/>
+ <enum offset="14" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR"/>
+ <enum offset="15" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR"/>
+ <enum offset="16" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUEUE_FAMILY_QUERY_RESULT_STATUS_PROPERTIES_KHR"/>
- <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_VIDEO_SESSION_KHR" comment="VkVideoSessionKHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkObjectType" name="VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR" comment="VkVideoSessionParametersKHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_VIDEO_SESSION_KHR" comment="VkVideoSessionKHR"/>
+ <enum offset="1" extends="VkObjectType" name="VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR" comment="VkVideoSessionParametersKHR"/>
- <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="4" extends="VkQueryResultFlagBits" name="VK_QUERY_RESULT_WITH_STATUS_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR"/>
+ <enum bitpos="4" extends="VkQueryResultFlagBits" name="VK_QUERY_RESULT_WITH_STATUS_BIT_KHR"/>
+
+ <enum offset="0" extends="VkResult" dir="-" name="VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR"/>
+ <enum offset="1" extends="VkResult" dir="-" name="VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR"/>
+ <enum offset="2" extends="VkResult" dir="-" name="VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR"/>
+ <enum offset="3" extends="VkResult" dir="-" name="VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR"/>
+ <enum offset="4" extends="VkResult" dir="-" name="VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR"/>
+ <enum offset="5" extends="VkResult" dir="-" name="VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR"/>
<type name="VkVideoSessionKHR"/>
<type name="VkVideoSessionParametersKHR"/>
@@ -12119,25 +17159,25 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkVideoCapabilityFlagsKHR"/>
<type name="VkVideoSessionCreateFlagBitsKHR"/>
<type name="VkVideoSessionCreateFlagsKHR"/>
+ <type name="VkVideoSessionParametersCreateFlagsKHR"/>
<type name="VkVideoBeginCodingFlagsKHR"/>
<type name="VkVideoEndCodingFlagsKHR"/>
<type name="VkVideoCodingControlFlagBitsKHR"/>
<type name="VkVideoCodingControlFlagsKHR"/>
- <type name="VkVideoCodingQualityPresetFlagBitsKHR"/>
- <type name="VkVideoCodingQualityPresetFlagsKHR"/>
+ <type name="VkQueueFamilyQueryResultStatusPropertiesKHR"/>
<type name="VkQueryResultStatusKHR"/>
- <type name="VkVideoQueueFamilyProperties2KHR"/>
- <type name="VkVideoProfileKHR"/>
- <type name="VkVideoProfilesKHR"/>
+ <type name="VkQueueFamilyVideoPropertiesKHR"/>
+ <type name="VkVideoProfileInfoKHR"/>
+ <type name="VkVideoProfileListInfoKHR"/>
<type name="VkVideoCapabilitiesKHR"/>
<type name="VkPhysicalDeviceVideoFormatInfoKHR"/>
<type name="VkVideoFormatPropertiesKHR"/>
- <type name="VkVideoPictureResourceKHR"/>
- <type name="VkVideoReferenceSlotKHR"/>
- <type name="VkVideoGetMemoryPropertiesKHR"/>
- <type name="VkVideoBindMemoryKHR"/>
+ <type name="VkVideoPictureResourceInfoKHR"/>
+ <type name="VkVideoReferenceSlotInfoKHR"/>
+ <type name="VkVideoSessionMemoryRequirementsKHR"/>
+ <type name="VkBindVideoSessionMemoryInfoKHR"/>
<type name="VkVideoSessionCreateInfoKHR"/>
<type name="VkVideoSessionParametersCreateInfoKHR"/>
<type name="VkVideoSessionParametersUpdateInfoKHR"/>
@@ -12160,32 +17200,46 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdControlVideoCodingKHR"/>
</require>
</extension>
- <extension name="VK_KHR_video_decode_queue" number="25" type="device" requires="VK_KHR_video_queue,VK_KHR_synchronization2" author="KHR" contact="jake.beju@amd.com" provisional="true" platform="provisional" supported="vulkan">
+ <extension name="VK_KHR_video_decode_queue" number="25" type="device" depends="VK_KHR_video_queue+VK_KHR_synchronization2" author="KHR" contact="jake.beju@amd.com" supported="vulkan" ratified="vulkan">
<require>
- <enum value="1" name="VK_KHR_VIDEO_DECODE_QUEUE_SPEC_VERSION"/>
+ <enum value="8" name="VK_KHR_VIDEO_DECODE_QUEUE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_video_decode_queue&quot;" name="VK_KHR_VIDEO_DECODE_QUEUE_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="5" extends="VkQueueFlagBits" name="VK_QUEUE_VIDEO_DECODE_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="26" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="35" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS" />
- <enum bitpos="36" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="13" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_DECODE_SRC_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="14" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_DECODE_DST_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="10" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="11" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="12" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="25" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="26" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="2" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_USAGE_INFO_KHR"/>
+ <enum bitpos="5" extends="VkQueueFlagBits" name="VK_QUEUE_VIDEO_DECODE_BIT_KHR"/>
+ <!-- VkPipelineStageFlagBits bitpos="26" is reserved by this extension, but not used -->
+ <enum bitpos="26" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR"/>
+ <enum bitpos="35" extends="VkAccessFlagBits2" name="VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR"/>
+ <enum bitpos="36" extends="VkAccessFlagBits2" name="VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR"/>
+ <enum bitpos="13" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_DECODE_SRC_BIT_KHR"/>
+ <enum bitpos="14" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_DECODE_DST_BIT_KHR"/>
+ <enum bitpos="10" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR"/>
+ <enum bitpos="11" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR"/>
+ <enum bitpos="12" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR"/>
+ <enum bitpos="25" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR"/>
+ <enum bitpos="26" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR"/>
+ <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR"/>
+ <enum offset="1" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR"/>
+ <enum offset="2" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR"/>
+
+ <type name="VkVideoDecodeCapabilityFlagBitsKHR"/>
+ <type name="VkVideoDecodeCapabilityFlagsKHR"/>
+ <type name="VkVideoDecodeCapabilitiesKHR"/>
+
+ <type name="VkVideoDecodeUsageFlagBitsKHR"/>
+ <type name="VkVideoDecodeUsageFlagsKHR"/>
+ <type name="VkVideoDecodeUsageInfoKHR"/>
- <type name="VkVideoDecodeFlagBitsKHR"/>
<type name="VkVideoDecodeFlagsKHR"/>
<type name="VkVideoDecodeInfoKHR"/>
<command name="vkCmdDecodeVideoKHR"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="25" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR"/>
+ <enum bitpos="26" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR"/>
+ </require>
</extension>
<extension name="VK_AMD_gcn_shader" number="26" type="device" author="AMD" contact="Dominik Witczak @dominikwitczakamd" supported="vulkan">
<require>
@@ -12208,10 +17262,10 @@ typedef void <name>CAMetalLayer</name>;
<extension name="VK_EXT_extension_28" number="28" author="NV" contact="Piers Daniell @pdaniell-nv" supported="disabled">
<require>
<enum value="0" name="VK_EXT_EXTENSION_28_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_28&quot;" name="VK_EXT_EXTENSION_28_EXTENSION_NAME"/>
+ <enum value="&quot;VK_EXT_extension_28&quot;" name="VK_EXT_EXTENSION_28_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_transform_feedback" number="29" type="device" author="NV" contact="Piers Daniell @pdaniell-nv" specialuse="glemulation,d3demulation,devtools" supported="vulkan" requires="VK_KHR_get_physical_device_properties2">
+ <extension name="VK_EXT_transform_feedback" number="29" type="device" author="NV" contact="Piers Daniell @pdaniell-nv" specialuse="glemulation,d3demulation,devtools" supported="vulkan" depends="VK_KHR_get_physical_device_properties2">
<require>
<enum value="1" name="VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_transform_feedback&quot;" name="VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME"/>
@@ -12228,8 +17282,8 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="4" extends="VkQueryType" name="VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT"/>
- <enum bitpos="11" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT"/>
- <enum bitpos="12" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT"/>
+ <enum bitpos="11" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT"/>
+ <enum bitpos="12" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT"/>
<enum bitpos="25" extends="VkAccessFlagBits" name="VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT"/>
<enum bitpos="26" extends="VkAccessFlagBits" name="VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT"/>
@@ -12244,7 +17298,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPipelineRasterizationStateStreamCreateFlagsEXT"/>
</require>
</extension>
- <extension name="VK_NVX_binary_import" number="30" type="device" author="NVX" contact="Eric Werness @ewerness,Liam Middlebrook @liam-middlebrook" supported="vulkan">
+ <extension name="VK_NVX_binary_import" number="30" type="device" author="NVX" contact="Eric Werness @ewerness-nv,Liam Middlebrook @liam-middlebrook" supported="vulkan">
<require>
<enum value="1" name="VK_NVX_BINARY_IMPORT_SPEC_VERSION"/>
<enum value="&quot;VK_NVX_binary_import&quot;" name="VK_NVX_BINARY_IMPORT_EXTENSION_NAME"/>
@@ -12253,8 +17307,6 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_CU_LAUNCH_INFO_NVX"/>
<enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_CU_MODULE_NVX"/>
<enum offset="1" extends="VkObjectType" name="VK_OBJECT_TYPE_CU_FUNCTION_NVX"/>
- <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_CU_MODULE_NVX_EXT"/>
- <enum offset="1" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_CU_FUNCTION_NVX_EXT"/>
<type name="VkCuModuleNVX"/>
<type name="VkCuFunctionNVX"/>
<type name="VkCuModuleCreateInfoNVX"/>
@@ -12266,8 +17318,12 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkDestroyCuFunctionNVX"/>
<command name="vkCmdCuLaunchKernelNVX"/>
</require>
+ <require depends="VK_EXT_debug_report">
+ <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_CU_MODULE_NVX_EXT"/>
+ <enum offset="1" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_CU_FUNCTION_NVX_EXT"/>
+ </require>
</extension>
- <extension name="VK_NVX_image_view_handle" number="31" type="device" author="NVX" contact="Eric Werness @ewerness" supported="vulkan">
+ <extension name="VK_NVX_image_view_handle" number="31" type="device" author="NVX" contact="Eric Werness @ewerness-nv" supported="vulkan">
<require>
<enum value="2" name="VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION"/>
<enum value="&quot;VK_NVX_image_view_handle&quot;" name="VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME"/>
@@ -12323,74 +17379,120 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_shader_ballot&quot;" name="VK_AMD_SHADER_BALLOT_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_video_encode_h264" number="39" type="device" requires="VK_KHR_video_encode_queue" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" provisional="true" platform="provisional" supported="vulkan">
- <require>
- <enum value="2" name="VK_EXT_VIDEO_ENCODE_H264_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_video_encode_h264&quot;" name="VK_EXT_VIDEO_ENCODE_H264_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="16" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <extension name="VK_KHR_video_encode_h264" number="39" type="device" depends="VK_KHR_video_encode_queue" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="14" name="VK_KHR_VIDEO_ENCODE_H264_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_video_encode_h264&quot;" name="VK_KHR_VIDEO_ENCODE_H264_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PICTURE_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_GOP_REMAINING_FRAME_INFO_KHR"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_INFO_KHR"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_KHR"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_KHR"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_KHR"/>
+ <enum offset="11" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_QUALITY_LEVEL_PROPERTIES_KHR"/>
+ <enum offset="12" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_GET_INFO_KHR"/>
+ <enum offset="13" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_FEEDBACK_INFO_KHR"/>
+ <enum bitpos="16" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR"/>
- <type name="VkVideoEncodeH264CapabilityFlagBitsEXT"/>
- <type name="VkVideoEncodeH264CapabilityFlagsEXT"/>
- <type name="VkVideoEncodeH264InputModeFlagBitsEXT"/>
- <type name="VkVideoEncodeH264InputModeFlagsEXT"/>
- <type name="VkVideoEncodeH264OutputModeFlagBitsEXT"/>
- <type name="VkVideoEncodeH264OutputModeFlagsEXT"/>
- <type name="VkVideoEncodeH264CreateFlagBitsEXT"/>
- <type name="VkVideoEncodeH264CreateFlagsEXT"/>
- <type name="VkVideoEncodeH264CapabilitiesEXT"/>
- <type name="VkVideoEncodeH264SessionCreateInfoEXT"/>
- <type name="VkVideoEncodeH264SessionParametersCreateInfoEXT"/>
- <type name="VkVideoEncodeH264SessionParametersAddInfoEXT"/>
- <type name="VkVideoEncodeH264VclFrameInfoEXT"/>
- <type name="VkVideoEncodeH264EmitPictureParametersEXT"/>
- <type name="VkVideoEncodeH264DpbSlotInfoEXT"/>
- <type name="VkVideoEncodeH264NaluSliceEXT"/>
- <type name="VkVideoEncodeH264ProfileEXT"/>
- </require>
- </extension>
- <extension name="VK_EXT_video_encode_h265" number="40" type="device" requires="VK_KHR_video_encode_queue" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" supported="disabled">
- <require>
- <enum value="0" name="VK_EXT_VIDEO_ENCODE_H265_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_video_encode_h265&quot;" name="VK_EXT_VIDEO_ENCODE_H265_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_EXT_video_decode_h264" number="41" type="device" requires="VK_KHR_video_decode_queue" author="KHR" contact="peter.fang@amd.com" provisional="true" platform="provisional" supported="vulkan">
- <require>
- <enum value="3" name="VK_EXT_VIDEO_DECODE_H264_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_video_decode_h264&quot;" name="VK_EXT_VIDEO_DECODE_H264_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_CREATE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="0" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <type name="VkVideoDecodeH264PictureLayoutFlagBitsEXT"/>
- <type name="VkVideoDecodeH264PictureLayoutFlagsEXT"/>
- <type name="VkVideoDecodeH264CreateFlagsEXT"/>
- <type name="VkVideoDecodeH264ProfileEXT"/>
- <type name="VkVideoDecodeH264CapabilitiesEXT"/>
- <type name="VkVideoDecodeH264SessionCreateInfoEXT"/>
- <type name="VkVideoDecodeH264SessionParametersCreateInfoEXT"/>
- <type name="VkVideoDecodeH264SessionParametersAddInfoEXT"/>
- <type name="VkVideoDecodeH264PictureInfoEXT"/>
+ <type name="VkVideoEncodeH264CapabilityFlagBitsKHR"/>
+ <type name="VkVideoEncodeH264CapabilityFlagsKHR"/>
+ <type name="VkVideoEncodeH264StdFlagBitsKHR"/>
+ <type name="VkVideoEncodeH264StdFlagsKHR"/>
+ <type name="VkVideoEncodeH264CapabilitiesKHR"/>
+ <type name="VkVideoEncodeH264QualityLevelPropertiesKHR"/>
+ <type name="VkVideoEncodeH264SessionCreateInfoKHR"/>
+ <type name="VkVideoEncodeH264SessionParametersCreateInfoKHR"/>
+ <type name="VkVideoEncodeH264SessionParametersAddInfoKHR"/>
+ <type name="VkVideoEncodeH264SessionParametersGetInfoKHR"/>
+ <type name="VkVideoEncodeH264SessionParametersFeedbackInfoKHR"/>
+ <type name="VkVideoEncodeH264PictureInfoKHR"/>
+ <type name="VkVideoEncodeH264DpbSlotInfoKHR"/>
+ <type name="VkVideoEncodeH264NaluSliceInfoKHR"/>
+ <type name="VkVideoEncodeH264ProfileInfoKHR"/>
+ <type name="VkVideoEncodeH264RateControlInfoKHR"/>
+ <type name="VkVideoEncodeH264RateControlFlagBitsKHR"/>
+ <type name="VkVideoEncodeH264RateControlFlagsKHR"/>
+ <type name="VkVideoEncodeH264RateControlLayerInfoKHR"/>
+ <type name="VkVideoEncodeH264QpKHR"/>
+ <type name="VkVideoEncodeH264FrameSizeKHR"/>
+ <type name="VkVideoEncodeH264GopRemainingFrameInfoKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_video_encode_h265" number="40" type="device" depends="VK_KHR_video_encode_queue" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="14" name="VK_KHR_VIDEO_ENCODE_H265_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_video_encode_h265&quot;" name="VK_KHR_VIDEO_ENCODE_H265_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_CAPABILITIES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PICTURE_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_DPB_SLOT_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_GOP_REMAINING_FRAME_INFO_KHR"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PROFILE_INFO_KHR"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_INFO_KHR"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_KHR"/>
+ <enum offset="11" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_CREATE_INFO_KHR"/>
+ <enum offset="12" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_QUALITY_LEVEL_PROPERTIES_KHR"/>
+ <enum offset="13" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_GET_INFO_KHR"/>
+ <enum offset="14" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_FEEDBACK_INFO_KHR"/>
+ <enum bitpos="17" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR"/>
- <type name="VkVideoDecodeH264MvcEXT"/>
- <type name="VkVideoDecodeH264DpbSlotInfoEXT"/>
- </require>
- </extension>
- <extension name="VK_AMD_texture_gather_bias_lod" number="42" author="AMD" contact="Rex Xu @amdrexu" supported="vulkan" type="device" requires="VK_KHR_get_physical_device_properties2">
+ <type name="VkVideoEncodeH265CapabilityFlagBitsKHR"/>
+ <type name="VkVideoEncodeH265CapabilityFlagsKHR"/>
+ <type name="VkVideoEncodeH265StdFlagBitsKHR"/>
+ <type name="VkVideoEncodeH265StdFlagsKHR"/>
+ <type name="VkVideoEncodeH265CtbSizeFlagBitsKHR"/>
+ <type name="VkVideoEncodeH265CtbSizeFlagsKHR"/>
+ <type name="VkVideoEncodeH265TransformBlockSizeFlagBitsKHR"/>
+ <type name="VkVideoEncodeH265TransformBlockSizeFlagsKHR"/>
+ <type name="VkVideoEncodeH265CapabilitiesKHR"/>
+ <type name="VkVideoEncodeH265SessionCreateInfoKHR"/>
+ <type name="VkVideoEncodeH265QualityLevelPropertiesKHR"/>
+ <type name="VkVideoEncodeH265SessionParametersCreateInfoKHR"/>
+ <type name="VkVideoEncodeH265SessionParametersAddInfoKHR"/>
+ <type name="VkVideoEncodeH265SessionParametersGetInfoKHR"/>
+ <type name="VkVideoEncodeH265SessionParametersFeedbackInfoKHR"/>
+ <type name="VkVideoEncodeH265PictureInfoKHR"/>
+ <type name="VkVideoEncodeH265DpbSlotInfoKHR"/>
+ <type name="VkVideoEncodeH265NaluSliceSegmentInfoKHR"/>
+ <type name="VkVideoEncodeH265ProfileInfoKHR"/>
+ <type name="VkVideoEncodeH265RateControlInfoKHR"/>
+ <type name="VkVideoEncodeH265RateControlFlagBitsKHR"/>
+ <type name="VkVideoEncodeH265RateControlFlagsKHR"/>
+ <type name="VkVideoEncodeH265RateControlLayerInfoKHR"/>
+ <type name="VkVideoEncodeH265QpKHR"/>
+ <type name="VkVideoEncodeH265FrameSizeKHR"/>
+ <type name="VkVideoEncodeH265GopRemainingFrameInfoKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_video_decode_h264" number="41" type="device" depends="VK_KHR_video_decode_queue" author="KHR" contact="peter.fang@amd.com" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="9" name="VK_KHR_VIDEO_DECODE_H264_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_video_decode_h264&quot;" name="VK_KHR_VIDEO_DECODE_H264_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_KHR"/>
+ <enum bitpos="0" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR"/>
+ <type name="VkVideoDecodeH264PictureLayoutFlagBitsKHR"/>
+ <type name="VkVideoDecodeH264PictureLayoutFlagsKHR"/>
+ <type name="VkVideoDecodeH264ProfileInfoKHR"/>
+ <type name="VkVideoDecodeH264CapabilitiesKHR"/>
+ <type name="VkVideoDecodeH264SessionParametersCreateInfoKHR"/>
+ <type name="VkVideoDecodeH264SessionParametersAddInfoKHR"/>
+ <type name="VkVideoDecodeH264PictureInfoKHR"/>
+ <type name="VkVideoDecodeH264DpbSlotInfoKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_texture_gather_bias_lod" number="42" author="AMD" contact="Rex Xu @amdrexu" supported="vulkan" type="device" depends="VK_KHR_get_physical_device_properties2">
<require>
<enum value="1" name="VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION"/>
<enum value="&quot;VK_AMD_texture_gather_bias_lod&quot;" name="VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME"/>
@@ -12414,12 +17516,49 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_44&quot;" name="VK_AMD_EXTENSION_44_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_AMD_extension_45" number="45" author="AMD" contact="Daniel Rakos @drakos-amd" supported="disabled">
- <require>
- <enum value="0" name="VK_AMD_EXTENSION_45_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_45&quot;" name="VK_AMD_EXTENSION_45_EXTENSION_NAME"/>
- <enum bitpos="21" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RESERVED_21_BIT_AMD"/>
- <enum bitpos="22" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RESERVED_22_BIT_AMD"/>
+ <extension name="VK_KHR_dynamic_rendering" number="45" author="KHR" type="device" depends="VK_KHR_depth_stencil_resolve+VK_KHR_get_physical_device_properties2" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_3" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_dynamic_rendering&quot;" name="VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME"/>
+ <command name="vkCmdBeginRenderingKHR"/>
+ <command name="vkCmdEndRenderingKHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDERING_INFO_KHR" alias="VK_STRUCTURE_TYPE_RENDERING_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR" alias="VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR" alias="VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO_KHR" alias="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO"/>
+ <enum extends="VkAttachmentStoreOp" name="VK_ATTACHMENT_STORE_OP_NONE_KHR" alias="VK_ATTACHMENT_STORE_OP_NONE"/>
+ <type name="VkRenderingInfoKHR"/>
+ <type name="VkRenderingAttachmentInfoKHR"/>
+ <type name="VkPipelineRenderingCreateInfoKHR"/>
+ <type name="VkPhysicalDeviceDynamicRenderingFeaturesKHR"/>
+ <type name="VkCommandBufferInheritanceRenderingInfoKHR"/>
+ <type name="VkRenderingFlagsKHR"/>
+ <type name="VkRenderingFlagBitsKHR"/>
+ </require>
+ <require depends="VK_KHR_fragment_shading_rate">
+ <enum bitpos="21" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
+ <enum alias="VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR" deprecated="aliased"/>
+ <enum offset="6" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR"/>
+ <type name="VkRenderingFragmentShadingRateAttachmentInfoKHR"/>
+ </require>
+ <require depends="VK_EXT_fragment_density_map">
+ <enum bitpos="22" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT"/>
+ <enum alias="VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT" deprecated="aliased"/>
+ <enum offset="7" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT"/>
+ <type name="VkRenderingFragmentDensityMapAttachmentInfoEXT"/>
+ </require>
+ <require depends="VK_AMD_mixed_attachment_samples">
+ <enum offset="8" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD"/>
+ <type name="VkAttachmentSampleCountInfoAMD"/>
+ </require>
+ <require depends="VK_NV_framebuffer_mixed_samples">
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_NV" alias="VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD"/>
+ <type name="VkAttachmentSampleCountInfoNV"/>
+ </require>
+ <require depends="VK_NVX_multiview_per_view_attributes">
+ <enum offset="9" extends="VkStructureType" extnumber="45" name="VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_ATTRIBUTES_INFO_NVX"/>
+ <type name="VkMultiviewPerViewAttributesInfoNVX"/>
</require>
</extension>
<extension name="VK_AMD_extension_46" number="46" author="AMD" contact="Daniel Rakos @drakos-amd" supported="disabled">
@@ -12446,7 +17585,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_GOOGLE_extension_49&quot;" name="VK_GOOGLE_EXTENSION_49_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_GGP_stream_descriptor_surface" number="50" type="instance" requires="VK_KHR_surface" platform="ggp" author="GGP" contact="Jean-Francois Roy @jfroy" supported="vulkan">
+ <extension name="VK_GGP_stream_descriptor_surface" number="50" type="instance" depends="VK_KHR_surface" platform="ggp" author="GGP" contact="Jean-Francois Roy @jfroy" supported="vulkan">
<require>
<enum value="1" name="VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_GGP_stream_descriptor_surface&quot;" name="VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME"/>
@@ -12456,7 +17595,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateStreamDescriptorSurfaceGGP"/>
</require>
</extension>
- <extension name="VK_NV_corner_sampled_image" number="51" author="NV" type="device" requires="VK_KHR_get_physical_device_properties2" contact="Daniel Koch @dgkoch" supported="vulkan">
+ <extension name="VK_NV_corner_sampled_image" number="51" author="NV" type="device" depends="VK_KHR_get_physical_device_properties2" contact="Daniel Koch @dgkoch" supported="vulkan">
<require>
<enum value="2" name="VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION"/>
<enum value="&quot;VK_NV_corner_sampled_image&quot;" name="VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME"/>
@@ -12465,12 +17604,11 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceCornerSampledImageFeaturesNV"/>
</require>
</extension>
- <extension name="VK_NV_extension_52" number="52" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
+ <extension name="VK_NV_private_vendor_info" number="52" type="device" author="NV" contact="Daniel Koch @dgkoch" supported="vulkansc">
<require>
- <enum value="0" name="VK_NV_EXTENSION_52_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_52&quot;" name="VK_NV_EXTENSION_52_EXTENSION_NAME"/>
- <enum bitpos="0" extends="VkShaderModuleCreateFlagBits" name="VK_SHADER_MODULE_CREATE_RESERVED_0_BIT_NV"/>
- <enum bitpos="2" extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_RESERVED_2_BIT_NV"/>
+ <enum value="2" name="VK_NV_PRIVATE_VENDOR_INFO_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_private_vendor_info&quot;" name="VK_NV_PRIVATE_VENDOR_INFO_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PRIVATE_VENDOR_INFO_PLACEHOLDER_OFFSET_0_NV"/>
</require>
</extension>
<extension name="VK_NV_extension_53" number="53" author="NV" contact="Jeff Bolz @jeffbolznv" supported="disabled">
@@ -12479,7 +17617,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_NV_extension_53&quot;" name="VK_NV_EXTENSION_53_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_multiview" number="54" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_multiview" number="54" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_MULTIVIEW_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_multiview&quot;" name="VK_KHR_MULTIVIEW_EXTENSION_NAME"/>
@@ -12492,7 +17630,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceMultiviewPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_IMG_format_pvrtc" number="55" type="device" author="IMG" contact="Stuart Smith" supported="vulkan">
+ <extension name="VK_IMG_format_pvrtc" number="55" type="device" author="IMG" contact="Stuart Smith" supported="vulkan" deprecatedby="">
<require>
<enum value="1" name="VK_IMG_FORMAT_PVRTC_SPEC_VERSION"/>
<enum value="&quot;VK_IMG_format_pvrtc&quot;" name="VK_IMG_FORMAT_PVRTC_EXTENSION_NAME"/>
@@ -12518,7 +17656,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceExternalImageFormatPropertiesNV"/>
</require>
</extension>
- <extension name="VK_NV_external_memory" number="57" type="device" requires="VK_NV_external_memory_capabilities" author="NV" contact="James Jones @cubanismo" supported="vulkan" deprecatedby="VK_KHR_external_memory">
+ <extension name="VK_NV_external_memory" number="57" type="device" depends="VK_NV_external_memory_capabilities" author="NV" contact="James Jones @cubanismo" supported="vulkan" deprecatedby="VK_KHR_external_memory">
<require>
<enum value="1" name="VK_NV_EXTERNAL_MEMORY_SPEC_VERSION"/>
<enum value="&quot;VK_NV_external_memory&quot;" name="VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME"/>
@@ -12528,7 +17666,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkExportMemoryAllocateInfoNV"/>
</require>
</extension>
- <extension name="VK_NV_external_memory_win32" number="58" type="device" requires="VK_NV_external_memory" author="NV" contact="James Jones @cubanismo" platform="win32" supported="vulkan" deprecatedby="VK_KHR_external_memory_win32">
+ <extension name="VK_NV_external_memory_win32" number="58" type="device" depends="VK_NV_external_memory" author="NV" contact="James Jones @cubanismo" platform="win32" supported="vulkan" deprecatedby="VK_KHR_external_memory_win32">
<require>
<enum value="1" name="VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION"/>
<enum value="&quot;VK_NV_external_memory_win32&quot;" name="VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME"/>
@@ -12539,7 +17677,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetMemoryWin32HandleNV"/>
</require>
</extension>
- <extension name="VK_NV_win32_keyed_mutex" number="59" type="device" requires="VK_NV_external_memory_win32" author="NV" contact="Carsten Rohde @crohde" platform="win32" supported="vulkan" promotedto="VK_KHR_win32_keyed_mutex">
+ <extension name="VK_NV_win32_keyed_mutex" number="59" type="device" depends="VK_NV_external_memory_win32" author="NV" contact="Carsten Rohde @crohde" platform="win32" supported="vulkan" promotedto="VK_KHR_win32_keyed_mutex">
<require>
<enum value="2" name="VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION"/>
<enum value="&quot;VK_NV_win32_keyed_mutex&quot;" name="VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME"/>
@@ -12547,7 +17685,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkWin32KeyedMutexAcquireReleaseInfoNV"/>
</require>
</extension>
- <extension name="VK_KHR_get_physical_device_properties2" number="60" type="instance" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_get_physical_device_properties2" number="60" type="instance" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="2" name="VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_get_physical_device_properties2&quot;" name="VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME"/>
@@ -12578,7 +17716,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceSparseImageFormatProperties2KHR"/>
</require>
</extension>
- <extension name="VK_KHR_device_group" number="61" type="device" author="KHR" requires="VK_KHR_device_group_creation" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_device_group" number="61" type="device" author="KHR" depends="VK_KHR_device_group_creation" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="4" name="VK_KHR_DEVICE_GROUP_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_device_group&quot;" name="VK_KHR_DEVICE_GROUP_EXTENSION_NAME"/>
@@ -12608,14 +17746,14 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_DISPATCH_BASE_KHR" alias="VK_PIPELINE_CREATE_DISPATCH_BASE"/>
<enum extends="VkDependencyFlagBits" name="VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR" alias="VK_DEPENDENCY_DEVICE_GROUP_BIT"/>
</require>
- <require extension="VK_KHR_bind_memory2">
+ <require depends="VK_KHR_bind_memory2">
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR" alias="VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO"/>
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR" alias="VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO"/>
<type name="VkBindBufferMemoryDeviceGroupInfoKHR"/>
<type name="VkBindImageMemoryDeviceGroupInfoKHR"/>
<enum extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR" alias="VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT"/>
</require>
- <require extension="VK_KHR_surface">
+ <require depends="VK_KHR_surface">
<enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR"/>
<type name="VkDeviceGroupPresentModeFlagBitsKHR"/>
<type name="VkDeviceGroupPresentModeFlagsKHR"/>
@@ -12624,7 +17762,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetDeviceGroupSurfacePresentModesKHR"/>
<command name="vkGetPhysicalDevicePresentRectanglesKHR"/>
</require>
- <require extension="VK_KHR_swapchain">
+ <require depends="VK_KHR_swapchain">
<enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR"/>
<enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR"/>
<enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR"/>
@@ -12639,16 +17777,16 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkAcquireNextImage2KHR"/>
</require>
</extension>
- <extension name="VK_EXT_validation_flags" number="62" type="instance" author="GOOGLE" contact="Tobin Ehlis @tobine" specialuse="debugging" supported="vulkan" deprecatedby="VK_EXT_validation_features">
+ <extension name="VK_EXT_validation_flags" number="62" type="instance" author="GOOGLE" contact="Tobin Ehlis @tobine" specialuse="debugging" supported="vulkan" deprecatedby="VK_EXT_layer_settings">
<require>
- <enum value="2" name="VK_EXT_VALIDATION_FLAGS_SPEC_VERSION"/>
+ <enum value="3" name="VK_EXT_VALIDATION_FLAGS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_validation_flags&quot;" name="VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT"/>
<type name="VkValidationFlagsEXT"/>
<type name="VkValidationCheckEXT"/>
</require>
</extension>
- <extension name="VK_NN_vi_surface" number="63" type="instance" author="NN" contact="Mathias Heyer gitlab:@mheyer" requires="VK_KHR_surface" platform="vi" supported="vulkan">
+ <extension name="VK_NN_vi_surface" number="63" type="instance" author="NN" contact="Mathias Heyer gitlab:@mheyer" depends="VK_KHR_surface" platform="vi" supported="vulkan">
<require>
<enum value="1" name="VK_NN_VI_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_NN_vi_surface&quot;" name="VK_NN_VI_SURFACE_EXTENSION_NAME"/>
@@ -12658,7 +17796,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateViSurfaceNN"/>
</require>
</extension>
- <extension name="VK_KHR_shader_draw_parameters" number="64" type="device" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_shader_draw_parameters" number="64" type="device" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_draw_parameters&quot;" name="VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME"/>
@@ -12676,48 +17814,58 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_EXT_shader_subgroup_vote&quot;" name="VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_texture_compression_astc_hdr" number="67" type="device" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" requires="VK_KHR_get_physical_device_properties2" supported="vulkan">
+ <extension name="VK_EXT_texture_compression_astc_hdr" number="67" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
- <enum value="1" name="VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_texture_compression_astc_hdr&quot;" name="VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT"/>
+ <enum value="1" name="VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_texture_compression_astc_hdr&quot;" name="VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES"/>
<type name="VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT"/>
- <enum extends="VkFormat" extnumber="67" offset="0" name="VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="1" name="VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="2" name="VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="3" name="VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="4" name="VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="5" name="VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="6" name="VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="7" name="VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="8" name="VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="9" name="VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="10" name="VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="11" name="VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="12" name="VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT"/>
- <enum extends="VkFormat" extnumber="67" offset="13" name="VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT"/>
- </require>
- </extension>
- <extension name="VK_EXT_astc_decode_mode" number="68" type="device" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" requires="VK_KHR_get_physical_device_properties2" supported="vulkan">
- <require>
- <enum value="1" name="VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_astc_decode_mode&quot;" name="VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK"/>
+ <enum extends="VkFormat" name="VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT" alias="VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_astc_decode_mode" number="68" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan,vulkansc">
+ <require>
+ <enum value="1" name="VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_astc_decode_mode&quot;" name="VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT"/>
<type name="VkImageViewASTCDecodeModeEXT"/>
<type name="VkPhysicalDeviceASTCDecodeFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_IMG_extension_69" number="69" type="device" author="IMG" contact="Tobias Hector @tobski" supported="disabled">
+ <extension name="VK_EXT_pipeline_robustness" depends="VK_KHR_get_physical_device_properties2" number="69" type="device" author="IMG" contact="Jarred Davies" supported="vulkan">
<require>
- <enum value="0" name="VK_IMG_EXTENSION_69_SPEC_VERSION"/>
- <enum value="&quot;VK_IMG_extension_69&quot;" name="VK_IMG_EXTENSION_69_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_pipeline_robustness&quot;" name="VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT"/>
+ <type name="VkPhysicalDevicePipelineRobustnessFeaturesEXT"/>
+ <type name="VkPhysicalDevicePipelineRobustnessPropertiesEXT"/>
+ <type name="VkPipelineRobustnessCreateInfoEXT"/>
+ <type name="VkPipelineRobustnessBufferBehaviorEXT"/>
+ <type name="VkPipelineRobustnessImageBehaviorEXT"/>
</require>
</extension>
- <extension name="VK_KHR_maintenance1" number="70" type="device" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_maintenance1" number="70" type="device" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
- <enum value="2" name="VK_KHR_MAINTENANCE1_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_maintenance1&quot;" name="VK_KHR_MAINTENANCE1_EXTENSION_NAME"/>
+ <enum value="2" name="VK_KHR_MAINTENANCE_1_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_maintenance1&quot;" name="VK_KHR_MAINTENANCE_1_EXTENSION_NAME"/>
+ <enum alias="VK_KHR_MAINTENANCE_1_SPEC_VERSION" name="VK_KHR_MAINTENANCE1_SPEC_VERSION" deprecated="aliased"/>
+ <enum alias="VK_KHR_MAINTENANCE_1_EXTENSION_NAME" name="VK_KHR_MAINTENANCE1_EXTENSION_NAME" deprecated="aliased"/>
<enum extends="VkResult" name="VK_ERROR_OUT_OF_POOL_MEMORY_KHR" alias="VK_ERROR_OUT_OF_POOL_MEMORY"/>
<enum extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR" alias="VK_FORMAT_FEATURE_TRANSFER_SRC_BIT"/>
<enum extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR" alias="VK_FORMAT_FEATURE_TRANSFER_DST_BIT"/>
@@ -12726,7 +17874,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkTrimCommandPoolKHR"/>
</require>
</extension>
- <extension name="VK_KHR_device_group_creation" number="71" type="instance" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_device_group_creation" number="71" type="instance" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_device_group_creation&quot;" name="VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME"/>
@@ -12739,7 +17887,7 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkMemoryHeapFlagBits" name="VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR" alias="VK_MEMORY_HEAP_MULTI_INSTANCE_BIT"/>
</require>
</extension>
- <extension name="VK_KHR_external_memory_capabilities" number="72" type="instance" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_external_memory_capabilities" number="72" type="instance" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_memory_capabilities&quot;" name="VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME"/>
@@ -12772,7 +17920,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceExternalBufferPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_memory" number="73" type="device" requires="VK_KHR_external_memory_capabilities" author="KHR" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_external_memory" number="73" type="device" depends="VK_KHR_external_memory_capabilities" author="KHR" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_memory&quot;" name="VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME"/>
@@ -12786,7 +17934,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkExportMemoryAllocateInfoKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_memory_win32" number="74" type="device" requires="VK_KHR_external_memory" author="KHR" contact="James Jones @cubanismo" platform="win32" supported="vulkan">
+ <extension name="VK_KHR_external_memory_win32" number="74" type="device" depends="VK_KHR_external_memory" author="KHR" contact="James Jones @cubanismo" platform="win32" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_memory_win32&quot;" name="VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME"/>
@@ -12802,7 +17950,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetMemoryWin32HandlePropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_memory_fd" number="75" type="device" requires="VK_KHR_external_memory" author="KHR" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_KHR_external_memory_fd" number="75" type="device" depends="VK_KHR_external_memory,VK_VERSION_1_1" author="KHR" contact="James Jones @cubanismo" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_memory_fd&quot;" name="VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME"/>
@@ -12816,7 +17964,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetMemoryFdPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_win32_keyed_mutex" number="76" type="device" requires="VK_KHR_external_memory_win32" author="KHR" contact="Carsten Rohde @crohde" platform="win32" supported="vulkan">
+ <extension name="VK_KHR_win32_keyed_mutex" number="76" type="device" depends="VK_KHR_external_memory_win32" author="KHR" contact="Carsten Rohde @crohde" platform="win32" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_win32_keyed_mutex&quot;" name="VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME"/>
@@ -12824,7 +17972,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkWin32KeyedMutexAcquireReleaseInfoKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_semaphore_capabilities" number="77" type="instance" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_external_semaphore_capabilities" number="77" type="instance" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_semaphore_capabilities&quot;" name="VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME"/>
@@ -12849,7 +17997,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_semaphore" number="78" type="device" requires="VK_KHR_external_semaphore_capabilities" author="KHR" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_external_semaphore" number="78" type="device" depends="VK_KHR_external_semaphore_capabilities" author="KHR" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_semaphore&quot;" name="VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME"/>
@@ -12860,7 +18008,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkExportSemaphoreCreateInfoKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_semaphore_win32" number="79" type="device" requires="VK_KHR_external_semaphore" author="KHR" contact="James Jones @cubanismo" platform="win32" supported="vulkan">
+ <extension name="VK_KHR_external_semaphore_win32" number="79" type="device" depends="VK_KHR_external_semaphore" author="KHR" contact="James Jones @cubanismo" platform="win32" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_semaphore_win32&quot;" name="VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME"/>
@@ -12876,7 +18024,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetSemaphoreWin32HandleKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_semaphore_fd" number="80" type="device" requires="VK_KHR_external_semaphore" author="KHR" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_KHR_external_semaphore_fd" number="80" type="device" depends="VK_KHR_external_semaphore,VK_VERSION_1_1" author="KHR" contact="James Jones @cubanismo" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_semaphore_fd&quot;" name="VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME"/>
@@ -12888,7 +18036,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetSemaphoreFdKHR"/>
</require>
</extension>
- <extension name="VK_KHR_push_descriptor" number="81" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_KHR_push_descriptor" number="81" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Jeff Bolz @jeffbolznv" supported="vulkan" ratified="vulkan">
<require>
<enum value="2" name="VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_push_descriptor&quot;" name="VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME"/>
@@ -12897,16 +18045,16 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdPushDescriptorSetKHR"/>
<type name="VkPhysicalDevicePushDescriptorPropertiesKHR"/>
</require>
- <require feature="VK_VERSION_1_1">
+ <require depends="VK_VERSION_1_1">
<command name="vkCmdPushDescriptorSetWithTemplateKHR"/>
<enum value="1" extends="VkDescriptorUpdateTemplateType" name="VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR" comment="Create descriptor update template for pushed descriptor updates"/>
</require>
- <require extension="VK_KHR_descriptor_update_template">
+ <require depends="VK_KHR_descriptor_update_template">
<command name="vkCmdPushDescriptorSetWithTemplateKHR"/>
<enum value="1" extends="VkDescriptorUpdateTemplateType" name="VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR" comment="Create descriptor update template for pushed descriptor updates"/>
</require>
</extension>
- <extension name="VK_EXT_conditional_rendering" number="82" type="device" author="NV" contact="Vikram Kushwaha @vkushwaha" supported="vulkan">
+ <extension name="VK_EXT_conditional_rendering" number="82" type="device" author="NV" contact="Vikram Kushwaha @vkushwaha" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
<enum value="2" name="VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_conditional_rendering&quot;" name="VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME"/>
@@ -12925,17 +18073,17 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkCommandBufferInheritanceConditionalRenderingInfoEXT"/>
</require>
</extension>
- <extension name="VK_KHR_shader_float16_int8" number="83" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_shader_float16_int8" number="83" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
- <enum value="1" name="VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_shader_float16_int8&quot;" name="VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME"/>
- <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES"/>
- <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES"/>
+ <enum value="1" name="VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_shader_float16_int8&quot;" name="VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES"/>
<type name="VkPhysicalDeviceShaderFloat16Int8FeaturesKHR"/>
<type name="VkPhysicalDeviceFloat16Int8FeaturesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_16bit_storage" number="84" type="device" requires="VK_KHR_get_physical_device_properties2,VK_KHR_storage_buffer_storage_class" author="KHR" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_16bit_storage" number="84" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_storage_buffer_storage_class" author="KHR" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_16BIT_STORAGE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_16bit_storage&quot;" name="VK_KHR_16BIT_STORAGE_EXTENSION_NAME"/>
@@ -12943,7 +18091,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDevice16BitStorageFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_incremental_present" number="85" type="device" author="KHR" requires="VK_KHR_swapchain" contact="Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_KHR_incremental_present" number="85" type="device" author="KHR" depends="VK_KHR_swapchain" contact="Ian Elliott @ianelliottus" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="2" name="VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_incremental_present&quot;" name="VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME"/>
@@ -12953,7 +18101,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkRectLayerKHR"/>
</require>
</extension>
- <extension name="VK_KHR_descriptor_update_template" number="86" type="device" author="KHR" contact="Markus Tavenrath @mtavenrath" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_descriptor_update_template" number="86" type="device" author="KHR" contact="Markus Tavenrath @mtavenrath" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_descriptor_update_template&quot;" name="VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME"/>
@@ -12969,11 +18117,11 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkDescriptorUpdateTemplateCreateInfoKHR"/>
<enum extends="VkDescriptorUpdateTemplateType" name="VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR" alias="VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET"/>
</require>
- <require extension="VK_KHR_push_descriptor">
+ <require depends="VK_KHR_push_descriptor">
<command name="vkCmdPushDescriptorSetWithTemplateKHR"/>
<enum value="1" extends="VkDescriptorUpdateTemplateType" name="VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR" comment="Create descriptor update template for pushed descriptor updates"/>
</require>
- <require extension="VK_EXT_debug_report">
+ <require depends="VK_EXT_debug_report">
<enum extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT"/>
</require>
</extension>
@@ -12994,14 +18142,14 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetViewportWScalingNV"/>
</require>
</extension>
- <extension name="VK_EXT_direct_mode_display" number="89" type="instance" requires="VK_KHR_display" author="NV" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_EXT_direct_mode_display" number="89" type="instance" depends="VK_KHR_display" author="NV" contact="James Jones @cubanismo" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_direct_mode_display&quot;" name="VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME"/>
<command name="vkReleaseDisplayEXT"/>
</require>
</extension>
- <extension name="VK_EXT_acquire_xlib_display" number="90" type="instance" requires="VK_EXT_direct_mode_display" author="NV" contact="James Jones @cubanismo" platform="xlib_xrandr" supported="vulkan">
+ <extension name="VK_EXT_acquire_xlib_display" number="90" type="instance" depends="VK_EXT_direct_mode_display" author="NV" contact="James Jones @cubanismo" platform="xlib_xrandr" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_acquire_xlib_display&quot;" name="VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME"/>
@@ -13009,19 +18157,19 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetRandROutputDisplayEXT"/>
</require>
</extension>
- <extension name="VK_EXT_display_surface_counter" number="91" type="instance" requires="VK_KHR_display" author="NV" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_EXT_display_surface_counter" number="91" type="instance" depends="VK_KHR_display" author="NV" contact="James Jones @cubanismo" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_display_surface_counter&quot;" name="VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT"/>
- <enum alias="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT" comment="Backwards-compatible alias containing a typo"/>
+ <enum api="vulkan" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT" alias="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT" deprecated="aliased"/>
<type name="VkSurfaceCounterFlagsEXT"/>
<type name="VkSurfaceCounterFlagBitsEXT"/>
<type name="VkSurfaceCapabilities2EXT"/>
<command name="vkGetPhysicalDeviceSurfaceCapabilities2EXT"/>
</require>
</extension>
- <extension name="VK_EXT_display_control" number="92" type="device" requires="VK_EXT_display_surface_counter,VK_KHR_swapchain" author="NV" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_EXT_display_control" number="92" type="device" depends="VK_EXT_display_surface_counter+VK_KHR_swapchain" author="NV" contact="James Jones @cubanismo" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_DISPLAY_CONTROL_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_display_control&quot;" name="VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME"/>
@@ -13042,7 +18190,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetSwapchainCounterEXT"/>
</require>
</extension>
- <extension name="VK_GOOGLE_display_timing" number="93" type="device" author="GOOGLE" requires="VK_KHR_swapchain" contact="Ian Elliott @ianelliottus" supported="vulkan">
+ <extension name="VK_GOOGLE_display_timing" number="93" type="device" author="GOOGLE" depends="VK_KHR_swapchain" contact="Ian Elliott @ianelliottus" supported="vulkan">
<require>
<enum value="1" name="VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION"/>
<enum value="&quot;VK_GOOGLE_display_timing&quot;" name="VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME"/>
@@ -13055,7 +18203,11 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPastPresentationTimingGOOGLE"/>
</require>
</extension>
- <extension name="RESERVED_DO_NOT_USE_94" number="94" supported="disabled" comment="Used for functionality subsumed into Vulkan 1.1 and not published as an extension">
+ <extension name="VK_RESERVED_do_not_use_94" number="94" supported="disabled" comment="Used for functionality subsumed into Vulkan 1.1 and not published as an extension">
+ <require>
+ <enum value="1" name="VK_RESERVED_DO_NOT_USE_94_SPEC_VERSION"/>
+ <enum value="&quot;VK_RESERVED_do_not_use_94&quot;" name="VK_RESERVED_DO_NOT_USE_94_EXTENSION_NAME"/>
+ </require>
</extension>
<extension name="VK_NV_sample_mask_override_coverage" number="95" type="device" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
<require>
@@ -13076,11 +18228,13 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_NV_viewport_array2" number="97" type="device" author="NV" contact="Daniel Koch @dgkoch" supported="vulkan">
<require>
- <enum value="1" name="VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_viewport_array2&quot;" name="VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME"/>
+ <enum value="1" name="VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_viewport_array2&quot;" name="VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME"/>
+ <enum alias="VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION" name="VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION" deprecated="aliased"/>
+ <enum alias="VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME" name="VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME" deprecated="aliased"/>
</require>
</extension>
- <extension name="VK_NVX_multiview_per_view_attributes" number="98" type="device" requires="VK_KHR_multiview" author="NVX" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_NVX_multiview_per_view_attributes" number="98" type="device" depends="VK_KHR_multiview" author="NVX" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
<require>
<enum value="1" name="VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION"/>
<enum value="&quot;VK_NVX_multiview_per_view_attributes&quot;" name="VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME"/>
@@ -13101,18 +18255,22 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPipelineViewportSwizzleStateCreateFlagsNV"/>
</require>
</extension>
- <extension name="VK_EXT_discard_rectangles" number="100" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_discard_rectangles" number="100" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc" ratified="vulkan">
<require>
- <enum value="1" name="VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION"/>
+ <enum value="2" name="VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_discard_rectangles&quot;" name="VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT"/>
<enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT"/>
+ <enum offset="1" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DISCARD_RECTANGLE_ENABLE_EXT"/>
+ <enum offset="2" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DISCARD_RECTANGLE_MODE_EXT"/>
<type name="VkPhysicalDeviceDiscardRectanglePropertiesEXT"/>
<type name="VkPipelineDiscardRectangleStateCreateInfoEXT"/>
<type name="VkPipelineDiscardRectangleStateCreateFlagsEXT"/>
<type name="VkDiscardRectangleModeEXT"/>
<command name="vkCmdSetDiscardRectangleEXT"/>
+ <command name="vkCmdSetDiscardRectangleEnableEXT"/>
+ <command name="vkCmdSetDiscardRectangleModeEXT"/>
</require>
</extension>
<extension name="VK_NV_extension_101" number="101" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
@@ -13121,7 +18279,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_NV_extension_101&quot;" name="VK_NV_EXTENSION_101_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_conservative_rasterization" number="102" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_conservative_rasterization" number="102" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_conservative_rasterization&quot;" name="VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME"/>
@@ -13133,7 +18291,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkConservativeRasterizationModeEXT"/>
</require>
</extension>
- <extension name="VK_EXT_depth_clip_enable" number="103" type="device" author="EXT" contact="Piers Daniell @pdaniell-nv" specialuse="d3demulation" supported="vulkan">
+ <extension name="VK_EXT_depth_clip_enable" number="103" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Piers Daniell @pdaniell-nv" specialuse="d3demulation" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_depth_clip_enable&quot;" name="VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME"/>
@@ -13148,9 +18306,10 @@ typedef void <name>CAMetalLayer</name>;
<require>
<enum value="0" name="VK_NV_EXTENSION_104_SPEC_VERSION"/>
<enum value="&quot;VK_NV_extension_104&quot;" name="VK_NV_EXTENSION_104_EXTENSION_NAME"/>
+ <enum bitpos="0" extends="VkPrivateDataSlotCreateFlagBits" name="VK_PRIVATE_DATA_SLOT_CREATE_RESERVED_0_BIT_NV"/>
</require>
</extension>
- <extension name="VK_EXT_swapchain_colorspace" number="105" type="instance" author="GOOGLE" contact="Courtney Goeltzenleuchter @courtney-g" requires="VK_KHR_surface" supported="vulkan">
+ <extension name="VK_EXT_swapchain_colorspace" number="105" type="instance" depends="VK_KHR_surface" author="GOOGLE" contact="Courtney Goeltzenleuchter @courtney-g" supported="vulkan,vulkansc">
<require>
<enum value="4" name="VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_swapchain_colorspace&quot;" name="VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME"/>
@@ -13168,10 +18327,10 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="12" extends="VkColorSpaceKHR" name="VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT"/>
<enum offset="13" extends="VkColorSpaceKHR" name="VK_COLOR_SPACE_PASS_THROUGH_EXT"/>
<enum offset="14" extends="VkColorSpaceKHR" name="VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT"/>
- <enum extends="VkColorSpaceKHR" name="VK_COLOR_SPACE_DCI_P3_LINEAR_EXT" alias="VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT" comment="Deprecated name for backwards compatibility"/>
+ <enum api="vulkan" extends="VkColorSpaceKHR" name="VK_COLOR_SPACE_DCI_P3_LINEAR_EXT" alias="VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT" deprecated="aliased"/>
</require>
</extension>
- <extension name="VK_EXT_hdr_metadata" number="106" type="device" requires="VK_KHR_swapchain" author="GOOGLE" contact="Courtney Goeltzenleuchter @courtney-g" supported="vulkan">
+ <extension name="VK_EXT_hdr_metadata" number="106" type="device" depends="VK_KHR_swapchain" author="GOOGLE" contact="Courtney Goeltzenleuchter @courtney-g" supported="vulkan,vulkansc">
<require>
<enum value="2" name="VK_EXT_HDR_METADATA_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_hdr_metadata&quot;" name="VK_EXT_HDR_METADATA_EXTENSION_NAME"/>
@@ -13193,7 +18352,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_IMG_extension_108&quot;" name="VK_IMG_EXTENSION_108_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_imageless_framebuffer" requires="VK_KHR_maintenance2,VK_KHR_image_format_list" number="109" author="KHR" contact="Tobias Hector @tobias" type="device" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_imageless_framebuffer" depends="VK_KHR_maintenance2+VK_KHR_image_format_list+VK_KHR_get_physical_device_properties2" number="109" author="KHR" contact="Tobias Hector @tobias" type="device" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_imageless_framebuffer&quot;" name="VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME"/>
@@ -13208,7 +18367,7 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkFramebufferCreateFlagBits" name="VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR" alias="VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT"/>
</require>
</extension>
- <extension name="VK_KHR_create_renderpass2" requires="VK_KHR_multiview,VK_KHR_maintenance2" number="110" contact="Tobias Hector @tobias" type="device" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_create_renderpass2" depends="VK_KHR_multiview+VK_KHR_maintenance2" number="110" author="KHR" contact="Tobias Hector @tobias" type="device" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_create_renderpass2&quot;" name="VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME"/>
@@ -13232,13 +18391,15 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkSubpassEndInfoKHR"/>
</require>
</extension>
- <extension name="VK_IMG_extension_111" number="111" author="IMG" contact="Michael Worcester @michaelworcester" supported="disabled">
+ <extension name="VK_IMG_relaxed_line_rasterization" number="111" type="device" depends="(VK_KHR_get_physical_device_properties2,VK_VERSION_1_1)" author="IMG" contact="James Fitzpatrick @jamesfitzpatrick" supported="vulkan" specialuse="glemulation">
<require>
- <enum value="0" name="VK_IMG_EXTENSION_111_SPEC_VERSION"/>
- <enum value="&quot;VK_IMG_extension_111&quot;" name="VK_IMG_EXTENSION_111_EXTENSION_NAME"/>
+ <enum value="1" name="VK_IMG_RELAXED_LINE_RASTERIZATION_SPEC_VERSION"/>
+ <enum value="&quot;VK_IMG_relaxed_line_rasterization&quot;" name="VK_IMG_RELAXED_LINE_RASTERIZATION_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RELAXED_LINE_RASTERIZATION_FEATURES_IMG"/>
+ <type name="VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG"/>
</require>
</extension>
- <extension name="VK_KHR_shared_presentable_image" number="112" type="device" requires="VK_KHR_swapchain,VK_KHR_get_physical_device_properties2,VK_KHR_get_surface_capabilities2" author="KHR" contact="Alon Or-bach @alonorbach" supported="vulkan">
+ <extension name="VK_KHR_shared_presentable_image" number="112" type="device" depends="VK_KHR_swapchain+VK_KHR_get_surface_capabilities2+(VK_KHR_get_physical_device_properties2,VK_VERSION_1_1)" author="KHR" contact="Alon Or-bach @alonorbach" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shared_presentable_image&quot;" name="VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME"/>
@@ -13250,7 +18411,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetSwapchainStatusKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_fence_capabilities" number="113" type="instance" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Jesse Hall @critsec" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_external_fence_capabilities" number="113" type="instance" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Jesse Hall @critsec" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_fence_capabilities&quot;" name="VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME"/>
@@ -13274,7 +18435,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceExternalFencePropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_fence" number="114" type="device" requires="VK_KHR_external_fence_capabilities" author="KHR" contact="Jesse Hall @critsec" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_external_fence" number="114" type="device" depends="VK_KHR_external_fence_capabilities" author="KHR" contact="Jesse Hall @critsec" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_FENCE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_fence&quot;" name="VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME"/>
@@ -13285,7 +18446,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkExportFenceCreateInfoKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_fence_win32" number="115" type="device" requires="VK_KHR_external_fence" author="KHR" contact="Jesse Hall @critsec" platform="win32" supported="vulkan">
+ <extension name="VK_KHR_external_fence_win32" number="115" type="device" depends="VK_KHR_external_fence" author="KHR" contact="Jesse Hall @critsec" platform="win32" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_fence_win32&quot;" name="VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME"/>
@@ -13299,7 +18460,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetFenceWin32HandleKHR"/>
</require>
</extension>
- <extension name="VK_KHR_external_fence_fd" number="116" type="device" requires="VK_KHR_external_fence" author="KHR" contact="Jesse Hall @critsec" supported="vulkan">
+ <extension name="VK_KHR_external_fence_fd" number="116" type="device" depends="VK_KHR_external_fence,VK_VERSION_1_1" author="KHR" contact="Jesse Hall @critsec" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_external_fence_fd&quot;" name="VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME"/>
@@ -13311,7 +18472,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetFenceFdKHR"/>
</require>
</extension>
- <extension name="VK_KHR_performance_query" number="117" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alon Or-bach @alonorbach" specialuse="devtools" supported="vulkan">
+ <extension name="VK_KHR_performance_query" number="117" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="KHR" contact="Alon Or-bach @alonorbach" specialuse="devtools" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_performance_query&quot;" name="VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME"/>
@@ -13343,11 +18504,17 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkAcquireProfilingLockKHR"/>
<command name="vkReleaseProfilingLockKHR"/>
</require>
+ <require depends="VKSC_VERSION_1_0" api="vulkansc">
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_RESERVATION_INFO_KHR"/>
+ <type name="VkPerformanceQueryReservationInfoKHR"/>
+ </require>
</extension>
- <extension name="VK_KHR_maintenance2" number="118" type="device" author="KHR" contact="Michael Worcester @michaelworcester" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_maintenance2" number="118" type="device" author="KHR" contact="Michael Worcester @michaelworcester" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
- <enum value="1" name="VK_KHR_MAINTENANCE2_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_maintenance2&quot;" name="VK_KHR_MAINTENANCE2_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_MAINTENANCE_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_maintenance2&quot;" name="VK_KHR_MAINTENANCE_2_EXTENSION_NAME"/>
+ <enum alias="VK_KHR_MAINTENANCE_2_SPEC_VERSION" name="VK_KHR_MAINTENANCE2_SPEC_VERSION" deprecated="aliased"/>
+ <enum alias="VK_KHR_MAINTENANCE_2_EXTENSION_NAME" name="VK_KHR_MAINTENANCE2_EXTENSION_NAME" deprecated="aliased"/>
<enum extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR" alias="VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT"/>
<enum extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR" alias="VK_IMAGE_CREATE_EXTENDED_USAGE_BIT"/>
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES"/>
@@ -13375,7 +18542,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_KHR_extension_119&quot;" name="VK_KHR_EXTENSION_119_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_get_surface_capabilities2" number="120" type="instance" requires="VK_KHR_surface" author="KHR" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_KHR_get_surface_capabilities2" number="120" type="instance" depends="VK_KHR_surface" author="KHR" contact="James Jones @cubanismo" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_get_surface_capabilities2&quot;" name="VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME"/>
@@ -13389,7 +18556,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceSurfaceFormats2KHR"/>
</require>
</extension>
- <extension name="VK_KHR_variable_pointers" number="121" type="device" author="KHR" contact="Jesse Hall @critsec" requires="VK_KHR_get_physical_device_properties2,VK_KHR_storage_buffer_storage_class" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_variable_pointers" number="121" type="device" author="KHR" contact="Jesse Hall @critsec" depends="VK_KHR_get_physical_device_properties2+VK_KHR_storage_buffer_storage_class" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_VARIABLE_POINTERS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_variable_pointers&quot;" name="VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME"/>
@@ -13399,15 +18566,15 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceVariablePointersFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_get_display_properties2" number="122" type="instance" requires="VK_KHR_display" author="KHR" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_KHR_get_display_properties2" number="122" type="instance" depends="VK_KHR_display" author="KHR" contact="James Jones @cubanismo" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
- <enum value="1" name="VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_get_display_properties2&quot;" name="VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR"/>
+ <enum value="1" name="VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_get_display_properties2&quot;" name="VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR"/>
<type name="VkDisplayProperties2KHR"/>
<type name="VkDisplayPlaneProperties2KHR"/>
<type name="VkDisplayModeProperties2KHR"/>
@@ -13419,7 +18586,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetDisplayPlaneCapabilities2KHR"/>
</require>
</extension>
- <extension name="VK_MVK_ios_surface" number="123" type="instance" requires="VK_KHR_surface" platform="ios" supported="vulkan" author="MVK" contact="Bill Hollings @billhollings" deprecatedby="VK_EXT_metal_surface">
+ <extension name="VK_MVK_ios_surface" number="123" type="instance" depends="VK_KHR_surface" platform="ios" supported="vulkan" author="MVK" contact="Bill Hollings @billhollings" deprecatedby="VK_EXT_metal_surface">
<require>
<enum value="3" name="VK_MVK_IOS_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_MVK_ios_surface&quot;" name="VK_MVK_IOS_SURFACE_EXTENSION_NAME"/>
@@ -13429,7 +18596,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateIOSSurfaceMVK"/>
</require>
</extension>
- <extension name="VK_MVK_macos_surface" number="124" type="instance" requires="VK_KHR_surface" platform="macos" supported="vulkan" author="MVK" contact="Bill Hollings @billhollings" deprecatedby="VK_EXT_metal_surface">
+ <extension name="VK_MVK_macos_surface" number="124" type="instance" depends="VK_KHR_surface" platform="macos" supported="vulkan" author="MVK" contact="Bill Hollings @billhollings" deprecatedby="VK_EXT_metal_surface">
<require>
<enum value="3" name="VK_MVK_MACOS_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_MVK_macos_surface&quot;" name="VK_MVK_MACOS_SURFACE_EXTENSION_NAME"/>
@@ -13445,21 +18612,21 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_MVK_moltenvk&quot;" name="VK_MVK_MOLTENVK_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_external_memory_dma_buf" number="126" type="device" requires="VK_KHR_external_memory_fd" author="EXT" contact="Chad Versace @chadversary" supported="vulkan">
+ <extension name="VK_EXT_external_memory_dma_buf" number="126" type="device" depends="VK_KHR_external_memory_fd" author="EXT" contact="Lina Versace @versalinyaa" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_external_memory_dma_buf&quot;" name="VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME"/>
<enum bitpos="9" extends="VkExternalMemoryHandleTypeFlagBits" name="VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT"/>
</require>
</extension>
- <extension name="VK_EXT_queue_family_foreign" number="127" type="device" author="EXT" requires="VK_KHR_external_memory" contact="Chad Versace @chadversary" supported="vulkan">
+ <extension name="VK_EXT_queue_family_foreign" number="127" type="device" author="EXT" depends="VK_KHR_external_memory,VK_VERSION_1_1" contact="Lina Versace @versalinyaa" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_queue_family_foreign&quot;" name="VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME"/>
<enum name="VK_QUEUE_FAMILY_FOREIGN_EXT"/>
</require>
</extension>
- <extension name="VK_KHR_dedicated_allocation" number="128" type="device" author="KHR" requires="VK_KHR_get_memory_requirements2" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_dedicated_allocation" number="128" type="device" author="KHR" depends="VK_KHR_get_memory_requirements2" contact="James Jones @cubanismo" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="3" name="VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_dedicated_allocation&quot;" name="VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME"/>
@@ -13469,7 +18636,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkMemoryDedicatedAllocateInfoKHR"/>
</require>
</extension>
- <extension name="VK_EXT_debug_utils" number="129" type="instance" author="EXT" contact="Mark Young @marky-lunarg" specialuse="debugging" supported="vulkan">
+ <extension name="VK_EXT_debug_utils" number="129" type="instance" author="EXT" contact="Mark Young @marky-lunarg" specialuse="debugging" supported="vulkan,vulkansc">
<require>
<enum value="2" name="VK_EXT_DEBUG_UTILS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_debug_utils&quot;" name="VK_EXT_DEBUG_UTILS_EXTENSION_NAME"/>
@@ -13505,9 +18672,9 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkSubmitDebugUtilsMessageEXT"/>
</require>
</extension>
- <extension name="VK_ANDROID_external_memory_android_hardware_buffer" number="130" type="device" author="ANDROID" requires="VK_KHR_sampler_ycbcr_conversion,VK_KHR_external_memory,VK_EXT_queue_family_foreign,VK_KHR_dedicated_allocation" platform="android" contact="Jesse Hall @critsec" supported="vulkan">
+ <extension name="VK_ANDROID_external_memory_android_hardware_buffer" number="130" type="device" author="ANDROID" depends="VK_KHR_sampler_ycbcr_conversion+VK_KHR_external_memory+VK_EXT_queue_family_foreign+VK_KHR_dedicated_allocation" platform="android" contact="Jesse Hall @critsec" supported="vulkan">
<require>
- <enum value="3" name="VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION"/>
+ <enum value="5" name="VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION"/>
<enum value="&quot;VK_ANDROID_external_memory_android_hardware_buffer&quot;" name="VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME"/>
<enum bitpos="10" extends="VkExternalMemoryHandleTypeFlagBits" name="VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID"/>
@@ -13526,8 +18693,12 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetMemoryAndroidHardwareBufferANDROID"/>
<type name="AHardwareBuffer"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <type name="VkAndroidHardwareBufferFormatProperties2ANDROID"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_2_ANDROID"/>
+ </require>
</extension>
- <extension name="VK_EXT_sampler_filter_minmax" number="131" type="device" author="NV" requires="VK_KHR_get_physical_device_properties2" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_EXT_sampler_filter_minmax" number="131" type="device" author="NV" depends="VK_KHR_get_physical_device_properties2" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2">
<require>
<enum value="2" name="VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_sampler_filter_minmax&quot;" name="VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME"/>
@@ -13542,7 +18713,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT"/>
</require>
</extension>
- <extension name="VK_KHR_storage_buffer_storage_class" number="132" type="device" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_storage_buffer_storage_class" number="132" type="device" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_storage_buffer_storage_class&quot;" name="VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME"/>
@@ -13560,16 +18731,50 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_134&quot;" name="VK_AMD_EXTENSION_134_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_AMD_extension_135" number="135" author="AMD" contact="Mais Alnasser @malnasse" supported="disabled">
- <require>
- <enum value="0" name="VK_AMD_EXTENSION_135_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_135&quot;" name="VK_AMD_EXTENSION_135_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_AMD_extension_136" number="136" author="AMD" contact="Mais Alnasser @malnasse" supported="disabled">
- <require>
- <enum value="0" name="VK_AMD_EXTENSION_136_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_136&quot;" name="VK_AMD_EXTENSION_136_EXTENSION_NAME"/>
+ <extension name="VK_AMDX_shader_enqueue" number="135" author="AMD" depends="VK_KHR_get_physical_device_properties2+VK_KHR_synchronization2+VK_KHR_pipeline_library+VK_KHR_spirv_1_4" type="device" contact="Tobias Hector @tobski" provisional="true" platform="provisional" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_AMDX_SHADER_ENQUEUE_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMDX_shader_enqueue&quot;" name="VK_AMDX_SHADER_ENQUEUE_EXTENSION_NAME"/>
+ <enum name="VK_SHADER_INDEX_UNUSED_AMDX"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_FEATURES_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_PROPERTIES_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_SCRATCH_SIZE_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_CREATE_INFO_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NODE_CREATE_INFO_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum bitpos="25" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_EXECUTION_GRAPH_SCRATCH_BIT_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="0" extends="VkPipelineBindPoint" name="VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <type name="VkPhysicalDeviceShaderEnqueueFeaturesAMDX"/>
+ <type name="VkPhysicalDeviceShaderEnqueuePropertiesAMDX"/>
+ <type name="VkExecutionGraphPipelineScratchSizeAMDX"/>
+ <type name="VkExecutionGraphPipelineCreateInfoAMDX"/>
+ <type name="VkDispatchGraphInfoAMDX"/>
+ <type name="VkDispatchGraphCountInfoAMDX"/>
+ <type name="VkPipelineShaderStageNodeCreateInfoAMDX"/>
+ <type name="VkDeviceOrHostAddressConstAMDX"/>
+ <command name="vkCreateExecutionGraphPipelinesAMDX"/>
+ <command name="vkGetExecutionGraphPipelineScratchSizeAMDX"/>
+ <command name="vkGetExecutionGraphPipelineNodeIndexAMDX"/>
+ <command name="vkCmdInitializeGraphScratchMemoryAMDX"/>
+ <command name="vkCmdDispatchGraphAMDX"/>
+ <command name="vkCmdDispatchGraphIndirectAMDX"/>
+ <command name="vkCmdDispatchGraphIndirectCountAMDX"/>
+ </require>
+ <require depends="VK_KHR_maintenance5">
+ <enum bitpos="25" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_EXECUTION_GRAPH_SCRATCH_BIT_AMDX"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_136" number="136" type="device" author="KHR" contact="Tobias Hector @tobski" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_136_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_136&quot;" name="VK_KHR_EXTENSION_136_EXTENSION_NAME"/>
+ <enum bitpos="28" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_RESERVED_28_BIT_KHR"/>
+ <enum bitpos="29" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_RESERVED_29_BIT_KHR"/>
+ <enum bitpos="30" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_RESERVED_30_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_maintenance5">
+ <enum bitpos="28" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_RESERVED_28_BIT_KHR"/>
+ <enum bitpos="29" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_RESERVED_29_BIT_KHR"/>
+ <enum bitpos="30" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_RESERVED_30_BIT_KHR"/>
</require>
</extension>
<extension name="VK_AMD_mixed_attachment_samples" number="137" type="device" author="AMD" contact="Matthaeus G. Chajdas @anteru" supported="vulkan">
@@ -13584,15 +18789,15 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_shader_fragment_mask&quot;" name="VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_inline_uniform_block" number="139" type="device" author="EXT" requires="VK_KHR_get_physical_device_properties2,VK_KHR_maintenance1" contact="Daniel Rakos @aqnuep" supported="vulkan">
+ <extension name="VK_EXT_inline_uniform_block" number="139" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2+VK_KHR_maintenance1" contact="Daniel Rakos @aqnuep" supported="vulkan" promotedto="VK_VERSION_1_3">
<require>
- <enum value="1" name="VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_inline_uniform_block&quot;" name="VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME"/>
- <enum offset="0" extends="VkDescriptorType" name="VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT"/>
+ <enum value="1" name="VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_inline_uniform_block&quot;" name="VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME"/>
+ <enum extends="VkDescriptorType" name="VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT" alias="VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT" alias="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO"/>
<type name="VkPhysicalDeviceInlineUniformBlockFeaturesEXT"/>
<type name="VkPhysicalDeviceInlineUniformBlockPropertiesEXT"/>
<type name="VkWriteDescriptorSetInlineUniformBlockEXT"/>
@@ -13605,7 +18810,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_140&quot;" name="VK_AMD_EXTENSION_140_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_shader_stencil_export" number="141" type="device" author="EXT" contact="Dominik Witczak @dominikwitczakamd" supported="vulkan">
+ <extension name="VK_EXT_shader_stencil_export" number="141" type="device" author="EXT" contact="Dominik Witczak @dominikwitczakamd" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_shader_stencil_export&quot;" name="VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME"/>
@@ -13623,7 +18828,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_143&quot;" name="VK_AMD_EXTENSION_143_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_sample_locations" number="144" type="device" author="AMD" contact="Daniel Rakos @drakos-amd" supported="vulkan" requires="VK_KHR_get_physical_device_properties2">
+ <extension name="VK_EXT_sample_locations" number="144" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="AMD" contact="Daniel Rakos @drakos-amd" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_sample_locations&quot;" name="VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME"/>
@@ -13646,15 +18851,19 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceMultisamplePropertiesEXT"/>
</require>
</extension>
- <extension name="VK_KHR_relaxed_block_layout" number="145" type="device" author="KHR" contact="John Kessenich @johnkslang" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_relaxed_block_layout" number="145" type="device" author="KHR" contact="John Kessenich @johnkslang" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_relaxed_block_layout&quot;" name="VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="RESERVED_DO_NOT_USE_146" number="146" supported="disabled" comment="Used for functionality subsumed into Vulkan 1.1 and not published as an extension">
+ <extension name="VK_RESERVED_do_not_use_146" number="146" supported="disabled" comment="Used for functionality subsumed into Vulkan 1.1 and not published as an extension">
+ <require>
+ <enum value="1" name="VK_RESERVED_DO_NOT_USE_146_SPEC_VERSION"/>
+ <enum value="&quot;VK_RESERVED_do_not_use_146&quot;" name="VK_RESERVED_DO_NOT_USE_146_EXTENSION_NAME"/>
+ </require>
</extension>
- <extension name="VK_KHR_get_memory_requirements2" number="147" type="device" author="KHR" contact="Jason Ekstrand @jekstrand" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_get_memory_requirements2" number="147" type="device" author="KHR" contact="Faith Ekstrand @gfxstrand" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_get_memory_requirements2&quot;" name="VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME"/>
@@ -13673,7 +18882,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetImageSparseMemoryRequirements2KHR"/>
</require>
</extension>
- <extension name="VK_KHR_image_format_list" number="148" type="device" author="KHR" contact="Jason Ekstrand @jekstrand" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_image_format_list" number="148" type="device" author="KHR" contact="Faith Ekstrand @gfxstrand" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_image_format_list&quot;" name="VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME"/>
@@ -13681,7 +18890,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkImageFormatListCreateInfoKHR"/>
</require>
</extension>
- <extension name="VK_EXT_blend_operation_advanced" number="149" type="device" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_EXT_blend_operation_advanced" number="149" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan,vulkansc">
<require>
<enum value="2" name="VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_blend_operation_advanced&quot;" name="VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME"/>
@@ -13750,9 +18959,9 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPipelineCoverageToColorStateCreateInfoNV"/>
</require>
</extension>
- <extension name="VK_KHR_acceleration_structure" number="151" type="device" requiresCore="1.1" requires="VK_EXT_descriptor_indexing,VK_KHR_buffer_device_address,VK_KHR_deferred_host_operations" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" sortorder="1">
+ <extension name="VK_KHR_acceleration_structure" number="151" type="device" depends="VK_VERSION_1_1+VK_EXT_descriptor_indexing+VK_KHR_buffer_device_address+VK_KHR_deferred_host_operations" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" sortorder="1" ratified="vulkan">
<require>
- <enum value="12" name="VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION"/>
+ <enum value="13" name="VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_acceleration_structure&quot;" name="VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME"/>
<enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR"/>
@@ -13776,7 +18985,6 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR"/>
<enum offset="1" extends="VkQueryType" name="VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR"/>
<enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR"/>
- <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT"/>
<enum offset="0" extends="VkIndexType" extnumber="166" name="VK_INDEX_TYPE_NONE_KHR"/>
<enum bitpos="29" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR"/>
<enum bitpos="19" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR"/>
@@ -13835,8 +19043,14 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetDeviceAccelerationStructureCompatibilityKHR"/>
<command name="vkGetAccelerationStructureBuildSizesKHR"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="29" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR"/>
+ </require>
+ <require depends="VK_EXT_debug_report">
+ <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT"/>
+ </require>
</extension>
- <extension name="VK_KHR_ray_tracing_pipeline" number="348" type="device" requiresCore="1.1" requires="VK_KHR_spirv_1_4,VK_KHR_acceleration_structure" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" sortorder="1">
+ <extension name="VK_KHR_ray_tracing_pipeline" number="348" type="device" depends="VK_KHR_spirv_1_4+VK_KHR_acceleration_structure" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" sortorder="1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_ray_tracing_pipeline&quot;" name="VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME"/>
@@ -13881,7 +19095,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetRayTracingPipelineStackSizeKHR"/>
</require>
</extension>
- <extension name="VK_KHR_ray_query" number="349" type="device" requiresCore="1.1" requires="VK_KHR_spirv_1_4,VK_KHR_acceleration_structure" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" sortorder="1">
+ <extension name="VK_KHR_ray_query" number="349" type="device" depends="VK_KHR_spirv_1_4+VK_KHR_acceleration_structure" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" sortorder="1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_RAY_QUERY_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_ray_query&quot;" name="VK_KHR_RAY_QUERY_EXTENSION_NAME"/>
@@ -13912,7 +19126,7 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="0" extends="VkPolygonMode" name="VK_POLYGON_MODE_FILL_RECTANGLE_NV"/>
</require>
</extension>
- <extension name="VK_NV_shader_sm_builtins" number="155" type="device" requiresCore="1.1" author="NV" contact="Daniel Koch @dgkoch" supported="vulkan">
+ <extension name="VK_NV_shader_sm_builtins" number="155" type="device" depends="VK_VERSION_1_1" author="NV" contact="Daniel Koch @dgkoch" supported="vulkan">
<require>
<enum value="1" name="VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION"/>
<enum value="&quot;VK_NV_shader_sm_builtins&quot;" name="VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME"/>
@@ -13922,13 +19136,13 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceShaderSMBuiltinsFeaturesNV"/>
</require>
</extension>
- <extension name="VK_EXT_post_depth_coverage" number="156" type="device" author="NV" contact="Daniel Koch @dgkoch" supported="vulkan">
+ <extension name="VK_EXT_post_depth_coverage" number="156" type="device" author="NV" contact="Daniel Koch @dgkoch" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_post_depth_coverage&quot;" name="VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_sampler_ycbcr_conversion" number="157" type="device" requires="VK_KHR_maintenance1,VK_KHR_bind_memory2,VK_KHR_get_memory_requirements2,VK_KHR_get_physical_device_properties2" author="KHR" contact="Andrew Garrard @fluppeteer" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_sampler_ycbcr_conversion" number="157" type="device" depends="VK_KHR_maintenance1+VK_KHR_bind_memory2+VK_KHR_get_memory_requirements2+VK_KHR_get_physical_device_properties2" author="KHR" contact="Andrew Garrard @fluppeteer" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="14" name="VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_sampler_ycbcr_conversion&quot;" name="VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME"/>
@@ -13938,7 +19152,6 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR" alias="VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO"/>
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES"/>
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR" alias="VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES"/>
- <enum extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT"/>
<enum extends="VkObjectType" name="VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR" alias="VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION"/>
<enum extends="VkFormat" name="VK_FORMAT_G8B8G8R8_422_UNORM_KHR" alias="VK_FORMAT_G8B8G8R8_422_UNORM"/>
<enum extends="VkFormat" name="VK_FORMAT_B8G8R8G8_422_UNORM_KHR" alias="VK_FORMAT_B8G8R8G8_422_UNORM"/>
@@ -14007,11 +19220,12 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkChromaLocation" name="VK_CHROMA_LOCATION_COSITED_EVEN_KHR" alias="VK_CHROMA_LOCATION_COSITED_EVEN"/>
<enum extends="VkChromaLocation" name="VK_CHROMA_LOCATION_MIDPOINT_KHR" alias="VK_CHROMA_LOCATION_MIDPOINT"/>
</require>
- <require extension="VK_EXT_debug_report">
+ <require depends="VK_EXT_debug_report">
<enum extends="VkDebugReportObjectTypeEXT" offset="0" name="VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT"/>
+ <enum extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT" alias="VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT"/>
</require>
</extension>
- <extension name="VK_KHR_bind_memory2" number="158" type="device" author="KHR" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_bind_memory2" number="158" type="device" author="KHR" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_BIND_MEMORY_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_bind_memory2&quot;" name="VK_KHR_BIND_MEMORY_2_EXTENSION_NAME"/>
@@ -14024,35 +19238,34 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkBindImageMemoryInfoKHR"/>
</require>
</extension>
- <extension name="VK_EXT_image_drm_format_modifier" number="159" type="device" requires="VK_KHR_bind_memory2,VK_KHR_get_physical_device_properties2,VK_KHR_image_format_list,VK_KHR_sampler_ycbcr_conversion" author="EXT" contact="Chad Versace @chadversary" supported="vulkan">
+ <extension name="VK_EXT_image_drm_format_modifier" number="159" type="device" depends="((VK_KHR_bind_memory2+VK_KHR_get_physical_device_properties2+VK_KHR_sampler_ycbcr_conversion),VK_VERSION_1_1)+(VK_KHR_image_format_list,VK_VERSION_1_2)" author="EXT" contact="Lina Versace @versalinyaa" supported="vulkan,vulkansc">
<require>
- <enum value="1" name="VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION"/>
+ <enum value="2" name="VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_image_drm_format_modifier&quot;" name="VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME"/>
-
- <enum offset="0" dir="-" extends="VkResult" name="VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT"/>
-
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT"/>
-
- <enum offset="0" extends="VkImageTiling" name="VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT"/>
-
- <enum bitpos="7" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT"/>
- <enum bitpos="8" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT"/>
- <enum bitpos="9" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT"/>
- <enum bitpos="10" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT"/>
-
+ <enum offset="0" dir="-" extends="VkResult" name="VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT"/>
+ <enum offset="0" extends="VkImageTiling" name="VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT"/>
+ <enum bitpos="7" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT"/>
+ <enum bitpos="8" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT"/>
+ <enum bitpos="9" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT"/>
+ <enum bitpos="10" extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT"/>
<type name="VkDrmFormatModifierPropertiesListEXT"/>
<type name="VkDrmFormatModifierPropertiesEXT"/>
<type name="VkPhysicalDeviceImageDrmFormatModifierInfoEXT"/>
<type name="VkImageDrmFormatModifierListCreateInfoEXT"/>
<type name="VkImageDrmFormatModifierExplicitCreateInfoEXT"/>
<type name="VkImageDrmFormatModifierPropertiesEXT"/>
-
<command name="vkGetImageDrmFormatModifierPropertiesEXT"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <type name="VkDrmFormatModifierPropertiesList2EXT"/>
+ <type name="VkDrmFormatModifierProperties2EXT"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT"/>
+ </require>
</extension>
<extension name="VK_EXT_extension_160" number="160" author="EXT" contact="Mark Young @marky-lunarg" supported="disabled">
<require>
@@ -14078,7 +19291,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetValidationCacheDataEXT"/>
</require>
</extension>
- <extension name="VK_EXT_descriptor_indexing" number="162" type="device" requires="VK_KHR_get_physical_device_properties2,VK_KHR_maintenance3" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_EXT_descriptor_indexing" number="162" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_maintenance3" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2">
<require>
<enum value="2" name="VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_descriptor_indexing&quot;" name="VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME"/>
@@ -14109,7 +19322,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_EXT_shader_viewport_index_layer&quot;" name="VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_portability_subset" number="164" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Bill Hollings @billhollings" platform="provisional" supported="vulkan" provisional="true">
+ <extension name="VK_KHR_portability_subset" number="164" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Bill Hollings @billhollings" platform="provisional" supported="vulkan" provisional="true" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_PORTABILITY_SUBSET_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_portability_subset&quot;" name="VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME"/>
@@ -14119,7 +19332,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDevicePortabilitySubsetPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_NV_shading_rate_image" number="165" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
+ <extension name="VK_NV_shading_rate_image" number="165" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
<require>
<enum value="3" name="VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION"/>
<enum value="&quot;VK_NV_shading_rate_image&quot;" name="VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME"/>
@@ -14147,7 +19360,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetCoarseSampleOrderNV"/>
</require>
</extension>
- <extension name="VK_NV_ray_tracing" number="166" type="device" requires="VK_KHR_get_physical_device_properties2,VK_KHR_get_memory_requirements2" author="NV" contact="Eric Werness @ewerness" supported="vulkan">
+ <extension name="VK_NV_ray_tracing" number="166" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_get_memory_requirements2" author="NV" contact="Eric Werness @ewerness-nv" supported="vulkan">
<require>
<enum value="3" name="VK_NV_RAY_TRACING_SPEC_VERSION"/>
<enum value="&quot;VK_NV_ray_tracing&quot;" name="VK_NV_RAY_TRACING_EXTENSION_NAME"/>
@@ -14179,7 +19392,6 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV"/>
<enum bitpos="5" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV"/>
<enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV"/>
- <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT"/>
<enum extends="VkIndexType" name="VK_INDEX_TYPE_NONE_NV" alias="VK_INDEX_TYPE_NONE_KHR"/>
<type name="VkRayTracingShaderGroupCreateInfoNV"/>
<type name="VkRayTracingShaderGroupTypeNV"/>
@@ -14224,7 +19436,6 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkWriteDescriptorSetAccelerationStructureNV"/>
<type name="VkAccelerationStructureMemoryRequirementsInfoNV"/>
<type name="VkPhysicalDeviceRayTracingPropertiesNV"/>
- <type name="VkMemoryRequirements2KHR"/>
<type name="VkAccelerationStructureMemoryRequirementsTypeNV"/>
<type name="VkTransformMatrixNV"/>
<type name="VkAabbPositionsNV"/>
@@ -14242,8 +19453,14 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdWriteAccelerationStructuresPropertiesNV"/>
<command name="vkCompileDeferredNV"/>
</require>
+ <require depends="VK_KHR_get_memory_requirements2">
+ <type name="VkMemoryRequirements2KHR"/>
+ </require>
+ <require depends="VK_EXT_debug_report">
+ <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT"/>
+ </require>
</extension>
- <extension name="VK_NV_representative_fragment_test" number="167" type="device" author="NV" contact="Kedarnath Thangudu @kthangudu" supported="vulkan">
+ <extension name="VK_NV_representative_fragment_test" number="167" type="device" author="NV" contact="Kedarnath Thangudu @kthangudu" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
<enum value="2" name="VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION"/>
<enum value="&quot;VK_NV_representative_fragment_test&quot;" name="VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME"/>
@@ -14255,14 +19472,16 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_NV_extension_168" number="168" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_168_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_168&quot;" name="VK_EXT_EXTENSION_168_EXTENSION_NAME"/>
+ <enum value="0" name="VK_NV_EXTENSION_168_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_168&quot;" name="VK_NV_EXTENSION_168_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_maintenance3" number="169" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1">
+ <extension name="VK_KHR_maintenance3" number="169" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_1" ratified="vulkan">
<require>
- <enum value="1" name="VK_KHR_MAINTENANCE3_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_maintenance3&quot;" name="VK_KHR_MAINTENANCE3_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_MAINTENANCE_3_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_maintenance3&quot;" name="VK_KHR_MAINTENANCE_3_EXTENSION_NAME"/>
+ <enum alias="VK_KHR_MAINTENANCE_3_SPEC_VERSION" name="VK_KHR_MAINTENANCE3_SPEC_VERSION" deprecated="aliased"/>
+ <enum alias="VK_KHR_MAINTENANCE_3_EXTENSION_NAME" name="VK_KHR_MAINTENANCE3_EXTENSION_NAME" deprecated="aliased"/>
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES"/>
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR" alias="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT"/>
<type name="VkPhysicalDeviceMaintenance3PropertiesKHR"/>
@@ -14270,7 +19489,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetDescriptorSetLayoutSupportKHR"/>
</require>
</extension>
- <extension name="VK_KHR_draw_indirect_count" number="170" type="device" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_draw_indirect_count" number="170" type="device" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_draw_indirect_count&quot;" name="VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME"/>
@@ -14278,19 +19497,19 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdDrawIndexedIndirectCountKHR"/>
</require>
</extension>
- <extension name="VK_EXT_filter_cubic" number="171" type="device" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="vulkan">
+ <extension name="VK_EXT_filter_cubic" number="171" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan,vulkansc">
<require>
<enum value="3" name="VK_EXT_FILTER_CUBIC_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_filter_cubic&quot;" name="VK_EXT_FILTER_CUBIC_EXTENSION_NAME"/>
- <enum extends="VkFilter" name="VK_FILTER_CUBIC_EXT" alias="VK_FILTER_CUBIC_IMG"/>
- <enum extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT" alias="VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG"/>
+ <enum offset="0" extends="VkFilter" extnumber="16" name="VK_FILTER_CUBIC_EXT"/>
+ <enum bitpos="13" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT"/>
<type name="VkPhysicalDeviceImageViewImageFormatInfoEXT"/>
<type name="VkFilterCubicImageViewImageFormatPropertiesEXT"/>
</require>
</extension>
- <extension name="VK_QCOM_render_pass_shader_resolve" number="172" type="device" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="vulkan">
+ <extension name="VK_QCOM_render_pass_shader_resolve" number="172" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
<require>
<enum value="4" name="VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION"/>
<enum value="&quot;VK_QCOM_render_pass_shader_resolve&quot;" name="VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME"/>
@@ -14298,32 +19517,29 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="3" extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_173" number="173" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_173" number="173" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_173_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_173&quot;" name="VK_QCOM_extension_173_EXTENSION_NAME"/>
- <enum bitpos="18" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_RESERVED_18_BIT_QCOM"/>
- <enum bitpos="16" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_16_BIT_QCOM"/>
- <enum bitpos="17" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_17_BIT_QCOM"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_173_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_173&quot;" name="VK_QCOM_EXTENSION_173_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_174" number="174" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_174" number="174" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_174_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_174&quot;" name="VK_QCOM_extension_174_EXTENSION_NAME"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_174_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_174&quot;" name="VK_QCOM_EXTENSION_174_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_global_priority" number="175" type="device" author="EXT" contact="Andres Rodriguez @lostgoat" supported="vulkan">
+ <extension name="VK_EXT_global_priority" number="175" type="device" author="EXT" contact="Andres Rodriguez @lostgoat" supported="vulkan,vulkansc" promotedto="VK_KHR_global_priority">
<require>
<enum value="2" name="VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_global_priority&quot;" name="VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT"/>
- <enum offset="1" dir="-" extends="VkResult" name="VK_ERROR_NOT_PERMITTED_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR"/>
+ <enum extends="VkResult" name="VK_ERROR_NOT_PERMITTED_EXT" alias="VK_ERROR_NOT_PERMITTED_KHR"/>
<type name="VkDeviceQueueGlobalPriorityCreateInfoEXT"/>
<type name="VkQueueGlobalPriorityEXT"/>
</require>
</extension>
- <extension name="VK_KHR_shader_subgroup_extended_types" number="176" type="device" requiresCore="1.1" author="KHR" contact="Neil Henning @sheredom" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_shader_subgroup_extended_types" number="176" type="device" depends="VK_VERSION_1_1" author="KHR" contact="Neil Henning @sheredom" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_subgroup_extended_types&quot;" name="VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME"/>
@@ -14333,11 +19549,11 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_EXT_extension_177" number="177" author="EXT" contact="Neil Henning @sheredom" supported="disabled">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_177_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_177&quot;" name="VK_KHR_EXTENSION_177_EXTENSION_NAME"/>
+ <enum value="0" name="VK_EXT_EXTENSION_177_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_177&quot;" name="VK_EXT_EXTENSION_177_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_8bit_storage" number="178" type="device" requires="VK_KHR_get_physical_device_properties2,VK_KHR_storage_buffer_storage_class" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_8bit_storage" number="178" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_storage_buffer_storage_class" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_8BIT_STORAGE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_8bit_storage&quot;" name="VK_KHR_8BIT_STORAGE_EXTENSION_NAME"/>
@@ -14345,7 +19561,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDevice8BitStorageFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_EXT_external_memory_host" number="179" type="device" author="EXT" requires="VK_KHR_external_memory" contact="Daniel Rakos @drakos-amd" supported="vulkan">
+ <extension name="VK_EXT_external_memory_host" number="179" type="device" author="EXT" depends="VK_KHR_external_memory,VK_VERSION_1_1" contact="Daniel Rakos @drakos-amd" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_external_memory_host&quot;" name="VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME"/>
@@ -14367,7 +19583,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdWriteBufferMarkerAMD"/>
</require>
</extension>
- <extension name="VK_KHR_shader_atomic_int64" number="181" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Aaron Hagan @ahagan" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_shader_atomic_int64" number="181" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Aaron Hagan @ahagan" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_atomic_int64&quot;" name="VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME"/>
@@ -14375,7 +19591,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceShaderAtomicInt64FeaturesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_shader_clock" number="182" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Aaron Hagan @ahagan" supported="vulkan">
+ <extension name="VK_KHR_shader_clock" number="182" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Aaron Hagan @ahagan" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_SHADER_CLOCK_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_clock&quot;" name="VK_KHR_SHADER_CLOCK_EXTENSION_NAME"/>
@@ -14385,8 +19601,8 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_AMD_extension_183" number="183" author="AMD" contact="Daniel Rakos @drakos-amd" supported="disabled">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_183_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_183&quot;" name="VK_KHR_EXTENSION_183_EXTENSION_NAME"/>
+ <enum value="0" name="VK_AMD_EXTENSION_183_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_183&quot;" name="VK_AMD_EXTENSION_183_EXTENSION_NAME"/>
</require>
</extension>
<extension name="VK_AMD_pipeline_compiler_control" number="184" type="device" author="AMD" contact="Matthaeus G. Chajdas @anteru" supported="vulkan">
@@ -14399,18 +19615,22 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPipelineCompilerControlCreateInfoAMD"/>
</require>
</extension>
- <extension name="VK_EXT_calibrated_timestamps" number="185" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Daniel Rakos @drakos-amd" supported="vulkan">
+ <extension name="VK_EXT_calibrated_timestamps" number="185" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Daniel Rakos @drakos-amd" promotedto="VK_KHR_calibrated_timestamps" supported="vulkan,vulkansc">
<require>
<enum value="2" name="VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_calibrated_timestamps&quot;" name="VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT" alias="VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR"/>
+ <enum extends="VkTimeDomainKHR" name="VK_TIME_DOMAIN_DEVICE_EXT" alias="VK_TIME_DOMAIN_DEVICE_KHR"/>
+ <enum extends="VkTimeDomainKHR" name="VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT" alias="VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR"/>
+ <enum extends="VkTimeDomainKHR" name="VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT" alias="VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_KHR"/>
+ <enum extends="VkTimeDomainKHR" name="VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT" alias="VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_KHR"/>
<type name="VkTimeDomainEXT"/>
<type name="VkCalibratedTimestampInfoEXT"/>
<command name="vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"/>
<command name="vkGetCalibratedTimestampsEXT"/>
</require>
</extension>
- <extension name="VK_AMD_shader_core_properties" number="186" type="device" author="AMD" requires="VK_KHR_get_physical_device_properties2" contact="Martin Dinkov @mdinkov" supported="vulkan">
+ <extension name="VK_AMD_shader_core_properties" number="186" type="device" author="AMD" depends="VK_KHR_get_physical_device_properties2" contact="Martin Dinkov @mdinkov" supported="vulkan">
<require>
<enum value="2" name="VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION"/>
<enum value="&quot;VK_AMD_shader_core_properties&quot;" name="VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME"/>
@@ -14420,38 +19640,44 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_AMD_extension_187" number="187" author="AMD" contact="Daniel Rakos @drakos-amd" supported="disabled">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_187_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_187&quot;" name="VK_KHR_EXTENSION_187_EXTENSION_NAME"/>
+ <enum value="0" name="VK_AMD_EXTENSION_187_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_187&quot;" name="VK_AMD_EXTENSION_187_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_video_decode_h265" number="188" type="device" requires="VK_KHR_video_decode_queue" author="KHR" contact="peter.fang@amd.com" provisional="true" platform="provisional" supported="vulkan">
+ <extension name="VK_KHR_video_decode_h265" number="188" type="device" depends="VK_KHR_video_decode_queue" author="KHR" contact="peter.fang@amd.com" supported="vulkan" ratified="vulkan">
<require>
- <enum value="1" name="VK_EXT_VIDEO_DECODE_H265_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_video_decode_h265&quot;" name="VK_EXT_VIDEO_DECODE_H265_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_CREATE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="1" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_EXT" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum value="8" name="VK_KHR_VIDEO_DECODE_H265_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_video_decode_h265&quot;" name="VK_KHR_VIDEO_DECODE_H265_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR"/>
+ <enum bitpos="1" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR"/>
 
- <type name="VkVideoDecodeH265CreateFlagsEXT"/>
- <type name="VkVideoDecodeH265ProfileEXT"/>
- <type name="VkVideoDecodeH265CapabilitiesEXT"/>
- <type name="VkVideoDecodeH265SessionCreateInfoEXT"/>
+ <type name="VkVideoDecodeH265ProfileInfoKHR"/>
+ <type name="VkVideoDecodeH265CapabilitiesKHR"/>
 
- <type name="VkVideoDecodeH265SessionParametersCreateInfoEXT"/>
- <type name="VkVideoDecodeH265SessionParametersAddInfoEXT"/>
- <type name="VkVideoDecodeH265PictureInfoEXT"/>
- <type name="VkVideoDecodeH265DpbSlotInfoEXT"/>
+ <type name="VkVideoDecodeH265SessionParametersCreateInfoKHR"/>
+ <type name="VkVideoDecodeH265SessionParametersAddInfoKHR"/>
+ <type name="VkVideoDecodeH265PictureInfoKHR"/>
+ <type name="VkVideoDecodeH265DpbSlotInfoKHR"/>
</require>
</extension>
- <extension name="VK_AMD_extension_189" number="189" author="AMD" contact="Daniel Rakos @drakos-amd" supported="disabled">
+ <extension name="VK_KHR_global_priority" number="189" type="device" author="KHR" contact="Tobias Hector @tobski" depends="VK_KHR_get_physical_device_properties2" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_189_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_189&quot;" name="VK_KHR_EXTENSION_189_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_GLOBAL_PRIORITY_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_global_priority&quot;" name="VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="175" name="VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR"/>
+ <enum offset="0" extends="VkStructureType" extnumber="389" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" extnumber="389" name="VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR"/>
+ <enum extends="VkResult" extnumber="175" offset="1" dir="-" name="VK_ERROR_NOT_PERMITTED_KHR"/>
+ <enum name="VK_MAX_GLOBAL_PRIORITY_SIZE_KHR"/>
+ <type name="VkDeviceQueueGlobalPriorityCreateInfoKHR"/>
+ <type name="VkQueueGlobalPriorityKHR"/>
+ <type name="VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR"/>
+ <type name="VkQueueFamilyGlobalPriorityPropertiesKHR"/>
</require>
</extension>
<extension name="VK_AMD_memory_overallocation_behavior" number="190" type="device" author="AMD" contact="Martin Dinkov @mdinkov" supported="vulkan">
@@ -14463,20 +19689,20 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkDeviceMemoryOverallocationCreateInfoAMD"/>
</require>
</extension>
- <extension name="VK_EXT_vertex_attribute_divisor" number="191" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Vikram Kushwaha @vkushwaha" supported="vulkan">
+ <extension name="VK_EXT_vertex_attribute_divisor" number="191" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="NV" contact="Vikram Kushwaha @vkushwaha" supported="vulkan,vulkansc" promotedto="VK_KHR_vertex_attribute_divisor">
<require>
- <enum value="3" name="VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_vertex_attribute_divisor&quot;" name="VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT"/>
+ <enum value="3" name="VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_vertex_attribute_divisor&quot;" name="VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR"/>
<type name="VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT"/>
<type name="VkVertexInputBindingDivisorDescriptionEXT"/>
<type name="VkPipelineVertexInputDivisorStateCreateInfoEXT"/>
<type name="VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_GGP_frame_token" number="192" type="device" requires="VK_KHR_swapchain,VK_GGP_stream_descriptor_surface" platform="ggp" author="GGP" contact="Jean-Francois Roy @jfroy" supported="vulkan">
+ <extension name="VK_GGP_frame_token" number="192" type="device" depends="VK_KHR_swapchain+VK_GGP_stream_descriptor_surface" platform="ggp" author="GGP" contact="Jean-Francois Roy @jfroy" supported="vulkan">
<require>
<enum value="1" name="VK_GGP_FRAME_TOKEN_SPEC_VERSION"/>
<enum value="&quot;VK_GGP_frame_token&quot;" name="VK_GGP_FRAME_TOKEN_EXTENSION_NAME"/>
@@ -14484,11 +19710,11 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPresentFrameTokenGGP"/>
</require>
</extension>
- <extension name="VK_EXT_pipeline_creation_feedback" number="193" type="device" author="GOOGLE" contact="Jean-Francois Roy @jfroy" specialuse="devtools" supported="vulkan">
+ <extension name="VK_EXT_pipeline_creation_feedback" number="193" type="device" author="GOOGLE" contact="Jean-Francois Roy @jfroy" specialuse="devtools" supported="vulkan" promotedto="VK_VERSION_1_3">
<require>
- <enum value="1" name="VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION"/>
+ <enum value="1" name="VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_pipeline_creation_feedback&quot;" name="VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO"/>
<type name="VkPipelineCreationFeedbackFlagBitsEXT"/>
<type name="VkPipelineCreationFeedbackFlagsEXT"/>
<type name="VkPipelineCreationFeedbackCreateInfoEXT"/>
@@ -14511,11 +19737,9 @@ typedef void <name>CAMetalLayer</name>;
<require>
<enum value="0" name="VK_GOOGLE_EXTENSION_196_SPEC_VERSION"/>
<enum value="&quot;VK_GOOGLE_extension_196&quot;" name="VK_GOOGLE_EXTENSION_196_EXTENSION_NAME"/>
- <enum bitpos="1" extends="VkPipelineCacheCreateFlagBits"
- name="VK_PIPELINE_CACHE_CREATE_RESERVED_1_BIT_EXT"/>
</require>
</extension>
- <extension name="VK_KHR_driver_properties" number="197" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Daniel Rakos @drakos-amd" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_driver_properties" number="197" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Daniel Rakos @drakos-amd" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_driver_properties&quot;" name="VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME"/>
@@ -14539,7 +19763,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceDriverPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_shader_float_controls" number="198" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_shader_float_controls" number="198" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alexander Galazin @alegal-arm" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="4" name="VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_float_controls&quot;" name="VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME"/>
@@ -14551,14 +19775,14 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkShaderFloatControlsIndependence" name="VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR" alias="VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE"/>
</require>
</extension>
- <extension name="VK_NV_shader_subgroup_partitioned" number="199" type="device" requiresCore="1.1" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_NV_shader_subgroup_partitioned" number="199" type="device" depends="VK_VERSION_1_1" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
<require>
<enum value="1" name="VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION"/>
<enum value="&quot;VK_NV_shader_subgroup_partitioned&quot;" name="VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME"/>
<enum bitpos="8" extends="VkSubgroupFeatureFlagBits" name="VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV"/>
</require>
</extension>
- <extension name="VK_KHR_depth_stencil_resolve" number="200" type="device" requires="VK_KHR_create_renderpass2" author="KHR" contact="Jan-Harald Fredriksen @janharald" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_depth_stencil_resolve" number="200" type="device" depends="VK_KHR_create_renderpass2" author="KHR" contact="Jan-Harald Fredriksen @janharald" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_depth_stencil_resolve&quot;" name="VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME"/>
@@ -14575,14 +19799,14 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkResolveModeFlagBits" name="VK_RESOLVE_MODE_MAX_BIT_KHR" alias="VK_RESOLVE_MODE_MAX_BIT"/>
</require>
</extension>
- <extension name="VK_KHR_swapchain_mutable_format" number="201" type="device" author="KHR" requires="VK_KHR_swapchain,VK_KHR_maintenance2,VK_KHR_image_format_list" contact="Daniel Rakos @drakos-arm" supported="vulkan">
+ <extension name="VK_KHR_swapchain_mutable_format" number="201" type="device" author="KHR" depends="VK_KHR_swapchain+(VK_KHR_maintenance2,VK_VERSION_1_1)+(VK_KHR_image_format_list,VK_VERSION_1_2)" contact="Daniel Rakos @drakos-amd" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_swapchain_mutable_format&quot;" name="VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME"/>
<enum bitpos="2" extends="VkSwapchainCreateFlagBitsKHR" name="VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR"/>
</require>
</extension>
- <extension name="VK_NV_compute_shader_derivatives" number="202" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
+ <extension name="VK_NV_compute_shader_derivatives" number="202" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
<require>
<enum value="1" name="VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION"/>
<enum value="&quot;VK_NV_compute_shader_derivatives&quot;" name="VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME"/>
@@ -14590,16 +19814,16 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceComputeShaderDerivativesFeaturesNV"/>
</require>
</extension>
- <extension name="VK_NV_mesh_shader" number="203" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Christoph Kubisch @pixeljetstream" supported="vulkan">
+ <extension name="VK_NV_mesh_shader" number="203" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Christoph Kubisch @pixeljetstream" supported="vulkan">
<require>
<enum value="1" name="VK_NV_MESH_SHADER_SPEC_VERSION"/>
<enum value="&quot;VK_NV_mesh_shader&quot;" name="VK_NV_MESH_SHADER_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV"/>
- <enum bitpos="6" extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_TASK_BIT_NV"/>
- <enum bitpos="7" extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_MESH_BIT_NV"/>
- <enum bitpos="19" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV"/>
- <enum bitpos="20" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV"/>
+ <enum extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_TASK_BIT_NV" alias="VK_SHADER_STAGE_TASK_BIT_EXT"/>
+ <enum extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_MESH_BIT_NV" alias="VK_SHADER_STAGE_MESH_BIT_EXT"/>
+ <enum extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV" alias="VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT"/>
+ <enum extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV" alias="VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT"/>
<command name="vkCmdDrawMeshTasksNV"/>
<command name="vkCmdDrawMeshTasksIndirectNV"/>
<command name="vkCmdDrawMeshTasksIndirectCountNV"/>
@@ -14608,15 +19832,15 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkDrawMeshTasksIndirectCommandNV"/>
</require>
</extension>
- <extension name="VK_NV_fragment_shader_barycentric" number="204" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
+ <extension name="VK_NV_fragment_shader_barycentric" number="204" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan" promotedto="VK_KHR_fragment_shader_barycentric">
<require>
<enum value="1" name="VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION"/>
<enum value="&quot;VK_NV_fragment_shader_barycentric&quot;" name="VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR"/>
<type name="VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV"/>
</require>
</extension>
- <extension name="VK_NV_shader_image_footprint" number="205" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
+ <extension name="VK_NV_shader_image_footprint" number="205" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
<require>
<enum value="2" name="VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION"/>
<enum value="&quot;VK_NV_shader_image_footprint&quot;" name="VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME"/>
@@ -14624,19 +19848,21 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceShaderImageFootprintFeaturesNV"/>
</require>
</extension>
- <extension name="VK_NV_scissor_exclusive" number="206" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
+ <extension name="VK_NV_scissor_exclusive" number="206" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
<require>
- <enum value="1" name="VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION"/>
+ <enum value="2" name="VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION"/>
<enum value="&quot;VK_NV_scissor_exclusive&quot;" name="VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV"/>
- <enum offset="1" extends="VkDynamicState" name="VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV"/>
<enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV"/>
+ <enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_ENABLE_NV"/>
+ <enum offset="1" extends="VkDynamicState" name="VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV"/>
<type name="VkPipelineViewportExclusiveScissorStateCreateInfoNV"/>
<type name="VkPhysicalDeviceExclusiveScissorFeaturesNV"/>
+ <command name="vkCmdSetExclusiveScissorEnableNV"/>
<command name="vkCmdSetExclusiveScissorNV"/>
</require>
</extension>
- <extension name="VK_NV_device_diagnostic_checkpoints" type="device" number="207" requires="VK_KHR_get_physical_device_properties2" author="NVIDIA" contact="Nuno Subtil @nsubtil" supported="vulkan">
+ <extension name="VK_NV_device_diagnostic_checkpoints" type="device" number="207" depends="VK_KHR_get_physical_device_properties2" author="NVIDIA" contact="Nuno Subtil @nsubtil" supported="vulkan">
<require>
<enum value="2" name="VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION"/>
<enum value="&quot;VK_NV_device_diagnostic_checkpoints&quot;" name="VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME"/>
@@ -14648,7 +19874,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetQueueCheckpointDataNV"/>
</require>
</extension>
- <extension name="VK_KHR_timeline_semaphore" number="208" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Jason Ekstrand @jekstrand" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_timeline_semaphore" number="208" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Faith Ekstrand @gfxstrand" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="2" name="VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_timeline_semaphore&quot;" name="VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME"/>
@@ -14681,7 +19907,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_KHR_extension_209&quot;" name="VK_KHR_EXTENSION_209_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_INTEL_shader_integer_functions2" number="210" type="device" requires="VK_KHR_get_physical_device_properties2" author="INTEL" contact="Ian Romanick @ianromanick" supported="vulkan">
+ <extension name="VK_INTEL_shader_integer_functions2" number="210" type="device" depends="VK_KHR_get_physical_device_properties2" author="INTEL" contact="Ian Romanick @ianromanick" supported="vulkan">
<require>
<enum value="1" name="VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION"/>
<enum value="&quot;VK_INTEL_shader_integer_functions2&quot;" name="VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME"/>
@@ -14694,7 +19920,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="2" name="VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION"/>
<enum value="&quot;VK_INTEL_performance_query&quot;" name="VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL"/>
- <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL" alias="VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL" comment="Backwards-compatible alias"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL" alias="VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL" deprecated="aliased"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL"/>
<enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL"/>
<enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL"/>
@@ -14728,7 +19954,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPerformanceParameterINTEL"/>
</require>
</extension>
- <extension name="VK_KHR_vulkan_memory_model" number="212" type="device" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_vulkan_memory_model" number="212" type="device" author="KHR" contact="Jeff Bolz @jeffbolznv" depends="VK_KHR_get_physical_device_properties2" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="3" name="VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_vulkan_memory_model&quot;" name="VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME"/>
@@ -14736,7 +19962,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceVulkanMemoryModelFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_EXT_pci_bus_info" number="213" type="device" author="EXT" requires="VK_KHR_get_physical_device_properties2" contact="Matthaeus G. Chajdas @anteru" supported="vulkan">
+ <extension name="VK_EXT_pci_bus_info" number="213" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Matthaeus G. Chajdas @anteru" supported="vulkan,vulkansc">
<require>
<enum value="2" name="VK_EXT_PCI_BUS_INFO_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_pci_bus_info&quot;" name="VK_EXT_PCI_BUS_INFO_EXTENSION_NAME"/>
@@ -14744,7 +19970,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDevicePCIBusInfoPropertiesEXT"/>
</require>
</extension>
- <extension name="VK_AMD_display_native_hdr" number="214" type="device" author="AMD" requires="VK_KHR_get_physical_device_properties2,VK_KHR_get_surface_capabilities2,VK_KHR_swapchain" contact="Matthaeus G. Chajdas @anteru" supported="vulkan">
+ <extension name="VK_AMD_display_native_hdr" number="214" type="device" author="AMD" depends="VK_KHR_get_physical_device_properties2+VK_KHR_get_surface_capabilities2+VK_KHR_swapchain" contact="Matthaeus G. Chajdas @anteru" supported="vulkan">
<require>
<enum value="1" name="VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION"/>
<enum value="&quot;VK_AMD_display_native_hdr&quot;" name="VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME"/>
@@ -14756,7 +19982,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkSetLocalDimmingAMD"/>
</require>
</extension>
- <extension name="VK_FUCHSIA_imagepipe_surface" number="215" type="instance" author="FUCHSIA" requires="VK_KHR_surface" platform="fuchsia" contact="Craig Stout @cdotstout" supported="vulkan">
+ <extension name="VK_FUCHSIA_imagepipe_surface" number="215" type="instance" author="FUCHSIA" depends="VK_KHR_surface" platform="fuchsia" contact="Craig Stout @cdotstout" supported="vulkan">
<require>
<enum value="1" name="VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_FUCHSIA_imagepipe_surface&quot;" name="VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME"/>
@@ -14766,21 +19992,21 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateImagePipeSurfaceFUCHSIA"/>
</require>
</extension>
- <extension name="VK_KHR_shader_terminate_invocation" number="216" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Jesse Hall @critsec" supported="vulkan">
+ <extension name="VK_KHR_shader_terminate_invocation" number="216" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Jesse Hall @critsec" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_terminate_invocation&quot;" name="VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES"/>
<type name="VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR"/>
</require>
</extension>
<extension name="VK_GOOGLE_extension_217" number="217" author="GOOGLE" contact="Jesse Hall @critsec" supported="disabled">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_217_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_217&quot;" name="VK_KHR_EXTENSION_217_EXTENSION_NAME"/>
+ <enum value="0" name="VK_GOOGLE_EXTENSION_217_SPEC_VERSION"/>
+ <enum value="&quot;VK_GOOGLE_extension_217&quot;" name="VK_GOOGLE_EXTENSION_217_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_metal_surface" number="218" type="instance" requires="VK_KHR_surface" platform="metal" supported="vulkan" author="EXT" contact="Dzmitry Malyshau @kvark">
+ <extension name="VK_EXT_metal_surface" number="218" type="instance" depends="VK_KHR_surface" platform="metal" supported="vulkan" author="EXT" contact="Dzmitry Malyshau @kvark">
<require>
<enum value="1" name="VK_EXT_METAL_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_metal_surface&quot;" name="VK_EXT_METAL_SURFACE_EXTENSION_NAME"/>
@@ -14791,9 +20017,9 @@ typedef void <name>CAMetalLayer</name>;
<type name="CAMetalLayer"/>
</require>
</extension>
- <extension name="VK_EXT_fragment_density_map" number="219" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <extension name="VK_EXT_fragment_density_map" number="219" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Matthew Netsch @mnetsch" supported="vulkan">
<require>
- <enum value="1" name="VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION"/>
+ <enum value="2" name="VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_fragment_density_map&quot;" name="VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT"/>
@@ -14811,6 +20037,9 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceFragmentDensityMapPropertiesEXT"/>
<type name="VkRenderPassFragmentDensityMapCreateInfoEXT"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="24" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_FRAGMENT_DENSITY_MAP_BIT_EXT"/>
+ </require>
</extension>
<extension name="VK_EXT_extension_220" number="220" author="EXT" contact="Dzmitry Malyshau @kvark" supported="disabled">
<require>
@@ -14825,7 +20054,7 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="0" extends="VkRenderPassCreateFlagBits" name="VK_RENDER_PASS_CREATE_RESERVED_0_BIT_KHR"/>
</require>
</extension>
- <extension name="VK_EXT_scalar_block_layout" number="222" requires="VK_KHR_get_physical_device_properties2" type="device" author="EXT" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_EXT_scalar_block_layout" number="222" depends="VK_KHR_get_physical_device_properties2" type="device" author="EXT" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_2">
<require>
<enum value="1" name="VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_scalar_block_layout&quot;" name="VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME"/>
@@ -14841,8 +20070,10 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_GOOGLE_hlsl_functionality1" number="224" type="device" author="GOOGLE" contact="Hai Nguyen @chaoticbob" supported="vulkan">
<require>
- <enum value="1" name="VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION"/>
- <enum value="&quot;VK_GOOGLE_hlsl_functionality1&quot;" name="VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME"/>
+ <enum value="1" name="VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION"/>
+ <enum value="&quot;VK_GOOGLE_hlsl_functionality1&quot;" name="VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME"/>
+ <enum alias="VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION" name="VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION" deprecated="aliased"/>
+ <enum alias="VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME" name="VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME" deprecated="aliased"/>
</require>
</extension>
<extension name="VK_GOOGLE_decorate_string" number="225" type="device" author="GOOGLE" contact="Hai Nguyen @chaoticbob" supported="vulkan">
@@ -14851,23 +20082,23 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_GOOGLE_decorate_string&quot;" name="VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_subgroup_size_control" number="226" type="device" requiresCore="1.1" author="EXT" contact="Neil Henning @sheredom" supported="vulkan">
+ <extension name="VK_EXT_subgroup_size_control" number="226" type="device" depends="VK_VERSION_1_1" author="EXT" contact="Neil Henning @sheredom" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
- <enum value="2" name="VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_subgroup_size_control&quot;" name="VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME"/>
- <type name="VkPhysicalDeviceSubgroupSizeControlFeaturesEXT"/>
- <type name="VkPhysicalDeviceSubgroupSizeControlPropertiesEXT"/>
- <type name="VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT"/>
- <enum bitpos="0" extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT"/>
- <enum bitpos="1" extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT"/>
+ <enum value="2" name="VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_subgroup_size_control&quot;" name="VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME"/>
+ <type name="VkPhysicalDeviceSubgroupSizeControlFeaturesEXT"/>
+ <type name="VkPhysicalDeviceSubgroupSizeControlPropertiesEXT"/>
+ <type name="VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES"/>
+ <enum extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT" alias="VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT"/>
+ <enum extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT" alias="VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT"/>
</require>
</extension>
- <extension name="VK_KHR_fragment_shading_rate" number="227" type="device" requires="VK_KHR_create_renderpass2,VK_KHR_get_physical_device_properties2" author="KHR" contact="Tobias Hector @tobski" supported="vulkan">
+ <extension name="VK_KHR_fragment_shading_rate" number="227" type="device" depends="(VK_KHR_create_renderpass2,VK_VERSION_1_2)+(VK_KHR_get_physical_device_properties2,VK_VERSION_1_1)" author="KHR" contact="Tobias Hector @tobski" supported="vulkan,vulkansc" ratified="vulkan,vulkansc">
<require>
- <enum value="1" name="VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION"/>
+ <enum value="2" name="VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_fragment_shading_rate&quot;" name="VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME"/>
<type name="VkFragmentShadingRateCombinerOpKHR"/>
<type name="VkFragmentShadingRateAttachmentInfoKHR"/>
@@ -14889,8 +20120,11 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="22" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
<enum bitpos="30" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="30" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
+ </require>
</extension>
- <extension name="VK_AMD_shader_core_properties2" number="228" type="device" author="AMD" contact="Matthaeus G. Chajdas @anteru" supported="vulkan" requires="VK_AMD_shader_core_properties">
+ <extension name="VK_AMD_shader_core_properties2" number="228" type="device" author="AMD" contact="Matthaeus G. Chajdas @anteru" supported="vulkan" depends="VK_AMD_shader_core_properties">
<require>
<enum value="1" name="VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION"/>
<enum value="&quot;VK_AMD_shader_core_properties2&quot;" name="VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME"/>
@@ -14906,7 +20140,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_229&quot;" name="VK_AMD_EXTENSION_229_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_AMD_device_coherent_memory" number="230" type="device" author="AMD" contact="Tobias Hector @tobski" supported="vulkan">
+ <extension name="VK_AMD_device_coherent_memory" number="230" type="device" author="AMD" contact="Tobias Hector @tobski" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
<enum value="1" name="VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION"/>
<enum value="&quot;VK_AMD_device_coherent_memory&quot;" name="VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME"/>
@@ -14928,10 +20162,19 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_232&quot;" name="VK_AMD_EXTENSION_232_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_AMD_extension_233" number="233" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
+ <extension name="VK_KHR_dynamic_rendering_local_read" number="233" type="device" depends="VK_KHR_dynamic_rendering" author="AMD" contact="Tobias Hector @tobski" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_AMD_EXTENSION_233_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_233&quot;" name="VK_AMD_EXTENSION_233_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_dynamic_rendering_local_read&quot;" name="VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR"/>
+ <command name="vkCmdSetRenderingAttachmentLocationsKHR"/>
+ <command name="vkCmdSetRenderingInputAttachmentIndicesKHR"/>
+ <type name="VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR"/>
+ <type name="VkRenderingAttachmentLocationInfoKHR"/>
+ <type name="VkRenderingInputAttachmentIndexInfoKHR"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR"/>
</require>
</extension>
<extension name="VK_AMD_extension_234" number="234" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
@@ -14940,7 +20183,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_234&quot;" name="VK_AMD_EXTENSION_234_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_shader_image_atomic_int64" number="235" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Tobias Hector @tobski" supported="vulkan">
+ <extension name="VK_EXT_shader_image_atomic_int64" number="235" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Tobias Hector @tobski" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_shader_image_atomic_int64&quot;" name="VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME"/>
@@ -14948,19 +20191,21 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT"/>
</require>
</extension>
- <extension name="VK_AMD_extension_236" number="236" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
+ <extension name="VK_KHR_shader_quad_control" number="236" type="device" depends="VK_VERSION_1_1+VK_KHR_vulkan_memory_model+VK_KHR_shader_maximal_reconvergence" author="KHR" contact="Tobias Hector @tobski" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_AMD_EXTENSION_236_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_236&quot;" name="VK_AMD_EXTENSION_236_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_SHADER_QUAD_CONTROL_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_shader_quad_control&quot;" name="VK_KHR_SHADER_QUAD_CONTROL_EXTENSION_NAME"/>
+ <type name="VkPhysicalDeviceShaderQuadControlFeaturesKHR"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR"/>
</require>
</extension>
- <extension name="VK_KHR_spirv_1_4" number="237" type="device" requiresCore="1.1" requires="VK_KHR_shader_float_controls" author="KHR" contact="Jesse Hall @critsec" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_spirv_1_4" number="237" type="device" depends="VK_VERSION_1_1+VK_KHR_shader_float_controls" author="KHR" contact="Jesse Hall @critsec" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SPIRV_1_4_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_spirv_1_4&quot;" name="VK_KHR_SPIRV_1_4_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_memory_budget" number="238" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_EXT_memory_budget" number="238" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_MEMORY_BUDGET_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_memory_budget&quot;" name="VK_EXT_MEMORY_BUDGET_EXTENSION_NAME"/>
@@ -14968,7 +20213,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceMemoryBudgetPropertiesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_memory_priority" number="239" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_EXT_memory_priority" number="239" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_MEMORY_PRIORITY_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_memory_priority&quot;" name="VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME"/>
@@ -14978,7 +20223,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkMemoryPriorityAllocateInfoEXT"/>
</require>
</extension>
- <extension name="VK_KHR_surface_protected_capabilities" number="240" type="instance" requiresCore="1.1" requires="VK_KHR_get_surface_capabilities2" author="KHR" contact="Sandeep Shinde @sashinde" supported="vulkan">
+ <extension name="VK_KHR_surface_protected_capabilities" number="240" type="instance" depends="VK_VERSION_1_1+VK_KHR_get_surface_capabilities2" author="KHR" contact="Sandeep Shinde @sashinde" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_surface_protected_capabilities&quot;" name="VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME"/>
@@ -14986,7 +20231,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkSurfaceProtectedCapabilitiesKHR"/>
</require>
</extension>
- <extension name="VK_NV_dedicated_allocation_image_aliasing" number="241" type="device" requires="VK_KHR_dedicated_allocation" author="NVIDIA" contact="Nuno Subtil @nsubtil" supported="vulkan">
+ <extension name="VK_NV_dedicated_allocation_image_aliasing" number="241" type="device" depends="VK_KHR_dedicated_allocation+VK_KHR_get_physical_device_properties2" author="NVIDIA" contact="Nuno Subtil @nsubtil" supported="vulkan">
<require>
<enum value="1" name="VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION"/>
<enum value="&quot;VK_NV_dedicated_allocation_image_aliasing&quot;" name="VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME"/>
@@ -14994,7 +20239,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV"/>
</require>
</extension>
- <extension name="VK_KHR_separate_depth_stencil_layouts" number="242" type="device" requires="VK_KHR_get_physical_device_properties2,VK_KHR_create_renderpass2" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_separate_depth_stencil_layouts" number="242" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_create_renderpass2" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_separate_depth_stencil_layouts&quot;" name="VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME"/>
@@ -15014,6 +20259,7 @@ typedef void <name>CAMetalLayer</name>;
<require>
<enum value="0" name="VK_INTEL_EXTENSION_243_SPEC_VERSION"/>
<enum value="&quot;VK_INTEL_extension_243&quot;" name="VK_INTEL_EXTENSION_243_EXTENSION_NAME"/>
+ <enum bitpos="46" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_46_BIT_INTEL"/>
</require>
</extension>
<extension name="VK_MESA_extension_244" number="244" author="MESA" contact="Andres Rodriguez @lostgoat" supported="disabled">
@@ -15022,7 +20268,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_MESA_extension_244&quot;" name="VK_MESA_EXTENSION_244_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_buffer_device_address" number="245" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Jeff Bolz @jeffbolznv" deprecatedby="VK_KHR_buffer_device_address" supported="vulkan">
+ <extension name="VK_EXT_buffer_device_address" number="245" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Jeff Bolz @jeffbolznv" deprecatedby="VK_KHR_buffer_device_address" supported="vulkan">
<require>
<enum value="2" name="VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_buffer_device_address&quot;" name="VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME"/>
@@ -15040,25 +20286,25 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetBufferDeviceAddressEXT"/>
</require>
</extension>
- <extension name="VK_EXT_tooling_info" number="246" type="device" author="EXT" contact="Tobias Hector @tobski" supported="vulkan">
+ <extension name="VK_EXT_tooling_info" number="246" type="device" author="EXT" contact="Tobias Hector @tobski" supported="vulkan" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_TOOLING_INFO_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_tooling_info&quot;" name="VK_EXT_TOOLING_INFO_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES"/>
<type name="VkToolPurposeFlagBitsEXT"/>
<type name="VkToolPurposeFlagsEXT"/>
<type name="VkPhysicalDeviceToolPropertiesEXT"/>
<command name="vkGetPhysicalDeviceToolPropertiesEXT"/>
</require>
- <require extension="VK_EXT_debug_report">
- <enum bitpos="5" extends="VkToolPurposeFlagBitsEXT" name="VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT"/>
+ <require depends="VK_EXT_debug_report">
+ <enum bitpos="5" extends="VkToolPurposeFlagBits" name="VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT"/>
</require>
- <require extension="VK_EXT_debug_marker">
- <enum bitpos="6" extends="VkToolPurposeFlagBitsEXT" name="VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT"/>
+ <require depends="VK_EXT_debug_marker">
+ <enum bitpos="6" extends="VkToolPurposeFlagBits" name="VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT"/>
</require>
- <require extension="VK_EXT_debug_utils">
- <enum bitpos="5" extends="VkToolPurposeFlagBitsEXT" name="VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT"/>
- <enum bitpos="6" extends="VkToolPurposeFlagBitsEXT" name="VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT"/>
+ <require depends="VK_EXT_debug_utils">
+ <enum bitpos="5" extends="VkToolPurposeFlagBits" name="VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT"/>
+ <enum bitpos="6" extends="VkToolPurposeFlagBits" name="VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT"/>
</require>
</extension>
<extension name="VK_EXT_separate_stencil_usage" number="247" type="device" author="EXT" contact="Daniel Rakos @drakos-amd" supported="vulkan" promotedto="VK_VERSION_1_2">
@@ -15069,9 +20315,9 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkImageStencilUsageCreateInfoEXT"/>
</require>
</extension>
- <extension name="VK_EXT_validation_features" number="248" type="instance" author="LUNARG" contact="Karl Schultz @karl-lunarg" specialuse="debugging" supported="vulkan">
+ <extension name="VK_EXT_validation_features" number="248" type="instance" author="LUNARG" contact="Karl Schultz @karl-lunarg" specialuse="debugging" supported="vulkan,vulkansc" deprecatedby="VK_EXT_layer_settings">
<require>
- <enum value="5" name="VK_EXT_VALIDATION_FEATURES_SPEC_VERSION"/>
+ <enum value="6" name="VK_EXT_VALIDATION_FEATURES_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_validation_features&quot;" name="VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT"/>
<type name="VkValidationFeaturesEXT"/>
@@ -15079,7 +20325,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkValidationFeatureDisableEXT"/>
</require>
</extension>
- <extension name="VK_KHR_present_wait" number="249" type="device" requires="VK_KHR_swapchain,VK_KHR_present_id" author="KHR" contact="Keith Packard @keithp" supported="vulkan">
+ <extension name="VK_KHR_present_wait" number="249" type="device" depends="VK_KHR_swapchain+VK_KHR_present_id" author="KHR" contact="Keith Packard @keithp" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_PRESENT_WAIT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_present_wait&quot;" name="VK_KHR_PRESENT_WAIT_EXTENSION_NAME"/>
@@ -15088,7 +20334,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDevicePresentWaitFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_NV_cooperative_matrix" number="250" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_NV_cooperative_matrix" number="250" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
<require>
<enum value="1" name="VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION"/>
<enum value="&quot;VK_NV_cooperative_matrix&quot;" name="VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME"/>
@@ -15097,13 +20343,28 @@ typedef void <name>CAMetalLayer</name>;
<enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV"/>
<type name="VkCooperativeMatrixPropertiesNV"/>
<type name="VkScopeNV"/>
+ <enum extends="VkScopeKHR" name="VK_SCOPE_DEVICE_NV" alias="VK_SCOPE_DEVICE_KHR"/>
+ <enum extends="VkScopeKHR" name="VK_SCOPE_WORKGROUP_NV" alias="VK_SCOPE_WORKGROUP_KHR"/>
+ <enum extends="VkScopeKHR" name="VK_SCOPE_SUBGROUP_NV" alias="VK_SCOPE_SUBGROUP_KHR"/>
+ <enum extends="VkScopeKHR" name="VK_SCOPE_QUEUE_FAMILY_NV" alias="VK_SCOPE_QUEUE_FAMILY_KHR"/>
<type name="VkComponentTypeNV"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_FLOAT16_NV" alias="VK_COMPONENT_TYPE_FLOAT16_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_FLOAT32_NV" alias="VK_COMPONENT_TYPE_FLOAT32_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_FLOAT64_NV" alias="VK_COMPONENT_TYPE_FLOAT64_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_SINT8_NV" alias="VK_COMPONENT_TYPE_SINT8_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_SINT16_NV" alias="VK_COMPONENT_TYPE_SINT16_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_SINT32_NV" alias="VK_COMPONENT_TYPE_SINT32_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_SINT64_NV" alias="VK_COMPONENT_TYPE_SINT64_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_UINT8_NV" alias="VK_COMPONENT_TYPE_UINT8_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_UINT16_NV" alias="VK_COMPONENT_TYPE_UINT16_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_UINT32_NV" alias="VK_COMPONENT_TYPE_UINT32_KHR"/>
+ <enum extends="VkComponentTypeKHR" name="VK_COMPONENT_TYPE_UINT64_NV" alias="VK_COMPONENT_TYPE_UINT64_KHR"/>
<type name="VkPhysicalDeviceCooperativeMatrixFeaturesNV"/>
<type name="VkPhysicalDeviceCooperativeMatrixPropertiesNV"/>
<command name="vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"/>
</require>
</extension>
- <extension name="VK_NV_coverage_reduction_mode" number="251" requires="VK_NV_framebuffer_mixed_samples" type="device" author="NV" contact="Kedarnath Thangudu @kthangudu" supported="vulkan">
+ <extension name="VK_NV_coverage_reduction_mode" number="251" depends="VK_NV_framebuffer_mixed_samples+VK_KHR_get_physical_device_properties2" type="device" author="NV" contact="Kedarnath Thangudu @kthangudu" supported="vulkan">
<require>
<enum value="1" name="VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION"/>
<enum value="&quot;VK_NV_coverage_reduction_mode&quot;" name="VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME"/>
@@ -15118,7 +20379,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"/>
</require>
</extension>
- <extension name="VK_EXT_fragment_shader_interlock" number="252" author="EXT" type="device" requires="VK_KHR_get_physical_device_properties2" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_fragment_shader_interlock" number="252" author="EXT" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_fragment_shader_interlock&quot;" name="VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME"/>
@@ -15126,7 +20387,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_ycbcr_image_arrays" number="253" type="device" requires="VK_KHR_sampler_ycbcr_conversion" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_ycbcr_image_arrays" number="253" type="device" depends="VK_KHR_sampler_ycbcr_conversion,VK_VERSION_1_1" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_ycbcr_image_arrays&quot;" name="VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME"/>
@@ -15134,7 +20395,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceYcbcrImageArraysFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_KHR_uniform_buffer_standard_layout" number="254" requires="VK_KHR_get_physical_device_properties2" type="device" author="KHR" contact="Graeme Leese @gnl21" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_uniform_buffer_standard_layout" number="254" depends="VK_KHR_get_physical_device_properties2" type="device" author="KHR" contact="Graeme Leese @gnl21" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_uniform_buffer_standard_layout&quot;" name="VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME"/>
@@ -15142,7 +20403,7 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES"/>
</require>
</extension>
- <extension name="VK_EXT_provoking_vertex" number="255" type="device" author="EXT" requires="VK_KHR_get_physical_device_properties2" contact="Jesse Hall @jessehall" specialuse="glemulation" supported="vulkan">
+ <extension name="VK_EXT_provoking_vertex" number="255" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2" contact="Jesse Hall @jessehall" specialuse="glemulation" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_PROVOKING_VERTEX_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_provoking_vertex&quot;" name="VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME"/>
@@ -15155,7 +20416,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkProvokingVertexModeEXT"/>
</require>
</extension>
- <extension name="VK_EXT_full_screen_exclusive" number="256" type="device" author="EXT" requires="VK_KHR_get_physical_device_properties2,VK_KHR_surface,VK_KHR_get_surface_capabilities2,VK_KHR_swapchain" platform="win32" contact="James Jones @cubanismo" supported="vulkan">
+ <extension name="VK_EXT_full_screen_exclusive" number="256" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2+VK_KHR_surface+VK_KHR_get_surface_capabilities2+VK_KHR_swapchain" platform="win32" contact="James Jones @cubanismo" supported="vulkan">
<require>
<enum value="4" name="VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_full_screen_exclusive&quot;" name="VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME"/>
@@ -15169,18 +20430,18 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkAcquireFullScreenExclusiveModeEXT"/>
<command name="vkReleaseFullScreenExclusiveModeEXT"/>
</require>
- <require extension="VK_KHR_win32_surface">
+ <require depends="VK_KHR_win32_surface">
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT"/>
<type name="VkSurfaceFullScreenExclusiveWin32InfoEXT"/>
</require>
- <require extension="VK_KHR_device_group">
+ <require depends="VK_KHR_device_group">
<command name="vkGetDeviceGroupSurfacePresentModes2EXT"/>
</require>
- <require feature="VK_VERSION_1_1">
+ <require depends="VK_VERSION_1_1">
<command name="vkGetDeviceGroupSurfacePresentModes2EXT"/>
</require>
</extension>
- <extension name="VK_EXT_headless_surface" number="257" type="instance" requires="VK_KHR_surface" author="EXT" contact="Lisa Wu @chengtianww" supported="vulkan">
+ <extension name="VK_EXT_headless_surface" number="257" type="instance" depends="VK_KHR_surface" author="EXT" contact="Lisa Wu @chengtianww" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_HEADLESS_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_headless_surface&quot;" name="VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME"/>
@@ -15190,7 +20451,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCreateHeadlessSurfaceEXT"/>
</require>
</extension>
- <extension name="VK_KHR_buffer_device_address" number="258" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2">
+ <extension name="VK_KHR_buffer_device_address" number="258" type="device" depends="(VK_KHR_get_physical_device_properties2+VK_KHR_device_group),VK_VERSION_1_1" author="KHR" contact="Jeff Bolz @jeffbolznv" supported="vulkan" promotedto="VK_VERSION_1_2" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_buffer_device_address&quot;" name="VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME"/>
@@ -15218,16 +20479,20 @@ typedef void <name>CAMetalLayer</name>;
<require>
<enum value="0" name="VK_EXT_EXTENSION_259_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_extension_259&quot;" name="VK_EXT_EXTENSION_259_EXTENSION_NAME"/>
+ <enum bitpos="9" extends="VkQueueFlagBits" name="VK_QUEUE_RESERVED_9_BIT_EXT"/>
+ <enum bitpos="44" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_44_BIT_EXT"/>
+ <enum bitpos="45" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_45_BIT_EXT"/>
+ <enum bitpos="19" extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_RESERVED_19_BIT_EXT"/>
</require>
</extension>
- <extension name="VK_EXT_line_rasterization" number="260" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Jeff Bolz @jeffbolznv" specialuse="cadsupport" supported="vulkan">
+ <extension name="VK_EXT_line_rasterization" number="260" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Jeff Bolz @jeffbolznv" specialuse="cadsupport" supported="vulkan,vulkansc" promotedto="VK_KHR_line_rasterization">
<require>
<enum value="1" name="VK_EXT_LINE_RASTERIZATION_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_line_rasterization&quot;" name="VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT"/>
- <enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_LINE_STIPPLE_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_LINE_STIPPLE_EXT" alias="VK_DYNAMIC_STATE_LINE_STIPPLE_KHR"/>
<type name="VkPhysicalDeviceLineRasterizationFeaturesEXT"/>
<type name="VkPhysicalDeviceLineRasterizationPropertiesEXT"/>
<type name="VkPipelineRasterizationLineStateCreateInfoEXT"/>
@@ -15235,7 +20500,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetLineStippleEXT"/>
</require>
</extension>
- <extension name="VK_EXT_shader_atomic_float" number="261" type="device" author="NV" requires="VK_KHR_get_physical_device_properties2" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan">
+ <extension name="VK_EXT_shader_atomic_float" number="261" type="device" author="NV" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_shader_atomic_float&quot;" name="VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME"/>
@@ -15243,7 +20508,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceShaderAtomicFloatFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_host_query_reset" number="262" author="EXT" contact="Bas Nieuwenhuizen @BNieuwenhuizen" supported="vulkan" type="device" requires="VK_KHR_get_physical_device_properties2" promotedto="VK_VERSION_1_2">
+ <extension name="VK_EXT_host_query_reset" number="262" author="EXT" contact="Bas Nieuwenhuizen @BNieuwenhuizen" supported="vulkan" type="device" depends="VK_KHR_get_physical_device_properties2" promotedto="VK_VERSION_1_2">
<require>
<enum value="1" name="VK_EXT_HOST_QUERY_RESET_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_host_query_reset&quot;" name="VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME"/>
@@ -15254,8 +20519,8 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_GGP_extension_263" number="263" author="GGP" contact="Jean-Francois Roy @jfroy" supported="disabled">
<require>
- <enum value="0" name="VK_GOOGLE_EXTENSION_263_SPEC_VERSION"/>
- <enum value="&quot;VK_GGP_extension_263&quot;" name="VK_GOOGLE_EXTENSION_263_EXTENSION_NAME"/>
+ <enum value="0" name="VK_GGP_EXTENSION_263_SPEC_VERSION"/>
+ <enum value="&quot;VK_GGP_extension_263&quot;" name="VK_GGP_EXTENSION_263_EXTENSION_NAME"/>
</require>
</extension>
<extension name="VK_BRCM_extension_264" number="264" author="BRCM" contact="Graeme Leese @gnl21" supported="disabled">
@@ -15270,39 +20535,39 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_BRCM_extension_265&quot;" name="VK_BRCM_EXTENSION_265_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_index_type_uint8" number="266" type="device" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_index_type_uint8" number="266" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc" promotedto="VK_KHR_index_type_uint8">
<require>
<enum value="1" name="VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_index_type_uint8&quot;" name="VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT"/>
- <enum offset="0" extends="VkIndexType" name="VK_INDEX_TYPE_UINT8_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR"/>
+ <enum extends="VkIndexType" name="VK_INDEX_TYPE_UINT8_EXT" alias="VK_INDEX_TYPE_UINT8_KHR"/>
<type name="VkPhysicalDeviceIndexTypeUint8FeaturesEXT"/>
</require>
</extension>
<extension name="VK_EXT_extension_267" number="267" type="device" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="disabled">
<require>
<enum value="0" name="VK_EXT_EXTENSION_267_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_267&quot;" name="VK_EXT_extension_267"/>
+ <enum value="&quot;VK_EXT_extension_267&quot;" name="VK_EXT_EXTENSION_267_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_extended_dynamic_state" number="268" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_extended_dynamic_state" number="268" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_extended_dynamic_state&quot;" name="VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT"/>
- <enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_CULL_MODE_EXT"/>
- <enum offset="1" extends="VkDynamicState" name="VK_DYNAMIC_STATE_FRONT_FACE_EXT"/>
- <enum offset="2" extends="VkDynamicState" name="VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT"/>
- <enum offset="3" extends="VkDynamicState" name="VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT"/>
- <enum offset="4" extends="VkDynamicState" name="VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT"/>
- <enum offset="5" extends="VkDynamicState" name="VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT"/>
- <enum offset="6" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT"/>
- <enum offset="7" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT"/>
- <enum offset="8" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT"/>
- <enum offset="9" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT"/>
- <enum offset="10" extends="VkDynamicState" name="VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT"/>
- <enum offset="11" extends="VkDynamicState" name="VK_DYNAMIC_STATE_STENCIL_OP_EXT"/>
- <type name="VkPhysicalDeviceExtendedDynamicStateFeaturesEXT"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT" comment="Not promoted to 1.3"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_CULL_MODE_EXT" alias="VK_DYNAMIC_STATE_CULL_MODE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_FRONT_FACE_EXT" alias="VK_DYNAMIC_STATE_FRONT_FACE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT" alias="VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT" alias="VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT" alias="VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT" alias="VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT" alias="VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT" alias="VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT" alias="VK_DYNAMIC_STATE_DEPTH_COMPARE_OP"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT" alias="VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT" alias="VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_STENCIL_OP_EXT" alias="VK_DYNAMIC_STATE_STENCIL_OP"/>
+ <type name="VkPhysicalDeviceExtendedDynamicStateFeaturesEXT" comment="Not promoted to 1.3"/>
<command name="vkCmdSetCullModeEXT"/>
<command name="vkCmdSetFrontFaceEXT"/>
<command name="vkCmdSetPrimitiveTopologyEXT"/>
@@ -15317,7 +20582,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetStencilOpEXT"/>
</require>
</extension>
- <extension name="VK_KHR_deferred_host_operations" number="269" type="device" author="KHR" contact="Josh Barczak @jbarczak" supported="vulkan">
+ <extension name="VK_KHR_deferred_host_operations" number="269" type="device" author="KHR" contact="Josh Barczak @jbarczak" supported="vulkan" ratified="vulkan">
<require>
<enum value="4" name="VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_deferred_host_operations&quot;" name="VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME"/>
@@ -15334,7 +20599,7 @@ typedef void <name>CAMetalLayer</name>;
<enum extends="VkResult" offset="3" name="VK_OPERATION_NOT_DEFERRED_KHR" />
</require>
</extension>
- <extension name="VK_KHR_pipeline_executable_properties" number="270" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Jason Ekstrand @jekstrand" specialuse="devtools" supported="vulkan">
+ <extension name="VK_KHR_pipeline_executable_properties" number="270" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Faith Ekstrand @gfxstrand" specialuse="devtools" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_pipeline_executable_properties&quot;" name="VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME"/>
@@ -15359,25 +20624,73 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPipelineExecutableInternalRepresentationsKHR"/>
</require>
</extension>
- <extension name="VK_INTEL_extension_271" number="271" type="device" author="INTEL" contact="Jason Ekstrand @jekstrand" supported="disabled">
- <require>
- <enum value="0" name="VK_INTEL_EXTENSION_271_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_271&quot;" name="VK_INTEL_extension_271"/>
+ <extension name="VK_EXT_host_image_copy" number="271" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_copy_commands2+VK_KHR_format_feature_flags2" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_HOST_IMAGE_COPY_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_host_image_copy&quot;" name="VK_EXT_HOST_IMAGE_COPY_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT"/>
+ <enum bitpos="22" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT" comment="Can be used with host image copies"/>
+ <enum bitpos="46" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT_EXT" comment="Host image copies are supported"/>
+ <type name="VkPhysicalDeviceHostImageCopyFeaturesEXT"/>
+ <type name="VkPhysicalDeviceHostImageCopyPropertiesEXT"/>
+ <type name="VkHostImageCopyFlagBitsEXT"/>
+ <type name="VkHostImageCopyFlagsEXT"/>
+ <type name="VkMemoryToImageCopyEXT"/>
+ <type name="VkImageToMemoryCopyEXT"/>
+ <type name="VkCopyMemoryToImageInfoEXT"/>
+ <type name="VkCopyImageToMemoryInfoEXT"/>
+ <type name="VkCopyImageToImageInfoEXT"/>
+ <type name="VkHostImageLayoutTransitionInfoEXT"/>
+ <type name="VkSubresourceHostMemcpySizeEXT"/>
+ <type name="VkHostImageCopyDevicePerformanceQueryEXT"/>
+ <command name="vkCopyMemoryToImageEXT"/>
+ <command name="vkCopyImageToMemoryEXT"/>
+ <command name="vkCopyImageToImageEXT"/>
+ <command name="vkTransitionImageLayoutEXT"/>
+
+ <type name="VkSubresourceLayout2EXT"/>
+ <type name="VkImageSubresource2EXT"/>
+ <command name="vkGetImageSubresourceLayout2EXT" comment="Taken from VK_EXT_image_compression_control. VkStructureType enums defined in that extension"/>
</require>
</extension>
- <extension name="VK_INTEL_extension_272" number="272" type="device" author="INTEL" contact="Jason Ekstrand @jekstrand" supported="disabled">
+ <extension name="VK_KHR_map_memory2" number="272" type="device" author="KHR" contact="Faith Ekstrand @gfxstrand" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_INTEL_EXTENSION_272_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_272&quot;" name="VK_INTEL_extension_272"/>
+ <enum value="1" name="VK_KHR_MAP_MEMORY_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_map_memory2&quot;" name="VK_KHR_MAP_MEMORY_2_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR"/>
+ <type name="VkMemoryMapInfoKHR"/>
+ <type name="VkMemoryUnmapInfoKHR"/>
+ <type name="VkMemoryUnmapFlagBitsKHR"/>
+ <type name="VkMemoryUnmapFlagsKHR"/>
+ <command name="vkMapMemory2KHR"/>
+ <command name="vkUnmapMemory2KHR"/>
</require>
</extension>
- <extension name="VK_INTEL_extension_273" number="273" type="device" author="INTEL" contact="Jason Ekstrand @jekstrand" supported="disabled">
+ <extension name="VK_EXT_map_memory_placed" number="273" type="device" depends="VK_KHR_map_memory2" author="EXT" contact="Faith Ekstrand @gfxstrand" supported="vulkan">
<require>
- <enum value="0" name="VK_INTEL_EXTENSION_273_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_273&quot;" name="VK_INTEL_extension_273"/>
+ <enum value="1" name="VK_EXT_MAP_MEMORY_PLACED_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_map_memory_placed&quot;" name="VK_EXT_MAP_MEMORY_PLACED_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_PROPERTIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT"/>
+ <enum bitpos="0" extends="VkMemoryMapFlagBits" name="VK_MEMORY_MAP_PLACED_BIT_EXT"/>
+ <enum bitpos="0" extends="VkMemoryUnmapFlagBitsKHR" name="VK_MEMORY_UNMAP_RESERVE_BIT_EXT"/>
+ <type name="VkPhysicalDeviceMapMemoryPlacedFeaturesEXT"/>
+ <type name="VkPhysicalDeviceMapMemoryPlacedPropertiesEXT"/>
+ <type name="VkMemoryMapPlacedInfoEXT"/>
</require>
</extension>
- <extension name="VK_EXT_shader_atomic_float2" number="274" type="device" requires="VK_EXT_shader_atomic_float" author="EXT" contact="Jason Ekstrand @jekstrand" supported="vulkan">
+ <extension name="VK_EXT_shader_atomic_float2" number="274" type="device" depends="VK_EXT_shader_atomic_float" author="EXT" contact="Faith Ekstrand @gfxstrand" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_SHADER_ATOMIC_FLOAT_2_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_shader_atomic_float2&quot;" name="VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME"/>
@@ -15385,28 +20698,56 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT"/>
</require>
</extension>
- <extension name="VK_KHR_extension_275" number="275" type="instance" author="KHR" contact="Lionel Landwerlin @llandwerlin" supported="disabled">
- <require>
- <enum value="0" name="VK_KHR_EXTENSION_275_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_275&quot;" name="VK_KHR_extension_275"/>
- </require>
- </extension>
- <extension name="VK_KHR_extension_276" number="276" type="device" author="KHR" contact="James Jones @cubanismo" supported="disabled">
- <require>
- <enum value="0" name="VK_KHR_EXTENSION_276_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_276&quot;" name="VK_KHR_extension_276"/>
- </require>
- </extension>
- <extension name="VK_EXT_shader_demote_to_helper_invocation" number="277" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_EXT_surface_maintenance1" number="275" type="instance" depends="VK_KHR_surface+VK_KHR_get_surface_capabilities2" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_SURFACE_MAINTENANCE_1_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_surface_maintenance1&quot;" name="VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT"/>
+ <type name="VkSurfacePresentModeEXT"/>
+ <type name="VkPresentScalingFlagBitsEXT"/>
+ <type name="VkPresentScalingFlagsEXT"/>
+ <type name="VkPresentGravityFlagBitsEXT"/>
+ <type name="VkPresentGravityFlagsEXT"/>
+ <type name="VkSurfacePresentScalingCapabilitiesEXT"/>
+ <type name="VkSurfacePresentModeCompatibilityEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_swapchain_maintenance1" number="276" type="device" depends="VK_KHR_swapchain+VK_EXT_surface_maintenance1+VK_KHR_get_physical_device_properties2" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_SWAPCHAIN_MAINTENANCE_1_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_swapchain_maintenance1&quot;" name="VK_EXT_SWAPCHAIN_MAINTENANCE_1_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODES_CREATE_INFO_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_SCALING_CREATE_INFO_EXT"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RELEASE_SWAPCHAIN_IMAGES_INFO_EXT"/>
+ <enum bitpos="3" extends="VkSwapchainCreateFlagBitsKHR" name="VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT"/>
+ <type name="VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT"/>
+ <type name="VkSwapchainPresentFenceInfoEXT"/>
+ <type name="VkSwapchainPresentModesCreateInfoEXT"/>
+ <type name="VkSwapchainPresentModeInfoEXT"/>
+ <type name="VkSwapchainPresentScalingCreateInfoEXT"/>
+ <type name="VkReleaseSwapchainImagesInfoEXT"/>
+ <command name="vkReleaseSwapchainImagesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_shader_demote_to_helper_invocation" number="277" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_shader_demote_to_helper_invocation&quot;" name="VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES"/>
<type name="VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_NV_device_generated_commands" number="278" type="device" requiresCore="1.1" author="NV" contact="Christoph Kubisch @pixeljetstream" supported="vulkan">
+ <extension name="VK_NV_device_generated_commands" number="278" type="device" depends="VK_VERSION_1_1+VK_KHR_buffer_device_address" author="NV" contact="Christoph Kubisch @pixeljetstream" supported="vulkan">
<require>
+ <comment>
+ This extension requires buffer_device_address functionality.
+ VK_EXT_buffer_device_address is also acceptable, but since it is deprecated the KHR version is preferred.
+ </comment>
<enum value="3" name="VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION"/>
<enum value="&quot;VK_NV_device_generated_commands&quot;" name="VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV"/>
@@ -15449,7 +20790,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkDestroyIndirectCommandsLayoutNV"/>
</require>
</extension>
- <extension name="VK_NV_inherited_viewport_scissor" number="279" type="device" author="NV" contact="David Zhao Akeley @akeley98" supported="vulkan">
+ <extension name="VK_NV_inherited_viewport_scissor" number="279" type="device" author="NV" contact="David Zhao Akeley @akeley98" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
<enum value="1" name="VK_NV_INHERITED_VIEWPORT_SCISSOR_SPEC_VERSION"/>
<enum value="&quot;VK_NV_inherited_viewport_scissor&quot;" name="VK_NV_INHERITED_VIEWPORT_SCISSOR_EXTENSION_NAME"/>
@@ -15459,35 +20800,35 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkCommandBufferInheritanceViewportScissorInfoNV"/>
</require>
</extension>
- <extension name="VK_KHR_extension_280" number="280" type="device" author="KHR" contact="Kevin Petit @kevinpetit" supported="disabled">
+ <extension name="VK_KHR_extension_280" number="280" type="device" author="KHR" contact="Kevin Petit @kpet" supported="disabled">
<require>
<enum value="0" name="VK_KHR_EXTENSION_280_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_280&quot;" name="VK_KHR_extension_280"/>
+ <enum value="&quot;VK_KHR_extension_280&quot;" name="VK_KHR_EXTENSION_280_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_shader_integer_dot_product" number="281" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Kevin Petit @kevinpetit" supported="vulkan">
+ <extension name="VK_KHR_shader_integer_dot_product" number="281" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Kevin Petit @kpet" supported="vulkan" promotedto="VK_VERSION_1_3" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_integer_dot_product&quot;" name="VK_KHR_SHADER_INTEGER_DOT_PRODUCT_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES"/>
<type name="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR"/>
<type name="VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_EXT_texel_buffer_alignment" number="282" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <extension name="VK_EXT_texel_buffer_alignment" number="282" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Jeff Bolz @jeffbolznv" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_texel_buffer_alignment&quot;" name="VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT"/>
- <type name="VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT" comment="Not promoted to 1.3"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES"/>
+ <type name="VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT" comment="Not promoted to 1.3"/>
<type name="VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT"/>
</require>
</extension>
- <extension name="VK_QCOM_render_pass_transform" number="283" type="device" requires="VK_KHR_swapchain,VK_KHR_surface" author="QCOM" contact="Jeff Leger @jackohound" supported="vulkan">
+ <extension name="VK_QCOM_render_pass_transform" number="283" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
<require>
- <enum value="2" name="VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION"/>
+ <enum value="4" name="VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION"/>
<enum value="&quot;VK_QCOM_render_pass_transform&quot;" name="VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM"/>
@@ -15496,13 +20837,21 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkCommandBufferInheritanceRenderPassTransformInfoQCOM"/>
</require>
</extension>
- <extension name="VK_EXT_extension_284" number="284" type="device" author="EXT" contact="Samuel Pitoiset @hakzsam" supported="disabled">
+ <extension name="VK_EXT_depth_bias_control" number="284" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Joshua Ashton @Joshua-Ashton" specialuse="d3demulation" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_284_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_284&quot;" name="VK_EXT_extension_284"/>
+ <enum value="1" name="VK_EXT_DEPTH_BIAS_CONTROL_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_depth_bias_control&quot;" name="VK_EXT_DEPTH_BIAS_CONTROL_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_BIAS_CONTROL_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEPTH_BIAS_INFO_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEPTH_BIAS_REPRESENTATION_INFO_EXT"/>
+ <type name="VkPhysicalDeviceDepthBiasControlFeaturesEXT"/>
+ <type name="VkDepthBiasInfoEXT"/>
+ <type name="VkDepthBiasRepresentationEXT"/>
+ <type name="VkDepthBiasRepresentationInfoEXT"/>
+ <command name="vkCmdSetDepthBias2EXT"/>
</require>
</extension>
- <extension name="VK_EXT_device_memory_report" number="285" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Yiwei Zhang @zhangyiwei" specialuse="devtools" supported="vulkan">
+ <extension name="VK_EXT_device_memory_report" number="285" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Yiwei Zhang @zhangyiwei" specialuse="devtools" supported="vulkan">
<require>
<enum value="2" name="VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_device_memory_report&quot;" name="VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME"/>
@@ -15517,7 +20866,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="PFN_vkDeviceMemoryReportCallbackEXT"/>
</require>
</extension>
- <extension name="VK_EXT_acquire_drm_display" number="286" type="instance" requires="VK_EXT_direct_mode_display" author="EXT" contact="Drew DeVault sir@cmpwn.com" supported="vulkan">
+ <extension name="VK_EXT_acquire_drm_display" number="286" type="instance" depends="VK_EXT_direct_mode_display" author="EXT" contact="Drew DeVault sir@cmpwn.com" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_ACQUIRE_DRM_DISPLAY_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_acquire_drm_display&quot;" name="VK_EXT_ACQUIRE_DRM_DISPLAY_EXTENSION_NAME"/>
@@ -15525,7 +20874,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetDrmDisplayEXT"/>
</require>
</extension>
- <extension name="VK_EXT_robustness2" number="287" type="device" author="EXT" contact="Liam Middlebrook @liam-middlebrook" supported="vulkan">
+ <extension name="VK_EXT_robustness2" number="287" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Liam Middlebrook @liam-middlebrook" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_ROBUSTNESS_2_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_robustness2&quot;" name="VK_EXT_ROBUSTNESS_2_EXTENSION_NAME"/>
@@ -15535,7 +20884,7 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceRobustness2PropertiesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_custom_border_color" number="288" type="device" author="EXT" contact="Liam Middlebrook @liam-middlebrook" specialuse="glemulation,d3demulation" supported="vulkan">
+ <extension name="VK_EXT_custom_border_color" number="288" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Liam Middlebrook @liam-middlebrook" specialuse="glemulation,d3demulation" supported="vulkan,vulkansc">
<require>
<enum value="12" name="VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_custom_border_color&quot;" name="VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME"/>
@@ -15596,7 +20945,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_GOOGLE_user_type&quot;" name="VK_GOOGLE_USER_TYPE_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_pipeline_library" number="291" type="device" author="KHR" contact="Christoph Kubisch @pixeljetstream" supported="vulkan">
+ <extension name="VK_KHR_pipeline_library" number="291" type="device" author="KHR" contact="Christoph Kubisch @pixeljetstream" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_pipeline_library&quot;" name="VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME"/>
@@ -15611,19 +20960,25 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_NV_extension_292&quot;" name="VK_NV_EXTENSION_292_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_NV_extension_293" number="293" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
+ <extension name="VK_NV_present_barrier" number="293" type="device" author="NV" depends="VK_KHR_get_physical_device_properties2+VK_KHR_surface+VK_KHR_get_surface_capabilities2+VK_KHR_swapchain" contact="Liya Li @liyli" supported="vulkan">
<require>
- <enum value="0" name="VK_NV_EXTENSION_293_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_293&quot;" name="VK_NV_EXTENSION_293_EXTENSION_NAME"/>
+ <enum value="1" name="VK_NV_PRESENT_BARRIER_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_present_barrier&quot;" name="VK_NV_PRESENT_BARRIER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_BARRIER_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_PRESENT_BARRIER_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_BARRIER_CREATE_INFO_NV"/>
+ <type name="VkPhysicalDevicePresentBarrierFeaturesNV"/>
+ <type name="VkSurfaceCapabilitiesPresentBarrierNV"/>
+ <type name="VkSwapchainPresentBarrierCreateInfoNV"/>
</require>
</extension>
- <extension name="VK_KHR_shader_non_semantic_info" number="294" type="device" author="KHR" contact="Baldur Karlsson @baldurk" supported="vulkan">
+ <extension name="VK_KHR_shader_non_semantic_info" number="294" type="device" author="KHR" contact="Baldur Karlsson @baldurk" supported="vulkan" promotedto="VK_VERSION_1_3" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_non_semantic_info&quot;" name="VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_present_id" number="295" type="device" requires="VK_KHR_swapchain" author="KHR" contact="Keith Packard @keithp" supported="vulkan">
+ <extension name="VK_KHR_present_id" number="295" type="device" depends="VK_KHR_swapchain+VK_KHR_get_physical_device_properties2" author="KHR" contact="Keith Packard @keithp" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_PRESENT_ID_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_present_id&quot;" name="VK_KHR_PRESENT_ID_EXTENSION_NAME"/>
@@ -15633,20 +20988,19 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDevicePresentIdFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_EXT_private_data" number="296" type="device" author="NV" contact="Matthew Rusch @mattruschnv" supported="vulkan">
+ <extension name="VK_EXT_private_data" number="296" type="device" author="NV" contact="Matthew Rusch @mattruschnv" supported="vulkan" depends="VK_KHR_get_physical_device_properties2" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_PRIVATE_DATA_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_private_data&quot;" name="VK_EXT_PRIVATE_DATA_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT"/>
- <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO"/>
+ <enum extends="VkObjectType" name="VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT" alias="VK_OBJECT_TYPE_PRIVATE_DATA_SLOT"/>
<type name="VkPhysicalDevicePrivateDataFeaturesEXT"/>
<type name="VkDevicePrivateDataCreateInfoEXT"/>
<type name="VkPrivateDataSlotCreateInfoEXT"/>
<type name="VkPrivateDataSlotEXT"/>
- <type name="VkPrivateDataSlotCreateFlagsEXT"/>
- <type name="VkPrivateDataSlotCreateFlagBitsEXT"/>
+ <type name="VkPrivateDataSlotCreateFlagsEXT" comment="Will add VkPrivateDataSlotCreateFlagBits when bits are defined in the future"/>
<command name="vkCreatePrivateDataSlotEXT"/>
<command name="vkDestroyPrivateDataSlotEXT"/>
<command name="vkSetPrivateDataEXT"/>
@@ -15660,72 +21014,107 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="3" extends="VkPipelineShaderStageCreateFlagBits" name="VK_PIPELINE_SHADER_STAGE_CREATE_RESERVED_3_BIT_KHR"/>
</require>
</extension>
- <extension name="VK_EXT_pipeline_creation_cache_control" number="298" type="device" author="AMD" contact="Gregory Grebe @grgrebe_amd" supported="vulkan">
+ <extension name="VK_EXT_pipeline_creation_cache_control" number="298" type="device" author="AMD" contact="Gregory Grebe @grgrebe_amd" depends="VK_KHR_get_physical_device_properties2" supported="vulkan" promotedto="VK_VERSION_1_3">
<require>
- <enum value="3" name="VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_pipeline_creation_cache_control&quot;"
- name="VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType"
- name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT"/>
+ <enum value="3" name="VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_pipeline_creation_cache_control&quot;" name="VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES"/>
<type name="VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT"/>
- <enum bitpos="8" extends="VkPipelineCreateFlagBits"
- name="VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT"/>
- <enum bitpos="9" extends="VkPipelineCreateFlagBits"
- name="VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT"/>
- <enum extends="VkResult" offset="0" name="VK_PIPELINE_COMPILE_REQUIRED_EXT"/>
- <enum extends="VkResult" name="VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT" alias="VK_PIPELINE_COMPILE_REQUIRED_EXT"/>
- <enum bitpos="0" extends="VkPipelineCacheCreateFlagBits"
- name="VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT"/>
+ <enum extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT" alias="VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT"/>
+ <enum extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT" alias="VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT"/>
+ <enum extends="VkResult" name="VK_PIPELINE_COMPILE_REQUIRED_EXT" alias="VK_PIPELINE_COMPILE_REQUIRED"/>
+ <enum extends="VkResult" name="VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT" alias="VK_PIPELINE_COMPILE_REQUIRED"/>
+ <enum extends="VkPipelineCacheCreateFlagBits" name="VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT" alias="VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT"/>
<type name="VkPipelineCacheCreateFlagBits" comment="This is a temporary workaround for processors not recognizing that VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT above also requires this type"/>
</require>
</extension>
- <extension name="VK_KHR_extension_299" number="299" author="KHR" contact="Mark Bellamy @mark.bellamy_arm" supported="disabled">
- <require>
- <enum value="0" name="VK_KHR_EXTENSION_299_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_299&quot;" name="VK_KHR_EXTENSION_299_EXTENSION_NAME"/>
- <enum bitpos="2" extends="VkMemoryHeapFlagBits" name="VK_MEMORY_HEAP_RESERVED_2_BIT_KHR"/>
- <enum extends="VkPipelineCacheCreateFlagBits" name="VK_PIPELINE_CACHE_CREATE_RESERVED_1_BIT_KHR" alias="VK_PIPELINE_CACHE_CREATE_RESERVED_1_BIT_EXT"/>
- <enum bitpos="2" extends="VkPipelineCacheCreateFlagBits" name="VK_PIPELINE_CACHE_CREATE_RESERVED_2_BIT_KHR"/>
+ <extension name="VK_KHR_extension_299" number="299" type="device" author="KHR" contact="Mark Bellamy @mark.bellamy_arm" supported="disabled">
+ <require comment="used for Vulkan SC 1.0 namespace">
+ <enum value="0" name="VK_KHR_EXTENSION_299_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_299&quot;" name="VK_KHR_EXTENSION_299_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_video_encode_queue" number="300" type="device" requires="VK_KHR_video_queue,VK_KHR_synchronization2" author="KHR" contact="ahmed.abdelkalek@amd.com" provisional="true" platform="provisional" supported="vulkan">
+ <extension name="VK_KHR_video_encode_queue" number="300" type="device" depends="VK_KHR_video_queue+VK_KHR_synchronization2" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" supported="vulkan" ratified="vulkan">
<require>
- <enum value="2" name="VK_KHR_VIDEO_ENCODE_QUEUE_SPEC_VERSION"/>
+ <enum value="12" name="VK_KHR_VIDEO_ENCODE_QUEUE_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_video_encode_queue&quot;" name="VK_KHR_VIDEO_ENCODE_QUEUE_EXTENSION_NAME"/>
- <enum bitpos="27" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS" />
- <enum bitpos="37" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS" />
- <enum bitpos="38" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="6" extends="VkQueueFlagBits" name="VK_QUEUE_VIDEO_ENCODE_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="15" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_ENCODE_DST_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="16" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_ENCODE_SRC_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="13" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="14" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="15" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="27" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum bitpos="28" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="1" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="2" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
- <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <!-- VkPipelineStageFlagBits bitpos="27" is reserved by this extension, but not used -->
+ <enum bitpos="27" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR"/>
+ <enum bitpos="37" extends="VkAccessFlagBits2" name="VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR"/>
+ <enum bitpos="38" extends="VkAccessFlagBits2" name="VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_CAPABILITIES_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_USAGE_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_PROPERTIES_KHR"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_GET_INFO_KHR"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_FEEDBACK_INFO_KHR"/>
+ <enum bitpos="6" extends="VkQueueFlagBits" name="VK_QUEUE_VIDEO_ENCODE_BIT_KHR"/>
+ <enum bitpos="1" extends="VkVideoCodingControlFlagBitsKHR" name="VK_VIDEO_CODING_CONTROL_ENCODE_RATE_CONTROL_BIT_KHR"/>
+ <enum bitpos="2" extends="VkVideoCodingControlFlagBitsKHR" name="VK_VIDEO_CODING_CONTROL_ENCODE_QUALITY_LEVEL_BIT_KHR"/>
+ <enum bitpos="15" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_ENCODE_DST_BIT_KHR"/>
+ <enum bitpos="16" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_VIDEO_ENCODE_SRC_BIT_KHR"/>
+ <enum bitpos="13" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR"/>
+ <enum bitpos="14" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR"/>
+ <enum bitpos="15" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR"/>
+ <enum bitpos="27" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR"/>
+ <enum bitpos="28" extends="VkFormatFeatureFlagBits" name="VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR"/>
+ <enum bitpos="1" extends="VkVideoSessionCreateFlagBitsKHR" name="VK_VIDEO_SESSION_CREATE_ALLOW_ENCODE_PARAMETER_OPTIMIZATIONS_BIT_KHR"/>
+ <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR"/>
+ <enum offset="1" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR"/>
+ <enum offset="2" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR"/>
+ <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR"/>
+ <enum offset="0" extends="VkQueryResultStatusKHR" dir="-" name="VK_QUERY_RESULT_STATUS_INSUFFICIENT_BITSTREAM_BUFFER_RANGE_KHR"/>
+
+ <enum offset="0" extends="VkResult" dir="-" name="VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR"/>
- <type name="VkVideoEncodeFlagBitsKHR"/>
<type name="VkVideoEncodeFlagsKHR"/>
<type name="VkVideoEncodeInfoKHR"/>
- <type name="VkVideoEncodeRateControlFlagBitsKHR"/>
+ <type name="VkVideoEncodeCapabilityFlagBitsKHR"/>
+ <type name="VkVideoEncodeCapabilityFlagsKHR"/>
+ <type name="VkVideoEncodeCapabilitiesKHR"/>
+
+ <type name="VkQueryPoolVideoEncodeFeedbackCreateInfoKHR"/>
+ <type name="VkVideoEncodeFeedbackFlagBitsKHR"/>
+ <type name="VkVideoEncodeFeedbackFlagsKHR"/>
+
+ <type name="VkVideoEncodeUsageFlagBitsKHR"/>
+ <type name="VkVideoEncodeUsageFlagsKHR"/>
+ <type name="VkVideoEncodeContentFlagBitsKHR"/>
+ <type name="VkVideoEncodeContentFlagsKHR"/>
+ <type name="VkVideoEncodeTuningModeKHR"/>
+ <type name="VkVideoEncodeUsageInfoKHR"/>
+
<type name="VkVideoEncodeRateControlFlagsKHR"/>
<type name="VkVideoEncodeRateControlModeFlagBitsKHR"/>
<type name="VkVideoEncodeRateControlModeFlagsKHR"/>
-
<type name="VkVideoEncodeRateControlInfoKHR"/>
+ <type name="VkVideoEncodeRateControlLayerInfoKHR"/>
+
+ <type name="VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR"/>
+ <type name="VkVideoEncodeQualityLevelPropertiesKHR"/>
+ <type name="VkVideoEncodeQualityLevelInfoKHR"/>
+
+ <type name="VkVideoEncodeSessionParametersGetInfoKHR"/>
+ <type name="VkVideoEncodeSessionParametersFeedbackInfoKHR"/>
+
+ <command name="vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR"/>
+ <command name="vkGetEncodedVideoSessionParametersKHR"/>
<command name="vkCmdEncodeVideoKHR"/>
</require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="27" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR"/>
+ <enum bitpos="28" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR"/>
+ </require>
</extension>
- <extension name="VK_NV_device_diagnostics_config" number="301" type="device" requires="VK_KHR_get_physical_device_properties2" author="NV" contact="Kedarnath Thangudu @kthangudu" supported="vulkan">
+ <extension name="VK_NV_device_diagnostics_config" number="301" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Kedarnath Thangudu @kthangudu" supported="vulkan">
<require>
- <enum value="1" name="VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION"/>
+ <enum value="2" name="VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION"/>
<enum value="&quot;VK_NV_device_diagnostics_config&quot;" name="VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV"/>
@@ -15735,72 +21124,143 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkDeviceDiagnosticsConfigFlagBitsNV"/>
</require>
</extension>
- <extension name="VK_QCOM_render_pass_store_ops" number="302" type="device" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="vulkan">
+ <extension name="VK_QCOM_render_pass_store_ops" number="302" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
<require>
<enum value="2" name="VK_QCOM_RENDER_PASS_STORE_OPS_SPEC_VERSION"/>
<enum value="&quot;VK_QCOM_render_pass_store_ops&quot;" name="VK_QCOM_RENDER_PASS_STORE_OPS_EXTENSION_NAME"/>
- <enum extends="VkAttachmentStoreOp" name="VK_ATTACHMENT_STORE_OP_NONE_QCOM" alias="VK_ATTACHMENT_STORE_OP_NONE_EXT"/>
+ <enum extends="VkAttachmentStoreOp" name="VK_ATTACHMENT_STORE_OP_NONE_QCOM" alias="VK_ATTACHMENT_STORE_OP_NONE"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_303" number="303" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_303" number="303" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_303_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_303&quot;" name="VK_QCOM_extension_303_EXTENSION_NAME"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_303_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_303&quot;" name="VK_QCOM_EXTENSION_303_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_304" number="304" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_304" number="304" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_304_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_304&quot;" name="VK_QCOM_extension_304_EXTENSION_NAME"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_304_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_304&quot;" name="VK_QCOM_EXTENSION_304_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_305" number="305" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_305" number="305" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_305_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_305&quot;" name="VK_QCOM_extension_305_EXTENSION_NAME"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_305_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_305&quot;" name="VK_QCOM_EXTENSION_305_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_306" number="306" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_306" number="306" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_306_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_306&quot;" name="VK_QCOM_extension_306_EXTENSION_NAME"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_306_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_306&quot;" name="VK_QCOM_EXTENSION_306_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_307" number="307" author="QCOM" contact="Bill Licea-Kane @wwlk" supported="disabled">
+ <extension name="VK_QCOM_extension_307" number="307" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_QCOM_EXTENSION_307_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_307&quot;" name="VK_QCOM_EXTENSION_307_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_cuda_kernel_launch" number="308" type="device" author="NV" contact="Tristan Lorach @tlorach" supported="vulkan" provisional="true">
<require>
- <enum value="0" name="VK_QCOM_extension_307_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_307&quot;" name="VK_QCOM_extension_307_EXTENSION_NAME"/>
+ <enum value="2" name="VK_NV_CUDA_KERNEL_LAUNCH_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_cuda_kernel_launch&quot;" name="VK_NV_CUDA_KERNEL_LAUNCH_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_CUDA_MODULE_CREATE_INFO_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_CUDA_FUNCTION_CREATE_INFO_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_CUDA_LAUNCH_INFO_NV"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_FEATURES_NV"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_PROPERTIES_NV"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_CUDA_MODULE_NV"/>
+ <enum offset="1" extends="VkObjectType" name="VK_OBJECT_TYPE_CUDA_FUNCTION_NV"/>
+ <type name="VkCudaModuleNV"/>
+ <type name="VkCudaFunctionNV"/>
+ <type name="VkCudaModuleCreateInfoNV"/>
+ <type name="VkCudaFunctionCreateInfoNV"/>
+ <type name="VkCudaLaunchInfoNV"/>
+ <type name="VkPhysicalDeviceCudaKernelLaunchFeaturesNV"/>
+ <type name="VkPhysicalDeviceCudaKernelLaunchPropertiesNV"/>
+ <command name="vkCreateCudaModuleNV"/>
+ <command name="vkGetCudaModuleCacheNV"/>
+ <command name="vkCreateCudaFunctionNV"/>
+ <command name="vkDestroyCudaModuleNV"/>
+ <command name="vkDestroyCudaFunctionNV"/>
+ <command name="vkCmdCudaLaunchKernelNV"/>
</require>
- </extension>
- <extension name="VK_NV_extension_308" number="308" type="device" author="NV" contact="Tristan Lorach @tlorach" supported="disabled">
- <require>
- <enum value="0" name="VK_NV_EXTENSION_308_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_308&quot;" name="VK_NV_EXTENSION_308_EXTENSION_NAME"/>
+ <require depends="VK_EXT_debug_report">
+ <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_CUDA_MODULE_NV_EXT"/>
+ <enum offset="1" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_CUDA_FUNCTION_NV_EXT"/>
</require>
</extension>
- <extension name="VK_KHR_extension_309" number="309" author="KHR" contact="Aidan Fabius @afabius" supported="disabled">
+ <extension name="VK_KHR_object_refresh" number="309" type="device" author="KHR" contact="Aidan Fabius @afabius" supported="vulkansc" ratified="vulkansc">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_309_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_309&quot;" name="VK_KHR_EXTENSION_309_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_OBJECT_REFRESH_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_object_refresh&quot;" name="VK_KHR_OBJECT_REFRESH_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_REFRESH_OBJECT_LIST_KHR"/>
+ <type name="VkRefreshObjectListKHR"/>
+ <type name="VkRefreshObjectKHR"/>
+ <type name="VkRefreshObjectFlagBitsKHR"/>
+ <type name="VkRefreshObjectFlagsKHR"/>
+ <command name="vkCmdRefreshObjectsKHR"/>
+ <command name="vkGetPhysicalDeviceRefreshableObjectTypesKHR"/>
</require>
</extension>
- <extension name="VK_QCOM_extension_310" number="310" author="QCOM" contact="Jeff Leger @jackohound" supported="disabled">
+ <extension name="VK_QCOM_extension_310" number="310" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
<require>
- <enum value="0" name="VK_QCOM_extension_310_SPEC_VERSION"/>
- <enum value="&quot;VK_QCOM_extension_310&quot;" name="VK_QCOM_extension_310_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RESERVED_QCOM"/>
+ <enum value="0" name="VK_QCOM_EXTENSION_310_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_310&quot;" name="VK_QCOM_EXTENSION_310_EXTENSION_NAME"/>
+ <enum bitpos="27" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_RESERVED_27_BIT_QCOM"/>
+ <enum bitpos="27" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_RESERVED_27_BIT_QCOM"/>
+ <enum bitpos="51" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_51_BIT_QCOM"/>
+ <enum bitpos="52" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_52_BIT_QCOM"/>
+ <enum bitpos="53" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_53_BIT_QCOM"/>
+ <enum bitpos="54" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_54_BIT_QCOM"/>
</require>
</extension>
- <extension name="VK_NV_extension_311" number="311" author="NV" contact="Charles Hansen @cshansen" supported="disabled">
+ <extension name="VK_NV_low_latency" number="311" author="NV" type="device" supported="vulkan" contact="Charles Hansen @cshansen" >
<require>
- <enum value="0" name="VK_NV_EXTENSION_311_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_311&quot;" name="VK_NV_EXTENSION_311_EXTENSION_NAME"/>
+ <enum value="1" name="VK_NV_LOW_LATENCY_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_low_latency&quot;" name="VK_NV_LOW_LATENCY_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUERY_LOW_LATENCY_SUPPORT_NV"/>
+ <type name="VkQueryLowLatencySupportNV"/>
</require>
</extension>
- <extension name="VK_EXT_extension_312" number="312" author="MVK" contact="Bill Hollings @billhollings" supported="disabled">
+ <extension name="VK_EXT_metal_objects" number="312" type="device" platform="metal" supported="vulkan" author="EXT" contact="Bill Hollings @billhollings">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_312_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_312&quot;" name="VK_EXT_EXTENSION_312_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_METAL_OBJECTS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_metal_objects&quot;" name="VK_EXT_METAL_OBJECTS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECT_CREATE_INFO_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_METAL_BUFFER_INFO_EXT"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_METAL_TEXTURE_INFO_EXT"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_METAL_IO_SURFACE_INFO_EXT"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT"/>
+ <enum offset="11" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT"/>
+ <type name="VkExportMetalObjectTypeFlagBitsEXT"/>
+ <type name="VkExportMetalObjectTypeFlagsEXT"/>
+ <type name="VkExportMetalObjectCreateInfoEXT"/>
+ <type name="VkExportMetalObjectsInfoEXT"/>
+ <type name="VkExportMetalDeviceInfoEXT"/>
+ <type name="VkExportMetalCommandQueueInfoEXT"/>
+ <type name="VkExportMetalBufferInfoEXT"/>
+ <type name="VkImportMetalBufferInfoEXT"/>
+ <type name="VkExportMetalTextureInfoEXT"/>
+ <type name="VkImportMetalTextureInfoEXT"/>
+ <type name="VkExportMetalIOSurfaceInfoEXT"/>
+ <type name="VkImportMetalIOSurfaceInfoEXT"/>
+ <type name="VkExportMetalSharedEventInfoEXT"/>
+ <type name="VkImportMetalSharedEventInfoEXT"/>
+ <type name="MTLDevice_id"/>
+ <type name="MTLCommandQueue_id"/>
+ <type name="MTLBuffer_id"/>
+ <type name="MTLTexture_id"/>
+ <type name="MTLSharedEvent_id"/>
+ <type name="IOSurfaceRef"/>
+ <command name="vkExportMetalObjectsEXT"/>
</require>
</extension>
<extension name="VK_EXT_extension_313" number="313" author="MVK" contact="Bill Hollings @billhollings" supported="disabled">
@@ -15815,23 +21275,24 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_314&quot;" name="VK_AMD_EXTENSION_314_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_synchronization2" number="315" type="device" author="KHR" requires="VK_KHR_get_physical_device_properties2" contact="Tobias Hector @tobski" supported="vulkan">
+ <extension name="VK_KHR_synchronization2" number="315" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Tobias Hector @tobski" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_synchronization2&quot;" name="VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR"/>
- <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR"/>
- <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR"/>
- <enum bitpos="0" extends="VkEventCreateFlagBits" name="VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR"/>
- <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR"/>
- <enum offset="1" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR"/>
- <enum value="0" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_NONE_KHR"/>
- <enum value="0" extends="VkAccessFlagBits" name="VK_ACCESS_NONE_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR" alias="VK_STRUCTURE_TYPE_MEMORY_BARRIER_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR" alias="VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR" alias="VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR" alias="VK_STRUCTURE_TYPE_DEPENDENCY_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_SUBMIT_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR" alias="VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR" alias="VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES"/>
+ <enum extends="VkEventCreateFlagBits" name="VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR" alias="VK_EVENT_CREATE_DEVICE_ONLY_BIT"/>
+ <enum extends="VkImageLayout" name="VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR" alias="VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL"/>
+ <enum extends="VkImageLayout" name="VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR" alias="VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL"/>
+ <enum extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_NONE_KHR" alias="VK_PIPELINE_STAGE_NONE"/>
+ <enum extends="VkAccessFlagBits" name="VK_ACCESS_NONE_KHR" alias="VK_ACCESS_NONE"/>
+ <type name="VkFlags64"/>
<type name="VkPipelineStageFlags2KHR"/>
<type name="VkPipelineStageFlagBits2KHR"/>
<type name="VkAccessFlags2KHR"/>
@@ -15853,64 +21314,68 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdWriteTimestamp2KHR"/>
<command name="vkQueueSubmit2KHR"/>
</require>
- <require extension="VK_EXT_transform_feedback">
- <enum bitpos="24" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT"/>
- <enum bitpos="25" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT"/>
- <enum bitpos="26" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT"/>
- <enum bitpos="27" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT"/>
+ <require depends="VK_EXT_transform_feedback">
+ <enum bitpos="24" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT"/>
+ <enum bitpos="25" extends="VkAccessFlagBits2" name="VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT"/>
+ <enum bitpos="26" extends="VkAccessFlagBits2" name="VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT"/>
+ <enum bitpos="27" extends="VkAccessFlagBits2" name="VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT"/>
</require>
- <require extension="VK_EXT_conditional_rendering">
- <enum bitpos="18" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT" comment="A pipeline stage for conditional rendering predicate fetch"/>
- <enum bitpos="20" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT" comment="read access flag for reading conditional rendering predicate"/>
+ <require depends="VK_EXT_conditional_rendering">
+ <enum bitpos="18" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT" comment="A pipeline stage for conditional rendering predicate fetch"/>
+ <enum bitpos="20" extends="VkAccessFlagBits2" name="VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT" comment="read access flag for reading conditional rendering predicate"/>
</require>
- <require extension="VK_NV_device_generated_commands">
- <enum bitpos="17" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV"/>
- <enum bitpos="17" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV"/>
- <enum bitpos="18" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV"/>
+ <require depends="VK_NV_device_generated_commands">
+ <enum bitpos="17" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV"/>
+ <enum bitpos="17" extends="VkAccessFlagBits2" name="VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV"/>
+ <enum bitpos="18" extends="VkAccessFlagBits2" name="VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV"/>
</require>
- <require extension="VK_KHR_fragment_shading_rate">
- <enum bitpos="22" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
- <enum bitpos="23" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR"/>
+ <require depends="VK_KHR_fragment_shading_rate">
+ <enum bitpos="22" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
+ <enum bitpos="23" extends="VkAccessFlagBits2" name="VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR"/>
</require>
- <require extension="VK_NV_shading_rate_image">
- <enum extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV" alias="VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
- <enum extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV" alias="VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR"/>
+ <require depends="VK_NV_shading_rate_image">
+ <enum extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV" alias="VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
+ <enum extends="VkAccessFlagBits2" name="VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV" alias="VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR"/>
</require>
- <require extension="VK_KHR_acceleration_structure">
- <enum bitpos="25" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"/>
- <enum bitpos="21" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR"/>
- <enum bitpos="22" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR"/>
+ <require depends="VK_KHR_acceleration_structure">
+ <enum bitpos="25" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"/>
+ <enum bitpos="21" extends="VkAccessFlagBits2" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR"/>
+ <enum bitpos="22" extends="VkAccessFlagBits2" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR"/>
</require>
- <require extension="VK_KHR_ray_tracing_pipeline">
- <enum bitpos="21" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR"/>
+ <require depends="VK_KHR_ray_tracing_pipeline">
+ <enum bitpos="21" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR"/>
</require>
- <require extension="VK_NV_ray_tracing">
- <enum extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV" alias="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR"/>
- <enum extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV" alias="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"/>
- <enum extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV" alias="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR"/>
- <enum extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV" alias="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR"/>
+ <require depends="VK_NV_ray_tracing">
+ <enum extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV" alias="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR"/>
+ <enum extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV" alias="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"/>
+ <enum extends="VkAccessFlagBits2" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV" alias="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR"/>
+ <enum extends="VkAccessFlagBits2" name="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV" alias="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR"/>
</require>
- <require extension="VK_EXT_fragment_density_map">
- <enum bitpos="23" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT"/>
- <enum bitpos="24" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT"/>
+ <require depends="VK_EXT_fragment_density_map">
+ <enum bitpos="23" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT"/>
+ <enum bitpos="24" extends="VkAccessFlagBits2" name="VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT"/>
</require>
- <require extension="VK_EXT_blend_operation_advanced">
- <enum bitpos="19" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT"/>
+ <require depends="VK_EXT_blend_operation_advanced">
+ <enum bitpos="19" extends="VkAccessFlagBits2" name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT"/>
</require>
- <require extension="VK_NV_mesh_shader">
- <enum bitpos="19" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV"/>
- <enum bitpos="20" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV"/>
+ <require depends="VK_NV_mesh_shader">
+ <enum extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV" alias="VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT"/>
+ <enum extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV" alias="VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT"/>
</require>
- <require extension="VK_AMD_buffer_marker">
+ <require depends="VK_AMD_buffer_marker">
<command name="vkCmdWriteBufferMarker2AMD"/>
</require>
- <require extension="VK_NV_device_diagnostic_checkpoints">
+ <require depends="VK_NV_device_diagnostic_checkpoints">
<type name="VkQueueFamilyCheckpointProperties2NV"/>
<type name="VkCheckpointData2NV"/>
<command name="vkGetQueueCheckpointData2NV"/>
<enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV"/>
<enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV"/>
</require>
+ <require depends="VK_EXT_mesh_shader">
+ <enum bitpos="19" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT"/>
+ <enum bitpos="20" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT"/>
+ </require>
</extension>
<extension name="VK_AMD_extension_316" number="316" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
<require>
@@ -15918,10 +21383,62 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_316&quot;" name="VK_AMD_EXTENSION_316_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_AMD_extension_317" number="317" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
- <require>
- <enum value="0" name="VK_AMD_EXTENSION_317_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_317&quot;" name="VK_AMD_EXTENSION_317_EXTENSION_NAME"/>
+ <extension name="VK_EXT_descriptor_buffer" number="317" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2+VK_KHR_buffer_device_address+VK_KHR_synchronization2+VK_EXT_descriptor_indexing" contact="Tobias Hector @tobski" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_DESCRIPTOR_BUFFER_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_descriptor_buffer&quot;" name="VK_EXT_DESCRIPTOR_BUFFER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_DENSITY_MAP_PROPERTIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_VIEW_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SAMPLER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OPAQUE_CAPTURE_DESCRIPTOR_DATA_CREATE_INFO_EXT"/>
+ <enum offset="11" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT"/>
+ <enum offset="12" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_PUSH_DESCRIPTOR_BUFFER_HANDLE_EXT"/>
+ <enum bitpos="4" extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <enum bitpos="5" extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT"/>
+ <enum bitpos="21" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <enum bitpos="22" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <enum bitpos="26" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <enum bitpos="5" extends="VkBufferCreateFlagBits" name="VK_BUFFER_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT"/>
+ <enum bitpos="16" extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT"/>
+ <enum bitpos="2" extends="VkImageViewCreateFlagBits" name="VK_IMAGE_VIEW_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT"/>
+ <enum bitpos="3" extends="VkSamplerCreateFlagBits" name="VK_SAMPLER_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT"/>
+ <enum bitpos="3" extends="VkAccelerationStructureCreateFlagBitsKHR" name="VK_ACCELERATION_STRUCTURE_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT"/>
+ <enum bitpos="41" extends="VkAccessFlagBits2" name="VK_ACCESS_2_DESCRIPTOR_BUFFER_READ_BIT_EXT"/>
+ <enum bitpos="29" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <type name="VkPhysicalDeviceDescriptorBufferPropertiesEXT"/>
+ <type name="VkPhysicalDeviceDescriptorBufferDensityMapPropertiesEXT"/>
+ <type name="VkPhysicalDeviceDescriptorBufferFeaturesEXT"/>
+ <type name="VkDescriptorAddressInfoEXT"/>
+ <type name="VkDescriptorBufferBindingInfoEXT"/>
+ <type name="VkDescriptorBufferBindingPushDescriptorBufferHandleEXT"/>
+ <type name="VkDescriptorDataEXT"/>
+ <type name="VkDescriptorGetInfoEXT"/>
+ <type name="VkBufferCaptureDescriptorDataInfoEXT"/>
+ <type name="VkImageCaptureDescriptorDataInfoEXT"/>
+ <type name="VkImageViewCaptureDescriptorDataInfoEXT"/>
+ <type name="VkSamplerCaptureDescriptorDataInfoEXT"/>
+ <type name="VkOpaqueCaptureDescriptorDataCreateInfoEXT"/>
+ <command name="vkGetDescriptorSetLayoutSizeEXT"/>
+ <command name="vkGetDescriptorSetLayoutBindingOffsetEXT"/>
+ <command name="vkGetDescriptorEXT"/>
+ <command name="vkCmdBindDescriptorBuffersEXT"/>
+ <command name="vkCmdSetDescriptorBufferOffsetsEXT"/>
+ <command name="vkCmdBindDescriptorBufferEmbeddedSamplersEXT"/>
+ <command name="vkGetBufferOpaqueCaptureDescriptorDataEXT"/>
+ <command name="vkGetImageOpaqueCaptureDescriptorDataEXT"/>
+ <command name="vkGetImageViewOpaqueCaptureDescriptorDataEXT"/>
+ <command name="vkGetSamplerOpaqueCaptureDescriptorDataEXT"/>
+ </require>
+ <require depends="VK_KHR_acceleration_structure,VK_NV_ray_tracing">
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT"/>
+ <type name="VkAccelerationStructureCaptureDescriptorDataInfoEXT"/>
+ <command name="vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT"/>
</require>
</extension>
<extension name="VK_AMD_extension_318" number="318" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
@@ -15944,27 +21461,43 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_AMD_extension_320&quot;" name="VK_AMD_EXTENSION_320_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_AMD_extension_321" number="321" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
+ <extension name="VK_EXT_graphics_pipeline_library" number="321" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_pipeline_library" author="AMD" contact="Tobias Hector @tobski" supported="vulkan">
<require>
- <enum value="0" name="VK_AMD_EXTENSION_321_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_321&quot;" name="VK_AMD_EXTENSION_321_EXTENSION_NAME"/>
- <enum bitpos="23" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RESERVED_23_BIT_AMD"/>
- <enum bitpos="10" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RESERVED_10_BIT_AMD"/>
+ <enum value="1" name="VK_EXT_GRAPHICS_PIPELINE_LIBRARY_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_graphics_pipeline_library&quot;" name="VK_EXT_GRAPHICS_PIPELINE_LIBRARY_EXTENSION_NAME"/>
+ <type name="VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT"/>
+ <type name="VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT"/>
+ <type name="VkGraphicsPipelineLibraryCreateInfoEXT"/>
+ <type name="VkGraphicsPipelineLibraryFlagBitsEXT"/>
+ <type name="VkGraphicsPipelineLibraryFlagsEXT"/>
+ <type name="VkPipelineLayoutCreateFlagBits"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT"/>
+ <enum bitpos="23" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT"/>
+ <enum bitpos="10" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT"/>
+ <enum bitpos="1" extends="VkPipelineLayoutCreateFlagBits" name="VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT"/>
</require>
</extension>
- <extension name="VK_AMD_extension_322" number="322" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
+ <extension name="VK_AMD_shader_early_and_late_fragment_tests" number="322" author="EXT" contact="Tobias Hector @tobski" type="device" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
- <enum value="0" name="VK_AMD_EXTENSION_322_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_322&quot;" name="VK_AMD_EXTENSION_322_EXTENSION_NAME"/>
+ <enum value="1" name="VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_shader_early_and_late_fragment_tests&quot;" name="VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_EXTENSION_NAME"/>
+ <type name="VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD"/>
</require>
</extension>
- <extension name="VK_AMD_extension_323" number="323" author="AMD" contact="Martin Dinkov @mdinkov" supported="disabled">
+ <extension name="VK_KHR_fragment_shader_barycentric" number="323" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Stu Smith" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_AMD_EXTENSION_323_SPEC_VERSION"/>
- <enum value="&quot;VK_AMD_extension_323&quot;" name="VK_AMD_EXTENSION_323_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_fragment_shader_barycentric&quot;" name="VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="204" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR"/>
+ <type name="VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR"/>
+ <type name="VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_shader_subgroup_uniform_control_flow" number="324" type="device" requiresCore="1.1" author="KHR" contact="Alan Baker @alan-baker" supported="vulkan">
+ <extension name="VK_KHR_shader_subgroup_uniform_control_flow" number="324" type="device" depends="VK_VERSION_1_1" author="KHR" contact="Alan Baker @alan-baker" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_shader_subgroup_uniform_control_flow&quot;" name="VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_EXTENSION_NAME"/>
@@ -15978,15 +21511,15 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_KHR_extension_325&quot;" name="VK_KHR_EXTENSION_325_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_zero_initialize_workgroup_memory" number="326" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alan Baker @alan-baker" supported="vulkan">
+ <extension name="VK_KHR_zero_initialize_workgroup_memory" number="326" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Alan Baker @alan-baker" supported="vulkan" promotedto="VK_VERSION_1_3" ratified="vulkan">
<require>
- <enum value="1" name="VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION"/>
+ <enum value="1" name="VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_zero_initialize_workgroup_memory&quot;" name="VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES"/>
<type name="VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_NV_fragment_shading_rate_enums" number="327" type="device" requires="VK_KHR_fragment_shading_rate" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
+ <extension name="VK_NV_fragment_shading_rate_enums" number="327" type="device" depends="VK_KHR_fragment_shading_rate" author="NV" contact="Pat Brown @nvpbrown" supported="vulkan">
<require>
<enum value="1" name="VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION"/>
<enum value="&quot;VK_NV_fragment_shading_rate_enums&quot;" name="VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME"/>
@@ -16001,8 +21534,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetFragmentShadingRateEnumNV"/>
</require>
</extension>
- <extension name="VK_NV_ray_tracing_motion_blur" number="328" type="device"
- requires="VK_KHR_ray_tracing_pipeline" author="NV" contact="Eric Werness" supported="vulkan">
+ <extension name="VK_NV_ray_tracing_motion_blur" number="328" type="device" depends="VK_KHR_ray_tracing_pipeline" author="NV" contact="Eric Werness" supported="vulkan">
<require>
<enum value="1" name="VK_NV_RAY_TRACING_MOTION_BLUR_SPEC_VERSION"/>
<enum value="&quot;VK_NV_ray_tracing_motion_blur&quot;" name="VK_NV_RAY_TRACING_MOTION_BLUR_EXTENSION_NAME"/>
@@ -16025,10 +21557,28 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkAccelerationStructureMotionInstanceFlagsNV"/>
</require>
</extension>
- <extension name="VK_NV_extension_329" number="329" author="NV" contact="Pat Brown @nvpbrown" supported="disabled">
+ <extension name="VK_EXT_mesh_shader" number="329" type="device" depends="VK_KHR_spirv_1_4" author="EXT" sortorder="1" contact="Christoph Kubisch @pixeljetstream" supported="vulkan">
<require>
- <enum value="0" name="VK_NV_EXTENSION_329_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_329&quot;" name="VK_NV_EXTENSION_329_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_MESH_SHADER_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_mesh_shader&quot;" name="VK_EXT_MESH_SHADER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT"/>
+ <enum bitpos="6" extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_TASK_BIT_EXT"/>
+ <enum bitpos="7" extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_MESH_BIT_EXT"/>
+ <enum bitpos="19" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT"/>
+ <enum bitpos="20" extends="VkPipelineStageFlagBits" name="VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT"/>
+ <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT"/>
+ <enum bitpos="11" extends="VkQueryPipelineStatisticFlagBits" name="VK_QUERY_PIPELINE_STATISTIC_TASK_SHADER_INVOCATIONS_BIT_EXT"/>
+ <enum bitpos="12" extends="VkQueryPipelineStatisticFlagBits" name="VK_QUERY_PIPELINE_STATISTIC_MESH_SHADER_INVOCATIONS_BIT_EXT"/>
+ <command name="vkCmdDrawMeshTasksEXT"/>
+ <command name="vkCmdDrawMeshTasksIndirectEXT"/>
+ <command name="vkCmdDrawMeshTasksIndirectCountEXT"/>
+ <type name="VkPhysicalDeviceMeshShaderFeaturesEXT"/>
+ <type name="VkPhysicalDeviceMeshShaderPropertiesEXT"/>
+ <type name="VkDrawMeshTasksIndirectCommandEXT"/>
+ </require>
+ <require depends="VK_NV_device_generated_commands">
+ <enum offset="0" extends="VkIndirectCommandsTokenTypeNV" name="VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV"/>
</require>
</extension>
<extension name="VK_NV_extension_330" number="330" author="NV" contact="Liam Middlebrook @liam-middlebrook" supported="disabled">
@@ -16037,15 +21587,20 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_NV_extension_330&quot;" name="VK_NV_EXTENSION_330_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_ycbcr_2plane_444_formats" number="331" type="device" requires="VK_KHR_sampler_ycbcr_conversion" author="EXT" contact="Tony Zlatinski @tzlatinski" supported="vulkan">
+ <extension name="VK_EXT_ycbcr_2plane_444_formats" number="331" type="device" depends="VK_KHR_sampler_ycbcr_conversion,VK_VERSION_1_1" author="EXT" contact="Tony Zlatinski @tzlatinski" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
+ <comment>
+ VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT and
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT
+ were not promoted to Vulkan 1.3.
+ </comment>
<enum value="1" name="VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_ycbcr_2plane_444_formats&quot;" name="VK_EXT_YCBCR_2PLANE_444_FORMATS_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT"/>
- <enum offset="0" extends="VkFormat" name="VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT"/>
- <enum offset="1" extends="VkFormat" name="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT"/>
- <enum offset="2" extends="VkFormat" name="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT"/>
- <enum offset="3" extends="VkFormat" name="VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT"/>
+ <enum extends="VkFormat" name="VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT" alias="VK_FORMAT_G8_B8R8_2PLANE_444_UNORM"/>
+ <enum extends="VkFormat" name="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT" alias="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16"/>
+ <enum extends="VkFormat" name="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT" alias="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16"/>
+ <enum extends="VkFormat" name="VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT" alias="VK_FORMAT_G16_B16R16_2PLANE_444_UNORM"/>
<type name="VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT"/>
</require>
</extension>
@@ -16055,7 +21610,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_NV_extension_332&quot;" name="VK_NV_EXTENSION_332_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_fragment_density_map2" number="333" type="device" requires="VK_EXT_fragment_density_map" author="EXT" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <extension name="VK_EXT_fragment_density_map2" number="333" type="device" depends="VK_EXT_fragment_density_map" author="EXT" contact="Matthew Netsch @mnetsch" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_fragment_density_map2&quot;" name="VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME"/>
@@ -16066,29 +21621,29 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceFragmentDensityMap2PropertiesEXT"/>
</require>
</extension>
- <extension name="VK_QCOM_rotated_copy_commands" number="334" type="device" requires="VK_KHR_swapchain,VK_KHR_copy_commands2" author="QCOM" contact="Jeff Leger @jackohound" supported="vulkan">
+ <extension name="VK_QCOM_rotated_copy_commands" number="334" type="device" depends="VK_KHR_copy_commands2" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
<require>
- <enum value="1" name="VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION"/>
+ <enum value="2" name="VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION"/>
<enum value="&quot;VK_QCOM_rotated_copy_commands&quot;" name="VK_QCOM_ROTATED_COPY_COMMANDS_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM"/>
<type name="VkCopyCommandTransformInfoQCOM"/>
</require>
</extension>
- <extension name="VK_KHR_extension_335" number="335" author="KHR" contact="Mark Bellamy @mark.bellamy_arm" supported="disabled">
+ <extension name="VK_KHR_extension_335" number="335" type="device" author="KHR" contact="Mark Bellamy @mark.bellamy_arm" supported="disabled">
<require>
<enum value="0" name="VK_KHR_EXTENSION_335_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_extension_335&quot;" name="VK_KHR_EXTENSION_335_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_image_robustness" number="336" type="device" author="EXT" contact="Graeme Leese @gnl21" supported="vulkan" requires="VK_KHR_get_physical_device_properties2">
+ <extension name="VK_EXT_image_robustness" number="336" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Graeme Leese @gnl21" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_image_robustness&quot;" name="VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES"/>
<type name="VkPhysicalDeviceImageRobustnessFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_KHR_workgroup_memory_explicit_layout" number="337" type="device" requires="VK_KHR_get_physical_device_properties2" author="KHR" contact="Caio Marcelo de Oliveira Filho @cmarcelo" supported="vulkan">
+ <extension name="VK_KHR_workgroup_memory_explicit_layout" number="337" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Caio Marcelo de Oliveira Filho @cmarcelo" supported="vulkan" ratified="vulkan">
<require>
<enum value="1" name="VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_workgroup_memory_explicit_layout&quot;" name="VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME"/>
@@ -16096,21 +21651,21 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_KHR_copy_commands2" number="338" author="KHR" type="device" contact="Jeff Leger @jackohound" supported="vulkan">
+ <extension name="VK_KHR_copy_commands2" number="338" author="KHR" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Jeff Leger @jackohound" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3" ratified="vulkan,vulkansc">
<require>
<enum value="1" name="VK_KHR_COPY_COMMANDS_2_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_copy_commands2&quot;" name="VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR"/>
- <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR"/>
- <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR"/>
- <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR"/>
- <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR"/>
- <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR"/>
- <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR"/>
- <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR"/>
- <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR" alias="VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR" alias="VK_STRUCTURE_TYPE_BUFFER_COPY_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR" alias="VK_STRUCTURE_TYPE_IMAGE_COPY_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR" alias="VK_STRUCTURE_TYPE_IMAGE_BLIT_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR" alias="VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR" alias="VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2"/>
<type name="VkCopyBufferInfo2KHR"/>
<type name="VkCopyImageInfo2KHR"/>
<type name="VkCopyBufferToImageInfo2KHR"/>
@@ -16130,39 +21685,86 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdResolveImage2KHR"/>
</require>
</extension>
- <extension name="VK_ARM_extension_339" number="339" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
+ <extension name="VK_EXT_image_compression_control" number="339" type="device" author="EXT" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
- <enum value="0" name="VK_ARM_EXTENSION_339_SPEC_VERSION"/>
- <enum value="&quot;VK_ARM_extension_339&quot;" name="VK_ARM_EXTENSION_339_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_IMAGE_COMPRESSION_CONTROL_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_image_compression_control&quot;" name="VK_EXT_IMAGE_COMPRESSION_CONTROL_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceImageCompressionControlFeaturesEXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT"/>
+ <type name="VkImageCompressionControlEXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT" alias="VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR"/>
+ <type name="VkSubresourceLayout2EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT" alias="VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR"/>
+ <type name="VkImageSubresource2EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT"/>
+ <type name="VkImageCompressionPropertiesEXT"/>
+ <type name="VkImageCompressionFlagBitsEXT"/>
+ <type name="VkImageCompressionFlagsEXT"/>
+ <type name="VkImageCompressionFixedRateFlagBitsEXT"/>
+ <type name="VkImageCompressionFixedRateFlagsEXT"/>
+ <enum offset="0" dir="-" extends="VkResult" name="VK_ERROR_COMPRESSION_EXHAUSTED_EXT"/>
+ <command name="vkGetImageSubresourceLayout2EXT"/>
</require>
</extension>
- <extension name="VK_EXT_extension_340" number="340" author="EXT" contact="Joshua Ashton @Joshua-Ashton" supported="disabled">
+ <extension name="VK_EXT_attachment_feedback_loop_layout" number="340" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Joshua Ashton @Joshua-Ashton" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_340_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_340&quot;" name="VK_EXT_EXTENSION_340_EXTENSION_NAME"/>
- <enum bitpos="19" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_19_BIT_EXT"/>
+ <enum value="2" name="VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_attachment_feedback_loop_layout&quot;" name="VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT"/>
+ <enum offset="0" extends="VkImageLayout" name="VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT"/>
+ <enum bitpos="19" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT"/>
+ <enum bitpos="25" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT"/>
+ <enum bitpos="26" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT"/>
+ <enum bitpos="3" extends="VkDependencyFlagBits" name="VK_DEPENDENCY_FEEDBACK_LOOP_BIT_EXT" comment="Dependency may be a feedback loop"/>
+ <type name="VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_4444_formats" number="341" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Joshua Ashton @Joshua-Ashton" supported="vulkan">
+ <extension name="VK_EXT_4444_formats" number="341" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Joshua Ashton @Joshua-Ashton" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
+ <comment>
+ VkPhysicalDevice4444FormatsFeaturesEXT and
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT
+ were not promoted to Vulkan 1.3.
+ </comment>
<enum value="1" name="VK_EXT_4444_FORMATS_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_4444_formats&quot;" name="VK_EXT_4444_FORMATS_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT"/>
- <enum offset="0" extends="VkFormat" name="VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT"/>
- <enum offset="1" extends="VkFormat" name="VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT"/>
+ <enum extends="VkFormat" name="VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT" alias="VK_FORMAT_A4R4G4B4_UNORM_PACK16"/>
+ <enum extends="VkFormat" name="VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT" alias="VK_FORMAT_A4B4G4R4_UNORM_PACK16"/>
<type name="VkPhysicalDevice4444FormatsFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_extension_342" number="342" author="EXT" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <extension name="VK_EXT_device_fault" number="342" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Ralph Potter gitlab:@r_potter" supported="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_342_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_342&quot;" name="VK_EXT_EXTENSION_342_EXTENSION_NAME"/>
+ <enum value="2" name="VK_EXT_DEVICE_FAULT_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_device_fault&quot;" name="VK_EXT_DEVICE_FAULT_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT"/>
+ <type name="VkPhysicalDeviceFaultFeaturesEXT"/>
+ <type name="VkDeviceFaultCountsEXT"/>
+ <type name="VkDeviceFaultInfoEXT"/>
+ <type name="VkDeviceFaultAddressInfoEXT"/>
+ <type name="VkDeviceFaultAddressTypeEXT"/>
+ <type name="VkDeviceFaultVendorInfoEXT"/>
+ <type name="VkDeviceFaultVendorBinaryHeaderVersionEXT"/>
+ <type name="VkDeviceFaultVendorBinaryHeaderVersionOneEXT"/>
+ <command name="vkGetDeviceFaultInfoEXT"/>
</require>
</extension>
- <extension name="VK_ARM_extension_343" number="343" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
+ <extension name="VK_ARM_rasterization_order_attachment_access" number="343" type="device" depends="VK_KHR_get_physical_device_properties2" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan" promotedto="VK_EXT_rasterization_order_attachment_access">
<require>
- <enum value="0" name="VK_ARM_EXTENSION_343_SPEC_VERSION"/>
- <enum value="&quot;VK_ARM_extension_343&quot;" name="VK_ARM_EXTENSION_343_EXTENSION_NAME"/>
+ <enum value="1" name="VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_rasterization_order_attachment_access&quot;" name="VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM"/>
+ <enum extends="VkPipelineColorBlendStateCreateFlagBits" name="VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM" alias="VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_EXT"/>
+ <enum extends="VkPipelineDepthStencilStateCreateFlagBits" name="VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM" alias="VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT"/>
+ <enum extends="VkPipelineDepthStencilStateCreateFlagBits" name="VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM" alias="VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT"/>
+ <enum extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_ARM" alias="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT"/>
+ <enum extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM" alias="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT"/>
+ <enum extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM" alias="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT"/>
</require>
</extension>
<extension name="VK_ARM_extension_344" number="344" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
@@ -16171,13 +21773,15 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_ARM_extension_344&quot;" name="VK_ARM_EXTENSION_344_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_ARM_extension_345" number="345" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
- <require>
- <enum value="0" name="VK_ARM_EXTENSION_345_SPEC_VERSION"/>
- <enum value="&quot;VK_ARM_extension_345&quot;" name="VK_ARM_EXTENSION_345_EXTENSION_NAME"/>
+ <extension name="VK_EXT_rgba10x6_formats" number="345" type="device" depends="VK_KHR_sampler_ycbcr_conversion" author="EXT" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_RGBA10X6_FORMATS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_rgba10x6_formats&quot;" name="VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_NV_acquire_winrt_display" number="346" type="device" requires="VK_EXT_direct_mode_display" author="NV" contact="Jeff Juliano @jjuliano" platform="win32" supported="vulkan">
+ <extension name="VK_NV_acquire_winrt_display" number="346" type="device" depends="VK_EXT_direct_mode_display" author="NV" contact="Jeff Juliano @jjuliano" platform="win32" supported="vulkan">
<require>
<enum value="1" name="VK_NV_ACQUIRE_WINRT_DISPLAY_SPEC_VERSION"/>
<enum value="&quot;VK_NV_acquire_winrt_display&quot;" name="VK_NV_ACQUIRE_WINRT_DISPLAY_EXTENSION_NAME"/>
@@ -16185,7 +21789,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetWinrtDisplayNV"/>
</require>
</extension>
- <extension name="VK_EXT_directfb_surface" number="347" type="instance" requires="VK_KHR_surface" platform="directfb" supported="vulkan" author="EXT" contact="Nicolas Caramelli @caramelli">
+ <extension name="VK_EXT_directfb_surface" number="347" type="instance" depends="VK_KHR_surface" platform="directfb" supported="vulkan" author="EXT" contact="Nicolas Caramelli @caramelli">
<require>
<enum value="1" name="VK_EXT_DIRECTFB_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_directfb_surface&quot;" name="VK_EXT_DIRECTFB_SURFACE_EXTENSION_NAME"/>
@@ -16196,7 +21800,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetPhysicalDeviceDirectFBPresentationSupportEXT"/>
</require>
</extension>
- <extension name="VK_KHR_extension_350" number="350" author="KHR" contact="Mark Bellamy @mark.bellamy_arm" supported="disabled">
+ <extension name="VK_KHR_extension_350" number="350" type="device" author="KHR" contact="Mark Bellamy @mark.bellamy_arm" supported="disabled">
<require>
<enum value="0" name="VK_KHR_EXTENSION_350_SPEC_VERSION"/>
<enum value="&quot;VK_KHR_extension_350&quot;" name="VK_KHR_EXTENSION_350_EXTENSION_NAME"/>
@@ -16208,21 +21812,21 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_NV_extension_351&quot;" name="VK_NV_EXTENSION_351_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_VALVE_mutable_descriptor_type" number="352" type="device" supported="vulkan" author="VALVE" contact="Joshua Ashton @Joshua-Ashton,Hans-Kristian Arntzen @HansKristian-Work" specialuse="d3demulation" requires="VK_KHR_maintenance3">
+ <extension name="VK_VALVE_mutable_descriptor_type" number="352" type="device" supported="vulkan" author="VALVE" contact="Joshua Ashton @Joshua-Ashton,Hans-Kristian Arntzen @HansKristian-Work" specialuse="d3demulation" depends="VK_KHR_maintenance3" promotedto="VK_EXT_mutable_descriptor_type">
<require>
<enum value="1" name="VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION"/>
<enum value="&quot;VK_VALVE_mutable_descriptor_type&quot;" name="VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE"/>
- <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE"/>
- <enum offset="0" extends="VkDescriptorType" name="VK_DESCRIPTOR_TYPE_MUTABLE_VALVE"/>
- <enum bitpos="2" extends="VkDescriptorPoolCreateFlagBits" name="VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE"/>
- <enum bitpos="2" extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE" alias="VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT"/>
+ <enum extends="VkDescriptorType" name="VK_DESCRIPTOR_TYPE_MUTABLE_VALVE" alias="VK_DESCRIPTOR_TYPE_MUTABLE_EXT"/>
+ <enum extends="VkDescriptorPoolCreateFlagBits" name="VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE" alias="VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT"/>
+ <enum extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE" alias="VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT"/>
<type name="VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE"/>
<type name="VkMutableDescriptorTypeListVALVE"/>
<type name="VkMutableDescriptorTypeCreateInfoVALVE"/>
</require>
</extension>
- <extension name="VK_EXT_vertex_input_dynamic_state" number="353" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <extension name="VK_EXT_vertex_input_dynamic_state" number="353" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan,vulkansc">
<require>
<enum value="2" name="VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_vertex_input_dynamic_state&quot;" name="VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME"/>
@@ -16236,7 +21840,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetVertexInputEXT"/>
</require>
</extension>
- <extension name="VK_EXT_physical_device_drm" number="354" author="EXT" type="device" contact="Simon Ser @emersion" supported="vulkan" requires="VK_KHR_get_physical_device_properties2">
+ <extension name="VK_EXT_physical_device_drm" number="354" author="EXT" type="device" contact="Simon Ser @emersion" supported="vulkan" depends="VK_KHR_get_physical_device_properties2">
<require>
<enum value="1" name="VK_EXT_PHYSICAL_DEVICE_DRM_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_physical_device_drm&quot;" name="VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME"/>
@@ -16246,19 +21850,31 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkPhysicalDeviceDrmPropertiesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_extension_355" number="355" author="EXT" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <extension name="VK_EXT_device_address_binding_report" number="355" type="device" depends="VK_KHR_get_physical_device_properties2+VK_EXT_debug_utils" author="EXT" contact="Ralph Potter gitlab:@r_potter" specialuse="debugging,devtools" supported="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_355_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_355&quot;" name="VK_EXT_EXTENSION_355_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_DEVICE_ADDRESS_BINDING_REPORT_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_device_address_binding_report&quot;" name="VK_EXT_DEVICE_ADDRESS_BINDING_REPORT_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ADDRESS_BINDING_REPORT_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_ADDRESS_BINDING_CALLBACK_DATA_EXT"/>
+ <enum bitpos="3" extends="VkDebugUtilsMessageTypeFlagBitsEXT" name="VK_DEBUG_UTILS_MESSAGE_TYPE_DEVICE_ADDRESS_BINDING_BIT_EXT"/>
+ <type name="VkPhysicalDeviceAddressBindingReportFeaturesEXT" />
+ <type name="VkDeviceAddressBindingCallbackDataEXT" />
+ <type name="VkDeviceAddressBindingFlagsEXT" />
+ <type name="VkDeviceAddressBindingFlagBitsEXT" />
+ <type name="VkDeviceAddressBindingTypeEXT" />
</require>
</extension>
- <extension name="VK_EXT_vertex_attribute_aliasing" number="356" type="device" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled" specialuse="glemulation">
+ <extension name="VK_EXT_depth_clip_control" number="356" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" specialuse="glemulation">
<require>
- <enum value="0" name="VK_EXT_VERTEX_ATTRIBUTE_ALIASING_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_vertex_attribute_aliasing&quot;" name="VK_EXT_VERTEX_ATTRIBUTE_ALIASING_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_DEPTH_CLIP_CONTROL_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_depth_clip_control&quot;" name="VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT"/>
+ <type name="VkPhysicalDeviceDepthClipControlFeaturesEXT"/>
+ <type name="VkPipelineViewportDepthClipControlCreateInfoEXT"/>
</require>
</extension>
- <extension name="VK_EXT_primitive_topology_list_restart" number="357" type="device" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" specialuse="glemulation">
+ <extension name="VK_EXT_primitive_topology_list_restart" number="357" type="device" author="EXT" contact="Shahbaz Youssefi @syoussefi" depends="VK_KHR_get_physical_device_properties2" supported="vulkan" specialuse="glemulation">
<require>
<enum value="1" name="VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_primitive_topology_list_restart&quot;" name="VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_EXTENSION_NAME"/>
@@ -16274,20 +21890,24 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_EXT_extension_359" number="359" author="EXT" contact="Bill Hollings @billhollings" supported="disabled" specialuse="glemulation">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_359"/>
- <enum value="&quot;VK_EXT_extension_359&quot;" name="VK_EXT_EXTENSION_359"/>
+ <enum value="0" name="VK_EXT_EXTENSION_359_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_359&quot;" name="VK_EXT_EXTENSION_359_EXTENSION_NAME"/>
</require>
</extension>
<extension name="VK_EXT_extension_360" number="360" author="EXT" contact="Bill Hollings @billhollings" supported="disabled" specialuse="glemulation">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_360"/>
- <enum value="&quot;VK_EXT_extension_360&quot;" name="VK_EXT_EXTENSION_360"/>
+ <enum value="0" name="VK_EXT_EXTENSION_360_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_360&quot;" name="VK_EXT_EXTENSION_360_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_extension_361" number="361" author="KHR" contact="Lionel Landwerlin @llandwerlin" supported="disabled">
+ <extension name="VK_KHR_format_feature_flags2" number="361" author="KHR" type="device" depends="VK_KHR_get_physical_device_properties2" contact="Lionel Landwerlin @llandwerlin" supported="vulkan" promotedto="VK_VERSION_1_3" ratified="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_361"/>
- <enum value="&quot;VK_EXT_extension_361&quot;" name="VK_EXT_EXTENSION_361"/>
+ <enum value="2" name="VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_format_feature_flags2&quot;" name="VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR" alias="VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3"/>
+ <type name="VkFormatFeatureFlags2KHR"/>
+ <type name="VkFormatFeatureFlagBits2KHR"/>
+ <type name="VkFormatProperties3KHR"/>
</require>
</extension>
<extension name="VK_EXT_extension_362" number="362" author="EXT" contact="Lionel Duc @nvlduc" supported="disabled">
@@ -16304,11 +21924,11 @@ typedef void <name>CAMetalLayer</name>;
</extension>
<extension name="VK_FUCHSIA_extension_364" number="364" author="FUCHSIA" contact="Craig Stout @cdotstout" supported="disabled">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_364_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_364&quot;" name="VK_EXT_EXTENSION_364_EXTENSION_NAME"/>
+ <enum value="0" name="VK_FUCHSIA_EXTENSION_364_SPEC_VERSION"/>
+ <enum value="&quot;VK_FUCHSIA_extension_364&quot;" name="VK_FUCHSIA_EXTENSION_364_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_FUCHSIA_external_memory" number="365" type="device" requires="VK_KHR_external_memory_capabilities,VK_KHR_external_memory" author="FUCHSIA" contact="John Rosasco @rosasco" platform="fuchsia" supported="vulkan">
+ <extension name="VK_FUCHSIA_external_memory" number="365" type="device" depends="VK_KHR_external_memory_capabilities+VK_KHR_external_memory" author="FUCHSIA" contact="John Rosasco @rosasco" platform="fuchsia" supported="vulkan">
<require>
<enum value="1" name="VK_FUCHSIA_EXTERNAL_MEMORY_SPEC_VERSION"/>
<enum value="&quot;VK_FUCHSIA_external_memory&quot;" name="VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME"/>
@@ -16323,7 +21943,7 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetMemoryZirconHandlePropertiesFUCHSIA"/>
</require>
</extension>
- <extension name="VK_FUCHSIA_external_semaphore" number="366" type="device" requires="VK_KHR_external_semaphore_capabilities,VK_KHR_external_semaphore" author="FUCHSIA" contact="John Rosasco @rosasco" platform="fuchsia" supported="vulkan">
+ <extension name="VK_FUCHSIA_external_semaphore" number="366" type="device" depends="VK_KHR_external_semaphore_capabilities+VK_KHR_external_semaphore" author="FUCHSIA" contact="John Rosasco @rosasco" platform="fuchsia" supported="vulkan">
<require>
<enum value="1" name="VK_FUCHSIA_EXTERNAL_SEMAPHORE_SPEC_VERSION"/>
<enum value="&quot;VK_FUCHSIA_external_semaphore&quot;" name="VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME"/>
@@ -16336,16 +21956,49 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetSemaphoreZirconHandleFUCHSIA"/>
</require>
</extension>
- <extension name="VK_FUCHSIA_extension_367" number="367" author="FUCHSIA" contact="Craig Stout @cdotstout" supported="disabled">
- <require>
- <enum value="0" name="VK_EXT_EXTENSION_367_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_367&quot;" name="VK_EXT_EXTENSION_367_EXTENSION_NAME"/>
+ <extension name="VK_FUCHSIA_buffer_collection" number="367" type="device" depends="VK_FUCHSIA_external_memory+VK_KHR_sampler_ycbcr_conversion" author="FUCHSIA" contact="John Rosasco @rosasco" supported="vulkan" platform="fuchsia">
+ <require>
+ <enum value="2" name="VK_FUCHSIA_BUFFER_COLLECTION_SPEC_VERSION"/>
+ <enum value="&quot;VK_FUCHSIA_buffer_collection&quot;" name="VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA" comment="VkBufferCollectionFUCHSIA"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA"/>
+ <type name="VkBufferCollectionFUCHSIA"/>
+ <type name="VkBufferCollectionCreateInfoFUCHSIA"/>
+ <type name="VkImportMemoryBufferCollectionFUCHSIA"/>
+ <type name="VkBufferCollectionImageCreateInfoFUCHSIA"/>
+ <type name="VkBufferConstraintsInfoFUCHSIA"/>
+ <type name="VkBufferCollectionBufferCreateInfoFUCHSIA"/>
+ <type name="VkBufferCollectionPropertiesFUCHSIA"/>
+ <type name="VkImageFormatConstraintsFlagsFUCHSIA" comment="Will add VkImageFormatConstraintsFlagBitsFUCHSIA when bits are defined in the future"/>
+ <type name="VkSysmemColorSpaceFUCHSIA"/>
+ <type name="VkImageConstraintsInfoFlagBitsFUCHSIA"/>
+ <type name="VkImageConstraintsInfoFlagsFUCHSIA"/>
+ <type name="VkImageConstraintsInfoFUCHSIA"/>
+ <type name="VkImageFormatConstraintsInfoFUCHSIA"/>
+ <type name="VkBufferCollectionConstraintsInfoFUCHSIA"/>
+ <command name="vkCreateBufferCollectionFUCHSIA"/>
+ <command name="vkSetBufferCollectionImageConstraintsFUCHSIA"/>
+ <command name="vkSetBufferCollectionBufferConstraintsFUCHSIA"/>
+ <command name="vkDestroyBufferCollectionFUCHSIA"/>
+ <command name="vkGetBufferCollectionPropertiesFUCHSIA"/>
+ </require>
+ <require depends="VK_EXT_debug_report">
+ <enum offset="0" extends="VkDebugReportObjectTypeEXT" name="VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA_EXT"/>
</require>
</extension>
<extension name="VK_FUCHSIA_extension_368" number="368" author="FUCHSIA" contact="Craig Stout @cdotstout" supported="disabled">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_368_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_368&quot;" name="VK_EXT_EXTENSION_368_EXTENSION_NAME"/>
+ <enum value="0" name="VK_FUCHSIA_EXTENSION_368_SPEC_VERSION"/>
+ <enum value="&quot;VK_FUCHSIA_extension_368&quot;" name="VK_FUCHSIA_EXTENSION_368_EXTENSION_NAME"/>
</require>
</extension>
<extension name="VK_QCOM_extension_369" number="369" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
@@ -16355,15 +22008,16 @@ typedef void <name>CAMetalLayer</name>;
<enum bitpos="4" extends="VkDescriptorBindingFlagBits" name="VK_DESCRIPTOR_BINDING_RESERVED_4_BIT_QCOM"/>
</require>
</extension>
- <extension name="VK_HUAWEI_subpass_shading" number="370" type="device" author="HUAWEI" contact="Hueilong Wang @wyvernathuawei" requires="VK_KHR_create_renderpass2,VK_KHR_synchronization2" supported="vulkan">
+ <extension name="VK_HUAWEI_subpass_shading" number="370" type="device" author="HUAWEI" contact="Pan Gao @PanGao-h" depends="VK_KHR_create_renderpass2+VK_KHR_synchronization2" supported="vulkan">
<require>
- <enum value="2" name="VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION"/>
+ <enum value="3" name="VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION"/>
<enum value="&quot;VK_HUAWEI_subpass_shading&quot;" name="VK_HUAWEI_SUBPASS_SHADING_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBPASS_SHADING_PIPELINE_CREATE_INFO_HUAWEI"/>
<enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI"/>
<enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_PROPERTIES_HUAWEI"/>
<enum offset="3" extends="VkPipelineBindPoint" extnumber="370" name="VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI"/>
- <enum bitpos="39" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI"/>
+ <enum bitpos="39" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI"/>
+ <enum extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI" alias="VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI" deprecated="aliased"/>
<enum bitpos="14" extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_SUBPASS_SHADING_BIT_HUAWEI"/>
<type name="VkSubpassShadingPipelineCreateInfoHUAWEI"/>
<type name="VkPhysicalDeviceSubpassShadingFeaturesHUAWEI"/>
@@ -16372,19 +22026,19 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSubpassShadingHUAWEI"/>
</require>
</extension>
- <extension name="VK_HUAWEI_invocation_mask" number="371" type="device" requires="VK_KHR_ray_tracing_pipeline,VK_KHR_synchronization2" author="Huawei" contact="Yunpeng Zhu @yunxingzhu" supported="vulkan">
+ <extension name="VK_HUAWEI_invocation_mask" number="371" type="device" depends="VK_KHR_ray_tracing_pipeline+VK_KHR_synchronization2" author="Huawei" contact="Pan Gao @PanGao-h" supported="vulkan">
<require>
<enum value="1" name="VK_HUAWEI_INVOCATION_MASK_SPEC_VERSION"/>
<enum value="&quot;VK_HUAWEI_invocation_mask&quot;" name="VK_HUAWEI_INVOCATION_MASK_EXTENSION_NAME"/>
<enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI"/>
- <enum bitpos="39" extends="VkAccessFlagBits2KHR" name="VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI"/>
+ <enum bitpos="39" extends="VkAccessFlagBits2" name="VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI"/>
<enum bitpos="18" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_INVOCATION_MASK_BIT_HUAWEI"/>
- <enum bitpos="40" extends="VkPipelineStageFlagBits2KHR" name="VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI"/>
+ <enum bitpos="40" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI"/>
<type name="VkPhysicalDeviceInvocationMaskFeaturesHUAWEI"/>
<command name="vkCmdBindInvocationMaskHUAWEI"/>
</require>
</extension>
- <extension name="VK_NV_external_memory_rdma" number="372" type="device" requires="VK_KHR_external_memory" author="NV" contact="Carsten Rohde @crohde" supported="vulkan">
+ <extension name="VK_NV_external_memory_rdma" number="372" type="device" depends="VK_KHR_external_memory" author="NV" contact="Carsten Rohde @crohde" supported="vulkan">
<require>
<enum value="1" name="VK_NV_EXTERNAL_MEMORY_RDMA_SPEC_VERSION"/>
<enum value="&quot;VK_NV_external_memory_rdma&quot;" name="VK_NV_EXTERNAL_MEMORY_RDMA_EXTENSION_NAME"/>
@@ -16398,60 +22052,119 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkGetMemoryRemoteAddressNV"/>
</require>
</extension>
- <extension name="VK_NV_extension_373" number="373" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
- <require>
- <enum value="0" name="VK_NV_EXTENSION_373_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_373&quot;" name="VK_NV_EXTENSION_373_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_NV_extension_374" number="374" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
- <require>
- <enum value="0" name="VK_NV_EXTENSION_374_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_374&quot;" name="VK_NV_EXTENSION_374_EXTENSION_NAME"/>
- <enum bitpos="4" extends="VkExternalFenceHandleTypeFlagBits" name="VK_EXTERNAL_FENCE_HANDLE_TYPE_RESERVED_4_BIT_NV"/>
- <enum bitpos="5" extends="VkExternalFenceHandleTypeFlagBits" name="VK_EXTERNAL_FENCE_HANDLE_TYPE_RESERVED_5_BIT_NV"/>
- <enum bitpos="5" extends="VkExternalSemaphoreHandleTypeFlagBits" name="VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_RESERVED_5_BIT_NV"/>
- <enum bitpos="6" extends="VkExternalSemaphoreHandleTypeFlagBits" name="VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_RESERVED_6_BIT_NV"/>
- </require>
- </extension>
- <extension name="VK_NV_extension_375" number="375" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
- <require>
- <enum value="0" name="VK_NV_EXTENSION_375_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_375&quot;" name="VK_NV_EXTENSION_375_EXTENSION_NAME"/>
- <enum bitpos="13" extends="VkExternalMemoryHandleTypeFlagBits" name="VK_EXTERNAL_MEMORY_HANDLE_TYPE_RESERVED_13_BIT_NV"/>
- </require>
- </extension>
- <extension name="VK_EXT_extension_376" number="376" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled">
- <require>
- <enum value="0" name="VK_EXT_EXTENSION_376_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_376&quot;" name="VK_EXT_EXTENSION_376_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_EXT_extension_377" number="377" author="EXT" contact="Hugues Evrard @hevrard" supported="disabled">
- <require>
- <enum value="0" name="VK_EXT_EXTENSION_377_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_377&quot;" name="VK_EXT_EXTENSION_377_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_EXT_extended_dynamic_state2" number="378" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan">
+ <extension name="VK_EXT_pipeline_properties" number="373" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Mukund Keshava @mkeshavanv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_PIPELINE_PROPERTIES_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_pipeline_properties&quot;" name="VK_EXT_PIPELINE_PROPERTIES_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_PROPERTIES_IDENTIFIER_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR"/>
+ <type name="VkPipelineInfoEXT"/>
+ <type name="VkPipelinePropertiesIdentifierEXT"/>
+ <type name="VkPhysicalDevicePipelinePropertiesFeaturesEXT"/>
+ <command name="vkGetPipelinePropertiesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_external_sci_sync" number="374" depends="VK_VERSION_1_1" platform="sci" type="device" author="NV" contact="Kai Zhang @kazhang" supported="vulkansc" deprecatedby="VK_NV_external_sci_sync2">
+ <require>
+ <enum value="2" name="VK_NV_EXTERNAL_SCI_SYNC_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_external_sci_sync&quot;" name="VK_NV_EXTERNAL_SCI_SYNC_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_FENCE_SCI_SYNC_INFO_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_FENCE_SCI_SYNC_INFO_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_FENCE_GET_SCI_SYNC_INFO_NV"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SCI_SYNC_ATTRIBUTES_INFO_NV"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_SCI_SYNC_INFO_NV"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_SCI_SYNC_INFO_NV"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SEMAPHORE_GET_SCI_SYNC_INFO_NV"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SCI_SYNC_FEATURES_NV"/>
+ <enum bitpos="4" extends="VkExternalFenceHandleTypeFlagBits" name="VK_EXTERNAL_FENCE_HANDLE_TYPE_SCI_SYNC_OBJ_BIT_NV"/>
+ <enum bitpos="5" extends="VkExternalFenceHandleTypeFlagBits" name="VK_EXTERNAL_FENCE_HANDLE_TYPE_SCI_SYNC_FENCE_BIT_NV"/>
+ <enum bitpos="5" extends="VkExternalSemaphoreHandleTypeFlagBits" name="VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SCI_SYNC_OBJ_BIT_NV"/>
+ <type name="VkSciSyncClientTypeNV"/>
+ <type name="VkSciSyncPrimitiveTypeNV"/>
+ <type name="VkExportFenceSciSyncInfoNV"/>
+ <type name="VkImportFenceSciSyncInfoNV"/>
+ <type name="VkFenceGetSciSyncInfoNV"/>
+ <type name="VkSciSyncAttributesInfoNV"/>
+ <type name="VkExportSemaphoreSciSyncInfoNV"/>
+ <type name="VkImportSemaphoreSciSyncInfoNV"/>
+ <type name="VkSemaphoreGetSciSyncInfoNV"/>
+ <type name="VkPhysicalDeviceExternalSciSyncFeaturesNV"/>
+ <command name="vkGetFenceSciSyncFenceNV"/>
+ <command name="vkGetFenceSciSyncObjNV"/>
+ <command name="vkImportFenceSciSyncFenceNV"/>
+ <command name="vkImportFenceSciSyncObjNV"/>
+ <command name="vkGetPhysicalDeviceSciSyncAttributesNV"/>
+ <command name="vkGetSemaphoreSciSyncObjNV"/>
+ <command name="vkImportSemaphoreSciSyncObjNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_external_memory_sci_buf" number="375" depends="VK_VERSION_1_1" platform="sci" type="device" author="NV" contact="Kai Zhang @kazhang" supported="vulkansc">
+ <require>
+ <enum value="2" name="VK_NV_EXTERNAL_MEMORY_SCI_BUF_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_external_memory_sci_buf&quot;" name="VK_NV_EXTERNAL_MEMORY_SCI_BUF_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_MEMORY_SCI_BUF_INFO_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_MEMORY_SCI_BUF_INFO_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_GET_SCI_BUF_INFO_NV"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MEMORY_SCI_BUF_PROPERTIES_NV"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCI_BUF_FEATURES_NV"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SCI_BUF_FEATURES_NV" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCI_BUF_FEATURES_NV"/>
+ <enum bitpos="13" extends="VkExternalMemoryHandleTypeFlagBits" name="VK_EXTERNAL_MEMORY_HANDLE_TYPE_SCI_BUF_BIT_NV"/>
+ <type name="VkExportMemorySciBufInfoNV"/>
+ <type name="VkImportMemorySciBufInfoNV"/>
+ <type name="VkMemoryGetSciBufInfoNV"/>
+ <type name="VkMemorySciBufPropertiesNV"/>
+ <type name="VkPhysicalDeviceExternalMemorySciBufFeaturesNV"/>
+ <type name="VkPhysicalDeviceExternalSciBufFeaturesNV"/>
+ <command name="vkGetMemorySciBufNV"/>
+ <command name="vkGetPhysicalDeviceExternalMemorySciBufPropertiesNV"/>
+ <command name="vkGetPhysicalDeviceSciBufAttributesNV"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_frame_boundary" number="376" type="device" author="EXT" contact="James Fitzpatrick @jamesfitzpatrick" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_FRAME_BOUNDARY_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_frame_boundary&quot;" name="VK_EXT_FRAME_BOUNDARY_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAME_BOUNDARY_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_FRAME_BOUNDARY_EXT"/>
+ <type name="VkPhysicalDeviceFrameBoundaryFeaturesEXT"/>
+ <type name="VkFrameBoundaryEXT"/>
+ <type name="VkFrameBoundaryFlagBitsEXT"/>
+ <type name="VkFrameBoundaryFlagsEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_multisampled_render_to_single_sampled" number="377" type="device" depends="VK_KHR_create_renderpass2+VK_KHR_depth_stencil_resolve" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_multisampled_render_to_single_sampled&quot;" name="VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBPASS_RESOLVE_PERFORMANCE_QUERY_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT"/>
+ <enum bitpos="18" extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_BIT_EXT"/>
+ <type name="VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT"/>
+ <type name="VkSubpassResolvePerformanceQueryEXT"/>
+ <type name="VkMultisampledRenderToSingleSampledInfoEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extended_dynamic_state2" number="378" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan,vulkansc" promotedto="VK_VERSION_1_3">
<require>
<enum value="1" name="VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extended_dynamic_state2&quot;" name="VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT"/>
- <enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT"/>
- <enum offset="1" extends="VkDynamicState" name="VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT"/>
- <enum offset="2" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT"/>
- <enum offset="3" extends="VkDynamicState" name="VK_DYNAMIC_STATE_LOGIC_OP_EXT"/>
- <enum offset="4" extends="VkDynamicState" name="VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT"/>
- <type name="VkPhysicalDeviceExtendedDynamicState2FeaturesEXT"/>
- <command name="vkCmdSetPatchControlPointsEXT"/>
+ <enum value="&quot;VK_EXT_extended_dynamic_state2&quot;" name="VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT" comment="Not promoted to 1.3"/>
+ <enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT" comment="Not promoted to 1.3"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT" alias="VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT" alias="VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE"/>
+ <enum offset="3" extends="VkDynamicState" name="VK_DYNAMIC_STATE_LOGIC_OP_EXT" comment="Not promoted to 1.3"/>
+ <enum extends="VkDynamicState" name="VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT" alias="VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE"/>
+ <type name="VkPhysicalDeviceExtendedDynamicState2FeaturesEXT" comment="Not promoted to 1.3"/>
+ <command name="vkCmdSetPatchControlPointsEXT" comment="Not promoted to 1.3"/>
<command name="vkCmdSetRasterizerDiscardEnableEXT"/>
<command name="vkCmdSetDepthBiasEnableEXT"/>
- <command name="vkCmdSetLogicOpEXT"/>
+ <command name="vkCmdSetLogicOpEXT" comment="Not promoted to 1.3"/>
<command name="vkCmdSetPrimitiveRestartEnableEXT"/>
</require>
</extension>
- <extension name="VK_QNX_screen_surface" number="379" type="instance" requires="VK_KHR_surface" platform="screen" author="QNX" contact="Mike Gorchak @mgorchak-blackberry" supported="vulkan">
+ <extension name="VK_QNX_screen_surface" number="379" type="instance" depends="VK_KHR_surface" platform="screen" author="QNX" contact="Mike Gorchak @mgorchak-blackberry" supported="vulkan">
<require>
<enum value="1" name="VK_QNX_SCREEN_SURFACE_SPEC_VERSION"/>
<enum value="&quot;VK_QNX_screen_surface&quot;" name="VK_QNX_SCREEN_SURFACE_EXTENSION_NAME"/>
@@ -16474,7 +22187,7 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_KHR_extension_381&quot;" name="VK_KHR_EXTENSION_381_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_color_write_enable" number="382" type="device" requires="VK_KHR_get_physical_device_properties2" author="EXT" contact="Sharif Elcott @selcott" supported="vulkan">
+ <extension name="VK_EXT_color_write_enable" number="382" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="EXT" contact="Sharif Elcott @selcott" supported="vulkan,vulkansc">
<require>
<enum value="1" name="VK_EXT_COLOR_WRITE_ENABLE_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_color_write_enable&quot;" name="VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME"/>
@@ -16486,10 +22199,13 @@ typedef void <name>CAMetalLayer</name>;
<command name="vkCmdSetColorWriteEnableEXT"/>
</require>
</extension>
- <extension name="VK_EXT_extension_383" number="383" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled">
+ <extension name="VK_EXT_primitives_generated_query" number="383" type="device" depends="VK_EXT_transform_feedback" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" specialuse="glemulation">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_383_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_383&quot;" name="VK_EXT_EXTENSION_383_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_PRIMITIVES_GENERATED_QUERY_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_primitives_generated_query&quot;" name="VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT"/>
+ <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT"/>
+ <type name="VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT"/>
</require>
</extension>
<extension name="VK_EXT_extension_384" number="384" type="instance" author="EXT" contact="Chia-I Wu @olvaffe1" supported="disabled">
@@ -16510,10 +22226,25 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_GOOGLE_extension_386&quot;" name="VK_GOOGLE_EXTENSION_386_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_KHR_extension_387" number="387" author="KHR" contact="Daniel Koch @dgkoch" supported="disabled">
+ <extension name="VK_KHR_ray_tracing_maintenance1" number="387" type="device" depends="VK_KHR_acceleration_structure" author="KHR" contact="Daniel Koch @dgkoch" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_387_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_387&quot;" name="VK_KHR_EXTENSION_387_EXTENSION_NAME"/>
+ <enum value="1" name="VK_KHR_RAY_TRACING_MAINTENANCE_1_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_ray_tracing_maintenance1&quot;" name="VK_KHR_RAY_TRACING_MAINTENANCE_1_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR"/>
+ <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR"/>
+ <enum offset="1" extends="VkQueryType" name="VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR"/>
+ <type name="VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR"/>
+ </require>
+ <require depends="VK_KHR_synchronization2">
+ <!-- VkPipelineStageFlagBits bitpos="28" is reserved by this extension, but not used -->
+ <enum bitpos="28" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_synchronization2+VK_KHR_ray_tracing_pipeline">
+ <enum bitpos="40" extends="VkAccessFlagBits2" name="VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_ray_tracing_pipeline">
+ <type name="VkTraceRaysIndirectCommand2KHR"/>
+ <command name="vkCmdTraceRaysIndirect2KHR"/>
</require>
</extension>
<extension name="VK_EXT_extension_388" number="388" author="EXT" contact="Alan Baker @alan-baker" supported="disabled">
@@ -16522,12 +22253,12 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_EXT_extension_388&quot;" name="VK_EXT_EXTENSION_388_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_global_priority_query" number="389" type="device" requires="VK_EXT_global_priority,VK_KHR_get_physical_device_properties2" author="EXT" contact="Yiwei Zhang @zhangyiwei" supported="vulkan">
+ <extension name="VK_EXT_global_priority_query" number="389" type="device" depends="VK_EXT_global_priority+VK_KHR_get_physical_device_properties2" author="EXT" contact="Yiwei Zhang @zhangyiwei" supported="vulkan" promotedto="VK_KHR_global_priority">
<require>
<enum value="1" name="VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_global_priority_query&quot;" name="VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME"/>
- <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT"/>
- <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT" alias="VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR"/>
<enum name="VK_MAX_GLOBAL_PRIORITY_SIZE_EXT"/>
<type name="VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT"/>
<type name="VkQueueFamilyGlobalPriorityPropertiesEXT"/>
@@ -16545,13 +22276,17 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_EXT_extension_391&quot;" name="VK_EXT_EXTENSION_391_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_extension_392" number="392" author="EXT" contact="Joshua Ashton @Joshua-Ashton" supported="disabled">
+ <extension name="VK_EXT_image_view_min_lod" number="392" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Joshua Ashton @Joshua-Ashton" supported="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_392_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_392&quot;" name="VK_EXT_EXTENSION_392_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_IMAGE_VIEW_MIN_LOD_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_image_view_min_lod&quot;" name="VK_EXT_IMAGE_VIEW_MIN_LOD_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT"/>
+ <type name="VkPhysicalDeviceImageViewMinLodFeaturesEXT"/>
+ <type name="VkImageViewMinLodCreateInfoEXT"/>
</require>
</extension>
- <extension name="VK_EXT_multi_draw" number="393" author="EXT" contact="Mike Blumenkrantz @zmike" type="device" supported="vulkan">
+ <extension name="VK_EXT_multi_draw" number="393" author="EXT" contact="Mike Blumenkrantz @zmike" type="device" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
<enum value="1" name="VK_EXT_MULTI_DRAW_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_multi_draw&quot;" name="VK_EXT_MULTI_DRAW_EXTENSION_NAME"/>
@@ -16565,34 +22300,112 @@ typedef void <name>CAMetalLayer</name>;
<type name="VkMultiDrawIndexedInfoEXT"/>
</require>
</extension>
- <extension name="VK_EXT_extension_394" number="394" author="EXT" contact="Mike Blumenkrantz @zmike" type="device" supported="disabled">
- <require>
- <enum value="0" name="VK_EXT_EXTENSION_394_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_394&quot;" name="VK_EXT_EXTENSION_394_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_KHR_extension_395" number="395" author="KHR" contact="Lenny Komow @lkomow" supported="disabled">
- <require>
- <enum value="0" name="VK_KHR_EXTENSION_395_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_395&quot;" name="VK_KHR_EXTENSION_395_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_KHR_extension_396" number="396" author="EXT" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
- <require>
- <enum value="0" name="VK_KHR_EXTENSION_396_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_396&quot;" name="VK_KHR_EXTENSION_396_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_NV_extension_397" number="397" author="NV" contact="Christoph Kubisch @pixeljetstream" supported="disabled">
- <require>
- <enum value="0" name="VK_NV_EXTENSION_397_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_397&quot;" name="VK_NV_EXTENSION_397_EXTENSION_NAME"/>
- </require>
- </extension>
- <extension name="VK_NV_extension_398" number="398" author="NV" contact="Christoph Kubisch @pixeljetstream" supported="disabled">
- <require>
- <enum value="0" name="VK_NV_EXTENSION_398_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_398&quot;" name="VK_NV_EXTENSION_398_EXTENSION_NAME"/>
+ <extension name="VK_EXT_image_2d_view_of_3d" number="394" depends="VK_KHR_maintenance1+VK_KHR_get_physical_device_properties2" author="EXT" contact="Mike Blumenkrantz @zmike" specialuse="glemulation" type="device" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_IMAGE_2D_VIEW_OF_3D_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_image_2d_view_of_3d&quot;" name="VK_EXT_IMAGE_2D_VIEW_OF_3D_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceImage2DViewOf3DFeaturesEXT"/>
+ <enum extends="VkImageCreateFlagBits" bitpos="17" name="VK_IMAGE_CREATE_2D_VIEW_COMPATIBLE_BIT_EXT" comment="Image is created with a layout where individual slices are capable of being used as 2D images"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_portability_enumeration" number="395" author="KHR" contact="Charles Giessen @charles-lunarg" type="instance" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_portability_enumeration&quot;" name="VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME"/>
+ <enum bitpos="0" extends="VkInstanceCreateFlagBits" name="VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_shader_tile_image" number="396" type="device" author="EXT" depends="VK_VERSION_1_3" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_SHADER_TILE_IMAGE_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_shader_tile_image&quot;" name="VK_EXT_SHADER_TILE_IMAGE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT"/>
+ <type name="VkPhysicalDeviceShaderTileImageFeaturesEXT"/>
+ <type name="VkPhysicalDeviceShaderTileImagePropertiesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_opacity_micromap" number="397" type="device" depends="VK_KHR_acceleration_structure+VK_KHR_synchronization2" author="EXT" contact="Christoph Kubisch @pixeljetstream, Eric Werness" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="2" name="VK_EXT_OPACITY_MICROMAP_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_opacity_micromap&quot;" name="VK_EXT_OPACITY_MICROMAP_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MICROMAP_BUILD_INFO_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MICROMAP_VERSION_INFO_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_MICROMAP_INFO_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_MICROMAP_TO_MEMORY_INFO_EXT"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COPY_MEMORY_TO_MICROMAP_INFO_EXT"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_FEATURES_EXT"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_PROPERTIES_EXT"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MICROMAP_CREATE_INFO_EXT"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MICROMAP_BUILD_SIZES_INFO_EXT"/>
+ <enum offset="9" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT"/>
+ <enum bitpos="30" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT"/>
+ <enum bitpos="44" extends="VkAccessFlagBits2" name="VK_ACCESS_2_MICROMAP_READ_BIT_EXT"/>
+ <enum bitpos="45" extends="VkAccessFlagBits2" name="VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT"/>
+ <enum offset="0" extends="VkQueryType" name="VK_QUERY_TYPE_MICROMAP_SERIALIZATION_SIZE_EXT"/>
+ <enum offset="1" extends="VkQueryType" name="VK_QUERY_TYPE_MICROMAP_COMPACTED_SIZE_EXT"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_MICROMAP_EXT"/>
+ <enum bitpos="23" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT"/>
+ <enum bitpos="24" extends="VkBufferUsageFlagBits" name="VK_BUFFER_USAGE_MICROMAP_STORAGE_BIT_EXT"/>
+ <enum bitpos="24" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT"/>
+ <enum bitpos="4" extends="VkGeometryInstanceFlagBitsKHR" name="VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_EXT"/>
+ <enum bitpos="5" extends="VkGeometryInstanceFlagBitsKHR" name="VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_EXT"/>
+ <enum bitpos="6" extends="VkBuildAccelerationStructureFlagBitsKHR" name="VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_UPDATE_EXT"/>
+ <enum bitpos="7" extends="VkBuildAccelerationStructureFlagBitsKHR" name="VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_EXT"/>
+ <enum bitpos="8" extends="VkBuildAccelerationStructureFlagBitsKHR" name="VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_DATA_UPDATE_EXT"/>
+ <type name="VkMicromapTypeEXT"/>
+ <type name="VkMicromapBuildInfoEXT"/>
+ <type name="VkMicromapUsageEXT"/>
+ <type name="VkMicromapCreateInfoEXT"/>
+ <type name="VkMicromapEXT"/>
+ <type name="VkBuildMicromapFlagBitsEXT"/>
+ <type name="VkBuildMicromapFlagsEXT"/>
+ <type name="VkCopyMicromapModeEXT"/>
+ <type name="VkPhysicalDeviceOpacityMicromapFeaturesEXT"/>
+ <type name="VkPhysicalDeviceOpacityMicromapPropertiesEXT"/>
+ <type name="VkMicromapVersionInfoEXT"/>
+ <type name="VkCopyMicromapToMemoryInfoEXT"/>
+ <type name="VkCopyMemoryToMicromapInfoEXT"/>
+ <type name="VkCopyMicromapInfoEXT"/>
+ <type name="VkMicromapCreateFlagBitsEXT"/>
+ <type name="VkMicromapCreateFlagsEXT"/>
+ <type name="VkBuildMicromapModeEXT"/>
+ <type name="VkMicromapBuildSizesInfoEXT"/>
+ <type name="VkOpacityMicromapFormatEXT"/>
+ <type name="VkAccelerationStructureTrianglesOpacityMicromapEXT"/>
+ <type name="VkMicromapTriangleEXT"/>
+ <type name="VkOpacityMicromapSpecialIndexEXT"/>
+ <command name="vkCreateMicromapEXT"/>
+ <command name="vkDestroyMicromapEXT"/>
+ <command name="vkCmdBuildMicromapsEXT"/>
+ <command name="vkBuildMicromapsEXT"/>
+ <command name="vkCopyMicromapEXT"/>
+ <command name="vkCopyMicromapToMemoryEXT"/>
+ <command name="vkCopyMemoryToMicromapEXT"/>
+ <command name="vkWriteMicromapsPropertiesEXT"/>
+ <command name="vkCmdCopyMicromapEXT"/>
+ <command name="vkCmdCopyMicromapToMemoryEXT"/>
+ <command name="vkCmdCopyMemoryToMicromapEXT"/>
+ <command name="vkCmdWriteMicromapsPropertiesEXT"/>
+ <command name="vkGetDeviceMicromapCompatibilityEXT"/>
+ <command name="vkGetMicromapBuildSizesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_displacement_micromap" number="398" type="device" depends="VK_EXT_opacity_micromap" author="NV" contact="Christoph Kubisch @pixeljetstream, Eric Werness @ewerness-nv" supported="vulkan" provisional="true" platform="provisional">
+ <require>
+ <enum value="2" name="VK_NV_DISPLACEMENT_MICROMAP_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_displacement_micromap&quot;" name="VK_NV_DISPLACEMENT_MICROMAP_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum bitpos="28" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum bitpos="9" extends="VkBuildAccelerationStructureFlagBitsKHR" name="VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISPLACEMENT_MICROMAP_UPDATE_NV" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum offset="0" extends="VkMicromapTypeEXT" name="VK_MICROMAP_TYPE_DISPLACEMENT_MICROMAP_NV" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <type name="VkPhysicalDeviceDisplacementMicromapFeaturesNV"/>
+ <type name="VkPhysicalDeviceDisplacementMicromapPropertiesNV"/>
+ <type name="VkAccelerationStructureTrianglesDisplacementMicromapNV"/>
+ <type name="VkDisplacementMicromapFormatNV"/>
</require>
</extension>
<extension name="VK_JUICE_extension_399" number="399" author="JUICE" contact="Dean Beeler @canadacow" supported="disabled">
@@ -16607,12 +22420,12 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_JUICE_extension_400&quot;" name="VK_JUICE_EXTENSION_400_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_EXT_load_store_op_none" number="401" author="EXT" type="device" contact="Shahbaz Youssefi @syoussefi" supported="vulkan">
+ <extension name="VK_EXT_load_store_op_none" number="401" author="EXT" type="device" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" promotedto="VK_KHR_load_store_op_none" ratified="vulkan">
<require>
<enum value="1" name="VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_load_store_op_none&quot;" name="VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME"/>
- <enum offset="0" extends="VkAttachmentLoadOp" name="VK_ATTACHMENT_LOAD_OP_NONE_EXT"/>
- <enum offset="0" extends="VkAttachmentStoreOp" extnumber="302" name="VK_ATTACHMENT_STORE_OP_NONE_EXT"/>
+ <enum value="&quot;VK_EXT_load_store_op_none&quot;" name="VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME"/>
+ <enum extends="VkAttachmentLoadOp" name="VK_ATTACHMENT_LOAD_OP_NONE_EXT" alias="VK_ATTACHMENT_LOAD_OP_NONE_KHR"/>
+ <enum extends="VkAttachmentStoreOp" name="VK_ATTACHMENT_STORE_OP_NONE_EXT" alias="VK_ATTACHMENT_STORE_OP_NONE"/>
</require>
</extension>
<extension name="VK_FB_extension_402" number="402" author="FB" contact="Artem Bolgar @artyom17" supported="disabled">
@@ -16633,10 +22446,21 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_FB_extension_404&quot;" name="VK_FB_EXTENSION_404_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_HUAWEI_extension_405" number="405" author="HUAWEI" contact="Hueilong Wang @wyvernathuawei" supported="disabled">
+ <extension name="VK_HUAWEI_cluster_culling_shader" number="405" type="device" depends="VK_KHR_get_physical_device_properties2" author="HUAWEI" contact="Yuchang Wang @richard_Wang2" supported="vulkan">
<require>
- <enum value="0" name="VK_HUAWEI_EXTENSION_405_SPEC_VERSION"/>
- <enum value="&quot;VK_HUAWEI_extension_405&quot;" name="VK_HUAWEI_EXTENSION_405_EXTENSION_NAME"/>
+ <enum value="3" name="VK_HUAWEI_CLUSTER_CULLING_SHADER_SPEC_VERSION"/>
+ <enum value="&quot;VK_HUAWEI_cluster_culling_shader&quot;" name="VK_HUAWEI_CLUSTER_CULLING_SHADER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_VRS_FEATURES_HUAWEI"/>
+ <enum bitpos="41" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ <enum bitpos="19" extends="VkShaderStageFlagBits" name="VK_SHADER_STAGE_CLUSTER_CULLING_BIT_HUAWEI"/>
+ <enum bitpos="13" extends="VkQueryPipelineStatisticFlagBits" name="VK_QUERY_PIPELINE_STATISTIC_CLUSTER_CULLING_SHADER_INVOCATIONS_BIT_HUAWEI"/>
+ <command name="vkCmdDrawClusterHUAWEI"/>
+ <command name="vkCmdDrawClusterIndirectHUAWEI"/>
+ <type name="VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI"/>
+ <type name="VkPhysicalDeviceClusterCullingShaderPropertiesHUAWEI"/>
+ <type name="VkPhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI"/>
</require>
</extension>
<extension name="VK_HUAWEI_extension_406" number="406" author="HUAWEI" contact="Hueilong Wang @wyvernathuawei" supported="disabled">
@@ -16675,22 +22499,41 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_GGP_extension_411&quot;" name="VK_GGP_EXTENSION_411_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_NV_extension_412" number="412" author="NV" contact="Piers Daniell @pdaniell-nv" supported="disabled">
+ <extension name="VK_EXT_border_color_swizzle" number="412" type="device" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan" depends="VK_EXT_custom_border_color" specialuse="glemulation,d3demulation">
<require>
- <enum value="0" name="VK_NV_EXTENSION_412_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_412&quot;" name="VK_NV_EXTENSION_412_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_BORDER_COLOR_SWIZZLE_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_border_color_swizzle&quot;" name="VK_EXT_BORDER_COLOR_SWIZZLE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT"/>
+ <type name="VkPhysicalDeviceBorderColorSwizzleFeaturesEXT"/>
+ <type name="VkSamplerBorderColorComponentMappingCreateInfoEXT"/>
</require>
</extension>
- <extension name="VK_NV_extension_413" number="413" author="NV" contact="Piers Daniell @pdaniell-nv" supported="disabled">
+ <extension name="VK_EXT_pageable_device_local_memory" number="413" author="EXT" contact="Piers Daniell @pdaniell-nv" type="device" depends="VK_EXT_memory_priority" supported="vulkan">
<require>
- <enum value="0" name="VK_NV_EXTENSION_413_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_413&quot;" name="VK_NV_EXTENSION_413_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_pageable_device_local_memory&quot;" name="VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT"/>
+ <type name="VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT"/>
+ <command name="vkSetDeviceMemoryPriorityEXT"/>
</require>
</extension>
- <extension name="VK_NV_extension_414" number="414" author="NV" contact="Piers Daniell @pdaniell-nv" supported="disabled">
+ <extension name="VK_KHR_maintenance4" number="414" type="device" depends="VK_VERSION_1_1" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="vulkan" promotedto="VK_VERSION_1_3" ratified="vulkan">
<require>
- <enum value="0" name="VK_NV_EXTENSION_414_SPEC_VERSION"/>
- <enum value="&quot;VK_NV_extension_414&quot;" name="VK_NV_EXTENSION_414_EXTENSION_NAME"/>
+ <enum value="2" name="VK_KHR_MAINTENANCE_4_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_maintenance4&quot;" name="VK_KHR_MAINTENANCE_4_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR" alias="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR" alias="VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR" alias="VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS"/>
+ <enum extends="VkImageAspectFlagBits" name="VK_IMAGE_ASPECT_NONE_KHR" alias="VK_IMAGE_ASPECT_NONE"/>
+ <type name="VkPhysicalDeviceMaintenance4FeaturesKHR"/>
+ <type name="VkPhysicalDeviceMaintenance4PropertiesKHR"/>
+ <type name="VkDeviceBufferMemoryRequirementsKHR"/>
+ <type name="VkDeviceImageMemoryRequirementsKHR"/>
+ <command name="vkGetDeviceBufferMemoryRequirementsKHR"/>
+ <command name="vkGetDeviceImageMemoryRequirementsKHR"/>
+ <command name="vkGetDeviceImageSparseMemoryRequirementsKHR"/>
</require>
</extension>
<extension name="VK_HUAWEI_extension_415" number="415" author="HUAWEI" contact="Hueilong Wang @wyvernathuawei" supported="disabled">
@@ -16699,60 +22542,3145 @@ typedef void <name>CAMetalLayer</name>;
<enum value="&quot;VK_HUAWEI_extension_415&quot;" name="VK_HUAWEI_EXTENSION_415_EXTENSION_NAME"/>
</require>
</extension>
- <extension name="VK_ARM_extension_416" number="416" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
+ <extension name="VK_ARM_shader_core_properties" number="416" type="device" depends="VK_VERSION_1_1" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan">
<require>
- <enum value="0" name="VK_ARM_EXTENSION_416_SPEC_VERSION"/>
- <enum value="&quot;VK_ARM_extension_416&quot;" name="VK_ARM_EXTENSION_416_EXTENSION_NAME"/>
+ <enum value="1" name="VK_ARM_SHADER_CORE_PROPERTIES_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_shader_core_properties&quot;" name="VK_ARM_SHADER_CORE_PROPERTIES_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM"/>
+ <type name="VkPhysicalDeviceShaderCorePropertiesARM"/>
</require>
</extension>
- <extension name="VK_KHR_extension_417" number="417" author="KHR" contact="Kevin Petit @kevinpetit" supported="disabled">
+ <extension name="VK_KHR_shader_subgroup_rotate" number="417" author="KHR" contact="Kevin Petit @kpet" type="device" supported="vulkan" ratified="vulkan">
<require>
- <enum value="0" name="VK_ARM_EXTENSION_417_SPEC_VERSION"/>
- <enum value="&quot;VK_ARM_extension_417&quot;" name="VK_ARM_EXTENSION_417_EXTENSION_NAME"/>
+ <enum value="2" name="VK_KHR_SHADER_SUBGROUP_ROTATE_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_shader_subgroup_rotate&quot;" name="VK_KHR_SHADER_SUBGROUP_ROTATE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR"/>
+ <enum bitpos="9" extends="VkSubgroupFeatureFlagBits" name="VK_SUBGROUP_FEATURE_ROTATE_BIT_KHR"/>
+ <enum bitpos="10" extends="VkSubgroupFeatureFlagBits" name="VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT_KHR"/>
+ <type name="VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR"/>
</require>
</extension>
- <extension name="VK_ARM_extension_418" number="418" author="ARM" contact="Kevin Petit @kevinpetit" supported="disabled">
+ <extension name="VK_ARM_scheduling_controls" number="418" author="ARM" contact="Kevin Petit @kpet" type="device" depends="VK_ARM_shader_core_builtins" supported="vulkan">
<require>
- <enum value="0" name="VK_ARM_EXTENSION_418_SPEC_VERSION"/>
- <enum value="&quot;VK_ARM_extension_418&quot;" name="VK_ARM_EXTENSION_418_EXTENSION_NAME"/>
+ <enum value="1" name="VK_ARM_SCHEDULING_CONTROLS_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_scheduling_controls&quot;" name="VK_ARM_SCHEDULING_CONTROLS_EXTENSION_NAME"/>
+ <enum extends="VkStructureType" offset="0" name="VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM"/>
+ <enum extends="VkStructureType" offset="1" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM"/>
+ <enum extends="VkStructureType" offset="2" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM"/>
+ <type name="VkDeviceQueueShaderCoreControlCreateInfoARM"/>
+ <type name="VkPhysicalDeviceSchedulingControlsFeaturesARM"/>
+ <type name="VkPhysicalDeviceSchedulingControlsPropertiesARM"/>
+ <type name="VkPhysicalDeviceSchedulingControlsFlagsARM"/>
+ <type name="VkPhysicalDeviceSchedulingControlsFlagBitsARM"/>
</require>
</extension>
- <extension name="VK_EXT_extension_419" number="419" author="EXT" contact="Mike Blumenkrantz @zmike" type="device" supported="disabled">
+ <extension name="VK_EXT_image_sliced_view_of_3d" number="419" depends="VK_KHR_maintenance1+VK_KHR_get_physical_device_properties2" author="EXT" contact="Mike Blumenkrantz @zmike" specialuse="d3demulation" type="device" supported="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_419_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_419&quot;" name="VK_EXT_EXTENSION_419_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_IMAGE_SLICED_VIEW_OF_3D_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_image_sliced_view_of_3d&quot;" name="VK_EXT_IMAGE_SLICED_VIEW_OF_3D_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_VIEW_SLICED_CREATE_INFO_EXT"/>
+ <enum name="VK_REMAINING_3D_SLICES_EXT"/>
+ <type name="VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT"/>
+ <type name="VkImageViewSlicedCreateInfoEXT"/>
</require>
</extension>
<extension name="VK_EXT_extension_420" number="420" author="EXT" contact="Mike Blumenkrantz @zmike" type="device" supported="disabled">
<require>
<enum value="0" name="VK_EXT_EXTENSION_420_SPEC_VERSION"/>
<enum value="&quot;VK_EXT_extension_420&quot;" name="VK_EXT_EXTENSION_420_EXTENSION_NAME"/>
+ <enum bitpos="4" extends="VkSwapchainCreateFlagBitsKHR" name="VK_SWAPCHAIN_CREATE_RESERVED_4_BIT_EXT"/>
</require>
</extension>
- <extension name="VK_KHR_extension_421" number="421" author="KHR" contact="Hans-Kristian Arntzen @HansKristian-Work" supported="disabled">
+ <extension name="VK_VALVE_descriptor_set_host_mapping" number="421" type="device" author="VALVE" contact="Hans-Kristian Arntzen @HansKristian-Work" specialuse="d3demulation" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
- <enum value="0" name="VK_KHR_EXTENSION_421_SPEC_VERSION"/>
- <enum value="&quot;VK_KHR_extension_421&quot;" name="VK_KHR_EXTENSION_421_EXTENSION_NAME"/>
+ <enum value="1" name="VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_SPEC_VERSION"/>
+ <enum value="&quot;VK_VALVE_descriptor_set_host_mapping&quot;" name="VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE"/>
+ <type name="VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE"/>
+ <type name="VkDescriptorSetBindingReferenceVALVE"/>
+ <type name="VkDescriptorSetLayoutHostMappingInfoVALVE"/>
+ <command name="vkGetDescriptorSetLayoutHostMappingInfoVALVE"/>
+ <command name="vkGetDescriptorSetHostMappingVALVE"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_depth_clamp_zero_one" number="422" author="EXT" type="device" contact="Graeme Leese @gnl21" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_DEPTH_CLAMP_ZERO_ONE_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_depth_clamp_zero_one&quot;" name="VK_EXT_DEPTH_CLAMP_ZERO_ONE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceDepthClampZeroOneFeaturesEXT" />
</require>
</extension>
- <extension name="VK_EXT_extension_422" number="422" author="EXT" contact="Graeme Leese @gnl21" supported="disabled">
+ <extension name="VK_EXT_non_seamless_cube_map" number="423" author="EXT" type="device" contact="Georg Lehmann @DadSchoorse" specialuse="d3demulation,glemulation" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
<require>
- <enum value="0" name="VK_EXT_EXTENSION_422_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_extension_422&quot;" name="VK_EXT_EXTENSION_422_EXTENSION_NAME"/>
+ <enum value="1" name="VK_EXT_NON_SEAMLESS_CUBE_MAP_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_non_seamless_cube_map&quot;" name="VK_EXT_NON_SEAMLESS_CUBE_MAP_EXTENSION_NAME"/>
+ <enum bitpos="2" extends="VkSamplerCreateFlagBits" name="VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT"/>
</require>
</extension>
- <extension name="VK_EXT_disable_cube_map_wrap" number="423" author="EXT" contact="Georg Lehmann @DadSchoorse" supported="disabled">
+ <extension name="VK_ARM_extension_424" number="424" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="disabled">
<require>
- <enum value="0" name="VK_EXT_DISABLE_CUBE_MAP_WRAP_SPEC_VERSION"/>
- <enum value="&quot;VK_EXT_disable_cube_map_wrap&quot;" name="VK_EXT_DISABLE_CUBE_MAP_WRAP_EXTENSION_NAME"/>
- <enum bitpos="2" extends="VkSamplerCreateFlagBits" name="VK_SAMPLER_CREATE_RESERVED_2_BIT_EXT"/>
+ <enum value="0" name="VK_ARM_EXTENSION_424_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_extension_424&quot;" name="VK_ARM_EXTENSION_424_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_ARM_render_pass_striped" number="425" type="device" depends="VK_KHR_get_physical_device_properties2,VK_KHR_synchronization2" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_ARM_RENDER_PASS_STRIPED_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_render_pass_striped&quot;" name="VK_ARM_RENDER_PASS_STRIPED_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_PROPERTIES_ARM"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_SUBMIT_INFO_ARM"/>
+ <type name="VkPhysicalDeviceRenderPassStripedFeaturesARM"/>
+ <type name="VkPhysicalDeviceRenderPassStripedPropertiesARM"/>
+ <type name="VkRenderPassStripeBeginInfoARM"/>
+ <type name="VkRenderPassStripeInfoARM"/>
+ <type name="VkRenderPassStripeSubmitInfoARM"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_fragment_density_map_offset" number="426" type="device" depends="VK_KHR_get_physical_device_properties2+VK_EXT_fragment_density_map" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_fragment_density_map_offset&quot;" name="VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM"/>
+ <enum bitpos="15" extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_QCOM"/>
+ <type name="VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM"/>
+ <type name="VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM"/>
+ <type name="VkSubpassFragmentDensityMapOffsetEndInfoQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_copy_memory_indirect" number="427" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_buffer_device_address" author="NV" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_COPY_MEMORY_INDIRECT_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_copy_memory_indirect&quot;" name="VK_NV_COPY_MEMORY_INDIRECT_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_NV"/>
+ <type name="VkCopyMemoryIndirectCommandNV"/>
+ <type name="VkCopyMemoryToImageIndirectCommandNV"/>
+ <type name="VkPhysicalDeviceCopyMemoryIndirectFeaturesNV"/>
+ <type name="VkPhysicalDeviceCopyMemoryIndirectPropertiesNV"/>
+ <command name="vkCmdCopyMemoryIndirectNV"/>
+ <command name="vkCmdCopyMemoryToImageIndirectNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_memory_decompression" number="428" type="device" depends="VK_KHR_get_physical_device_properties2+VK_KHR_buffer_device_address" author="NV" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_MEMORY_DECOMPRESSION_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_memory_decompression&quot;" name="VK_NV_MEMORY_DECOMPRESSION_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_NV"/>
+ <type name="VkMemoryDecompressionMethodFlagBitsNV"/>
+ <type name="VkMemoryDecompressionMethodFlagsNV"/>
+ <type name="VkDecompressMemoryRegionNV"/>
+ <type name="VkPhysicalDeviceMemoryDecompressionFeaturesNV"/>
+ <type name="VkPhysicalDeviceMemoryDecompressionPropertiesNV"/>
+ <command name="vkCmdDecompressMemoryNV"/>
+ <command name="vkCmdDecompressMemoryIndirectCountNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_device_generated_commands_compute" number="429" type="device" depends="VK_NV_device_generated_commands" author="NV" contact="Vikram Kushwaha @vkushwaha-nv" supported="vulkan">
+ <require>
+ <enum value="2" name="VK_NV_DEVICE_GENERATED_COMMANDS_COMPUTE_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_device_generated_commands_compute&quot;" name="VK_NV_DEVICE_GENERATED_COMMANDS_COMPUTE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_INDIRECT_BUFFER_INFO_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_INDIRECT_DEVICE_ADDRESS_INFO_NV"/>
+ <enum offset="3" extends="VkIndirectCommandsTokenTypeNV" name="VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NV"/>
+ <enum offset="4" extends="VkIndirectCommandsTokenTypeNV" name="VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NV"/>
+ <enum bitpos="7" extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_INDIRECT_BINDABLE_BIT_NV"/>
+ <type name="VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV"/>
+ <type name="VkComputePipelineIndirectBufferInfoNV"/>
+ <type name="VkPipelineIndirectDeviceAddressInfoNV"/>
+ <type name="VkBindPipelineIndirectCommandNV"/>
+ <command name="vkGetPipelineIndirectMemoryRequirementsNV"/>
+ <command name="vkCmdUpdatePipelineIndirectBufferNV"/>
+ <command name="vkGetPipelineIndirectDeviceAddressNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_430" number="430" author="NV" contact="Vikram Kushwaha @vkushwaha-nv" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_430_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_430&quot;" name="VK_NV_EXTENSION_430_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_linear_color_attachment" number="431" type="device" author="NVIDIA" contact="sourav parmar @souravpNV" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_LINEAR_COLOR_ATTACHMENT_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_linear_color_attachment&quot;" name="VK_NV_LINEAR_COLOR_ATTACHMENT_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV"/>
+ <type name="VkPhysicalDeviceLinearColorAttachmentFeaturesNV"/>
+ </require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="38" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV" comment="Format support linear image as render target, it cannot be mixed with non linear attachment"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_432" number="432" author="NV" contact="Sourav Parmar @souravpNV" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_432_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_432&quot;" name="VK_NV_EXTENSION_432_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_433" number="433" author="NV" contact="Sourav Parmar @souravpNV" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_433_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_433&quot;" name="VK_NV_EXTENSION_433_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_GOOGLE_surfaceless_query" number="434" type="instance" depends="VK_KHR_surface" author="GOOGLE" contact="Shahbaz Youssefi @syoussefi" specialuse="glemulation" supported="vulkan">
+ <require>
+ <enum value="2" name="VK_GOOGLE_SURFACELESS_QUERY_SPEC_VERSION"/>
+ <enum value="&quot;VK_GOOGLE_surfaceless_query&quot;" name="VK_GOOGLE_SURFACELESS_QUERY_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_shader_maximal_reconvergence" number="435" type="device" author="KHR" depends="VK_VERSION_1_1" contact="Alan Baker @alan-baker" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_SHADER_MAXIMAL_RECONVERGENCE_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_shader_maximal_reconvergence&quot;" name="VK_KHR_SHADER_MAXIMAL_RECONVERGENCE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR"/>
+ <type name="VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_application_parameters" number="436" type="instance" author="EXT" contact="Daniel Koch @dgkoch" supported="vulkansc">
+ <require>
+ <enum value="1" name="VK_EXT_APPLICATION_PARAMETERS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_application_parameters&quot;" name="VK_EXT_APPLICATION_PARAMETERS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_APPLICATION_PARAMETERS_EXT"/>
+ <type name="VkApplicationParametersEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_437" number="437" author="EXT" contact="Jonathan Weinstein @Jonathan-Weinstein" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_437_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_437&quot;" name="VK_EXT_EXTENSION_437_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_image_compression_control_swapchain" number="438" type="device" depends="VK_EXT_image_compression_control" author="EXT" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_image_compression_control_swapchain&quot;" name="VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_SEC_extension_439" number="439" author="SEC" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <require>
+ <enum value="0" name="VK_SEC_EXTENSION_439_SPEC_VERSION"/>
+ <enum value="&quot;VK_SEC_extension_439&quot;" name="VK_SEC_EXTENSION_439_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_extension_440" number="440" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_QCOM_EXTENSION_440_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_440&quot;" name="VK_QCOM_EXTENSION_440_EXTENSION_NAME"/>
+ <enum bitpos="7" extends="VkQueueFlagBits" name="VK_QUEUE_RESERVED_7_BIT_QCOM"/>
+ <enum bitpos="1" extends="VkDeviceQueueCreateFlagBits" name="VK_DEVICE_QUEUE_CREATE_RESERVED_1_BIT_QCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_image_processing" number="441" type="device" depends="VK_KHR_format_feature_flags2" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_IMAGE_PROCESSING_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_image_processing&quot;" name="VK_QCOM_IMAGE_PROCESSING_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM"/>
+ <enum bitpos="4" extends="VkSamplerCreateFlagBits" name="VK_SAMPLER_CREATE_IMAGE_PROCESSING_BIT_QCOM"/>
+ <enum bitpos="20" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_SAMPLE_WEIGHT_BIT_QCOM"/>
+ <enum bitpos="21" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_SAMPLE_BLOCK_MATCH_BIT_QCOM"/>
+ <enum offset="0" extends="VkDescriptorType" name="VK_DESCRIPTOR_TYPE_SAMPLE_WEIGHT_IMAGE_QCOM"/>
+ <enum offset="1" extends="VkDescriptorType" name="VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM"/>
+ <type name="VkImageViewSampleWeightCreateInfoQCOM"/>
+ <type name="VkPhysicalDeviceImageProcessingFeaturesQCOM"/>
+ <type name="VkPhysicalDeviceImageProcessingPropertiesQCOM"/>
+ </require>
+ <require depends="VK_KHR_format_feature_flags2">
+ <enum bitpos="34" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_WEIGHT_IMAGE_BIT_QCOM"/>
+ <enum bitpos="35" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_WEIGHT_SAMPLED_IMAGE_BIT_QCOM"/>
+ <enum bitpos="36" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_BLOCK_MATCHING_BIT_QCOM"/>
+ <enum bitpos="37" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_BOX_FILTER_SAMPLED_BIT_QCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_COREAVI_extension_442" number="442" author="COREAVI" contact="Aidan Fabius @afabius" supported="disabled">
+ <require>
+ <enum value="0" name="VK_COREAVI_EXTENSION_442_SPEC_VERSION"/>
+ <enum value="&quot;VK_COREAVI_extension_442&quot;" name="VK_COREAVI_EXTENSION_442_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_COREAVI_extension_443" number="443" author="COREAVI" contact="Aidan Fabius @afabius" supported="disabled">
+ <require>
+ <enum value="0" name="VK_COREAVI_EXTENSION_443_SPEC_VERSION"/>
+ <enum value="&quot;VK_COREAVI_extension_443&quot;" name="VK_COREAVI_EXTENSION_443_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_COREAVI_extension_444" number="444" author="COREAVI" contact="Aidan Fabius @afabius" supported="disabled">
+ <require>
+ <enum value="0" name="VK_COREAVI_EXTENSION_444_SPEC_VERSION"/>
+ <enum value="&quot;VK_COREAVI_extension_444&quot;" name="VK_COREAVI_EXTENSION_444_EXTENSION_NAME"/>
+ <enum extends="VkCommandPoolResetFlagBits" bitpos="1" name="VK_COMMAND_POOL_RESET_RESERVED_1_BIT_COREAVI"/>
+ </require>
+ </extension>
+ <extension name="VK_COREAVI_extension_445" number="445" author="COREAVI" contact="Aidan Fabius @afabius" supported="disabled">
+ <require>
+ <enum value="0" name="VK_COREAVI_EXTENSION_445_SPEC_VERSION"/>
+ <enum value="&quot;VK_COREAVI_extension_445&quot;" name="VK_COREAVI_EXTENSION_445_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_COREAVI_extension_446" number="446" author="COREAVI" contact="Aidan Fabius @afabius" supported="disabled">
+ <require>
+ <enum value="0" name="VK_COREAVI_EXTENSION_446_SPEC_VERSION"/>
+ <enum value="&quot;VK_COREAVI_extension_446&quot;" name="VK_COREAVI_EXTENSION_446_EXTENSION_NAME"/>
+ <enum bitpos="24" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_24_BIT_COREAVI"/>
+ </require>
+ </extension>
+ <extension name="VK_COREAVI_extension_447" number="447" author="COREAVI" contact="Aidan Fabius @afabius" supported="disabled">
+ <require>
+ <enum value="0" name="VK_COREAVI_EXTENSION_447_SPEC_VERSION"/>
+ <enum value="&quot;VK_COREAVI_extension_447&quot;" name="VK_COREAVI_EXTENSION_447_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_SEC_extension_448" number="448" author="SEC" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <require>
+ <enum value="0" name="VK_SEC_EXTENSION_448_SPEC_VERSION"/>
+ <enum value="&quot;VK_SEC_extension_448&quot;" name="VK_SEC_EXTENSION_448_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_SEC_extension_449" number="449" author="SEC" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <require>
+ <enum value="0" name="VK_SEC_EXTENSION_449_SPEC_VERSION"/>
+ <enum value="&quot;VK_SEC_extension_449&quot;" name="VK_SEC_EXTENSION_449_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_SEC_extension_450" number="450" author="SEC" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <require>
+ <enum value="0" name="VK_SEC_EXTENSION_450_SPEC_VERSION"/>
+ <enum value="&quot;VK_SEC_extension_450&quot;" name="VK_SEC_EXTENSION_450_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_SEC_extension_451" number="451" author="SEC" contact="Ralph Potter gitlab:@r_potter" supported="disabled">
+ <require>
+ <enum value="0" name="VK_SEC_EXTENSION_451_SPEC_VERSION"/>
+ <enum value="&quot;VK_SEC_extension_451&quot;" name="VK_SEC_EXTENSION_451_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_nested_command_buffer" number="452" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_NESTED_COMMAND_BUFFER_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_nested_command_buffer&quot;" name="VK_EXT_NESTED_COMMAND_BUFFER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_PROPERTIES_EXT"/>
+ <enum offset="0" extends="VkSubpassContents" name="VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_EXT"/>
+ <enum bitpos="4" extends="VkRenderingFlagBits" name="VK_RENDERING_CONTENTS_INLINE_BIT_EXT"/>
+ <type name="VkPhysicalDeviceNestedCommandBufferFeaturesEXT"/>
+ <type name="VkPhysicalDeviceNestedCommandBufferPropertiesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_ARM_extension_453" number="453" author="Arm" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_ARM_EXTENSION_453_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_extension_453&quot;" name="VK_ARM_EXTENSION_453_EXTENSION_NAME"/>
+ <enum bitpos="11" extends="VkQueueFlagBits" name="VK_QUEUE_RESERVED_11_BIT_ARM"/>
+ <enum bitpos="43" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_RESERVED_43_BIT_ARM"/>
+ <enum bitpos="49" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_49_BIT_ARM"/>
+ <enum bitpos="50" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_50_BIT_ARM"/>
+ <enum bitpos="47" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_47_BIT_ARM" />
+ </require>
+ </extension>
+ <extension name="VK_EXT_external_memory_acquire_unmodified" number="454" type="device" depends="VK_KHR_external_memory" author="EXT" contact="Lina Versace @versalinyaa" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_external_memory_acquire_unmodified&quot;" name="VK_EXT_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXT"/>
+ <type name="VkExternalMemoryAcquireUnmodifiedEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_GOOGLE_extension_455" number="455" author="GOOGLE" contact="Lina Versace @versalinyaa" supported="disabled">
+ <require>
+ <enum value="0" name="VK_GOOGLE_EXTENSION_455_SPEC_VERSION"/>
+ <enum value="&quot;VK_GOOGLE_extension_455&quot;" name="VK_GOOGLE_EXTENSION_455_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extended_dynamic_state3" number="456" type="device" depends="VK_KHR_get_physical_device_properties2" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="2" name="VK_EXT_EXTENDED_DYNAMIC_STATE_3_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extended_dynamic_state3&quot;" name="VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_PROPERTIES_EXT"/>
+ <enum offset="3" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_CLAMP_ENABLE_EXT"/>
+ <enum offset="4" extends="VkDynamicState" name="VK_DYNAMIC_STATE_POLYGON_MODE_EXT"/>
+ <enum offset="5" extends="VkDynamicState" name="VK_DYNAMIC_STATE_RASTERIZATION_SAMPLES_EXT"/>
+ <enum offset="6" extends="VkDynamicState" name="VK_DYNAMIC_STATE_SAMPLE_MASK_EXT"/>
+ <enum offset="7" extends="VkDynamicState" name="VK_DYNAMIC_STATE_ALPHA_TO_COVERAGE_ENABLE_EXT"/>
+ <enum offset="8" extends="VkDynamicState" name="VK_DYNAMIC_STATE_ALPHA_TO_ONE_ENABLE_EXT"/>
+ <enum offset="9" extends="VkDynamicState" name="VK_DYNAMIC_STATE_LOGIC_OP_ENABLE_EXT"/>
+ <enum offset="10" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COLOR_BLEND_ENABLE_EXT"/>
+ <enum offset="11" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COLOR_BLEND_EQUATION_EXT"/>
+ <enum offset="12" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COLOR_WRITE_MASK_EXT"/>
+ <type name="VkPhysicalDeviceExtendedDynamicState3FeaturesEXT"/>
+ <type name="VkPhysicalDeviceExtendedDynamicState3PropertiesEXT"/>
+ <type name="VkColorBlendEquationEXT"/>
+ <type name="VkColorBlendAdvancedEXT"/>
+ <command name="vkCmdSetDepthClampEnableEXT"/>
+ <command name="vkCmdSetPolygonModeEXT"/>
+ <command name="vkCmdSetRasterizationSamplesEXT"/>
+ <command name="vkCmdSetSampleMaskEXT"/>
+ <command name="vkCmdSetAlphaToCoverageEnableEXT"/>
+ <command name="vkCmdSetAlphaToOneEnableEXT"/>
+ <command name="vkCmdSetLogicOpEnableEXT"/>
+ <command name="vkCmdSetColorBlendEnableEXT"/>
+ <command name="vkCmdSetColorBlendEquationEXT"/>
+ <command name="vkCmdSetColorWriteMaskEXT"/>
+ </require>
+ <require depends="VK_KHR_maintenance2,VK_VERSION_1_1">
+ <enum offset="2" extends="VkDynamicState" name="VK_DYNAMIC_STATE_TESSELLATION_DOMAIN_ORIGIN_EXT"/>
+ <command name="vkCmdSetTessellationDomainOriginEXT"/>
+ </require>
+ <require depends="VK_EXT_transform_feedback">
+ <enum offset="13" extends="VkDynamicState" name="VK_DYNAMIC_STATE_RASTERIZATION_STREAM_EXT"/>
+ <command name="vkCmdSetRasterizationStreamEXT"/>
+ </require>
+ <require depends="VK_EXT_conservative_rasterization">
+ <enum offset="14" extends="VkDynamicState" name="VK_DYNAMIC_STATE_CONSERVATIVE_RASTERIZATION_MODE_EXT"/>
+ <enum offset="15" extends="VkDynamicState" name="VK_DYNAMIC_STATE_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE_EXT"/>
+ <command name="vkCmdSetConservativeRasterizationModeEXT"/>
+ <command name="vkCmdSetExtraPrimitiveOverestimationSizeEXT"/>
+ </require>
+ <require depends="VK_EXT_depth_clip_enable">
+ <enum offset="16" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_CLIP_ENABLE_EXT"/>
+ <command name="vkCmdSetDepthClipEnableEXT"/>
+ </require>
+ <require depends="VK_EXT_sample_locations">
+ <enum offset="17" extends="VkDynamicState" name="VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_ENABLE_EXT"/>
+ <command name="vkCmdSetSampleLocationsEnableEXT"/>
+ </require>
+ <require depends="VK_EXT_blend_operation_advanced">
+ <enum offset="18" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COLOR_BLEND_ADVANCED_EXT"/>
+ <command name="vkCmdSetColorBlendAdvancedEXT"/>
+ </require>
+ <require depends="VK_EXT_provoking_vertex">
+ <enum offset="19" extends="VkDynamicState" name="VK_DYNAMIC_STATE_PROVOKING_VERTEX_MODE_EXT"/>
+ <command name="vkCmdSetProvokingVertexModeEXT"/>
+ </require>
+ <require depends="VK_EXT_line_rasterization">
+ <enum offset="20" extends="VkDynamicState" name="VK_DYNAMIC_STATE_LINE_RASTERIZATION_MODE_EXT"/>
+ <enum offset="21" extends="VkDynamicState" name="VK_DYNAMIC_STATE_LINE_STIPPLE_ENABLE_EXT"/>
+ <command name="vkCmdSetLineRasterizationModeEXT"/>
+ <command name="vkCmdSetLineStippleEnableEXT"/>
+ </require>
+ <require depends="VK_EXT_depth_clip_control">
+ <enum offset="22" extends="VkDynamicState" name="VK_DYNAMIC_STATE_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE_EXT"/>
+ <command name="vkCmdSetDepthClipNegativeOneToOneEXT"/>
+ </require>
+ <require depends="VK_NV_clip_space_w_scaling">
+ <enum offset="23" extends="VkDynamicState" name="VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_ENABLE_NV"/>
+ <command name="vkCmdSetViewportWScalingEnableNV"/>
+ </require>
+ <require depends="VK_NV_viewport_swizzle">
+ <enum offset="24" extends="VkDynamicState" name="VK_DYNAMIC_STATE_VIEWPORT_SWIZZLE_NV"/>
+ <command name="vkCmdSetViewportSwizzleNV"/>
+ </require>
+ <require depends="VK_NV_fragment_coverage_to_color">
+ <enum offset="25" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COVERAGE_TO_COLOR_ENABLE_NV"/>
+ <enum offset="26" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COVERAGE_TO_COLOR_LOCATION_NV"/>
+ <command name="vkCmdSetCoverageToColorEnableNV"/>
+ <command name="vkCmdSetCoverageToColorLocationNV"/>
+ </require>
+ <require depends="VK_NV_framebuffer_mixed_samples">
+ <enum offset="27" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COVERAGE_MODULATION_MODE_NV"/>
+ <enum offset="28" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COVERAGE_MODULATION_TABLE_ENABLE_NV"/>
+ <enum offset="29" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COVERAGE_MODULATION_TABLE_NV"/>
+ <command name="vkCmdSetCoverageModulationModeNV"/>
+ <command name="vkCmdSetCoverageModulationTableEnableNV"/>
+ <command name="vkCmdSetCoverageModulationTableNV"/>
+ </require>
+ <require depends="VK_NV_shading_rate_image">
+ <enum offset="30" extends="VkDynamicState" name="VK_DYNAMIC_STATE_SHADING_RATE_IMAGE_ENABLE_NV"/>
+ <command name="vkCmdSetShadingRateImageEnableNV"/>
+ </require>
+ <require depends="VK_NV_representative_fragment_test">
+ <enum offset="31" extends="VkDynamicState" name="VK_DYNAMIC_STATE_REPRESENTATIVE_FRAGMENT_TEST_ENABLE_NV"/>
+ <command name="vkCmdSetRepresentativeFragmentTestEnableNV"/>
+ </require>
+ <require depends="VK_NV_coverage_reduction_mode">
+ <enum offset="32" extends="VkDynamicState" name="VK_DYNAMIC_STATE_COVERAGE_REDUCTION_MODE_NV"/>
+ <command name="vkCmdSetCoverageReductionModeNV"/>
+ </require>
+ </extension>
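+ <!-- Illustrative, non-normative sketch (editor's note, not part of the upstream registry text):
+      each command listed above is a dynamic-state setter taking the command buffer plus the new value.
+      Names such as cmd and blendEnable, and the core enum values below, are placeholders assumed from
+      the core API, and the matching extendedDynamicState3* features must have been enabled:
+
+          vkCmdSetPolygonModeEXT(cmd, VK_POLYGON_MODE_LINE);
+          vkCmdSetRasterizationSamplesEXT(cmd, VK_SAMPLE_COUNT_4_BIT);
+          VkBool32 blendEnable = VK_TRUE;
+          vkCmdSetColorBlendEnableEXT(cmd, 0, 1, &blendEnable);
+
+      The pipeline in use must list the corresponding VK_DYNAMIC_STATE_* values from this block. -->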
+ <extension name="VK_EXT_extension_457" number="457" author="RASTERGRID" contact="Daniel Rakos @aqnuep" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_457_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_457&quot;" name="VK_EXT_EXTENSION_457_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_458" number="458" author="RASTERGRID" contact="Daniel Rakos @aqnuep" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_458_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_458&quot;" name="VK_EXT_EXTENSION_458_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_subpass_merge_feedback" number="459" type="device" author="EXT" contact="Ting Wei @catweiting" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
+ <require>
+ <enum value="2" name="VK_EXT_SUBPASS_MERGE_FEEDBACK_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_subpass_merge_feedback&quot;" name="VK_EXT_SUBPASS_MERGE_FEEDBACK_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT"/>
+ <type name="VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT"/>
+ <type name="VkRenderPassCreationControlEXT"/>
+ <type name="VkRenderPassCreationFeedbackInfoEXT"/>
+ <type name="VkRenderPassCreationFeedbackCreateInfoEXT"/>
+ <type name="VkRenderPassSubpassFeedbackInfoEXT"/>
+ <type name="VkRenderPassSubpassFeedbackCreateInfoEXT"/>
+ <type name="VkSubpassMergeStatusEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_LUNARG_direct_driver_loading" number="460" type="instance" author="LUNARG" contact="Charles Giessen @charles-lunarg" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION"/>
+ <enum value="&quot;VK_LUNARG_direct_driver_loading&quot;" name="VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG"/>
+ <type name="VkDirectDriverLoadingFlagsLUNARG" comment="Will add VkDirectDriverLoadingFlagBitsLUNARG when bits are defined in the future"/>
+ <type name="VkDirectDriverLoadingModeLUNARG"/>
+ <type name="VkDirectDriverLoadingInfoLUNARG"/>
+ <type name="VkDirectDriverLoadingListLUNARG"/>
+ <type name="PFN_vkGetInstanceProcAddrLUNARG"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_461" number="461" author="EXT" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_461_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_461&quot;" name="VK_EXT_EXTENSION_461_EXTENSION_NAME"/>
+ <enum bitpos="39" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_39_BIT_EXT"/>
+ <enum bitpos="23" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_23_BIT_EXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_462" number="462" author="EXT" contact="Joshua Ashton @Joshua-Ashton,Liam Middlebrook @liam-middlebrook" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_462_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_462&quot;" name="VK_EXT_EXTENSION_462_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_shader_module_identifier" number="463" type="device" depends="VK_KHR_get_physical_device_properties2+VK_EXT_pipeline_creation_cache_control" author="EXT" contact="Hans-Kristian Arntzen @HansKristian-Work" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_SHADER_MODULE_IDENTIFIER_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_shader_module_identifier&quot;" name="VK_EXT_SHADER_MODULE_IDENTIFIER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT"/>
+ <enum name="VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT"/>
+ <type name="VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT"/>
+ <type name="VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT"/>
+ <type name="VkPipelineShaderStageModuleIdentifierCreateInfoEXT"/>
+ <type name="VkShaderModuleIdentifierEXT"/>
+ <command name="vkGetShaderModuleIdentifierEXT"/>
+ <command name="vkGetShaderModuleCreateInfoIdentifierEXT"/>
+ </require>
+ </extension>
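+ <!-- Illustrative, non-normative sketch (editor's note): vkGetShaderModuleIdentifierEXT fills a
+      VkShaderModuleIdentifierEXT that an application can cache and replay later through
+      VkPipelineShaderStageModuleIdentifierCreateInfoEXT. device and shaderModule are assumed valid
+      handles; the struct members come from the Vulkan headers rather than from this block:
+
+          VkShaderModuleIdentifierEXT id = { .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT };
+          vkGetShaderModuleIdentifierEXT(device, shaderModule, &id);
+          /* id.identifierSize bytes of id.identifier (at most VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT)
+             can be persisted and supplied on a later run instead of the SPIR-V module. */
+ -->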
+ <extension name="VK_EXT_rasterization_order_attachment_access" number="464" type="device" depends="VK_KHR_get_physical_device_properties2" author="ARM" contact="Jan-Harald Fredriksen @janharaldfredriksen-arm" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_rasterization_order_attachment_access&quot;" name="VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="343" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT"/>
+ <type name="VkPipelineColorBlendStateCreateFlagBits"/>
+ <type name="VkPipelineDepthStencilStateCreateFlagBits"/>
+ <enum bitpos="0" extends="VkPipelineColorBlendStateCreateFlagBits" name="VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_EXT"/>
+ <enum bitpos="0" extends="VkPipelineDepthStencilStateCreateFlagBits" name="VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT"/>
+ <enum bitpos="1" extends="VkPipelineDepthStencilStateCreateFlagBits" name="VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT"/>
+ <enum bitpos="4" extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT"/>
+ <enum bitpos="5" extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT"/>
+ <enum bitpos="6" extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_optical_flow" number="465" depends="VK_KHR_get_physical_device_properties2+VK_KHR_format_feature_flags2+VK_KHR_synchronization2" type="device" author="NV" contact="Carsten Rohde @crohde" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_OPTICAL_FLOW_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_optical_flow&quot;" name="VK_NV_OPTICAL_FLOW_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_PROPERTIES_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_INFO_NV"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_PROPERTIES_NV"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_INFO_NV"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV"/>
+ <enum offset="10" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV"/><comment>NV internal use only</comment>
+ <enum offset="0" extends="VkFormat" name="VK_FORMAT_R16G16_S10_5_NV"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_OPTICAL_FLOW_SESSION_NV"/>
+ <enum bitpos="8" extends="VkQueueFlagBits" name="VK_QUEUE_OPTICAL_FLOW_BIT_NV"/>
+ <enum bitpos="29" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV"/>
+ <enum bitpos="42" extends="VkAccessFlagBits2" name="VK_ACCESS_2_OPTICAL_FLOW_READ_BIT_NV"/>
+ <enum bitpos="43" extends="VkAccessFlagBits2" name="VK_ACCESS_2_OPTICAL_FLOW_WRITE_BIT_NV"/>
+ <enum bitpos="40" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_OPTICAL_FLOW_IMAGE_BIT_NV"/>
+ <enum bitpos="41" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_OPTICAL_FLOW_VECTOR_BIT_NV"/>
+ <enum bitpos="42" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_OPTICAL_FLOW_COST_BIT_NV"/>
+ <type name="VkPhysicalDeviceOpticalFlowFeaturesNV"/>
+ <type name="VkPhysicalDeviceOpticalFlowPropertiesNV"/>
+ <type name="VkOpticalFlowUsageFlagBitsNV"/>
+ <type name="VkOpticalFlowUsageFlagsNV"/>
+ <type name="VkOpticalFlowImageFormatInfoNV"/>
+ <type name="VkOpticalFlowImageFormatPropertiesNV"/>
+ <type name="VkOpticalFlowGridSizeFlagBitsNV"/>
+ <type name="VkOpticalFlowGridSizeFlagsNV"/>
+ <type name="VkOpticalFlowPerformanceLevelNV"/>
+ <type name="VkOpticalFlowSessionBindingPointNV"/>
+ <type name="VkOpticalFlowSessionCreateFlagBitsNV"/>
+ <type name="VkOpticalFlowSessionCreateFlagsNV"/>
+ <type name="VkOpticalFlowExecuteFlagBitsNV"/>
+ <type name="VkOpticalFlowExecuteFlagsNV"/>
+ <type name="VkOpticalFlowSessionNV"/>
+ <type name="VkOpticalFlowSessionCreateInfoNV"/>
+ <type name="VkOpticalFlowSessionCreatePrivateDataInfoNV"/><comment>NV internal use only</comment>
+ <type name="VkOpticalFlowExecuteInfoNV"/>
+ <command name="vkGetPhysicalDeviceOpticalFlowImageFormatsNV"/>
+ <command name="vkCreateOpticalFlowSessionNV"/>
+ <command name="vkDestroyOpticalFlowSessionNV"/>
+ <command name="vkBindOpticalFlowSessionImageNV"/>
+ <command name="vkCmdOpticalFlowExecuteNV"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_legacy_dithering" number="466" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" specialuse="glemulation">
+ <require>
+ <enum value="1" name="VK_EXT_LEGACY_DITHERING_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_legacy_dithering&quot;" name="VK_EXT_LEGACY_DITHERING_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT"/>
+ <enum bitpos="7" extends="VkSubpassDescriptionFlagBits" name="VK_SUBPASS_DESCRIPTION_ENABLE_LEGACY_DITHERING_BIT_EXT"/>
+ <type name="VkPhysicalDeviceLegacyDitheringFeaturesEXT"/>
+ </require>
+ <require depends="VK_VERSION_1_3">
+ <enum bitpos="3" extends="VkRenderingFlagBits" name="VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT"/>
+ </require>
+ <require depends="VK_KHR_dynamic_rendering">
+ <enum bitpos="3" extends="VkRenderingFlagBits" name="VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_pipeline_protected_access" number="467" type="device" depends="VK_KHR_get_physical_device_properties2" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_PIPELINE_PROTECTED_ACCESS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_pipeline_protected_access&quot;" name="VK_EXT_PIPELINE_PROTECTED_ACCESS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT"/>
+ <enum bitpos="27" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT"/>
+ <enum bitpos="30" extends="VkPipelineCreateFlagBits" name="VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT"/>
+ <type name="VkPhysicalDevicePipelineProtectedAccessFeaturesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_468" number="468" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_468_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_468&quot;" name="VK_EXT_EXTENSION_468_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_ANDROID_external_format_resolve" number="469" type="device" depends="VK_ANDROID_external_memory_android_hardware_buffer" platform="android" author="ANDROID" contact="Chris Forbes @chrisforbes" specialuse="glemulation" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_ANDROID_EXTERNAL_FORMAT_RESOLVE_SPEC_VERSION"/>
+ <enum value="&quot;VK_ANDROID_external_format_resolve&quot;" name="VK_ANDROID_EXTERNAL_FORMAT_RESOLVE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_PROPERTIES_ANDROID"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_RESOLVE_PROPERTIES_ANDROID"/>
+ <type name="VkPhysicalDeviceExternalFormatResolveFeaturesANDROID"/>
+ <type name="VkPhysicalDeviceExternalFormatResolvePropertiesANDROID"/>
+ <type name="VkAndroidHardwareBufferFormatResolvePropertiesANDROID"/>
+ </require>
+ <require depends="VK_KHR_dynamic_rendering">
+ <enum bitpos="4" extends="VkResolveModeFlagBits" name="VK_RESOLVE_MODE_EXTERNAL_FORMAT_DOWNSAMPLE_ANDROID"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_470" number="470" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_470_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_470&quot;" name="VK_AMD_EXTENSION_470_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_maintenance5" number="471" type="device" depends="VK_VERSION_1_1+VK_KHR_dynamic_rendering" author="KHR" contact="Stu Smith @stu-s" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_MAINTENANCE_5_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_maintenance5&quot;" name="VK_KHR_MAINTENANCE_5_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR"/>
+ <type name="VkPhysicalDeviceMaintenance5FeaturesKHR"/>
+ <type name="VkPhysicalDeviceMaintenance5PropertiesKHR"/>
+ <enum offset="0" extends="VkFormat" name="VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR"/>
+ <enum offset="1" extends="VkFormat" name="VK_FORMAT_A8_UNORM_KHR"/>
+ <command name="vkCmdBindIndexBuffer2KHR"/>
+ <command name="vkGetRenderingAreaGranularityKHR"/>
+ <type name="VkRenderingAreaInfoKHR"/>
+ <command name="vkGetDeviceImageSubresourceLayoutKHR"/>
+ <command name="vkGetImageSubresourceLayout2KHR"/>
+ <type name="VkDeviceImageSubresourceInfoKHR"/>
+ <type name="VkImageSubresource2KHR"/>
+ <type name="VkSubresourceLayout2KHR"/>
+ <enum offset="2" extends="VkStructureType" extnumber="339" name="VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR"/>
+ <enum offset="3" extends="VkStructureType" extnumber="339" name="VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR"/>
+ </require>
+ <require comment="Split off new 64-bit flags separately, for the moment">
+ <type name="VkPipelineCreateFlags2KHR"/>
+ <type name="VkPipelineCreateFlagBits2KHR"/>
+ <type name="VkPipelineCreateFlags2CreateInfoKHR"/>
+ <type name="VkBufferUsageFlags2KHR"/>
+ <type name="VkBufferUsageFlagBits2KHR"/>
+ <type name="VkBufferUsageFlags2CreateInfoKHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR"/>
+ </require>
+ <require depends="VK_VERSION_1_1,VK_KHR_device_group">
+ <enum bitpos="3" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR"/>
+ <enum bitpos="4" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR"/>
+ </require>
+ <require depends="VK_NV_ray_tracing">
+ <enum bitpos="5" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV"/>
+ </require>
+ <require depends="VK_KHR_pipeline_executable_properties">
+ <enum bitpos="6" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR"/>
+ <enum bitpos="7" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR"/>
+ </require>
+ <require depends="VK_VERSION_1_3,VK_EXT_pipeline_creation_cache_control">
+ <enum bitpos="8" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR"/>
+ <enum bitpos="9" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR"/>
+ </require>
+ <require depends="VK_EXT_graphics_pipeline_library">
+ <enum bitpos="10" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT"/>
+ <enum bitpos="23" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT"/>
+ </require>
+ <require depends="VK_KHR_pipeline_library">
+ <enum bitpos="11" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_ray_tracing_pipeline">
+ <enum bitpos="12" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR"/>
+ <enum bitpos="13" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR"/>
+ <enum bitpos="14" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR"/>
+ <enum bitpos="15" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR"/>
+ <enum bitpos="16" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR"/>
+ <enum bitpos="17" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR"/>
+ <enum bitpos="19" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR"/>
+ </require>
+ <require depends="VK_NV_device_generated_commands">
+ <enum bitpos="18" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_NV"/>
+ </require>
+ <require depends="VK_NV_ray_tracing_motion_blur">
+ <enum bitpos="20" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_MOTION_BIT_NV"/>
+ </require>
+ <require depends="VK_KHR_dynamic_rendering+VK_KHR_fragment_shading_rate">
+ <enum bitpos="21" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_dynamic_rendering+VK_EXT_fragment_density_map">
+ <enum bitpos="22" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_opacity_micromap">
+ <enum bitpos="24" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_attachment_feedback_loop_layout">
+ <enum bitpos="25" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT"/>
+ <enum bitpos="26" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_pipeline_protected_access">
+ <enum bitpos="27" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT"/>
+ <enum bitpos="30" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT"/>
+ </require>
+ <require depends="VK_NV_displacement_micromap">
+ <enum bitpos="28" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV"/>
+ </require>
+ <require depends="VK_EXT_descriptor_buffer">
+ <enum bitpos="29" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_conditional_rendering">
+ <enum bitpos="9" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_CONDITIONAL_RENDERING_BIT_EXT"/>
+ </require>
+ <require depends="VK_KHR_ray_tracing_pipeline">
+ <enum bitpos="10" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR"/>
+ </require>
+ <require depends="VK_NV_ray_tracing">
+ <enum extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_RAY_TRACING_BIT_NV" alias="VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR"/>
+ </require>
+ <require depends="VK_EXT_transform_feedback">
+ <enum bitpos="11" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT"/>
+ <enum bitpos="12" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT"/>
+ </require>
+ <require depends="VK_KHR_video_decode_queue">
+ <enum bitpos="13" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_VIDEO_DECODE_SRC_BIT_KHR"/>
+ <enum bitpos="14" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_VIDEO_DECODE_DST_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_video_encode_queue">
+ <enum bitpos="15" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_VIDEO_ENCODE_DST_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ <enum bitpos="16" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_VIDEO_ENCODE_SRC_BIT_KHR" protect="VK_ENABLE_BETA_EXTENSIONS"/>
+ </require>
+ <require depends="VK_VERSION_1_2,VK_KHR_buffer_device_address,VK_EXT_buffer_device_address">
+ <enum bitpos="17" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT_KHR"/>
+ </require>
+ <require depends="VK_KHR_acceleration_structure">
+ <enum bitpos="19" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR"/>
+ <enum bitpos="20" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR"/>
+ </require>
+ <require depends="VK_EXT_descriptor_buffer">
+ <enum bitpos="21" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <enum bitpos="22" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ <enum bitpos="26" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_opacity_micromap">
+ <enum bitpos="23" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT"/>
+ <enum bitpos="24" extends="VkBufferUsageFlagBits2KHR" name="VK_BUFFER_USAGE_2_MICROMAP_STORAGE_BIT_EXT"/>
+ </require>
+ </extension>
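+ <!-- Illustrative, non-normative sketch (editor's note): vkCmdBindIndexBuffer2KHR adds an explicit
+      size parameter compared with core vkCmdBindIndexBuffer, so only a sub-range of the buffer is
+      bound. cmd, indexBuffer and rangeBytes are assumed placeholders:
+
+          vkCmdBindIndexBuffer2KHR(cmd, indexBuffer, 0, rangeBytes, VK_INDEX_TYPE_UINT32);
+
+      vkGetRenderingAreaGranularityKHR similarly mirrors core vkGetRenderAreaGranularity for dynamic
+      rendering, taking a VkRenderingAreaInfoKHR instead of a render pass. -->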
+ <extension name="VK_AMD_extension_472" number="472" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_472_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_472&quot;" name="VK_AMD_EXTENSION_472_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_473" number="473" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_473_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_473&quot;" name="VK_AMD_EXTENSION_473_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_474" number="474" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_474_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_474&quot;" name="VK_AMD_EXTENSION_474_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_475" number="475" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_475_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_475&quot;" name="VK_AMD_EXTENSION_475_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_476" number="476" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_476_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_476&quot;" name="VK_AMD_EXTENSION_476_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_477" number="477" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_477_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_477&quot;" name="VK_AMD_EXTENSION_477_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_478" number="478" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_478_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_478&quot;" name="VK_AMD_EXTENSION_478_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_AMD_extension_479" number="479" author="AMD" contact="Stu Smith" supported="disabled">
+ <require>
+ <enum value="0" name="VK_AMD_EXTENSION_479_SPEC_VERSION"/>
+ <enum value="&quot;VK_AMD_extension_479&quot;" name="VK_AMD_EXTENSION_479_EXTENSION_NAME"/>
+ <enum bitpos="32" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RESERVED_32_BIT_KHR"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_480" number="480" author="EXT" contact="Daniel Stone" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_480_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_480&quot;" name="VK_EXT_EXTENSION_480_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_481" number="481" author="EXT" contact="Daniel Stone" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_481_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_481&quot;" name="VK_EXT_EXTENSION_481_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_ray_tracing_position_fetch" number="482" type="device" depends="VK_KHR_acceleration_structure" author="KHR" contact="Eric Werness" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_RAY_TRACING_POSITION_FETCH_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_ray_tracing_position_fetch&quot;" name="VK_KHR_RAY_TRACING_POSITION_FETCH_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR"/>
+ <enum bitpos="11" extends="VkBuildAccelerationStructureFlagBitsKHR" name="VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DATA_ACCESS_KHR"/>
+ <type name="VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_shader_object" number="483" depends="(VK_KHR_get_physical_device_properties2,VK_VERSION_1_1)+(VK_KHR_dynamic_rendering,VK_VERSION_1_3)" type="device" author="EXT" contact="Daniel Story @daniel-story" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_SHADER_OBJECT_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_shader_object&quot;" name="VK_EXT_SHADER_OBJECT_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT"/>
+ <enum extnumber="353" offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT"/>
+ <enum extnumber="353" offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT"/>
+ <enum extends="VkStructureType" name="VK_STRUCTURE_TYPE_SHADER_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT" alias="VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_SHADER_EXT"/>
+ <enum offset="0" extends="VkResult" name="VK_INCOMPATIBLE_SHADER_BINARY_EXT"/>
+ <enum extends="VkResult" name="VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT" alias="VK_INCOMPATIBLE_SHADER_BINARY_EXT" deprecated="aliased"/>
+ <type name="VkShaderEXT"/>
+ <type name="VkShaderCreateFlagBitsEXT"/>
+ <type name="VkShaderCreateFlagsEXT"/>
+ <type name="VkShaderCodeTypeEXT"/>
+ <type name="VkPhysicalDeviceShaderObjectFeaturesEXT"/>
+ <type name="VkPhysicalDeviceShaderObjectPropertiesEXT"/>
+ <type name="VkShaderCreateInfoEXT"/>
+ <type name="VkShaderRequiredSubgroupSizeCreateInfoEXT"/>
+ <type name="VkVertexInputBindingDescription2EXT"/>
+ <type name="VkVertexInputAttributeDescription2EXT"/>
+ <type name="VkColorBlendEquationEXT"/>
+ <type name="VkColorBlendAdvancedEXT"/>
+ <command name="vkCreateShadersEXT"/>
+ <command name="vkDestroyShaderEXT"/>
+ <command name="vkGetShaderBinaryDataEXT"/>
+ <command name="vkCmdBindShadersEXT"/>
+ <command name="vkCmdSetCullModeEXT"/>
+ <command name="vkCmdSetFrontFaceEXT"/>
+ <command name="vkCmdSetPrimitiveTopologyEXT"/>
+ <command name="vkCmdSetViewportWithCountEXT"/>
+ <command name="vkCmdSetScissorWithCountEXT"/>
+ <command name="vkCmdBindVertexBuffers2EXT"/>
+ <command name="vkCmdSetDepthTestEnableEXT"/>
+ <command name="vkCmdSetDepthWriteEnableEXT"/>
+ <command name="vkCmdSetDepthCompareOpEXT"/>
+ <command name="vkCmdSetDepthBoundsTestEnableEXT"/>
+ <command name="vkCmdSetStencilTestEnableEXT"/>
+ <command name="vkCmdSetStencilOpEXT"/>
+ <command name="vkCmdSetVertexInputEXT"/>
+ <command name="vkCmdSetPatchControlPointsEXT"/>
+ <command name="vkCmdSetRasterizerDiscardEnableEXT"/>
+ <command name="vkCmdSetDepthBiasEnableEXT"/>
+ <command name="vkCmdSetLogicOpEXT"/>
+ <command name="vkCmdSetPrimitiveRestartEnableEXT"/>
+ <command name="vkCmdSetTessellationDomainOriginEXT"/>
+ <command name="vkCmdSetDepthClampEnableEXT"/>
+ <command name="vkCmdSetPolygonModeEXT"/>
+ <command name="vkCmdSetRasterizationSamplesEXT"/>
+ <command name="vkCmdSetSampleMaskEXT"/>
+ <command name="vkCmdSetAlphaToCoverageEnableEXT"/>
+ <command name="vkCmdSetAlphaToOneEnableEXT"/>
+ <command name="vkCmdSetLogicOpEnableEXT"/>
+ <command name="vkCmdSetColorBlendEnableEXT"/>
+ <command name="vkCmdSetColorBlendEquationEXT"/>
+ <command name="vkCmdSetColorWriteMaskEXT"/>
+ </require>
+ <require depends="VK_EXT_transform_feedback">
+ <command name="vkCmdSetRasterizationStreamEXT"/>
+ </require>
+ <require depends="VK_EXT_conservative_rasterization">
+ <command name="vkCmdSetConservativeRasterizationModeEXT"/>
+ <command name="vkCmdSetExtraPrimitiveOverestimationSizeEXT"/>
+ </require>
+ <require depends="VK_EXT_depth_clip_enable">
+ <command name="vkCmdSetDepthClipEnableEXT"/>
+ </require>
+ <require depends="VK_EXT_sample_locations">
+ <command name="vkCmdSetSampleLocationsEnableEXT"/>
+ </require>
+ <require depends="VK_EXT_blend_operation_advanced">
+ <command name="vkCmdSetColorBlendAdvancedEXT"/>
+ </require>
+ <require depends="VK_EXT_provoking_vertex">
+ <command name="vkCmdSetProvokingVertexModeEXT"/>
+ </require>
+ <require depends="VK_EXT_line_rasterization">
+ <command name="vkCmdSetLineRasterizationModeEXT"/>
+ <command name="vkCmdSetLineStippleEnableEXT"/>
+ </require>
+ <require depends="VK_EXT_depth_clip_control">
+ <command name="vkCmdSetDepthClipNegativeOneToOneEXT"/>
+ </require>
+ <require depends="VK_EXT_subgroup_size_control,VK_VERSION_1_3">
+ <enum bitpos="1" extends="VkShaderCreateFlagBitsEXT" name="VK_SHADER_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT"/>
+ <enum bitpos="2" extends="VkShaderCreateFlagBitsEXT" name="VK_SHADER_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_mesh_shader,VK_NV_mesh_shader">
+ <enum bitpos="3" extends="VkShaderCreateFlagBitsEXT" name="VK_SHADER_CREATE_NO_TASK_SHADER_BIT_EXT"/>
+ </require>
+ <require depends="VK_KHR_device_group,VK_VERSION_1_1">
+ <enum bitpos="4" extends="VkShaderCreateFlagBitsEXT" name="VK_SHADER_CREATE_DISPATCH_BASE_BIT_EXT"/>
+ </require>
+ <require depends="VK_KHR_fragment_shading_rate">
+ <enum bitpos="5" extends="VkShaderCreateFlagBitsEXT" name="VK_SHADER_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_EXT"/>
+ </require>
+ <require depends="VK_EXT_fragment_density_map">
+ <enum bitpos="6" extends="VkShaderCreateFlagBitsEXT" name="VK_SHADER_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT"/>
+ </require>
+ <require depends="VK_NV_clip_space_w_scaling">
+ <command name="vkCmdSetViewportWScalingEnableNV"/>
+ </require>
+ <require depends="VK_NV_viewport_swizzle">
+ <command name="vkCmdSetViewportSwizzleNV"/>
+ </require>
+ <require depends="VK_NV_fragment_coverage_to_color">
+ <command name="vkCmdSetCoverageToColorEnableNV"/>
+ <command name="vkCmdSetCoverageToColorLocationNV"/>
+ </require>
+ <require depends="VK_NV_framebuffer_mixed_samples">
+ <command name="vkCmdSetCoverageModulationModeNV"/>
+ <command name="vkCmdSetCoverageModulationTableEnableNV"/>
+ <command name="vkCmdSetCoverageModulationTableNV"/>
+ </require>
+ <require depends="VK_NV_shading_rate_image">
+ <command name="vkCmdSetShadingRateImageEnableNV"/>
+ </require>
+ <require depends="VK_NV_representative_fragment_test">
+ <command name="vkCmdSetRepresentativeFragmentTestEnableNV"/>
+ </require>
+ <require depends="VK_NV_coverage_reduction_mode">
+ <command name="vkCmdSetCoverageReductionModeNV"/>
+ </require>
+ </extension>
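+ <!-- Illustrative, non-normative sketch (editor's note): shader objects are created directly from
+      SPIR-V and bound per stage without a VkPipeline. Names such as device, cmd, spirvSize and
+      spirvWords are placeholders, and the VkShaderCreateInfoEXT members are taken from the Vulkan
+      headers rather than from this block:
+
+          VkShaderCreateInfoEXT ci = {
+              .sType     = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT,
+              .stage     = VK_SHADER_STAGE_VERTEX_BIT,
+              .nextStage = VK_SHADER_STAGE_FRAGMENT_BIT,
+              .codeType  = VK_SHADER_CODE_TYPE_SPIRV_EXT,
+              .codeSize  = spirvSize,
+              .pCode     = spirvWords,
+              .pName     = "main",
+          };
+          VkShaderEXT shader;
+          vkCreateShadersEXT(device, 1, &ci, NULL, &shader);
+
+          VkShaderStageFlagBits stage = VK_SHADER_STAGE_VERTEX_BIT;
+          vkCmdBindShadersEXT(cmd, 1, &stage, &shader);
+
+      The vkCmdSet* state setters listed above must then supply the state that a pipeline would
+      otherwise have baked in. -->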
+ <extension name="VK_EXT_extension_484" number="484" author="KHR" contact="Chris Glover @cdglove" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_484_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_484&quot;" name="VK_EXT_EXTENSION_484_EXTENSION_NAME"/>
+ <enum bitpos="31" extends="VkPipelineCreateFlagBits2KHR" name="VK_PIPELINE_CREATE_2_RESERVED_31_BIT_KHR"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_tile_properties" number="485" type="device" depends="VK_KHR_get_physical_device_properties2" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_TILE_PROPERTIES_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_tile_properties&quot;" name="VK_QCOM_TILE_PROPERTIES_EXTENSION_NAME"/>
+ <command name="vkGetFramebufferTilePropertiesQCOM"/>
+ <command name="vkGetDynamicRenderingTilePropertiesQCOM"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM"/>
+ <type name="VkPhysicalDeviceTilePropertiesFeaturesQCOM"/>
+ <type name="VkTilePropertiesQCOM"/>
+ </require>
+ <require depends="VK_KHR_dynamic_rendering">
+ <type name="VkRenderingInfoKHR"/>
+ </require>
+ </extension>
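+ <!-- Illustrative, non-normative sketch (editor's note): vkGetFramebufferTilePropertiesQCOM follows
+      the usual two-call enumeration pattern. device and framebuffer are assumed valid handles; the
+      VkTilePropertiesQCOM members themselves are defined elsewhere in this registry:
+
+          uint32_t count = 0;
+          vkGetFramebufferTilePropertiesQCOM(device, framebuffer, &count, NULL);
+          VkTilePropertiesQCOM *props = calloc(count, sizeof(*props));
+          for (uint32_t i = 0; i < count; i++)
+              props[i].sType = VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM;
+          vkGetFramebufferTilePropertiesQCOM(device, framebuffer, &count, props);
+
+      vkGetDynamicRenderingTilePropertiesQCOM returns the same information for a VkRenderingInfo
+      instead of a framebuffer. -->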
+ <extension name="VK_SEC_amigo_profiling" number="486" type="device" depends="VK_KHR_get_physical_device_properties2" author="SEC" contact="Ralph Potter gitlab:@r_potter" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_SEC_AMIGO_PROFILING_SPEC_VERSION"/>
+ <enum value="&quot;VK_SEC_amigo_profiling&quot;" name="VK_SEC_AMIGO_PROFILING_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_AMIGO_PROFILING_SUBMIT_INFO_SEC"/>
+ <type name="VkPhysicalDeviceAmigoProfilingFeaturesSEC"/>
+ <type name="VkAmigoProfilingSubmitInfoSEC"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_487" number="487" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_487_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_487&quot;" name="VK_EXT_EXTENSION_487_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_488" number="488" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_488_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_488&quot;" name="VK_EXT_EXTENSION_488_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_multiview_per_view_viewports" number="489" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_MULTIVIEW_PER_VIEW_VIEWPORTS_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_multiview_per_view_viewports&quot;" name="VK_QCOM_MULTIVIEW_PER_VIEW_VIEWPORTS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_VIEWPORTS_FEATURES_QCOM"/>
+ <type name="VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_external_sci_sync2" number="490" author="NV" depends="VK_VERSION_1_1" platform="sci" type="device" contact="Kai Zhang @kazhang" supported="vulkansc">
+ <require>
+ <enum value="1" name="VK_NV_EXTERNAL_SCI_SYNC_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_external_sci_sync2&quot;" name="VK_NV_EXTERNAL_SCI_SYNC_2_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkObjectType" name="VK_OBJECT_TYPE_SEMAPHORE_SCI_SYNC_POOL_NV" comment="VkSemaphoreSciSyncPoolNV"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SEMAPHORE_SCI_SYNC_POOL_CREATE_INFO_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SEMAPHORE_SCI_SYNC_CREATE_INFO_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SCI_SYNC_2_FEATURES_NV"/>
+ <type name="VkSemaphoreSciSyncPoolNV"/>
+ <type name="VkPhysicalDeviceExternalSciSync2FeaturesNV"/>
+ <type name="VkSemaphoreSciSyncPoolCreateInfoNV"/>
+ <type name="VkSemaphoreSciSyncCreateInfoNV"/>
+ <command name="vkCreateSemaphoreSciSyncPoolNV"/>
+ <command name="vkDestroySemaphoreSciSyncPoolNV"/>
+ </require>
+ <require comment="functionality re-used unmodified from VK_NV_external_sci_sync">
+ <enum extnumber="374" offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_FENCE_SCI_SYNC_INFO_NV"/>
+ <enum extnumber="374" offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXPORT_FENCE_SCI_SYNC_INFO_NV"/>
+ <enum extnumber="374" offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_FENCE_GET_SCI_SYNC_INFO_NV"/>
+ <enum extnumber="374" offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SCI_SYNC_ATTRIBUTES_INFO_NV"/>
+ <enum bitpos="4" extends="VkExternalFenceHandleTypeFlagBits" name="VK_EXTERNAL_FENCE_HANDLE_TYPE_SCI_SYNC_OBJ_BIT_NV"/>
+ <enum bitpos="5" extends="VkExternalFenceHandleTypeFlagBits" name="VK_EXTERNAL_FENCE_HANDLE_TYPE_SCI_SYNC_FENCE_BIT_NV"/>
+ <type name="VkSciSyncClientTypeNV"/>
+ <type name="VkSciSyncPrimitiveTypeNV"/>
+ <type name="VkExportFenceSciSyncInfoNV"/>
+ <type name="VkImportFenceSciSyncInfoNV"/>
+ <type name="VkFenceGetSciSyncInfoNV"/>
+ <type name="VkSciSyncAttributesInfoNV"/>
+ <command name="vkGetFenceSciSyncFenceNV"/>
+ <command name="vkGetFenceSciSyncObjNV"/>
+ <command name="vkImportFenceSciSyncFenceNV"/>
+ <command name="vkImportFenceSciSyncObjNV"/>
+ <command name="vkGetPhysicalDeviceSciSyncAttributesNV"/>
+ </require>
+ <require depends="VKSC_VERSION_1_0" api="vulkansc">
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_DEVICE_SEMAPHORE_SCI_SYNC_POOL_RESERVATION_CREATE_INFO_NV"/>
+ <type name="VkDeviceSemaphoreSciSyncPoolReservationCreateInfoNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_ray_tracing_invocation_reorder" number="491" type="device" depends="VK_KHR_ray_tracing_pipeline" author="NV" contact="Eric Werness @ewerness-nv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_RAY_TRACING_INVOCATION_REORDER_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_ray_tracing_invocation_reorder&quot;" name="VK_NV_RAY_TRACING_INVOCATION_REORDER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_PROPERTIES_NV"/>
+ <type name="VkRayTracingInvocationReorderModeNV"/>
+ <type name="VkPhysicalDeviceRayTracingInvocationReorderPropertiesNV"/>
+ <type name="VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_492" number="492" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_492_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_492&quot;" name="VK_NV_EXTENSION_492_EXTENSION_NAME"/>
+ <enum bitpos="44" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_RESERVED_44_BIT_NV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extended_sparse_address_space" number="493" type="device" author="NV" contact="Russell Chou @russellcnv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_EXTENDED_SPARSE_ADDRESS_SPACE_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extended_sparse_address_space&quot;" name="VK_NV_EXTENDED_SPARSE_ADDRESS_SPACE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_FEATURES_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_PROPERTIES_NV"/>
+ <type name="VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV"/>
+ <type name="VkPhysicalDeviceExtendedSparseAddressSpacePropertiesNV"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_494" number="494" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_494_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_494&quot;" name="VK_NV_EXTENSION_494_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_mutable_descriptor_type" number="495" type="device" supported="vulkan" author="EXT" contact="Joshua Ashton @Joshua-Ashton,Hans-Kristian Arntzen @HansKristian-Work" specialuse="d3demulation" depends="VK_KHR_maintenance3">
+ <require>
+ <enum value="1" name="VK_EXT_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_mutable_descriptor_type&quot;" name="VK_EXT_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="352" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT"/>
+ <enum offset="2" extends="VkStructureType" extnumber="352" name="VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT"/>
+ <enum offset="0" extends="VkDescriptorType" extnumber="352" name="VK_DESCRIPTOR_TYPE_MUTABLE_EXT"/>
+ <enum bitpos="2" extends="VkDescriptorPoolCreateFlagBits" name="VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT"/>
+ <enum bitpos="2" extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT"/>
+ <type name="VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT"/>
+ <type name="VkMutableDescriptorTypeListEXT"/>
+ <type name="VkMutableDescriptorTypeCreateInfoEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_496" number="496" author="EXT" contact="Mike Blumenkrantz @zmike" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_496_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_496&quot;" name="VK_EXT_EXTENSION_496_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_layer_settings" number="497" author="EXT" contact="Christophe Riccio @christophe" type="instance" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="2" name="VK_EXT_LAYER_SETTINGS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_layer_settings&quot;" name="VK_EXT_LAYER_SETTINGS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT"/>
+ <type name="VkLayerSettingsCreateInfoEXT"/>
+ <type name="VkLayerSettingEXT"/>
+ <type name="VkLayerSettingTypeEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_ARM_shader_core_builtins" number="498" author="ARM" contact="Kevin Petit @kpet" type="device" depends="VK_KHR_get_physical_device_properties2" supported="vulkan">
+ <require>
+ <enum value="2" name="VK_ARM_SHADER_CORE_BUILTINS_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_shader_core_builtins&quot;" name="VK_ARM_SHADER_CORE_BUILTINS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM"/>
+ <type name="VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM"/>
+ <type name="VkPhysicalDeviceShaderCoreBuiltinsPropertiesARM"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_pipeline_library_group_handles" number="499" type="device" depends="VK_KHR_ray_tracing_pipeline+VK_KHR_pipeline_library" author="EXT" contact="Hans-Kristian Arntzen @HansKristian-Work" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_PIPELINE_LIBRARY_GROUP_HANDLES_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_pipeline_library_group_handles&quot;" name="VK_EXT_PIPELINE_LIBRARY_GROUP_HANDLES_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT"/>
+ <type name="VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_dynamic_rendering_unused_attachments" number="500" author="EXT" contact="Piers Daniell @pdaniell-nv" type="device" depends="(VK_KHR_get_physical_device_properties2,VK_VERSION_1_1)+(VK_KHR_dynamic_rendering,VK_VERSION_1_3)" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_dynamic_rendering_unused_attachments&quot;" name="VK_EXT_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT"/>
+ <type name="VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_501" number="501" author="SEC" contact="Chris Hambacher @chambacher" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_501_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_501&quot;" name="VK_EXT_EXTENSION_501_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_502" number="502" author="HUAWEI" contact="Pan Gao @PanGao-h" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_502_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_502&quot;" name="VK_EXT_EXTENSION_502_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_503" number="503" author="HUAWEI" contact="Pan Gao @PanGao-h" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_503_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_503&quot;" name="VK_EXT_EXTENSION_503_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_504" number="504" author="NV" contact="Piers Daniell @pdaniell-nv" type="instance" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_504_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_504&quot;" name="VK_NV_EXTENSION_504_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_505" number="505" author="EXT" contact="Jamie Madill @jmadill" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_505_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_505&quot;" name="VK_EXT_EXTENSION_505_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_low_latency2" number="506" author="NV" depends="VK_VERSION_1_2,VK_KHR_timeline_semaphore" contact="Charles Hansen @cshansen" type="device" supported="vulkan">
+ <require>
+ <enum value="2" name="VK_NV_LOW_LATENCY_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_low_latency2&quot;" name="VK_NV_LOW_LATENCY_2_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_LATENCY_SLEEP_MODE_INFO_NV"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_LATENCY_SLEEP_INFO_NV"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SET_LATENCY_MARKER_INFO_NV"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_GET_LATENCY_MARKER_INFO_NV"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_LATENCY_TIMINGS_FRAME_REPORT_NV"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_LATENCY_SUBMISSION_PRESENT_ID_NV"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_OUT_OF_BAND_QUEUE_TYPE_INFO_NV"/>
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SWAPCHAIN_LATENCY_CREATE_INFO_NV"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_LATENCY_SURFACE_CAPABILITIES_NV"/>
+ <type name="VkLatencySleepModeInfoNV"/>
+ <type name="VkLatencySleepInfoNV"/>
+ <type name="VkSetLatencyMarkerInfoNV"/>
+ <type name="VkGetLatencyMarkerInfoNV"/>
+ <type name="VkLatencyTimingsFrameReportNV"/>
+ <type name="VkLatencyMarkerNV"/>
+ <type name="VkLatencySubmissionPresentIdNV"/>
+ <type name="VkSwapchainLatencyCreateInfoNV"/>
+ <type name="VkOutOfBandQueueTypeInfoNV"/>
+ <type name="VkOutOfBandQueueTypeNV"/>
+ <type name="VkLatencySurfaceCapabilitiesNV"/>
+ <command name="vkSetLatencySleepModeNV"/>
+ <command name="vkLatencySleepNV"/>
+ <command name="vkSetLatencyMarkerNV"/>
+ <command name="vkGetLatencyTimingsNV"/>
+ <command name="vkQueueNotifyOutOfBandNV"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_cooperative_matrix" number="507" type="device" depends="VK_KHR_get_physical_device_properties2" author="KHR" contact="Kevin Petit @kpet" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="2" name="VK_KHR_COOPERATIVE_MATRIX_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_cooperative_matrix&quot;" name="VK_KHR_COOPERATIVE_MATRIX_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_KHR"/>
+ <type name="VkCooperativeMatrixPropertiesKHR"/>
+ <type name="VkScopeKHR"/>
+ <type name="VkComponentTypeKHR"/>
+ <type name="VkPhysicalDeviceCooperativeMatrixFeaturesKHR"/>
+ <type name="VkPhysicalDeviceCooperativeMatrixPropertiesKHR"/>
+ <command name="vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_508" number="508" author="EXT" contact="Kevin Petit @kpet" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_508_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_508&quot;" name="VK_EXT_EXTENSION_508_EXTENSION_NAME"/>
+ <enum bitpos="10" extends="VkQueueFlagBits" name="VK_QUEUE_RESERVED_10_BIT_EXT" />
+ <enum bitpos="42" extends="VkPipelineStageFlagBits2" name="VK_PIPELINE_STAGE_2_RESERVED_42_BIT_EXT" />
+ <enum bitpos="47" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_47_BIT_EXT" />
+ <enum bitpos="48" extends="VkAccessFlagBits2" name="VK_ACCESS_2_RESERVED_48_BIT_EXT" />
+ <enum bitpos="48" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_48_BIT_EXT" />
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_509" number="509" author="EXT" contact="Kevin Petit @kpet" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_509_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_509&quot;" name="VK_EXT_EXTENSION_509_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_MESA_extension_510" number="510" author="MESA" contact="Dave Airlie @airlied" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_MESA_EXTENSION_510_SPEC_VERSION"/>
+ <enum value="&quot;VK_MESA_extension_510&quot;" name="VK_MESA_EXTENSION_510_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_multiview_per_view_render_areas" number="511" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_MULTIVIEW_PER_VIEW_RENDER_AREAS_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_multiview_per_view_render_areas&quot;" name="VK_QCOM_MULTIVIEW_PER_VIEW_RENDER_AREAS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM"/>
+ <type name="VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM"/>
+ <type name="VkMultiviewPerViewRenderAreasRenderPassBeginInfoQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_512" number="512" author="EXT" contact="Jean-Noe Morissette @MagicPoncho" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_512_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_512&quot;" name="VK_EXT_EXTENSION_512_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_video_decode_av1" number="513" author="KHR" depends="VK_KHR_video_decode_queue" contact="Daniel Rakos @aqnuep" type="device" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_VIDEO_DECODE_AV1_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_video_decode_av1&quot;" name="VK_KHR_VIDEO_DECODE_AV1_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_CAPABILITIES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PICTURE_INFO_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PROFILE_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_SESSION_PARAMETERS_CREATE_INFO_KHR"/>
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_DPB_SLOT_INFO_KHR"/>
+ <enum bitpos="2" extends="VkVideoCodecOperationFlagBitsKHR" name="VK_VIDEO_CODEC_OPERATION_DECODE_AV1_BIT_KHR"/>
+ <enum name="VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR"/>
+ <type name="VkVideoDecodeAV1ProfileInfoKHR"/>
+ <type name="VkVideoDecodeAV1CapabilitiesKHR"/>
+ <type name="VkVideoDecodeAV1SessionParametersCreateInfoKHR"/>
+ <type name="VkVideoDecodeAV1PictureInfoKHR"/>
+ <type name="VkVideoDecodeAV1DpbSlotInfoKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_514" number="514" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_514_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_514&quot;" name="VK_KHR_EXTENSION_514_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_515" number="515" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_515_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_515&quot;" name="VK_KHR_EXTENSION_515_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_video_maintenance1" number="516" author="KHR" contact="Daniel Rakos @aqnuep" type="device" depends="VK_KHR_video_queue" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_VIDEO_MAINTENANCE_1_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_video_maintenance1&quot;" name="VK_KHR_VIDEO_MAINTENANCE_1_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_1_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_VIDEO_INLINE_QUERY_INFO_KHR"/>
+ <enum bitpos="20" extends="VkImageCreateFlagBits" name="VK_IMAGE_CREATE_VIDEO_PROFILE_INDEPENDENT_BIT_KHR"/>
+ <enum bitpos="6" extends="VkBufferCreateFlagBits" name="VK_BUFFER_CREATE_VIDEO_PROFILE_INDEPENDENT_BIT_KHR"/>
+ <enum bitpos="2" extends="VkVideoSessionCreateFlagBitsKHR" name="VK_VIDEO_SESSION_CREATE_INLINE_QUERIES_BIT_KHR"/>
+ <type name="VkPhysicalDeviceVideoMaintenance1FeaturesKHR"/>
+ <type name="VkVideoInlineQueryInfoKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_per_stage_descriptor_set" number="517" depends="VK_KHR_maintenance6" type="device" author="NV" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_PER_STAGE_DESCRIPTOR_SET_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_per_stage_descriptor_set&quot;" name="VK_NV_PER_STAGE_DESCRIPTOR_SET_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PER_STAGE_DESCRIPTOR_SET_FEATURES_NV"/>
+ <enum bitpos="6" extends="VkDescriptorSetLayoutCreateFlagBits" name="VK_DESCRIPTOR_SET_LAYOUT_CREATE_PER_STAGE_BIT_NV"/>
+ <type name="VkPhysicalDevicePerStageDescriptorSetFeaturesNV"/>
+ </require>
+ </extension>
+ <extension name="VK_MESA_extension_518" number="518" author="MESA" contact="Dave Airlie @airlied" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_MESA_EXTENSION_518_SPEC_VERSION"/>
+ <enum value="&quot;VK_MESA_extension_518&quot;" name="VK_MESA_EXTENSION_518_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_image_processing2" number="519" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan" depends="VK_QCOM_image_processing">
+ <require>
+ <enum value="1" name="VK_QCOM_IMAGE_PROCESSING_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_image_processing2&quot;" name="VK_QCOM_IMAGE_PROCESSING_2_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_FEATURES_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_PROPERTIES_QCOM"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SAMPLER_BLOCK_MATCH_WINDOW_CREATE_INFO_QCOM"/>
+ <type name="VkPhysicalDeviceImageProcessing2FeaturesQCOM"/>
+ <type name="VkPhysicalDeviceImageProcessing2PropertiesQCOM"/>
+ <type name="VkSamplerBlockMatchWindowCreateInfoQCOM"/>
+ <type name="VkBlockMatchWindowCompareModeQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_filter_cubic_weights" number="520" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan" depends="VK_EXT_filter_cubic">
+ <require>
+ <enum value="1" name="VK_QCOM_FILTER_CUBIC_WEIGHTS_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_filter_cubic_weights&quot;" name="VK_QCOM_FILTER_CUBIC_WEIGHTS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SAMPLER_CUBIC_WEIGHTS_CREATE_INFO_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_WEIGHTS_FEATURES_QCOM"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BLIT_IMAGE_CUBIC_WEIGHTS_INFO_QCOM"/>
+ <type name="VkPhysicalDeviceCubicWeightsFeaturesQCOM"/>
+ <type name="VkSamplerCubicWeightsCreateInfoQCOM"/>
+ <type name="VkBlitImageCubicWeightsInfoQCOM"/>
+ <type name="VkCubicFilterWeightsQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_ycbcr_degamma" number="521" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_YCBCR_DEGAMMA_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_ycbcr_degamma&quot;" name="VK_QCOM_YCBCR_DEGAMMA_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_DEGAMMA_FEATURES_QCOM"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_YCBCR_DEGAMMA_CREATE_INFO_QCOM"/>
+ <type name="VkPhysicalDeviceYcbcrDegammaFeaturesQCOM"/>
+ <type name="VkSamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_filter_cubic_clamp" number="522" type="device" author="QCOM" depends="(VK_EXT_filter_cubic)+(VK_VERSION_1_2,VK_EXT_sampler_filter_minmax)" contact="Matthew Netsch @mnetsch" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_QCOM_FILTER_CUBIC_CLAMP_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_filter_cubic_clamp&quot;" name="VK_QCOM_FILTER_CUBIC_CLAMP_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM"/>
+ <enum offset="0" extends="VkSamplerReductionMode" name="VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_RANGECLAMP_QCOM"/>
+ <type name="VkPhysicalDeviceCubicClampFeaturesQCOM"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_523" number="523" author="EXT" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_523_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_523&quot;" name="VK_EXT_EXTENSION_523_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_524" number="524" author="EXT" contact="Tony Zlatinski @tzlatinski" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_524_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_524&quot;" name="VK_EXT_EXTENSION_524_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_attachment_feedback_loop_dynamic_state" number="525" type="device" author="EXT" depends="VK_KHR_get_physical_device_properties2+VK_EXT_attachment_feedback_loop_layout" contact="Mike Blumenkrantz @zmike" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_EXT_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_attachment_feedback_loop_dynamic_state&quot;" name="VK_EXT_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT"/>
+ <enum offset="0" extends="VkDynamicState" name="VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT"/>
+ <type name="VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT"/>
+ <command name="vkCmdSetAttachmentFeedbackLoopEnableEXT"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_vertex_attribute_divisor" number="526" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="KHR" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_vertex_attribute_divisor&quot;" name="VK_KHR_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR"/>
+ <enum offset="1" extends="VkStructureType" extnumber="191" name="VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" extnumber="191" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR"/>
+ <type name="VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR"/>
+ <type name="VkVertexInputBindingDivisorDescriptionKHR"/>
+ <type name="VkPipelineVertexInputDivisorStateCreateInfoKHR"/>
+ <type name="VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_load_store_op_none" number="527" author="KHR" type="device" contact="Shahbaz Youssefi @syoussefi" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_LOAD_STORE_OP_NONE_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_load_store_op_none&quot;" name="VK_KHR_LOAD_STORE_OP_NONE_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkAttachmentLoadOp" extnumber="401" name="VK_ATTACHMENT_LOAD_OP_NONE_KHR"/>
+ <enum extends="VkAttachmentStoreOp" name="VK_ATTACHMENT_STORE_OP_NONE_KHR" alias="VK_ATTACHMENT_STORE_OP_NONE"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_528" number="528" author="EXT" contact="Shahbaz Youssefi @syoussefi" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_528_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_528&quot;" name="VK_EXT_EXTENSION_528_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_shader_float_controls2" number="529" type="device" depends="VK_VERSION_1_1+VK_KHR_shader_float_controls" author="KHR" contact="Graeme Leese @gnl21" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_SHADER_FLOAT_CONTROLS_2_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_shader_float_controls2&quot;" name="VK_KHR_SHADER_FLOAT_CONTROLS_2_EXTENSION_NAME"/>
+ <type name="VkPhysicalDeviceShaderFloatControls2FeaturesKHR"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR"/>
+ </require>
+ </extension>
+ <extension name="VK_QNX_external_memory_screen_buffer" number="530" type="device" author="QNX" depends="((VK_KHR_sampler_ycbcr_conversion+VK_KHR_external_memory+VK_KHR_dedicated_allocation),VK_VERSION_1_1)+VK_EXT_queue_family_foreign" platform="screen" contact="Mike Gorchak @mgorchak-blackberry, Aaron Ruby @aruby-blackberry" supported="vulkan,vulkansc">
+ <require>
+ <enum value="1" name="VK_QNX_EXTERNAL_MEMORY_SCREEN_BUFFER_SPEC_VERSION"/>
+ <enum value="&quot;VK_QNX_external_memory_screen_buffer&quot;" name="VK_QNX_EXTERNAL_MEMORY_SCREEN_BUFFER_EXTENSION_NAME"/>
+ <enum bitpos="14" extends="VkExternalMemoryHandleTypeFlagBits" name="VK_EXTERNAL_MEMORY_HANDLE_TYPE_SCREEN_BUFFER_BIT_QNX"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SCREEN_BUFFER_PROPERTIES_QNX"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SCREEN_BUFFER_FORMAT_PROPERTIES_QNX"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_IMPORT_SCREEN_BUFFER_INFO_QNX"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_QNX"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX"/>
+ <type name="VkScreenBufferPropertiesQNX"/>
+ <type name="VkScreenBufferFormatPropertiesQNX"/>
+ <type name="VkImportScreenBufferInfoQNX"/>
+ <type name="VkExternalFormatQNX"/>
+ <type name="VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX"/>
+ <command name="vkGetScreenBufferPropertiesQNX"/>
+ </require>
+ </extension>
+ <extension name="VK_MSFT_layered_driver" number="531" type="device" depends="VK_KHR_get_physical_device_properties2" author="MSFT" contact="Jesse Natalie @jenatali" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_MSFT_LAYERED_DRIVER_SPEC_VERSION"/>
+ <enum value="&quot;VK_MSFT_layered_driver&quot;" name="VK_MSFT_LAYERED_DRIVER_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT"/>
+ <type name="VkLayeredDriverUnderlyingApiMSFT"/>
+ <type name="VkPhysicalDeviceLayeredDriverPropertiesMSFT"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_532" number="532" author="KHR" contact="Tobias Hector @tobias" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_532_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_532&quot;" name="VK_KHR_EXTENSION_532_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_533" number="533" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_533_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_533&quot;" name="VK_EXT_EXTENSION_533_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_index_type_uint8" number="534" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Piers Daniell @pdaniell-nv" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_INDEX_TYPE_UINT8_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_index_type_uint8&quot;" name="VK_KHR_INDEX_TYPE_UINT8_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="266" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR"/>
+ <enum offset="0" extends="VkIndexType" extnumber="266" name="VK_INDEX_TYPE_UINT8_KHR"/>
+ <type name="VkPhysicalDeviceIndexTypeUint8FeaturesKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_line_rasterization" number="535" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" contact="Piers Daniell @pdaniell-nv" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_LINE_RASTERIZATION_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_line_rasterization&quot;" name="VK_KHR_LINE_RASTERIZATION_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="260" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" extnumber="260" name="VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR"/>
+ <enum offset="2" extends="VkStructureType" extnumber="260" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR"/>
+ <enum offset="0" extends="VkDynamicState" extnumber="260" name="VK_DYNAMIC_STATE_LINE_STIPPLE_KHR"/>
+ <type name="VkPhysicalDeviceLineRasterizationFeaturesKHR"/>
+ <type name="VkPhysicalDeviceLineRasterizationPropertiesKHR"/>
+ <type name="VkPipelineRasterizationLineStateCreateInfoKHR"/>
+ <type name="VkLineRasterizationModeKHR"/>
+ <command name="vkCmdSetLineStippleKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_extension_536" number="536" type="device" author="QCOM" contact="Matthew Netsch @mnetsch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_QCOM_EXTENSION_536_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_536&quot;" name="VK_QCOM_EXTENSION_536_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_537" number="537" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_537_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_537&quot;" name="VK_EXT_EXTENSION_537_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_538" number="538" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_538_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_538&quot;" name="VK_EXT_EXTENSION_538_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_539" number="539" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_539_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_539&quot;" name="VK_EXT_EXTENSION_539_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_540" number="540" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_540_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_540&quot;" name="VK_EXT_EXTENSION_540_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_541" number="541" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_541_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_541&quot;" name="VK_EXT_EXTENSION_541_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_542" number="542" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_542_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_542&quot;" name="VK_EXT_EXTENSION_542_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_543" number="543" author="EXT" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_543_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_543&quot;" name="VK_EXT_EXTENSION_543_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_calibrated_timestamps" number="544" type="device" depends="VK_KHR_get_physical_device_properties2,VK_VERSION_1_1" author="KHR" contact="Daniel Rakos @aqnuep" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_CALIBRATED_TIMESTAMPS_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_calibrated_timestamps&quot;" name="VK_KHR_CALIBRATED_TIMESTAMPS_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" extnumber="185" name="VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR"/>
+ <type name="VkTimeDomainKHR"/>
+ <type name="VkCalibratedTimestampInfoKHR"/>
+ <command name="vkGetPhysicalDeviceCalibrateableTimeDomainsKHR"/>
+ <command name="vkGetCalibratedTimestampsKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_shader_expect_assume" number="545" type="device" author="KHR" depends="VK_KHR_get_physical_device_properties2" contact="Kevin Petit @kpet" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_SHADER_EXPECT_ASSUME_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_shader_expect_assume&quot;" name="VK_KHR_SHADER_EXPECT_ASSUME_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR"/>
+ <type name="VkPhysicalDeviceShaderExpectAssumeFeaturesKHR"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_maintenance6" number="546" type="device" depends="VK_VERSION_1_1" author="KHR" contact="Jon Leech @oddhack" supported="vulkan" ratified="vulkan">
+ <require>
+ <enum value="1" name="VK_KHR_MAINTENANCE_6_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_maintenance6&quot;" name="VK_KHR_MAINTENANCE_6_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR"/>
+ <enum offset="1" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR"/>
+ <enum offset="2" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR"/>
+ <enum offset="3" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR"/>
+ <enum offset="4" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR"/>
+ <type name="VkPhysicalDeviceMaintenance6FeaturesKHR"/>
+ <type name="VkPhysicalDeviceMaintenance6PropertiesKHR"/>
+ <type name="VkBindMemoryStatusKHR"/>
+ <type name="VkBindDescriptorSetsInfoKHR"/>
+ <type name="VkPushConstantsInfoKHR"/>
+ <command name="vkCmdBindDescriptorSets2KHR"/>
+ <command name="vkCmdPushConstants2KHR"/>
+ </require>
+ <require depends="VK_KHR_push_descriptor">
+ <enum offset="5" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR"/>
+ <enum offset="6" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR"/>
+ <type name="VkPushDescriptorSetInfoKHR"/>
+ <type name="VkPushDescriptorSetWithTemplateInfoKHR"/>
+ <command name="vkCmdPushDescriptorSet2KHR"/>
+ <command name="vkCmdPushDescriptorSetWithTemplate2KHR"/>
+ </require>
+ <require depends="VK_EXT_descriptor_buffer">
+ <enum offset="7" extends="VkStructureType" name="VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT"/>
+ <enum offset="8" extends="VkStructureType" name="VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT"/>
+ <type name="VkSetDescriptorBufferOffsetsInfoEXT"/>
+ <type name="VkBindDescriptorBufferEmbeddedSamplersInfoEXT"/>
+ <command name="vkCmdSetDescriptorBufferOffsets2EXT"/>
+ <command name="vkCmdBindDescriptorBufferEmbeddedSamplers2EXT"/>
+ </require>
+ <require comment="Individual APIs with dependencies on specific versions/extensions should get their own require blocks with depends= attribute set appropriately">
+ </require>
+ </extension>
+ <extension name="VK_NV_descriptor_pool_overallocation" number="547" type="device" author="NV" depends="VK_VERSION_1_1" contact="Piers Daniell @pdaniell-nv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_DESCRIPTOR_POOL_OVERALLOCATION_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_descriptor_pool_overallocation&quot;" name="VK_NV_DESCRIPTOR_POOL_OVERALLOCATION_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV"/>
+ <enum bitpos="3" extends="VkDescriptorPoolCreateFlagBits" name="VK_DESCRIPTOR_POOL_CREATE_ALLOW_OVERALLOCATION_SETS_BIT_NV"/>
+ <enum bitpos="4" extends="VkDescriptorPoolCreateFlagBits" name="VK_DESCRIPTOR_POOL_CREATE_ALLOW_OVERALLOCATION_POOLS_BIT_NV"/>
+ <type name="VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV"/>
+ </require>
+ </extension>
+ <extension name="VK_QCOM_extension_548" number="548" type="device" author="QCOM" contact="Patrick Boyle @pboyleQCOM" supported="disabled">
+ <require>
+ <enum value="0" name="VK_QCOM_EXTENSION_548_SPEC_VERSION"/>
+ <enum value="&quot;VK_QCOM_extension_548&quot;" name="VK_QCOM_EXTENSION_548_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_549" number="549" author="NV" contact="Piers Daniell @pdaniell-nv" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_549_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_549&quot;" name="VK_NV_EXTENSION_549_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_550" number="550" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_550_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_550&quot;" name="VK_NV_EXTENSION_550_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_551" number="551" author="NV" contact="Daniel Koch @dgkoch" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_551_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_551&quot;" name="VK_NV_EXTENSION_551_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_552" number="552" author="NV" contact="Russell Chou @russellcnv" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_552_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_552&quot;" name="VK_NV_EXTENSION_552_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_553" number="553" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_553_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_553&quot;" name="VK_KHR_EXTENSION_553_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_554" number="554" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" type="device" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_554_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_554&quot;" name="VK_KHR_EXTENSION_554_EXTENSION_NAME"/>
+ <enum bitpos="2" extends="VkVideoEncodeCapabilityFlagBitsKHR" name="VK_VIDEO_ENCODE_CAPABILITY_RESERVED_2_BIT_KHR"/>
+ <enum bitpos="3" extends="VkVideoEncodeCapabilityFlagBitsKHR" name="VK_VIDEO_ENCODE_CAPABILITY_RESERVED_3_BIT_KHR"/>
+ <enum bitpos="3" extends="VkVideoSessionCreateFlagBitsKHR" name="VK_VIDEO_SESSION_CREATE_RESERVED_3_BIT_KHR"/>
+ <enum bitpos="4" extends="VkVideoSessionCreateFlagBitsKHR" name="VK_VIDEO_SESSION_CREATE_RESERVED_4_BIT_KHR"/>
+ <enum bitpos="0" extends="VkVideoEncodeFlagBitsKHR" name="VK_VIDEO_ENCODE_RESERVED_0_BIT_KHR"/>
+ <enum bitpos="1" extends="VkVideoEncodeFlagBitsKHR" name="VK_VIDEO_ENCODE_RESERVED_1_BIT_KHR"/>
+ <enum bitpos="25" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_25_BIT_KHR"/>
+ <enum bitpos="26" extends="VkImageUsageFlagBits" name="VK_IMAGE_USAGE_RESERVED_26_BIT_KHR"/>
+ <enum bitpos="49" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_49_BIT_KHR"/>
+ <enum bitpos="50" extends="VkFormatFeatureFlagBits2" name="VK_FORMAT_FEATURE_2_RESERVED_50_BIT_KHR"/>
+ </require>
+ </extension>
+ <extension name="VK_IMG_extension_555" number="555" author="IMG" contact="Jarred Davies" supported="disabled">
+ <require>
+ <enum value="0" name="VK_IMG_EXTENSION_555_SPEC_VERSION"/>
+ <enum value="&quot;VK_IMG_extension_555&quot;" name="VK_IMG_EXTENSION_555_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_556" number="556" type="device" author="NV" contact="Rodrigo Locatti @rlocatti" supported="disabled">
+ <require>
+ <enum value="1" name="VK_NV_EXTENSION_556_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_556&quot;" name="VK_NV_EXTENSION_556_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_557" number="557" type="device" author="NV" contact="Chris Lentini @clentini" supported="disabled">
+ <require>
+ <enum value="1" name="VK_NV_EXTENSION_557_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_557&quot;" name="VK_NV_EXTENSION_557_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_558" number="558" type="device" author="KHR" contact="Ahmed Abdelkhalek @aabdelkh" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_558_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_558&quot;" name="VK_KHR_EXTENSION_558_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_559" number="559" type="device" author="KHR" contact="Nathan Gauër @Keenuts1" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_559_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_559&quot;" name="VK_KHR_EXTENSION_559_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_extension_560" number="560" author="NV" contact="Lujin Wang @lwnv" supported="disabled">
+ <require>
+ <enum value="0" name="VK_NV_EXTENSION_560_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_extension_560&quot;" name="VK_NV_EXTENSION_560_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_561" number="561" author="EXT" contact="Piers Daniell @pdaniell-nv" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_561_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_561&quot;" name="VK_EXT_EXTENSION_561_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_562" number="562" author="KHR" contact="Piers Daniell @pdaniell-nv" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_562_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_562&quot;" name="VK_KHR_EXTENSION_562_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_KHR_extension_563" number="563" author="KHR" contact="Jon Leech @oddhack" supported="disabled">
+ <require>
+ <enum value="0" name="VK_KHR_EXTENSION_563_SPEC_VERSION"/>
+ <enum value="&quot;VK_KHR_extension_563&quot;" name="VK_KHR_EXTENSION_563_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_NV_shader_atomic_float16_vector" number="564" type="device" author="NV" contact="Jeff Bolz @jeffbolznv" supported="vulkan">
+ <require>
+ <enum value="1" name="VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_SPEC_VERSION"/>
+ <enum value="&quot;VK_NV_shader_atomic_float16_vector&quot;" name="VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_EXTENSION_NAME"/>
+ <enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV"/>
+ <type name="VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV"/>
+ </require>
+ </extension>
+ <extension name="VK_EXT_extension_565" number="565" author="EXT" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_EXT_EXTENSION_565_SPEC_VERSION"/>
+ <enum value="&quot;VK_EXT_extension_565&quot;" name="VK_EXT_EXTENSION_565_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_ARM_extension_566" number="566" author="ARM" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_ARM_EXTENSION_566_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_extension_566&quot;" name="VK_ARM_EXTENSION_566_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_ARM_extension_567" number="567" author="ARM" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_ARM_EXTENSION_567_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_extension_567&quot;" name="VK_ARM_EXTENSION_567_EXTENSION_NAME"/>
+ </require>
+ </extension>
+ <extension name="VK_ARM_extension_568" number="568" author="ARM" contact="Kevin Petit @kpet" supported="disabled">
+ <require>
+ <enum value="0" name="VK_ARM_EXTENSION_568_SPEC_VERSION"/>
+ <enum value="&quot;VK_ARM_extension_568&quot;" name="VK_ARM_EXTENSION_568_EXTENSION_NAME"/>
</require>
</extension>
-
</extensions>
+ <formats>
+ <format name="VK_FORMAT_R4G4_UNORM_PACK8" class="8-bit" blockSize="1" texelsPerBlock="1" packed="8">
+ <component name="R" bits="4" numericFormat="UNORM"/>
+ <component name="G" bits="4" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R4G4B4A4_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="R" bits="4" numericFormat="UNORM"/>
+ <component name="G" bits="4" numericFormat="UNORM"/>
+ <component name="B" bits="4" numericFormat="UNORM"/>
+ <component name="A" bits="4" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B4G4R4A4_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="B" bits="4" numericFormat="UNORM"/>
+ <component name="G" bits="4" numericFormat="UNORM"/>
+ <component name="R" bits="4" numericFormat="UNORM"/>
+ <component name="A" bits="4" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R5G6B5_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="R" bits="5" numericFormat="UNORM"/>
+ <component name="G" bits="6" numericFormat="UNORM"/>
+ <component name="B" bits="5" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B5G6R5_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="B" bits="5" numericFormat="UNORM"/>
+ <component name="G" bits="6" numericFormat="UNORM"/>
+ <component name="R" bits="5" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R5G5B5A1_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="R" bits="5" numericFormat="UNORM"/>
+ <component name="G" bits="5" numericFormat="UNORM"/>
+ <component name="B" bits="5" numericFormat="UNORM"/>
+ <component name="A" bits="1" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B5G5R5A1_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="B" bits="5" numericFormat="UNORM"/>
+ <component name="G" bits="5" numericFormat="UNORM"/>
+ <component name="R" bits="5" numericFormat="UNORM"/>
+ <component name="A" bits="1" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A1R5G5B5_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="A" bits="1" numericFormat="UNORM"/>
+ <component name="R" bits="5" numericFormat="UNORM"/>
+ <component name="G" bits="5" numericFormat="UNORM"/>
+ <component name="B" bits="5" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="A" bits="1" numericFormat="UNORM"/>
+ <component name="B" bits="5" numericFormat="UNORM"/>
+ <component name="G" bits="5" numericFormat="UNORM"/>
+ <component name="R" bits="5" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A8_UNORM_KHR" class="8-bit alpha" blockSize="1" texelsPerBlock="1">
+ <component name="A" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R8_UNORM" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ <spirvimageformat name="R8"/>
+ </format>
+ <format name="VK_FORMAT_R8_SNORM" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ <spirvimageformat name="R8Snorm"/>
+ </format>
+ <format name="VK_FORMAT_R8_USCALED" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8_SSCALED" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8_UINT" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UINT"/>
+ <spirvimageformat name="R8ui"/>
+ </format>
+ <format name="VK_FORMAT_R8_SINT" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SINT"/>
+ <spirvimageformat name="R8i"/>
+ </format>
+ <format name="VK_FORMAT_R8_SRGB" class="8-bit" blockSize="1" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_UNORM" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <spirvimageformat name="Rg8"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_SNORM" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ <component name="G" bits="8" numericFormat="SNORM"/>
+ <spirvimageformat name="Rg8Snorm"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_USCALED" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ <component name="G" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_SSCALED" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ <component name="G" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_UINT" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UINT"/>
+ <component name="G" bits="8" numericFormat="UINT"/>
+ <spirvimageformat name="Rg8ui"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_SINT" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SINT"/>
+ <component name="G" bits="8" numericFormat="SINT"/>
+ <spirvimageformat name="Rg8i"/>
+ </format>
+ <format name="VK_FORMAT_R8G8_SRGB" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ <component name="G" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_UNORM" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_SNORM" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ <component name="G" bits="8" numericFormat="SNORM"/>
+ <component name="B" bits="8" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_USCALED" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ <component name="G" bits="8" numericFormat="USCALED"/>
+ <component name="B" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_SSCALED" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ <component name="G" bits="8" numericFormat="SSCALED"/>
+ <component name="B" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_UINT" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UINT"/>
+ <component name="G" bits="8" numericFormat="UINT"/>
+ <component name="B" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_SINT" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SINT"/>
+ <component name="G" bits="8" numericFormat="SINT"/>
+ <component name="B" bits="8" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8_SRGB" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ <component name="G" bits="8" numericFormat="SRGB"/>
+ <component name="B" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_UNORM" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_SNORM" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SNORM"/>
+ <component name="G" bits="8" numericFormat="SNORM"/>
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_USCALED" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="USCALED"/>
+ <component name="G" bits="8" numericFormat="USCALED"/>
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_SSCALED" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SSCALED"/>
+ <component name="G" bits="8" numericFormat="SSCALED"/>
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_UINT" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="UINT"/>
+ <component name="G" bits="8" numericFormat="UINT"/>
+ <component name="R" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_SINT" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SINT"/>
+ <component name="G" bits="8" numericFormat="SINT"/>
+ <component name="R" bits="8" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8_SRGB" class="24-bit" blockSize="3" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SRGB"/>
+ <component name="G" bits="8" numericFormat="SRGB"/>
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_UNORM" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ <component name="A" bits="8" numericFormat="UNORM"/>
+ <spirvimageformat name="Rgba8"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_SNORM" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ <component name="G" bits="8" numericFormat="SNORM"/>
+ <component name="B" bits="8" numericFormat="SNORM"/>
+ <component name="A" bits="8" numericFormat="SNORM"/>
+ <spirvimageformat name="Rgba8Snorm"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_USCALED" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ <component name="G" bits="8" numericFormat="USCALED"/>
+ <component name="B" bits="8" numericFormat="USCALED"/>
+ <component name="A" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_SSCALED" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ <component name="G" bits="8" numericFormat="SSCALED"/>
+ <component name="B" bits="8" numericFormat="SSCALED"/>
+ <component name="A" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_UINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="UINT"/>
+ <component name="G" bits="8" numericFormat="UINT"/>
+ <component name="B" bits="8" numericFormat="UINT"/>
+ <component name="A" bits="8" numericFormat="UINT"/>
+ <spirvimageformat name="Rgba8ui"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_SINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SINT"/>
+ <component name="G" bits="8" numericFormat="SINT"/>
+ <component name="B" bits="8" numericFormat="SINT"/>
+ <component name="A" bits="8" numericFormat="SINT"/>
+ <spirvimageformat name="Rgba8i"/>
+ </format>
+ <format name="VK_FORMAT_R8G8B8A8_SRGB" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ <component name="G" bits="8" numericFormat="SRGB"/>
+ <component name="B" bits="8" numericFormat="SRGB"/>
+ <component name="A" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_UNORM" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ <component name="A" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_SNORM" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SNORM"/>
+ <component name="G" bits="8" numericFormat="SNORM"/>
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ <component name="A" bits="8" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_USCALED" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="USCALED"/>
+ <component name="G" bits="8" numericFormat="USCALED"/>
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ <component name="A" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_SSCALED" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SSCALED"/>
+ <component name="G" bits="8" numericFormat="SSCALED"/>
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ <component name="A" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_UINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="UINT"/>
+ <component name="G" bits="8" numericFormat="UINT"/>
+ <component name="R" bits="8" numericFormat="UINT"/>
+ <component name="A" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_SINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SINT"/>
+ <component name="G" bits="8" numericFormat="SINT"/>
+ <component name="R" bits="8" numericFormat="SINT"/>
+ <component name="A" bits="8" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8A8_SRGB" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="B" bits="8" numericFormat="SRGB"/>
+ <component name="G" bits="8" numericFormat="SRGB"/>
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ <component name="A" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_UNORM_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="UNORM"/>
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_SNORM_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="SNORM"/>
+ <component name="B" bits="8" numericFormat="SNORM"/>
+ <component name="G" bits="8" numericFormat="SNORM"/>
+ <component name="R" bits="8" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_USCALED_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="USCALED"/>
+ <component name="B" bits="8" numericFormat="USCALED"/>
+ <component name="G" bits="8" numericFormat="USCALED"/>
+ <component name="R" bits="8" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_SSCALED_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="SSCALED"/>
+ <component name="B" bits="8" numericFormat="SSCALED"/>
+ <component name="G" bits="8" numericFormat="SSCALED"/>
+ <component name="R" bits="8" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_UINT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="UINT"/>
+ <component name="B" bits="8" numericFormat="UINT"/>
+ <component name="G" bits="8" numericFormat="UINT"/>
+ <component name="R" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_SINT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="SINT"/>
+ <component name="B" bits="8" numericFormat="SINT"/>
+ <component name="G" bits="8" numericFormat="SINT"/>
+ <component name="R" bits="8" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_A8B8G8R8_SRGB_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="8" numericFormat="SRGB"/>
+ <component name="B" bits="8" numericFormat="SRGB"/>
+ <component name="G" bits="8" numericFormat="SRGB"/>
+ <component name="R" bits="8" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_A2R10G10B10_UNORM_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="UNORM"/>
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ <component name="B" bits="10" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A2R10G10B10_SNORM_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="SNORM"/>
+ <component name="R" bits="10" numericFormat="SNORM"/>
+ <component name="G" bits="10" numericFormat="SNORM"/>
+ <component name="B" bits="10" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_A2R10G10B10_USCALED_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="USCALED"/>
+ <component name="R" bits="10" numericFormat="USCALED"/>
+ <component name="G" bits="10" numericFormat="USCALED"/>
+ <component name="B" bits="10" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_A2R10G10B10_SSCALED_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="SSCALED"/>
+ <component name="R" bits="10" numericFormat="SSCALED"/>
+ <component name="G" bits="10" numericFormat="SSCALED"/>
+ <component name="B" bits="10" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_A2R10G10B10_UINT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="UINT"/>
+ <component name="R" bits="10" numericFormat="UINT"/>
+ <component name="G" bits="10" numericFormat="UINT"/>
+ <component name="B" bits="10" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_A2R10G10B10_SINT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="SINT"/>
+ <component name="R" bits="10" numericFormat="SINT"/>
+ <component name="G" bits="10" numericFormat="SINT"/>
+ <component name="B" bits="10" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_A2B10G10R10_UNORM_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="UNORM"/>
+ <component name="B" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ <spirvimageformat name="Rgb10A2"/>
+ </format>
+ <format name="VK_FORMAT_A2B10G10R10_SNORM_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="SNORM"/>
+ <component name="B" bits="10" numericFormat="SNORM"/>
+ <component name="G" bits="10" numericFormat="SNORM"/>
+ <component name="R" bits="10" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_A2B10G10R10_USCALED_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="USCALED"/>
+ <component name="B" bits="10" numericFormat="USCALED"/>
+ <component name="G" bits="10" numericFormat="USCALED"/>
+ <component name="R" bits="10" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_A2B10G10R10_SSCALED_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="SSCALED"/>
+ <component name="B" bits="10" numericFormat="SSCALED"/>
+ <component name="G" bits="10" numericFormat="SSCALED"/>
+ <component name="R" bits="10" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_A2B10G10R10_UINT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="UINT"/>
+ <component name="B" bits="10" numericFormat="UINT"/>
+ <component name="G" bits="10" numericFormat="UINT"/>
+ <component name="R" bits="10" numericFormat="UINT"/>
+ <spirvimageformat name="Rgb10a2ui"/>
+ </format>
+ <format name="VK_FORMAT_A2B10G10R10_SINT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="A" bits="2" numericFormat="SINT"/>
+ <component name="B" bits="10" numericFormat="SINT"/>
+ <component name="G" bits="10" numericFormat="SINT"/>
+ <component name="R" bits="10" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R16_UNORM" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UNORM"/>
+ <spirvimageformat name="R16"/>
+ </format>
+ <format name="VK_FORMAT_R16_SNORM" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SNORM"/>
+ <spirvimageformat name="R16Snorm"/>
+ </format>
+ <format name="VK_FORMAT_R16_USCALED" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16_SSCALED" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16_UINT" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UINT"/>
+ <spirvimageformat name="R16ui"/>
+ </format>
+ <format name="VK_FORMAT_R16_SINT" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SINT"/>
+ <spirvimageformat name="R16i"/>
+ </format>
+ <format name="VK_FORMAT_R16_SFLOAT" class="16-bit" blockSize="2" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SFLOAT"/>
+ <spirvimageformat name="R16f"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_UNORM" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UNORM"/>
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ <spirvimageformat name="Rg16"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_SNORM" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SNORM"/>
+ <component name="G" bits="16" numericFormat="SNORM"/>
+ <spirvimageformat name="Rg16Snorm"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_USCALED" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="USCALED"/>
+ <component name="G" bits="16" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_SSCALED" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SSCALED"/>
+ <component name="G" bits="16" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_UINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UINT"/>
+ <component name="G" bits="16" numericFormat="UINT"/>
+ <spirvimageformat name="Rg16ui"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_SINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SINT"/>
+ <component name="G" bits="16" numericFormat="SINT"/>
+ <spirvimageformat name="Rg16i"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_SFLOAT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SFLOAT"/>
+ <component name="G" bits="16" numericFormat="SFLOAT"/>
+ <spirvimageformat name="Rg16f"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_UNORM" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UNORM"/>
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ <component name="B" bits="16" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_SNORM" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SNORM"/>
+ <component name="G" bits="16" numericFormat="SNORM"/>
+ <component name="B" bits="16" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_USCALED" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="USCALED"/>
+ <component name="G" bits="16" numericFormat="USCALED"/>
+ <component name="B" bits="16" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_SSCALED" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SSCALED"/>
+ <component name="G" bits="16" numericFormat="SSCALED"/>
+ <component name="B" bits="16" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_UINT" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UINT"/>
+ <component name="G" bits="16" numericFormat="UINT"/>
+ <component name="B" bits="16" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_SINT" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SINT"/>
+ <component name="G" bits="16" numericFormat="SINT"/>
+ <component name="B" bits="16" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16_SFLOAT" class="48-bit" blockSize="6" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SFLOAT"/>
+ <component name="G" bits="16" numericFormat="SFLOAT"/>
+ <component name="B" bits="16" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_UNORM" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UNORM"/>
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ <component name="B" bits="16" numericFormat="UNORM"/>
+ <component name="A" bits="16" numericFormat="UNORM"/>
+ <spirvimageformat name="Rgba16"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_SNORM" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SNORM"/>
+ <component name="G" bits="16" numericFormat="SNORM"/>
+ <component name="B" bits="16" numericFormat="SNORM"/>
+ <component name="A" bits="16" numericFormat="SNORM"/>
+ <spirvimageformat name="Rgba16Snorm"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_USCALED" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="USCALED"/>
+ <component name="G" bits="16" numericFormat="USCALED"/>
+ <component name="B" bits="16" numericFormat="USCALED"/>
+ <component name="A" bits="16" numericFormat="USCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_SSCALED" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SSCALED"/>
+ <component name="G" bits="16" numericFormat="SSCALED"/>
+ <component name="B" bits="16" numericFormat="SSCALED"/>
+ <component name="A" bits="16" numericFormat="SSCALED"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_UINT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="UINT"/>
+ <component name="G" bits="16" numericFormat="UINT"/>
+ <component name="B" bits="16" numericFormat="UINT"/>
+ <component name="A" bits="16" numericFormat="UINT"/>
+ <spirvimageformat name="Rgba16ui"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_SINT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SINT"/>
+ <component name="G" bits="16" numericFormat="SINT"/>
+ <component name="B" bits="16" numericFormat="SINT"/>
+ <component name="A" bits="16" numericFormat="SINT"/>
+ <spirvimageformat name="Rgba16i"/>
+ </format>
+ <format name="VK_FORMAT_R16G16B16A16_SFLOAT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SFLOAT"/>
+ <component name="G" bits="16" numericFormat="SFLOAT"/>
+ <component name="B" bits="16" numericFormat="SFLOAT"/>
+ <component name="A" bits="16" numericFormat="SFLOAT"/>
+ <spirvimageformat name="Rgba16f"/>
+ </format>
+ <format name="VK_FORMAT_R32_UINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="UINT"/>
+ <spirvimageformat name="R32ui"/>
+ </format>
+ <format name="VK_FORMAT_R32_SINT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SINT"/>
+ <spirvimageformat name="R32i"/>
+ </format>
+ <format name="VK_FORMAT_R32_SFLOAT" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SFLOAT"/>
+ <spirvimageformat name="R32f"/>
+ </format>
+ <format name="VK_FORMAT_R32G32_UINT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="UINT"/>
+ <component name="G" bits="32" numericFormat="UINT"/>
+ <spirvimageformat name="Rg32ui"/>
+ </format>
+ <format name="VK_FORMAT_R32G32_SINT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SINT"/>
+ <component name="G" bits="32" numericFormat="SINT"/>
+ <spirvimageformat name="Rg32i"/>
+ </format>
+ <format name="VK_FORMAT_R32G32_SFLOAT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SFLOAT"/>
+ <component name="G" bits="32" numericFormat="SFLOAT"/>
+ <spirvimageformat name="Rg32f"/>
+ </format>
+ <format name="VK_FORMAT_R32G32B32_UINT" class="96-bit" blockSize="12" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="UINT"/>
+ <component name="G" bits="32" numericFormat="UINT"/>
+ <component name="B" bits="32" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_R32G32B32_SINT" class="96-bit" blockSize="12" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SINT"/>
+ <component name="G" bits="32" numericFormat="SINT"/>
+ <component name="B" bits="32" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R32G32B32_SFLOAT" class="96-bit" blockSize="12" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SFLOAT"/>
+ <component name="G" bits="32" numericFormat="SFLOAT"/>
+ <component name="B" bits="32" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_R32G32B32A32_UINT" class="128-bit" blockSize="16" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="UINT"/>
+ <component name="G" bits="32" numericFormat="UINT"/>
+ <component name="B" bits="32" numericFormat="UINT"/>
+ <component name="A" bits="32" numericFormat="UINT"/>
+ <spirvimageformat name="Rgba32ui"/>
+ </format>
+ <format name="VK_FORMAT_R32G32B32A32_SINT" class="128-bit" blockSize="16" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SINT"/>
+ <component name="G" bits="32" numericFormat="SINT"/>
+ <component name="B" bits="32" numericFormat="SINT"/>
+ <component name="A" bits="32" numericFormat="SINT"/>
+ <spirvimageformat name="Rgba32i"/>
+ </format>
+ <format name="VK_FORMAT_R32G32B32A32_SFLOAT" class="128-bit" blockSize="16" texelsPerBlock="1">
+ <component name="R" bits="32" numericFormat="SFLOAT"/>
+ <component name="G" bits="32" numericFormat="SFLOAT"/>
+ <component name="B" bits="32" numericFormat="SFLOAT"/>
+ <component name="A" bits="32" numericFormat="SFLOAT"/>
+ <spirvimageformat name="Rgba32f"/>
+ </format>
+ <format name="VK_FORMAT_R64_UINT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="UINT"/>
+ <spirvimageformat name="R64ui"/>
+ </format>
+ <format name="VK_FORMAT_R64_SINT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SINT"/>
+ <spirvimageformat name="R64i"/>
+ </format>
+ <format name="VK_FORMAT_R64_SFLOAT" class="64-bit" blockSize="8" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64_UINT" class="128-bit" blockSize="16" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="UINT"/>
+ <component name="G" bits="64" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64_SINT" class="128-bit" blockSize="16" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SINT"/>
+ <component name="G" bits="64" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64_SFLOAT" class="128-bit" blockSize="16" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SFLOAT"/>
+ <component name="G" bits="64" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64B64_UINT" class="192-bit" blockSize="24" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="UINT"/>
+ <component name="G" bits="64" numericFormat="UINT"/>
+ <component name="B" bits="64" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64B64_SINT" class="192-bit" blockSize="24" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SINT"/>
+ <component name="G" bits="64" numericFormat="SINT"/>
+ <component name="B" bits="64" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64B64_SFLOAT" class="192-bit" blockSize="24" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SFLOAT"/>
+ <component name="G" bits="64" numericFormat="SFLOAT"/>
+ <component name="B" bits="64" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64B64A64_UINT" class="256-bit" blockSize="32" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="UINT"/>
+ <component name="G" bits="64" numericFormat="UINT"/>
+ <component name="B" bits="64" numericFormat="UINT"/>
+ <component name="A" bits="64" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64B64A64_SINT" class="256-bit" blockSize="32" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SINT"/>
+ <component name="G" bits="64" numericFormat="SINT"/>
+ <component name="B" bits="64" numericFormat="SINT"/>
+ <component name="A" bits="64" numericFormat="SINT"/>
+ </format>
+ <format name="VK_FORMAT_R64G64B64A64_SFLOAT" class="256-bit" blockSize="32" texelsPerBlock="1">
+ <component name="R" bits="64" numericFormat="SFLOAT"/>
+ <component name="G" bits="64" numericFormat="SFLOAT"/>
+ <component name="B" bits="64" numericFormat="SFLOAT"/>
+ <component name="A" bits="64" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_B10G11R11_UFLOAT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="B" bits="10" numericFormat="UFLOAT"/>
+ <component name="G" bits="11" numericFormat="UFLOAT"/>
+ <component name="R" bits="11" numericFormat="UFLOAT"/>
+ <spirvimageformat name="R11fG11fB10f"/>
+ </format>
+ <format name="VK_FORMAT_E5B9G9R9_UFLOAT_PACK32" class="32-bit" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="B" bits="9" numericFormat="UFLOAT"/>
+ <component name="G" bits="9" numericFormat="UFLOAT"/>
+ <component name="R" bits="9" numericFormat="UFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_D16_UNORM" class="D16" blockSize="2" texelsPerBlock="1">
+ <component name="D" bits="16" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_X8_D24_UNORM_PACK32" class="D24" blockSize="4" texelsPerBlock="1" packed="32">
+ <component name="D" bits="24" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_D32_SFLOAT" class="D32" blockSize="4" texelsPerBlock="1">
+ <component name="D" bits="32" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_S8_UINT" class="S8" blockSize="1" texelsPerBlock="1">
+ <component name="S" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_D16_UNORM_S8_UINT" class="D16S8" blockSize="3" texelsPerBlock="1">
+ <component name="D" bits="16" numericFormat="UNORM"/>
+ <component name="S" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_D24_UNORM_S8_UINT" class="D24S8" blockSize="4" texelsPerBlock="1">
+ <component name="D" bits="24" numericFormat="UNORM"/>
+ <component name="S" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_D32_SFLOAT_S8_UINT" class="D32S8" blockSize="5" texelsPerBlock="1">
+ <component name="D" bits="32" numericFormat="SFLOAT"/>
+ <component name="S" bits="8" numericFormat="UINT"/>
+ </format>
+ <format name="VK_FORMAT_BC1_RGB_UNORM_BLOCK" class="BC1_RGB" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC1_RGB_SRGB_BLOCK" class="BC1_RGB" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_BC1_RGBA_UNORM_BLOCK" class="BC1_RGBA" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC1_RGBA_SRGB_BLOCK" class="BC1_RGBA" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_BC2_UNORM_BLOCK" class="BC2" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC2_SRGB_BLOCK" class="BC2" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_BC3_UNORM_BLOCK" class="BC3" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC3_SRGB_BLOCK" class="BC3" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_BC4_UNORM_BLOCK" class="BC4" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC4_SNORM_BLOCK" class="BC4" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC5_UNORM_BLOCK" class="BC5" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC5_SNORM_BLOCK" class="BC5" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SNORM"/>
+ <component name="G" bits="compressed" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC6H_UFLOAT_BLOCK" class="BC6H" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="UFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="UFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_BC6H_SFLOAT_BLOCK" class="BC6H" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_BC7_UNORM_BLOCK" class="BC7" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_BC7_SRGB_BLOCK" class="BC7" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="BC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK" class="ETC2_RGB" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="ETC2">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK" class="ETC2_RGB" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="ETC2">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK" class="ETC2_RGBA" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="ETC2">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK" class="ETC2_RGBA" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="ETC2">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK" class="ETC2_EAC_RGBA" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="ETC2">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK" class="ETC2_EAC_RGBA" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="ETC2">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_EAC_R11_UNORM_BLOCK" class="EAC_R" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="EAC">
+ <component name="R" bits="11" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_EAC_R11_SNORM_BLOCK" class="EAC_R" blockSize="8" texelsPerBlock="16" blockExtent="4,4,1" compressed="EAC">
+ <component name="R" bits="11" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_EAC_R11G11_UNORM_BLOCK" class="EAC_RG" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="EAC">
+ <component name="R" bits="11" numericFormat="UNORM"/>
+ <component name="G" bits="11" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_EAC_R11G11_SNORM_BLOCK" class="EAC_RG" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="EAC">
+ <component name="R" bits="11" numericFormat="SNORM"/>
+ <component name="G" bits="11" numericFormat="SNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_4x4_UNORM_BLOCK" class="ASTC_4x4" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_4x4_SRGB_BLOCK" class="ASTC_4x4" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_5x4_UNORM_BLOCK" class="ASTC_5x4" blockSize="16" texelsPerBlock="20" blockExtent="5,4,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_5x4_SRGB_BLOCK" class="ASTC_5x4" blockSize="16" texelsPerBlock="20" blockExtent="5,4,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_5x5_UNORM_BLOCK" class="ASTC_5x5" blockSize="16" texelsPerBlock="25" blockExtent="5,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_5x5_SRGB_BLOCK" class="ASTC_5x5" blockSize="16" texelsPerBlock="25" blockExtent="5,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_6x5_UNORM_BLOCK" class="ASTC_6x5" blockSize="16" texelsPerBlock="30" blockExtent="6,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_6x5_SRGB_BLOCK" class="ASTC_6x5" blockSize="16" texelsPerBlock="30" blockExtent="6,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_6x6_UNORM_BLOCK" class="ASTC_6x6" blockSize="16" texelsPerBlock="36" blockExtent="6,6,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_6x6_SRGB_BLOCK" class="ASTC_6x6" blockSize="16" texelsPerBlock="36" blockExtent="6,6,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x5_UNORM_BLOCK" class="ASTC_8x5" blockSize="16" texelsPerBlock="40" blockExtent="8,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x5_SRGB_BLOCK" class="ASTC_8x5" blockSize="16" texelsPerBlock="40" blockExtent="8,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x6_UNORM_BLOCK" class="ASTC_8x6" blockSize="16" texelsPerBlock="48" blockExtent="8,6,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x6_SRGB_BLOCK" class="ASTC_8x6" blockSize="16" texelsPerBlock="48" blockExtent="8,6,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x8_UNORM_BLOCK" class="ASTC_8x8" blockSize="16" texelsPerBlock="64" blockExtent="8,8,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x8_SRGB_BLOCK" class="ASTC_8x8" blockSize="16" texelsPerBlock="64" blockExtent="8,8,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x5_UNORM_BLOCK" class="ASTC_10x5" blockSize="16" texelsPerBlock="50" blockExtent="10,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x5_SRGB_BLOCK" class="ASTC_10x5" blockSize="16" texelsPerBlock="50" blockExtent="10,5,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x6_UNORM_BLOCK" class="ASTC_10x6" blockSize="16" texelsPerBlock="60" blockExtent="10,6,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x6_SRGB_BLOCK" class="ASTC_10x6" blockSize="16" texelsPerBlock="60" blockExtent="10,6,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x8_UNORM_BLOCK" class="ASTC_10x8" blockSize="16" texelsPerBlock="80" blockExtent="10,8,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x8_SRGB_BLOCK" class="ASTC_10x8" blockSize="16" texelsPerBlock="80" blockExtent="10,8,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x10_UNORM_BLOCK" class="ASTC_10x10" blockSize="16" texelsPerBlock="100" blockExtent="10,10,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x10_SRGB_BLOCK" class="ASTC_10x10" blockSize="16" texelsPerBlock="100" blockExtent="10,10,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_12x10_UNORM_BLOCK" class="ASTC_12x10" blockSize="16" texelsPerBlock="120" blockExtent="12,10,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_12x10_SRGB_BLOCK" class="ASTC_12x10" blockSize="16" texelsPerBlock="120" blockExtent="12,10,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_12x12_UNORM_BLOCK" class="ASTC_12x12" blockSize="16" texelsPerBlock="144" blockExtent="12,12,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_12x12_SRGB_BLOCK" class="ASTC_12x12" blockSize="16" texelsPerBlock="144" blockExtent="12,12,1" compressed="ASTC LDR">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_G8B8G8R8_422_UNORM" class="32-bit G8B8G8R8" blockSize="4" texelsPerBlock="1" blockExtent="2,1,1" chroma="422">
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B8G8R8G8_422_UNORM" class="32-bit B8G8R8G8" blockSize="4" texelsPerBlock="1" blockExtent="2,1,1" chroma="422">
+ <component name="B" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ <component name="R" bits="8" numericFormat="UNORM"/>
+ <component name="G" bits="8" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM" class="8-bit 3-plane 420" blockSize="3" texelsPerBlock="1" chroma="420">
+ <component name="G" bits="8" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="8" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="2" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R8_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G8_B8R8_2PLANE_420_UNORM" class="8-bit 2-plane 420" blockSize="3" texelsPerBlock="1" chroma="420">
+ <component name="G" bits="8" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R8G8_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM" class="8-bit 3-plane 422" blockSize="3" texelsPerBlock="1" chroma="422">
+ <component name="G" bits="8" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="8" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="2" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G8_B8R8_2PLANE_422_UNORM" class="8-bit 2-plane 422" blockSize="3" texelsPerBlock="1" chroma="422">
+ <component name="G" bits="8" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R8G8_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM" class="8-bit 3-plane 444" blockSize="3" texelsPerBlock="1" chroma="444">
+ <component name="G" bits="8" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="8" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="2" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R10X6_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R10X6G10X6_UNORM_2PACK16" class="32-bit" blockSize="4" texelsPerBlock="1" packed="16">
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16" class="64-bit R10G10B10A10" blockSize="8" texelsPerBlock="1" packed="16" chroma="444">
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ <component name="B" bits="10" numericFormat="UNORM"/>
+ <component name="A" bits="10" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16" class="64-bit G10B10G10R10" blockSize="8" texelsPerBlock="1" blockExtent="2,1,1" packed="16" chroma="422">
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ <component name="B" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16" class="64-bit B10G10R10G10" blockSize="8" texelsPerBlock="1" blockExtent="2,1,1" packed="16" chroma="422">
+ <component name="B" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ <component name="R" bits="10" numericFormat="UNORM"/>
+ <component name="G" bits="10" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16" class="10-bit 3-plane 420" blockSize="6" texelsPerBlock="1" packed="16" chroma="420">
+ <component name="G" bits="10" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="10" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="2" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16" class="10-bit 2-plane 420" blockSize="6" texelsPerBlock="1" packed="16" chroma="420">
+ <component name="G" bits="10" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R10X6G10X6_UNORM_2PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16" class="10-bit 3-plane 422" blockSize="6" texelsPerBlock="1" packed="16" chroma="422">
+ <component name="G" bits="10" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="10" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="2" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16" class="10-bit 2-plane 422" blockSize="6" texelsPerBlock="1" packed="16" chroma="422">
+ <component name="G" bits="10" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R10X6G10X6_UNORM_2PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16" class="10-bit 3-plane 444" blockSize="6" texelsPerBlock="1" packed="16" chroma="444">
+ <component name="G" bits="10" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="10" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="2" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ </format>
+ <format name="VK_FORMAT_R12X4_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="R" bits="12" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R12X4G12X4_UNORM_2PACK16" class="32-bit" blockSize="4" texelsPerBlock="1" packed="16">
+ <component name="R" bits="12" numericFormat="UNORM"/>
+ <component name="G" bits="12" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16" class="64-bit R12G12B12A12" blockSize="8" texelsPerBlock="1" packed="16" chroma="444">
+ <component name="R" bits="12" numericFormat="UNORM"/>
+ <component name="G" bits="12" numericFormat="UNORM"/>
+ <component name="B" bits="12" numericFormat="UNORM"/>
+ <component name="A" bits="12" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16" class="64-bit G12B12G12R12" blockSize="8" texelsPerBlock="1" blockExtent="2,1,1" packed="16" chroma="422">
+ <component name="G" bits="12" numericFormat="UNORM"/>
+ <component name="B" bits="12" numericFormat="UNORM"/>
+ <component name="G" bits="12" numericFormat="UNORM"/>
+ <component name="R" bits="12" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16" class="64-bit B12G12R12G12" blockSize="8" texelsPerBlock="1" blockExtent="2,1,1" packed="16" chroma="422">
+ <component name="B" bits="12" numericFormat="UNORM"/>
+ <component name="G" bits="12" numericFormat="UNORM"/>
+ <component name="R" bits="12" numericFormat="UNORM"/>
+ <component name="G" bits="12" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16" class="12-bit 3-plane 420" blockSize="6" texelsPerBlock="1" packed="16" chroma="420">
+ <component name="G" bits="12" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="12" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="2" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16" class="12-bit 2-plane 420" blockSize="6" texelsPerBlock="1" packed="16" chroma="420">
+ <component name="G" bits="12" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R12X4G12X4_UNORM_2PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16" class="12-bit 3-plane 422" blockSize="6" texelsPerBlock="1" packed="16" chroma="422">
+ <component name="G" bits="12" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="12" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="2" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16" class="12-bit 2-plane 422" blockSize="6" texelsPerBlock="1" packed="16" chroma="422">
+ <component name="G" bits="12" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R12X4G12X4_UNORM_2PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16" class="12-bit 3-plane 444" blockSize="6" texelsPerBlock="1" packed="16" chroma="444">
+ <component name="G" bits="12" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="12" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="2" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G16B16G16R16_422_UNORM" class="64-bit G16B16G16R16" blockSize="8" texelsPerBlock="1" blockExtent="2,1,1" chroma="422">
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ <component name="B" bits="16" numericFormat="UNORM"/>
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ <component name="R" bits="16" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_B16G16R16G16_422_UNORM" class="64-bit B16G16R16G16" blockSize="8" texelsPerBlock="1" blockExtent="2,1,1" chroma="422">
+ <component name="B" bits="16" numericFormat="UNORM"/>
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ <component name="R" bits="16" numericFormat="UNORM"/>
+ <component name="G" bits="16" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM" class="16-bit 3-plane 420" blockSize="6" texelsPerBlock="1" chroma="420">
+ <component name="G" bits="16" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="16" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="2" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R16_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G16_B16R16_2PLANE_420_UNORM" class="16-bit 2-plane 420" blockSize="6" texelsPerBlock="1" chroma="420">
+ <component name="G" bits="16" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="2" compatible="VK_FORMAT_R16G16_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM" class="16-bit 3-plane 422" blockSize="6" texelsPerBlock="1" chroma="422">
+ <component name="G" bits="16" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="16" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="2" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G16_B16R16_2PLANE_422_UNORM" class="16-bit 2-plane 422" blockSize="6" texelsPerBlock="1" chroma="422">
+ <component name="G" bits="16" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="1" widthDivisor="2" heightDivisor="1" compatible="VK_FORMAT_R16G16_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM" class="16-bit 3-plane 444" blockSize="6" texelsPerBlock="1" chroma="444">
+ <component name="G" bits="16" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="16" numericFormat="UNORM" planeIndex="2"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="2" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG" class="PVRTC1_2BPP" blockSize="8" texelsPerBlock="1" blockExtent="8,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG" class="PVRTC1_4BPP" blockSize="8" texelsPerBlock="1" blockExtent="4,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG" class="PVRTC2_2BPP" blockSize="8" texelsPerBlock="1" blockExtent="8,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG" class="PVRTC2_4BPP" blockSize="8" texelsPerBlock="1" blockExtent="4,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="UNORM"/>
+ <component name="G" bits="compressed" numericFormat="UNORM"/>
+ <component name="B" bits="compressed" numericFormat="UNORM"/>
+ <component name="A" bits="compressed" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG" class="PVRTC1_2BPP" blockSize="8" texelsPerBlock="1" blockExtent="8,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG" class="PVRTC1_4BPP" blockSize="8" texelsPerBlock="1" blockExtent="4,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG" class="PVRTC2_2BPP" blockSize="8" texelsPerBlock="1" blockExtent="8,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG" class="PVRTC2_4BPP" blockSize="8" texelsPerBlock="1" blockExtent="4,4,1" compressed="PVRTC">
+ <component name="R" bits="compressed" numericFormat="SRGB"/>
+ <component name="G" bits="compressed" numericFormat="SRGB"/>
+ <component name="B" bits="compressed" numericFormat="SRGB"/>
+ <component name="A" bits="compressed" numericFormat="SRGB"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK" class="ASTC_4x4" blockSize="16" texelsPerBlock="16" blockExtent="4,4,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK" class="ASTC_5x4" blockSize="16" texelsPerBlock="20" blockExtent="5,4,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK" class="ASTC_5x5" blockSize="16" texelsPerBlock="25" blockExtent="5,5,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK" class="ASTC_6x5" blockSize="16" texelsPerBlock="30" blockExtent="6,5,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK" class="ASTC_6x6" blockSize="16" texelsPerBlock="36" blockExtent="6,6,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK" class="ASTC_8x5" blockSize="16" texelsPerBlock="40" blockExtent="8,5,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK" class="ASTC_8x6" blockSize="16" texelsPerBlock="48" blockExtent="8,6,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK" class="ASTC_8x8" blockSize="16" texelsPerBlock="64" blockExtent="8,8,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK" class="ASTC_10x5" blockSize="16" texelsPerBlock="50" blockExtent="10,5,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK" class="ASTC_10x6" blockSize="16" texelsPerBlock="60" blockExtent="10,6,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK" class="ASTC_10x8" blockSize="16" texelsPerBlock="80" blockExtent="10,8,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK" class="ASTC_10x10" blockSize="16" texelsPerBlock="100" blockExtent="10,10,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK" class="ASTC_12x10" blockSize="16" texelsPerBlock="120" blockExtent="12,10,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK" class="ASTC_12x12" blockSize="16" texelsPerBlock="144" blockExtent="12,12,1" compressed="ASTC HDR">
+ <component name="R" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="G" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="B" bits="compressed" numericFormat="SFLOAT"/>
+ <component name="A" bits="compressed" numericFormat="SFLOAT"/>
+ </format>
+ <format name="VK_FORMAT_G8_B8R8_2PLANE_444_UNORM" class="8-bit 2-plane 444" blockSize="3" texelsPerBlock="1" chroma="444">
+ <component name="G" bits="8" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="8" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8_UNORM"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R8G8_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16" class="10-bit 2-plane 444" blockSize="6" texelsPerBlock="1" packed="16" chroma="444">
+ <component name="G" bits="10" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="10" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R10X6G10X6_UNORM_2PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16" class="12-bit 2-plane 444" blockSize="6" texelsPerBlock="1" packed="16" chroma="444">
+ <component name="G" bits="12" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="12" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4_UNORM_PACK16"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R12X4G12X4_UNORM_2PACK16"/>
+ </format>
+ <format name="VK_FORMAT_G16_B16R16_2PLANE_444_UNORM" class="16-bit 2-plane 444" blockSize="6" texelsPerBlock="1" chroma="444">
+ <component name="G" bits="16" numericFormat="UNORM" planeIndex="0"/>
+ <component name="B" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <component name="R" bits="16" numericFormat="UNORM" planeIndex="1"/>
+ <plane index="0" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16_UNORM"/>
+ <plane index="1" widthDivisor="1" heightDivisor="1" compatible="VK_FORMAT_R16G16_UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A4R4G4B4_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="A" bits="4" numericFormat="UNORM"/>
+ <component name="R" bits="4" numericFormat="UNORM"/>
+ <component name="G" bits="4" numericFormat="UNORM"/>
+ <component name="B" bits="4" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_A4B4G4R4_UNORM_PACK16" class="16-bit" blockSize="2" texelsPerBlock="1" packed="16">
+ <component name="A" bits="4" numericFormat="UNORM"/>
+ <component name="B" bits="4" numericFormat="UNORM"/>
+ <component name="G" bits="4" numericFormat="UNORM"/>
+ <component name="R" bits="4" numericFormat="UNORM"/>
+ </format>
+ <format name="VK_FORMAT_R16G16_S10_5_NV" class="32-bit" blockSize="4" texelsPerBlock="1">
+ <component name="R" bits="16" numericFormat="SINT"/>
+ <component name="G" bits="16" numericFormat="SINT"/>
+ </format>
+ </formats>
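
The <formats> table above is machine-readable metadata rather than prose; as a rough illustration of how it can be consumed, the sketch below (an assumption-laden example, not part of the registry or Mesa's tooling — it presumes a local copy of vk.xml and uses only the attribute names visible above) indexes each format's block size, components, and plane layout with Python's standard ElementTree.

    # Illustrative sketch only: assumes a local vk.xml containing the <formats> block above.
    import xml.etree.ElementTree as ET

    def load_formats(path="vk.xml"):
        """Index every <format> element by name, keeping block size, components and planes."""
        root = ET.parse(path).getroot()
        formats = {}
        for fmt in root.findall("./formats/format"):
            formats[fmt.get("name")] = {
                "block_size": int(fmt.get("blockSize")),
                "texels_per_block": int(fmt.get("texelsPerBlock")),
                # Compressed formats report bits="compressed" for each component.
                "components": [(c.get("name"), c.get("bits"), c.get("numericFormat"))
                               for c in fmt.findall("component")],
                # Multi-planar formats additionally describe each plane's subsampling and
                # its single-plane compatible format.
                "planes": [(int(p.get("index")), int(p.get("widthDivisor")),
                            int(p.get("heightDivisor")), p.get("compatible"))
                           for p in fmt.findall("plane")],
            }
        return formats

    if __name__ == "__main__":
        fmt = load_formats()["VK_FORMAT_G16_B16R16_2PLANE_420_UNORM"]
        print(fmt["block_size"], fmt["planes"])   # plane 1 is B+R at half width/height
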
<spirvextensions comment="SPIR-V Extensions allowed in Vulkan and what is required to use it">
<spirvextension name="SPV_KHR_variable_pointers">
- <enable version="VK_API_VERSION_1_1"/>
+ <enable version="VK_VERSION_1_1"/>
<enable extension="VK_KHR_variable_pointers"/>
</spirvextension>
<spirvextension name="SPV_AMD_shader_explicit_vertex_parameter">
@@ -16782,27 +25710,30 @@ typedef void <name>CAMetalLayer</name>;
<spirvextension name="SPV_AMD_texture_gather_bias_lod">
<enable extension="VK_AMD_texture_gather_bias_lod"/>
</spirvextension>
+ <spirvextension name="SPV_AMD_shader_early_and_late_fragment_tests">
+ <enable extension="VK_AMD_shader_early_and_late_fragment_tests"/>
+ </spirvextension>
<spirvextension name="SPV_KHR_shader_draw_parameters">
- <enable version="VK_API_VERSION_1_1"/>
+ <enable version="VK_VERSION_1_1"/>
<enable extension="VK_KHR_shader_draw_parameters"/>
</spirvextension>
<spirvextension name="SPV_KHR_8bit_storage">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_KHR_8bit_storage"/>
</spirvextension>
<spirvextension name="SPV_KHR_16bit_storage">
- <enable version="VK_API_VERSION_1_1"/>
+ <enable version="VK_VERSION_1_1"/>
<enable extension="VK_KHR_16bit_storage"/>
</spirvextension>
<spirvextension name="SPV_KHR_shader_clock">
<enable extension="VK_KHR_shader_clock"/>
</spirvextension>
<spirvextension name="SPV_KHR_float_controls">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_KHR_shader_float_controls"/>
</spirvextension>
<spirvextension name="SPV_KHR_storage_buffer_storage_class">
- <enable version="VK_API_VERSION_1_1"/>
+ <enable version="VK_VERSION_1_1"/>
<enable extension="VK_KHR_storage_buffer_storage_class"/>
</spirvextension>
<spirvextension name="SPV_KHR_post_depth_coverage">
@@ -16832,19 +25763,22 @@ typedef void <name>CAMetalLayer</name>;
<spirvextension name="SPV_NV_shader_subgroup_partitioned">
<enable extension="VK_NV_shader_subgroup_partitioned"/>
</spirvextension>
+ <spirvextension name="SPV_NV_shader_invocation_reorder">
+ <enable extension="VK_NV_ray_tracing_invocation_reorder"/>
+ </spirvextension>
<spirvextension name="SPV_EXT_shader_viewport_index_layer">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_EXT_shader_viewport_index_layer"/>
</spirvextension>
<spirvextension name="SPV_NVX_multiview_per_view_attributes">
<enable extension="VK_NVX_multiview_per_view_attributes"/>
</spirvextension>
<spirvextension name="SPV_EXT_descriptor_indexing">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_EXT_descriptor_indexing"/>
</spirvextension>
<spirvextension name="SPV_KHR_vulkan_memory_model">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_KHR_vulkan_memory_model"/>
</spirvextension>
<spirvextension name="SPV_NV_compute_shader_derivatives">
@@ -16868,6 +25802,9 @@ typedef void <name>CAMetalLayer</name>;
<spirvextension name="SPV_KHR_ray_query">
<enable extension="VK_KHR_ray_query"/>
</spirvextension>
+ <spirvextension name="SPV_KHR_ray_cull_mask">
+ <enable extension="VK_KHR_ray_tracing_maintenance1"/>
+ </spirvextension>
<spirvextension name="SPV_GOOGLE_hlsl_functionality1">
<enable extension="VK_GOOGLE_hlsl_functionality1"/>
</spirvextension>
@@ -16881,7 +25818,7 @@ typedef void <name>CAMetalLayer</name>;
<enable extension="VK_EXT_fragment_density_map"/>
</spirvextension>
<spirvextension name="SPV_KHR_physical_storage_buffer">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_KHR_buffer_device_address"/>
</spirvextension>
<spirvextension name="SPV_EXT_physical_storage_buffer">
@@ -16897,22 +25834,25 @@ typedef void <name>CAMetalLayer</name>;
<enable extension="VK_EXT_fragment_shader_interlock"/>
</spirvextension>
<spirvextension name="SPV_EXT_demote_to_helper_invocation">
+ <enable version="VK_VERSION_1_3"/>
<enable extension="VK_EXT_shader_demote_to_helper_invocation"/>
</spirvextension>
<spirvextension name="SPV_KHR_fragment_shading_rate">
<enable extension="VK_KHR_fragment_shading_rate"/>
</spirvextension>
<spirvextension name="SPV_KHR_non_semantic_info">
+ <enable version="VK_VERSION_1_3"/>
<enable extension="VK_KHR_shader_non_semantic_info"/>
</spirvextension>
<spirvextension name="SPV_EXT_shader_image_int64">
<enable extension="VK_EXT_shader_image_atomic_int64"/>
</spirvextension>
<spirvextension name="SPV_KHR_terminate_invocation">
+ <enable version="VK_VERSION_1_3"/>
<enable extension="VK_KHR_shader_terminate_invocation"/>
</spirvextension>
<spirvextension name="SPV_KHR_multiview">
- <enable version="VK_API_VERSION_1_1"/>
+ <enable version="VK_VERSION_1_1"/>
<enable extension="VK_KHR_multiview"/>
</spirvextension>
<spirvextension name="SPV_KHR_workgroup_memory_explicit_layout">
@@ -16921,7 +25861,11 @@ typedef void <name>CAMetalLayer</name>;
<spirvextension name="SPV_EXT_shader_atomic_float_add">
<enable extension="VK_EXT_shader_atomic_float"/>
</spirvextension>
+ <spirvextension name="SPV_KHR_fragment_shader_barycentric">
+ <enable extension="VK_KHR_fragment_shader_barycentric"/>
+ </spirvextension>
<spirvextension name="SPV_KHR_subgroup_uniform_control_flow">
+ <enable version="VK_VERSION_1_3"/>
<enable extension="VK_KHR_shader_subgroup_uniform_control_flow"/>
</spirvextension>
<spirvextension name="SPV_EXT_shader_atomic_float_min_max">
@@ -16930,37 +25874,102 @@ typedef void <name>CAMetalLayer</name>;
<spirvextension name="SPV_EXT_shader_atomic_float16_add">
<enable extension="VK_EXT_shader_atomic_float2"/>
</spirvextension>
+ <spirvextension name="SPV_NV_shader_atomic_fp16_vector">
+ <enable extension="VK_NV_shader_atomic_float16_vector"/>
+ </spirvextension>
+ <spirvextension name="SPV_EXT_fragment_fully_covered">
+ <enable extension="VK_EXT_conservative_rasterization"/>
+ </spirvextension>
<spirvextension name="SPV_KHR_integer_dot_product">
+ <enable version="VK_VERSION_1_3"/>
<enable extension="VK_KHR_shader_integer_dot_product"/>
</spirvextension>
+ <spirvextension name="SPV_INTEL_shader_integer_functions2">
+ <enable extension="VK_INTEL_shader_integer_functions2"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_device_group">
+ <enable version="VK_VERSION_1_1"/>
+ <enable extension="VK_KHR_device_group"/>
+ </spirvextension>
+ <spirvextension name="SPV_QCOM_image_processing">
+ <enable extension="VK_QCOM_image_processing"/>
+ </spirvextension>
+ <spirvextension name="SPV_QCOM_image_processing2">
+ <enable extension="VK_QCOM_image_processing2"/>
+ </spirvextension>
+ <spirvextension name="SPV_EXT_mesh_shader">
+ <enable extension="VK_EXT_mesh_shader"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_ray_tracing_position_fetch">
+ <enable extension="VK_KHR_ray_tracing_position_fetch"/>
+ </spirvextension>
+ <spirvextension name="SPV_EXT_shader_tile_image">
+ <enable extension="VK_EXT_shader_tile_image"/>
+ </spirvextension>
+ <spirvextension name="SPV_EXT_opacity_micromap">
+ <enable extension="VK_EXT_opacity_micromap"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_cooperative_matrix">
+ <enable extension="VK_KHR_cooperative_matrix"/>
+ </spirvextension>
+ <spirvextension name="SPV_ARM_core_builtins">
+ <enable extension="VK_ARM_shader_core_builtins"/>
+ </spirvextension>
+ <spirvextension name="SPV_AMDX_shader_enqueue">
+ <enable extension="VK_AMDX_shader_enqueue"/>
+ </spirvextension>
+ <spirvextension name="SPV_HUAWEI_cluster_culling_shader">
+ <enable extension="VK_HUAWEI_cluster_culling_shader"/>
+ </spirvextension>
+ <spirvextension name="SPV_HUAWEI_subpass_shading">
+ <enable extension="VK_HUAWEI_subpass_shading"/>
+ </spirvextension>
+ <spirvextension name="SPV_NV_ray_tracing_motion_blur">
+ <enable extension="VK_NV_ray_tracing_motion_blur"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_maximal_reconvergence">
+ <enable extension="VK_KHR_shader_maximal_reconvergence"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_subgroup_rotate">
+ <enable extension="VK_KHR_shader_subgroup_rotate"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_expect_assume">
+ <enable extension="VK_KHR_shader_expect_assume"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_float_controls2">
+ <enable extension="VK_KHR_shader_float_controls2"/>
+ </spirvextension>
+ <spirvextension name="SPV_KHR_quad_control">
+ <enable extension="VK_KHR_shader_quad_control"/>
+ </spirvextension>
</spirvextensions>
<spirvcapabilities comment="SPIR-V Capabilities allowed in Vulkan and what is required to use it">
<spirvcapability name="Matrix">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="Shader">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="InputAttachment">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="Sampled1D">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="Image1D">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="SampledBuffer">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="ImageBuffer">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="ImageQuery">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="DerivativeControl">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="Geometry">
<enable struct="VkPhysicalDeviceFeatures" feature="geometryShader" requires="VK_VERSION_1_0"/>
@@ -17005,6 +26014,9 @@ typedef void <name>CAMetalLayer</name>;
<enable struct="VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT" feature="shaderBufferFloat64AtomicMinMax" requires="VK_EXT_shader_atomic_float2"/>
<enable struct="VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT" feature="shaderSharedFloat64AtomicMinMax" requires="VK_EXT_shader_atomic_float2"/>
</spirvcapability>
+ <spirvcapability name="AtomicFloat16VectorNV">
+ <enable struct="VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV" feature="shaderFloat16VectorAtomics" requires="VK_NV_shader_atomic_float16_vector"/>
+ </spirvcapability>
<spirvcapability name="Int64ImageEXT">
<enable struct="VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT" feature="shaderImageInt64Atomics" requires="VK_EXT_shader_image_atomic_int64"/>
</spirvcapability>
@@ -17060,16 +26072,20 @@ typedef void <name>CAMetalLayer</name>;
<enable struct="VkPhysicalDeviceFeatures" feature="shaderStorageImageMultisample" requires="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="StorageImageExtendedFormats">
- <enable version="VK_API_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="InterpolationFunction">
<enable struct="VkPhysicalDeviceFeatures" feature="sampleRateShading" requires="VK_VERSION_1_0"/>
</spirvcapability>
<spirvcapability name="StorageImageReadWithoutFormat">
<enable struct="VkPhysicalDeviceFeatures" feature="shaderStorageImageReadWithoutFormat" requires="VK_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_3"/>
+ <enable extension="VK_KHR_format_feature_flags2"/>
</spirvcapability>
<spirvcapability name="StorageImageWriteWithoutFormat">
<enable struct="VkPhysicalDeviceFeatures" feature="shaderStorageImageWriteWithoutFormat" requires="VK_VERSION_1_0"/>
+ <enable version="VK_VERSION_1_3"/>
+ <enable extension="VK_KHR_format_feature_flags2"/>
</spirvcapability>
<spirvcapability name="MultiViewport">
<enable struct="VkPhysicalDeviceFeatures" feature="multiViewport" requires="VK_VERSION_1_0"/>
@@ -17084,7 +26100,7 @@ typedef void <name>CAMetalLayer</name>;
<enable struct="VkPhysicalDeviceMultiviewFeatures" feature="multiview" requires="VK_KHR_multiview"/>
</spirvcapability>
<spirvcapability name="DeviceGroup">
- <enable version="VK_API_VERSION_1_1"/>
+ <enable version="VK_VERSION_1_1"/>
<enable extension="VK_KHR_device_group"/>
</spirvcapability>
<spirvcapability name="VariablePointersStorageBuffer">
@@ -17187,7 +26203,7 @@ typedef void <name>CAMetalLayer</name>;
<enable extension="VK_EXT_post_depth_coverage"/>
</spirvcapability>
<spirvcapability name="ShaderNonUniform">
- <enable version="VK_API_VERSION_1_2"/>
+ <enable version="VK_VERSION_1_2"/>
<enable extension="VK_EXT_descriptor_indexing"/>
</spirvcapability>
<spirvcapability name="RuntimeDescriptorArray">
@@ -17223,6 +26239,9 @@ typedef void <name>CAMetalLayer</name>;
<spirvcapability name="StorageTexelBufferArrayNonUniformIndexing">
<enable struct="VkPhysicalDeviceVulkan12Features" feature="shaderStorageTexelBufferArrayNonUniformIndexing" requires="VK_VERSION_1_2,VK_EXT_descriptor_indexing"/>
</spirvcapability>
+ <spirvcapability name="FragmentFullyCoveredEXT">
+ <enable extension="VK_EXT_conservative_rasterization"/>
+ </spirvcapability>
<spirvcapability name="Float16">
<enable struct="VkPhysicalDeviceVulkan12Features" feature="shaderFloat16" requires="VK_VERSION_1_2,VK_KHR_shader_float16_int8"/>
<enable extension="VK_AMD_gpu_shader_half_float"/>
@@ -17296,6 +26315,10 @@ typedef void <name>CAMetalLayer</name>;
</spirvcapability>
<spirvcapability name="RayTraversalPrimitiveCullingKHR">
<enable struct="VkPhysicalDeviceRayTracingPipelineFeaturesKHR" feature="rayTraversalPrimitiveCulling" requires="VK_KHR_ray_tracing_pipeline"/>
+ <enable struct="VkPhysicalDeviceRayQueryFeaturesKHR" feature="rayQuery" requires="VK_KHR_ray_query"/>
+ </spirvcapability>
+ <spirvcapability name="RayCullMaskKHR">
+ <enable struct="VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR" feature="rayTracingMaintenance1" requires="VK_KHR_ray_tracing_maintenance1"/>
</spirvcapability>
<spirvcapability name="RayTracingNV">
<enable extension="VK_NV_ray_tracing"/>
@@ -17336,6 +26359,7 @@ typedef void <name>CAMetalLayer</name>;
<enable struct="VkPhysicalDeviceShadingRateImageFeaturesNV" feature="shadingRateImage" requires="VK_NV_shading_rate_image"/>
</spirvcapability>
<spirvcapability name="DemoteToHelperInvocationEXT">
+ <enable struct="VkPhysicalDeviceVulkan13Features" feature="shaderDemoteToHelperInvocation" requires="VK_VERSION_1_3,VK_EXT_shader_demote_to_helper_invocation"/>
<enable struct="VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT" feature="shaderDemoteToHelperInvocation" requires="VK_EXT_shader_demote_to_helper_invocation"/>
</spirvcapability>
<spirvcapability name="FragmentShadingRateKHR">
@@ -17353,16 +26377,418 @@ typedef void <name>CAMetalLayer</name>;
<enable struct="VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR" feature="workgroupMemoryExplicitLayout16BitAccess" requires="VK_KHR_workgroup_memory_explicit_layout"/>
</spirvcapability>
<spirvcapability name="DotProductInputAllKHR">
+ <enable struct="VkPhysicalDeviceVulkan13Features" feature="shaderIntegerDotProduct" requires="VK_VERSION_1_3,VK_KHR_shader_integer_dot_product"/>
<enable struct="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR" feature="shaderIntegerDotProduct" requires="VK_KHR_shader_integer_dot_product"/>
</spirvcapability>
<spirvcapability name="DotProductInput4x8BitKHR">
+ <enable struct="VkPhysicalDeviceVulkan13Features" feature="shaderIntegerDotProduct" requires="VK_VERSION_1_3,VK_KHR_shader_integer_dot_product"/>
<enable struct="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR" feature="shaderIntegerDotProduct" requires="VK_KHR_shader_integer_dot_product"/>
</spirvcapability>
<spirvcapability name="DotProductInput4x8BitPackedKHR">
+ <enable struct="VkPhysicalDeviceVulkan13Features" feature="shaderIntegerDotProduct" requires="VK_VERSION_1_3,VK_KHR_shader_integer_dot_product"/>
<enable struct="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR" feature="shaderIntegerDotProduct" requires="VK_KHR_shader_integer_dot_product"/>
</spirvcapability>
<spirvcapability name="DotProductKHR">
+ <enable struct="VkPhysicalDeviceVulkan13Features" feature="shaderIntegerDotProduct" requires="VK_VERSION_1_3,VK_KHR_shader_integer_dot_product"/>
<enable struct="VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR" feature="shaderIntegerDotProduct" requires="VK_KHR_shader_integer_dot_product"/>
</spirvcapability>
+ <spirvcapability name="FragmentBarycentricKHR">
+ <enable struct="VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR" feature="fragmentShaderBarycentric" requires="VK_KHR_fragment_shader_barycentric"/>
+ </spirvcapability>
+ <spirvcapability name="TextureSampleWeightedQCOM">
+ <enable struct="VkPhysicalDeviceImageProcessingFeaturesQCOM" feature="textureSampleWeighted" requires="VK_QCOM_image_processing"/>
+ </spirvcapability>
+ <spirvcapability name="TextureBoxFilterQCOM">
+ <enable struct="VkPhysicalDeviceImageProcessingFeaturesQCOM" feature="textureBoxFilter" requires="VK_QCOM_image_processing"/>
+ </spirvcapability>
+ <spirvcapability name="TextureBlockMatchQCOM">
+ <enable struct="VkPhysicalDeviceImageProcessingFeaturesQCOM" feature="textureBlockMatch" requires="VK_QCOM_image_processing"/>
+ </spirvcapability>
+ <spirvcapability name="TextureBlockMatch2QCOM">
+ <enable struct="VkPhysicalDeviceImageProcessing2FeaturesQCOM" feature="textureBlockMatch2" requires="VK_QCOM_image_processing2"/>
+ </spirvcapability>
+ <spirvcapability name="MeshShadingEXT">
+ <enable extension="VK_EXT_mesh_shader"/>
+ </spirvcapability>
+ <spirvcapability name="RayTracingOpacityMicromapEXT">
+ <enable extension="VK_EXT_opacity_micromap"/>
+ </spirvcapability>
+ <spirvcapability name="CoreBuiltinsARM">
+ <enable struct="VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM" feature="shaderCoreBuiltins" requires="VK_ARM_shader_core_builtins"/>
+ </spirvcapability>
+ <spirvcapability name="ShaderInvocationReorderNV">
+ <enable extension="VK_NV_ray_tracing_invocation_reorder"/>
+ </spirvcapability>
+ <spirvcapability name="ClusterCullingShadingHUAWEI">
+ <enable struct="VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI" feature="clustercullingShader" requires="VK_HUAWEI_cluster_culling_shader"/>
+ </spirvcapability>
+ <spirvcapability name="RayTracingPositionFetchKHR">
+ <enable struct="VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR" feature="rayTracingPositionFetch" requires="VK_KHR_ray_tracing_position_fetch"/>
+ </spirvcapability>
+ <spirvcapability name="TileImageColorReadAccessEXT">
+ <enable struct="VkPhysicalDeviceShaderTileImageFeaturesEXT" feature="shaderTileImageColorReadAccess" requires="VK_EXT_shader_tile_image"/>
+ </spirvcapability>
+ <spirvcapability name="TileImageDepthReadAccessEXT">
+ <enable struct="VkPhysicalDeviceShaderTileImageFeaturesEXT" feature="shaderTileImageDepthReadAccess" requires="VK_EXT_shader_tile_image"/>
+ </spirvcapability>
+ <spirvcapability name="TileImageStencilReadAccessEXT">
+ <enable struct="VkPhysicalDeviceShaderTileImageFeaturesEXT" feature="shaderTileImageStencilReadAccess" requires="VK_EXT_shader_tile_image"/>
+ </spirvcapability>
+ <spirvcapability name="CooperativeMatrixKHR">
+ <enable struct="VkPhysicalDeviceCooperativeMatrixFeaturesKHR" feature="cooperativeMatrix" requires="VK_KHR_cooperative_matrix"/>
+ </spirvcapability>
+ <spirvcapability name="ShaderEnqueueAMDX">
+ <enable struct="VkPhysicalDeviceShaderEnqueueFeaturesAMDX" feature="shaderEnqueue" requires="VK_AMDX_shader_enqueue"/>
+ </spirvcapability>
+ <spirvcapability name="GroupNonUniformRotateKHR">
+ <enable struct="VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR" feature="shaderSubgroupRotate" requires="VK_KHR_shader_subgroup_rotate"/>
+ </spirvcapability>
+ <spirvcapability name="ExpectAssumeKHR">
+ <enable struct="VkPhysicalDeviceShaderExpectAssumeFeaturesKHR" feature="shaderExpectAssume" requires="VK_KHR_shader_expect_assume"/>
+ </spirvcapability>
+ <spirvcapability name="FloatControls2">
+ <enable struct="VkPhysicalDeviceShaderFloatControls2FeaturesKHR" feature="shaderFloatControls2" requires="VK_KHR_shader_float_controls2"/>
+ </spirvcapability>
+ <spirvcapability name="QuadControlKHR">
+ <enable struct="VkPhysicalDeviceShaderQuadControlFeaturesKHR" feature="shaderQuadControl" requires="VK_KHR_shader_quad_control"/>
+ </spirvcapability>
+ <spirvcapability name="MaximallyReconvergesKHR">
+ <enable struct="VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR" feature="shaderMaximalReconvergence" requires="VK_KHR_shader_maximal_reconvergence"/>
+ </spirvcapability>
</spirvcapabilities>
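
For the <spirvcapability> entries above, each <enable> child is an alternative way to gain the capability: a core version, an extension, or a feature-struct member (with 'requires' naming what exposes that struct). Under the same assumptions as the earlier sketch (hypothetical helper, local vk.xml), the following lists those alternatives for a given capability.

    # Rough sketch: a capability is allowed when any one of its <enable> conditions holds.
    import xml.etree.ElementTree as ET

    def enable_options(capability, path="vk.xml"):
        root = ET.parse(path).getroot()
        cap = root.find(f"./spirvcapabilities/spirvcapability[@name='{capability}']")
        options = []
        for en in cap.findall("enable"):
            if en.get("version"):
                options.append(("version", en.get("version")))
            elif en.get("extension"):
                options.append(("extension", en.get("extension")))
            else:
                options.append(("feature", en.get("struct"), en.get("feature"),
                                en.get("requires")))
        return options

    print(enable_options("DotProductKHR"))
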
+ <sync comment="Machine readable representation of the synchronization objects and their mappings">
+ <syncstage name="VK_PIPELINE_STAGE_2_NONE" alias="VK_PIPELINE_STAGE_NONE">
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT" alias="VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT">
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT" alias="VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT">
+ <syncsupport queues="graphics,compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT" alias="VK_PIPELINE_STAGE_VERTEX_INPUT_BIT">
+ <syncsupport queues="graphics"/>
+ <syncequivalent stage="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT,VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT" alias="VK_PIPELINE_STAGE_VERTEX_SHADER_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT" alias="VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT" alias="VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT" alias="VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT" alias="VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT" alias="VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT" alias="VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT" alias="VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT" alias="VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT">
+ <syncsupport queues="compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT" alias="VK_PIPELINE_STAGE_TRANSFER_BIT">
+ <syncsupport queues="graphics,compute,transfer"/>
+ <syncequivalent stage="VK_PIPELINE_STAGE_2_COPY_BIT,VK_PIPELINE_STAGE_2_BLIT_BIT,VK_PIPELINE_STAGE_2_RESOLVE_BIT,VK_PIPELINE_STAGE_2_CLEAR_BIT,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT" alias="VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT">
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_HOST_BIT" alias="VK_PIPELINE_STAGE_HOST_BIT">
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT" alias="VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT">
+ <syncsupport queues="graphics"/>
+ <syncequivalent stage="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT,VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT,VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT,VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT,VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT" alias="VK_PIPELINE_STAGE_ALL_COMMANDS_BIT">
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_COPY_BIT">
+ <syncsupport queues="graphics,compute,transfer"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_RESOLVE_BIT">
+ <syncsupport queues="graphics,compute,transfer"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_BLIT_BIT">
+ <syncsupport queues="graphics,compute,transfer"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_CLEAR_BIT">
+ <syncsupport queues="graphics,compute,transfer"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT">
+ <syncsupport queues="graphics"/>
+ <syncequivalent stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR">
+ <syncsupport queues="decode"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR">
+ <syncsupport queues="encode"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT" alias="VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT" alias="VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT">
+ <syncsupport queues="graphics,compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV" alias="VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV">
+ <syncsupport queues="graphics,compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR" alias="VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR" alias="VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR">
+ <syncsupport queues="compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR" alias="VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR">
+ <syncsupport queues="compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT" alias="VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT" alias="VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT" alias="VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR">
+ <syncsupport queues="graphics,compute,transfer"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT">
+ <syncsupport queues="compute"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI">
+ <syncsupport queues="graphics"/>
+ </syncstage>
+ <syncstage name="VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV">
+ <syncsupport queues="opticalflow"/>
+ </syncstage>
+ <syncaccess name="VK_ACCESS_2_NONE" alias="VK_ACCESS_NONE">
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT" alias="VK_ACCESS_INDIRECT_COMMAND_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_INDEX_READ_BIT" alias="VK_ACCESS_INDEX_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT,VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT" alias="VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT,VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_UNIFORM_READ_BIT" alias="VK_ACCESS_UNIFORM_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT" alias="VK_ACCESS_INPUT_ATTACHMENT_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_SHADER_READ_BIT" alias="VK_ACCESS_SHADER_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT,VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ <syncequivalent access="VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,VK_ACCESS_2_SHADER_STORAGE_READ_BIT,VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_SHADER_WRITE_BIT" alias="VK_ACCESS_SHADER_WRITE_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ <syncequivalent access="VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT" alias="VK_ACCESS_COLOR_ATTACHMENT_READ_BIT">
+ <comment>Fragment shader stage is added by the VK_EXT_shader_tile_image extension</comment>
+ <syncsupport stage="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT" alias="VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT" alias="VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT">
+ <comment>Fragment shader stage is added by the VK_EXT_shader_tile_image extension</comment>
+ <syncsupport stage="VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT,VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT" alias="VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT,VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_TRANSFER_READ_BIT" alias="VK_ACCESS_TRANSFER_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT,VK_PIPELINE_STAGE_2_COPY_BIT,VK_PIPELINE_STAGE_2_RESOLVE_BIT,VK_PIPELINE_STAGE_2_BLIT_BIT,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR,VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_TRANSFER_WRITE_BIT" alias="VK_ACCESS_TRANSFER_WRITE_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT,VK_PIPELINE_STAGE_2_COPY_BIT,VK_PIPELINE_STAGE_2_RESOLVE_BIT,VK_PIPELINE_STAGE_2_BLIT_BIT,VK_PIPELINE_STAGE_2_CLEAR_BIT,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR,VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_HOST_READ_BIT" alias="VK_ACCESS_HOST_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_HOST_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_HOST_WRITE_BIT" alias="VK_ACCESS_HOST_WRITE_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_HOST_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_MEMORY_READ_BIT" alias="VK_ACCESS_MEMORY_READ_BIT">
+            <comment>TODO/Suggestion. Introduce a 'synclist' (could be a different name) element
+                that specifies a list of stages, accesses, etc. Such a list could then be referenced
+                by 'syncaccess' or 'syncstage' elements; for example, 'syncsupport' could accept a
+                'list' attribute, in addition to the 'stage' attribute, that refers to a 'synclist'.
+                A list of ALL stages could be defined once and shared between the MEMORY_READ and
+                MEMORY_WRITE accesses; similarly, a list of ALL shader stages is used frequently.
+                This proposal addresses the duplication problem: when a new stage is added, multiple
+                places need to be updated, which is a potential source of bugs. Such a setup should
+                be more robust and simpler to review and validate.
+ </comment>
+ </syncaccess>
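
As a purely hypothetical illustration of the 'synclist' suggestion in the comment above — no such element or tooling exists in vk.xml — the sketch below shows the intended deduplication: a shared stage list defined once and expanded wherever a syncsupport entry would otherwise repeat it inline.

    # Hypothetical sketch of the proposed 'synclist' deduplication; all names are invented.
    ALL_SHADER_STAGES = [
        "VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT",
        "VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT",
        "VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT",
        "VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT",
        "VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT",
        "VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT",
    ]

    SYNC_LISTS = {"allShaderStages": ALL_SHADER_STAGES}   # hypothetical <synclist> definitions

    def expand_syncsupport(stage=None, list_ref=None):
        """Resolve a syncsupport entry that names a shared list and/or inline stages."""
        stages = list(SYNC_LISTS.get(list_ref, []))
        if stage:
            stages.extend(stage.split(","))
        return stages

    # e.g. <syncsupport list="allShaderStages"/> instead of repeating an 11-stage attribute.
    print(expand_syncsupport(list_ref="allShaderStages"))
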
+ <syncaccess name="VK_ACCESS_2_MEMORY_WRITE_BIT" alias="VK_ACCESS_MEMORY_WRITE_BIT">
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_SHADER_SAMPLED_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_SHADER_STORAGE_READ_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT" alias="VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT" alias="VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT,VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT" alias="VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT" alias="VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV" alias="VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV" alias="VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR" alias="VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR" alias="VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR" alias="VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT" alias="VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT" alias="VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_DESCRIPTOR_BUFFER_READ_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT,VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT,VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT,VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR,VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT,VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI,VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_MICROMAP_READ_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT,VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_OPTICAL_FLOW_READ_BIT_NV">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV"/>
+ </syncaccess>
+ <syncaccess name="VK_ACCESS_2_OPTICAL_FLOW_WRITE_BIT_NV">
+ <syncsupport stage="VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV"/>
+ </syncaccess>
+ <syncpipeline name="graphics primitive">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR</syncpipelinestage>
+ <syncpipelinestage order="None" before="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT">VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT</syncpipelinestage>
+ <syncpipelinestage order="None">VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="graphics mesh" depends="VK_NV_mesh_shader,VK_EXT_mesh_shader">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR</syncpipelinestage>
+ <syncpipelinestage order="None" before="VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT">VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT</syncpipelinestage>
+ <syncpipelinestage order="None">VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="compute">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT</syncpipelinestage>
+ <syncpipelinestage order="None">VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="transfer">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_TRANSFER_BIT</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="host">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_HOST_BIT</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="subpass shading" depends="VK_HUAWEI_subpass_shading">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="command preprocessing" depends="VK_NV_device_generated_commands">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="acceleration structure build" depends="VK_KHR_acceleration_structure,VK_NV_ray_tracing">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="acceleration structure copy" depends="VK_KHR_acceleration_structure,VK_NV_ray_tracing">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="opacity micromap" depends="VK_EXT_opacity_micromap">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="ray tracing" depends="VK_KHR_ray_tracing_pipeline,VK_NV_ray_tracing">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT</syncpipelinestage>
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="video decode" depends="VK_KHR_video_decode_queue">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="video encode" depends="VK_KHR_video_encode_queue">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR</syncpipelinestage>
+ </syncpipeline>
+ <syncpipeline name="optical flow" depends="VK_NV_optical_flow">
+ <syncpipelinestage>VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV</syncpipelinestage>
+ </syncpipeline>
+ </sync>
</registry>
diff --git a/src/vulkan/runtime/meson.build b/src/vulkan/runtime/meson.build
new file mode 100644
index 00000000000..762c29e97e7
--- /dev/null
+++ b/src/vulkan/runtime/meson.build
@@ -0,0 +1,326 @@
+# Copyright © 2017 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# Mesa-local imports in the Python files must be declared here for correct
+# dependency tracking.
+
+vulkan_lite_runtime_files = files(
+ 'rmv/vk_rmv_common.c',
+ 'rmv/vk_rmv_exporter.c',
+ 'vk_acceleration_structure.c',
+ 'vk_blend.c',
+ 'vk_buffer.c',
+ 'vk_buffer_view.c',
+ 'vk_cmd_copy.c',
+ 'vk_cmd_enqueue.c',
+ 'vk_command_buffer.c',
+ 'vk_command_pool.c',
+ 'vk_debug_report.c',
+ 'vk_debug_utils.c',
+ 'vk_deferred_operation.c',
+ 'vk_descriptor_set_layout.c',
+ 'vk_descriptors.c',
+ 'vk_descriptor_update_template.c',
+ 'vk_device.c',
+ 'vk_device_memory.c',
+ 'vk_fence.c',
+ 'vk_framebuffer.c',
+ 'vk_graphics_state.c',
+ 'vk_image.c',
+ 'vk_log.c',
+ 'vk_object.c',
+ 'vk_physical_device.c',
+ 'vk_pipeline_layout.c',
+ 'vk_query_pool.c',
+ 'vk_queue.c',
+ 'vk_render_pass.c',
+ 'vk_sampler.c',
+ 'vk_semaphore.c',
+ 'vk_standard_sample_locations.c',
+ 'vk_sync.c',
+ 'vk_sync_binary.c',
+ 'vk_sync_dummy.c',
+ 'vk_sync_timeline.c',
+ 'vk_synchronization.c',
+ 'vk_video.c',
+ 'vk_ycbcr_conversion.c',
+)
+
+vulkan_lite_runtime_deps = [
+ vulkan_wsi_deps,
+ idep_mesautil,
+ idep_nir_headers,
+ idep_vulkan_util,
+]
+
+if dep_libdrm.found()
+ vulkan_lite_runtime_files += files('vk_drm_syncobj.c')
+ vulkan_lite_runtime_deps += dep_libdrm
+endif
+
+if with_platform_android
+ vulkan_lite_runtime_files += files('vk_android.c')
+ vulkan_lite_runtime_deps += dep_android
+endif
+
+vk_common_entrypoints = custom_target(
+ 'vk_common_entrypoints',
+ input : [vk_entrypoints_gen, vk_api_xml],
+ output : ['vk_common_entrypoints.h', 'vk_common_entrypoints.c'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--proto', '--weak',
+ '--out-h', '@OUTPUT0@', '--out-c', '@OUTPUT1@', '--prefix', 'vk_common',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_entrypoints_gen_depend_files,
+)
+
+vk_cmd_queue = custom_target(
+ 'vk_cmd_queue',
+ input : [vk_cmd_queue_gen, vk_api_xml],
+ output : ['vk_cmd_queue.c', 'vk_cmd_queue.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@',
+ '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_cmd_queue_gen_depend_files,
+)
+
+vk_cmd_enqueue_entrypoints = custom_target(
+ 'vk_cmd_enqueue_entrypoints',
+ input : [vk_entrypoints_gen, vk_api_xml],
+ output : ['vk_cmd_enqueue_entrypoints.h', 'vk_cmd_enqueue_entrypoints.c'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--proto', '--weak',
+ '--out-h', '@OUTPUT0@', '--out-c', '@OUTPUT1@',
+ '--prefix', 'vk_cmd_enqueue', '--prefix', 'vk_cmd_enqueue_unless_primary',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_entrypoints_gen_depend_files,
+)
+
+vk_dispatch_trampolines = custom_target(
+ 'vk_dispatch_trampolines',
+ input : [vk_dispatch_trampolines_gen, vk_api_xml],
+ output : ['vk_dispatch_trampolines.c', 'vk_dispatch_trampolines.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@',
+ '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_dispatch_trampolines_gen_depend_files,
+)
+
+vk_physical_device_features = custom_target(
+ 'vk_physical_device_features',
+ input : [vk_physical_device_features_gen, vk_api_xml],
+ output : ['vk_physical_device_features.c', 'vk_physical_device_features.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@',
+ '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_physical_device_features_gen_depend_files,
+)
+
+vk_physical_device_properties = custom_target(
+ 'vk_physical_device_properties',
+ input : [vk_physical_device_properties_gen, vk_api_xml],
+ output : ['vk_physical_device_properties.c', 'vk_physical_device_properties.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@',
+ '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_physical_device_properties_gen_depend_files,
+)
+
+vk_synchronization_helpers = custom_target(
+ 'vk_synchronization_helpers',
+ input : [vk_synchronization_helpers_gen, vk_api_xml],
+ output : 'vk_synchronization_helpers.c',
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@',
+ '--out-c', '@OUTPUT0@',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_synchronization_helpers_gen_depend_files,
+)
+
+vk_format_info = custom_target(
+ 'vk_format_info',
+ input : ['vk_format_info_gen.py', vk_api_xml],
+ output : ['vk_format_info.c', 'vk_format_info.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@',
+ '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@'
+ ],
+)
+
+vulkan_lite_runtime_files += [
+ vk_cmd_enqueue_entrypoints,
+ vk_cmd_queue,
+ vk_common_entrypoints,
+ vk_dispatch_trampolines,
+ vk_format_info,
+ vk_physical_device_features,
+ vk_physical_device_properties,
+ vk_synchronization_helpers,
+]
+
+# Added as a runtime library dependency to ensure the generated headers are
+# built before sources that include them.
+vulkan_lite_runtime_header_gen_deps = declare_dependency(
+ sources : [
+ vk_cmd_enqueue_entrypoints[0],
+ vk_cmd_queue[1],
+ vk_common_entrypoints[0],
+ vk_dispatch_trampolines[1],
+ vk_format_info[1],
+ vk_physical_device_features[1],
+ vk_physical_device_properties[1],
+ ],
+)
+
+vulkan_lite_runtime_deps += vulkan_lite_runtime_header_gen_deps
+
+libvulkan_lite_runtime = static_library(
+ 'vulkan_lite_runtime',
+ vulkan_lite_runtime_files,
+ include_directories : [inc_include, inc_src],
+ dependencies : vulkan_lite_runtime_deps,
+ c_args : c_msvc_compat_args,
+ gnu_symbol_visibility : 'hidden',
+ build_by_default : false,
+)
+
+libvulkan_lite_instance = static_library(
+ 'vulkan_lite_instance',
+ ['vk_instance.c'],
+ include_directories : [inc_include, inc_src],
+ dependencies : vulkan_lite_runtime_deps,
+ c_args : ['-DVK_LITE_RUNTIME_INSTANCE=1', c_msvc_compat_args],
+ gnu_symbol_visibility : 'hidden',
+ build_by_default : false,
+)
+
+# The sources part ensures that the generated headers used externally are
+# indeed generated before anything that includes them is compiled, as long as
+# at least one of the following is included as a dependency:
+# - idep_vulkan_lite_runtime_headers
+# - idep_vulkan_lite_runtime
+# - idep_vulkan_runtime_headers
+# - idep_vulkan_runtime
+idep_vulkan_lite_runtime_headers = declare_dependency(
+ sources : [
+ vk_cmd_enqueue_entrypoints[0],
+ vk_cmd_queue[1],
+ vk_common_entrypoints[0],
+ vk_physical_device_features[1],
+ vk_physical_device_properties[1],
+ ],
+ include_directories : include_directories('.'),
+)
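+
+# For example (illustrative), a driver static_library() would typically pull
+# one of these in through its dependencies, e.g.
+#   dependencies : [idep_vulkan_runtime, ...]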
+
+# link_whole does not work with the Meson VS backend; this is likely a bug in
+# that backend, as MSVC with ninja works fine. See the discussion here:
+# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10506
+if get_option('backend').startswith('vs')
+ idep_vulkan_lite_runtime = declare_dependency(
+ link_with : [libvulkan_lite_runtime, libvulkan_lite_instance],
+ dependencies : idep_vulkan_lite_runtime_headers
+ )
+else
+ idep_vulkan_lite_runtime = declare_dependency(
+ # Instruct users of this library to link with --whole-archive. Otherwise,
+ # our weak function overloads may not resolve properly.
+ link_whole : [libvulkan_lite_runtime, libvulkan_lite_instance],
+ dependencies : idep_vulkan_lite_runtime_headers
+ )
+endif
+
+vulkan_runtime_files = files(
+ 'vk_meta.c',
+ 'vk_meta_blit_resolve.c',
+ 'vk_meta_clear.c',
+ 'vk_meta_draw_rects.c',
+ 'vk_nir.c',
+ 'vk_nir_convert_ycbcr.c',
+ 'vk_pipeline.c',
+ 'vk_pipeline_cache.c',
+ 'vk_shader.c',
+ 'vk_shader_module.c',
+ 'vk_texcompress_etc2.c',
+)
+
+vulkan_runtime_deps = [
+ vulkan_lite_runtime_deps,
+ idep_nir,
+ idep_vtn,
+]
+
+if prog_glslang.found()
+ vulkan_runtime_files += files('vk_texcompress_astc.c')
+ vulkan_runtime_files += custom_target(
+ 'astc_spv.h',
+ input : astc_decoder_glsl_file,
+ output : 'astc_spv.h',
+ command : [prog_glslang, '-V', '-S', 'comp', '-x', '-o', '@OUTPUT@', '@INPUT@'] + glslang_quiet,
+ )
+endif
+
+libvulkan_runtime = static_library(
+ 'vulkan_runtime',
+ [vulkan_runtime_files],
+ include_directories : [inc_include, inc_src],
+ dependencies : vulkan_runtime_deps,
+ c_args : c_msvc_compat_args,
+ gnu_symbol_visibility : 'hidden',
+ build_by_default : false,
+)
+
+libvulkan_instance = static_library(
+ 'vulkan_instance',
+ ['vk_instance.c'],
+ include_directories : [inc_include, inc_src],
+ dependencies : vulkan_runtime_deps,
+ c_args : ['-DVK_LITE_RUNTIME_INSTANCE=0', c_msvc_compat_args],
+ gnu_symbol_visibility : 'hidden',
+ build_by_default : false,
+)
+
+if get_option('backend').startswith('vs')
+ idep_vulkan_runtime_body = declare_dependency(
+ link_with : [libvulkan_lite_runtime, libvulkan_runtime, libvulkan_instance],
+ )
+else
+ idep_vulkan_runtime_body = declare_dependency(
+ link_whole : [libvulkan_lite_runtime, libvulkan_runtime, libvulkan_instance],
+ )
+endif
+
+idep_vulkan_runtime_headers = idep_vulkan_lite_runtime_headers
+
+idep_vulkan_runtime = declare_dependency(
+ dependencies : [
+ idep_vulkan_runtime_headers,
+ idep_vulkan_runtime_body,
+ ]
+)
diff --git a/src/vulkan/runtime/rmv/vk_rmv_common.c b/src/vulkan/runtime/rmv/vk_rmv_common.c
new file mode 100644
index 00000000000..48873d463c3
--- /dev/null
+++ b/src/vulkan/runtime/rmv/vk_rmv_common.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2022 Friedrich Vock
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_rmv_common.h"
+#include "vk_buffer.h"
+#include "vk_device.h"
+
+void
+vk_memory_trace_init(struct vk_device *device, const struct vk_rmv_device_info *device_info)
+{
+ device->memory_trace_data.device_info = *device_info;
+ device->memory_trace_data.is_enabled = true;
+ util_dynarray_init(&device->memory_trace_data.tokens, NULL);
+ simple_mtx_init(&device->memory_trace_data.token_mtx, mtx_plain);
+
+ device->memory_trace_data.next_resource_id = 1;
+ device->memory_trace_data.handle_table = _mesa_hash_table_u64_create(NULL);
+}
+
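+/* Tears down the tracing state set up by vk_memory_trace_init(); a typical
+ * driver calls this once at device destruction. */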
+void
+vk_memory_trace_finish(struct vk_device *device)
+{
+ if (!device->memory_trace_data.is_enabled)
+ return;
+ util_dynarray_foreach (&device->memory_trace_data.tokens, struct vk_rmv_token, token) {
+ switch (token->type) {
+ case VK_RMV_TOKEN_TYPE_RESOURCE_CREATE: {
+ struct vk_rmv_resource_create_token *create_token = &token->data.resource_create;
+ if (create_token->type == VK_RMV_RESOURCE_TYPE_DESCRIPTOR_POOL) {
+ free(create_token->descriptor_pool.pool_sizes);
+ }
+ break;
+ }
+ case VK_RMV_TOKEN_TYPE_USERDATA:
+ free(token->data.userdata.name);
+ break;
+ default:
+ break;
+ }
+ }
+ util_dynarray_fini(&device->memory_trace_data.tokens);
+ if (_mesa_hash_table_num_entries(device->memory_trace_data.handle_table->table))
+ fprintf(stderr,
+ "mesa: Unfreed resources detected at device destroy, there may be memory leaks!\n");
+ _mesa_hash_table_u64_destroy(device->memory_trace_data.handle_table);
+ device->memory_trace_data.is_enabled = false;
+}
+
+void
+vk_rmv_emit_token(struct vk_memory_trace_data *data, enum vk_rmv_token_type type, void *token_data)
+{
+ struct vk_rmv_token token;
+ token.type = type;
+ token.timestamp = (uint64_t)os_time_get_nano();
+ memcpy(&token.data, token_data, vk_rmv_token_size_from_type(type));
+ util_dynarray_append(&data->tokens, struct vk_rmv_token, token);
+}
+
+uint32_t
+vk_rmv_get_resource_id_locked(struct vk_device *device, uint64_t handle)
+{
+ void *entry = _mesa_hash_table_u64_search(device->memory_trace_data.handle_table, handle);
+ if (!entry) {
+ uint32_t id = device->memory_trace_data.next_resource_id++;
+ _mesa_hash_table_u64_insert(device->memory_trace_data.handle_table, handle,
+ (void *)(uintptr_t)id);
+ return id;
+ }
+ return (uint32_t)(uintptr_t)entry;
+}
+
+void
+vk_rmv_destroy_resource_id_locked(struct vk_device *device, uint64_t handle)
+{
+ _mesa_hash_table_u64_remove(device->memory_trace_data.handle_table, handle);
+}
+
+void
+vk_rmv_log_buffer_create(struct vk_device *device, bool is_internal, VkBuffer _buffer)
+{
+ if (!device->memory_trace_data.is_enabled)
+ return;
+
+ VK_FROM_HANDLE(vk_buffer, buffer, _buffer);
+ simple_mtx_lock(&device->memory_trace_data.token_mtx);
+ struct vk_rmv_resource_create_token token = {0};
+ token.is_driver_internal = is_internal;
+ token.resource_id = vk_rmv_get_resource_id_locked(device, (uint64_t)_buffer);
+ token.type = VK_RMV_RESOURCE_TYPE_BUFFER;
+ token.buffer.create_flags = buffer->create_flags;
+ token.buffer.size = buffer->size;
+ token.buffer.usage_flags = buffer->usage;
+
+ vk_rmv_emit_token(&device->memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);
+ simple_mtx_unlock(&device->memory_trace_data.token_mtx);
+}
+
+void
+vk_rmv_log_cpu_map(struct vk_device *device, uint64_t va, bool is_unmap)
+{
+ if (!device->memory_trace_data.is_enabled)
+ return;
+
+ struct vk_rmv_cpu_map_token map_token;
+ map_token.address = va;
+ map_token.unmapped = is_unmap;
+
+ simple_mtx_lock(&device->memory_trace_data.token_mtx);
+ vk_rmv_emit_token(&device->memory_trace_data, VK_RMV_TOKEN_TYPE_CPU_MAP, &map_token);
+ simple_mtx_unlock(&device->memory_trace_data.token_mtx);
+}
+
+void
+vk_rmv_log_misc_token(struct vk_device *device, enum vk_rmv_misc_event_type type)
+{
+ if (!device->memory_trace_data.is_enabled)
+ return;
+
+ simple_mtx_lock(&device->memory_trace_data.token_mtx);
+ struct vk_rmv_misc_token token;
+ token.type = type;
+ vk_rmv_emit_token(&device->memory_trace_data, VK_RMV_TOKEN_TYPE_MISC, &token);
+ simple_mtx_unlock(&device->memory_trace_data.token_mtx);
+}
diff --git a/src/vulkan/runtime/rmv/vk_rmv_common.h b/src/vulkan/runtime/rmv/vk_rmv_common.h
new file mode 100644
index 00000000000..d4f0fb62f54
--- /dev/null
+++ b/src/vulkan/runtime/rmv/vk_rmv_common.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2022 Friedrich Vock
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_RMV_COMMON_H
+#define VK_RMV_COMMON_H
+
+#include <stdbool.h>
+#include "util/hash_table.h"
+#include "util/simple_mtx.h"
+#include "util/u_debug.h"
+#include "util/u_dynarray.h"
+#include <vulkan/vulkan_core.h>
+#include "vk_rmv_tokens.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_memory_trace_data;
+
+/*
+ * The different memory domains RMV supports.
+ */
+enum vk_rmv_memory_location {
+ /* DEVICE_LOCAL | HOST_VISIBLE */
+ VK_RMV_MEMORY_LOCATION_DEVICE,
+ /* DEVICE_LOCAL */
+ VK_RMV_MEMORY_LOCATION_DEVICE_INVISIBLE,
+ /* HOST_VISIBLE | HOST_COHERENT */
+ VK_RMV_MEMORY_LOCATION_HOST,
+
+ /* add above here */
+ VK_RMV_MEMORY_LOCATION_COUNT
+};
+
+/*
+ * Information about a memory domain.
+ */
+struct vk_rmv_memory_info {
+ uint64_t size;
+ uint64_t physical_base_address;
+};
+
+enum vk_rmv_memory_type {
+ VK_RMV_MEMORY_TYPE_UNKNOWN,
+ VK_RMV_MEMORY_TYPE_DDR2,
+ VK_RMV_MEMORY_TYPE_DDR3,
+ VK_RMV_MEMORY_TYPE_DDR4,
+ VK_RMV_MEMORY_TYPE_GDDR5,
+ VK_RMV_MEMORY_TYPE_GDDR6,
+ VK_RMV_MEMORY_TYPE_HBM,
+ VK_RMV_MEMORY_TYPE_HBM2,
+ VK_RMV_MEMORY_TYPE_HBM3,
+ VK_RMV_MEMORY_TYPE_LPDDR4,
+ VK_RMV_MEMORY_TYPE_LPDDR5,
+ VK_RMV_MEMORY_TYPE_DDR5
+};
+
+/*
+ * Device information for RMV traces.
+ */
+struct vk_rmv_device_info {
+ struct vk_rmv_memory_info memory_infos[VK_RMV_MEMORY_LOCATION_COUNT];
+
+ /* The memory type of dedicated VRAM. */
+ enum vk_rmv_memory_type vram_type;
+
+ char device_name[128];
+
+ uint32_t pcie_family_id;
+ uint32_t pcie_revision_id;
+ uint32_t pcie_device_id;
+ /* The minimum shader clock, in MHz. */
+ uint32_t minimum_shader_clock;
+ /* The maximum shader clock, in MHz. */
+ uint32_t maximum_shader_clock;
+ uint32_t vram_operations_per_clock;
+ uint32_t vram_bus_width;
+ /* The VRAM bandwidth, in GB/s (1 GB/s = 1000 MB/s). */
+ uint32_t vram_bandwidth;
+ /* The minimum memory clock, in MHz. */
+ uint32_t minimum_memory_clock;
+ /* The maximum memory clock, in MHz. */
+ uint32_t maximum_memory_clock;
+};
+
+struct vk_device;
+
+struct vk_memory_trace_data {
+ struct util_dynarray tokens;
+ simple_mtx_t token_mtx;
+
+ bool is_enabled;
+
+ struct vk_rmv_device_info device_info;
+
+ struct hash_table_u64 *handle_table;
+ uint32_t next_resource_id;
+};
+
+void vk_memory_trace_init(struct vk_device *device, const struct vk_rmv_device_info *device_info);
+
+void vk_memory_trace_finish(struct vk_device *device);
+
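+/* Serializes the recorded token stream into an RMV-compatible capture
+ * (see rmv/vk_rmv_exporter.c). */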
+int vk_dump_rmv_capture(struct vk_memory_trace_data *data);
+
+void vk_rmv_emit_token(struct vk_memory_trace_data *data, enum vk_rmv_token_type type,
+ void *token_data);
+void vk_rmv_log_buffer_create(struct vk_device *device, bool is_internal, VkBuffer _buffer);
+void vk_rmv_log_cpu_map(struct vk_device *device, uint64_t va, bool is_unmap);
+void vk_rmv_log_misc_token(struct vk_device *device, enum vk_rmv_misc_event_type type);
+
+/* Retrieves the unique resource id for the resource specified by handle.
+ * Allocates a new id if none exists already.
+ * The memory trace mutex should be locked when entering this function. */
+uint32_t vk_rmv_get_resource_id_locked(struct vk_device *device, uint64_t handle);
+/* Destroys a resource id. If the same handle is allocated again, a new resource
+ * id is given to it.
+ * The memory trace mutex should be locked when entering this function. */
+void vk_rmv_destroy_resource_id_locked(struct vk_device *device, uint64_t handle);
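+
+/* Illustrative usage, mirroring vk_rmv_log_buffer_create(): take token_mtx,
+ * translate the object handle to a resource id, emit the token, then release
+ * the mutex:
+ *
+ *    simple_mtx_lock(&device->memory_trace_data.token_mtx);
+ *    struct vk_rmv_resource_create_token token = {0};
+ *    token.resource_id = vk_rmv_get_resource_id_locked(device, handle);
+ *    vk_rmv_emit_token(&device->memory_trace_data,
+ *                      VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);
+ *    simple_mtx_unlock(&device->memory_trace_data.token_mtx);
+ */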
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/vulkan/runtime/rmv/vk_rmv_exporter.c b/src/vulkan/runtime/rmv/vk_rmv_exporter.c
new file mode 100644
index 00000000000..ebf13941011
--- /dev/null
+++ b/src/vulkan/runtime/rmv/vk_rmv_exporter.c
@@ -0,0 +1,1727 @@
+/*
+ * Copyright © 2022 Friedrich Vock
+ *
+ * Exporter based on Radeon Memory Visualizer code which is
+ *
+ * Copyright (c) 2017-2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_rmv_common.h"
+#include "vk_rmv_tokens.h"
+
+#include "util/format/u_format.h"
+#include "util/u_process.h"
+#include "vk_format.h"
+
+static int
+vk_rmv_token_compare(const void *first, const void *second)
+{
+ const struct vk_rmv_token *first_token = (struct vk_rmv_token *)first;
+ const struct vk_rmv_token *second_token = (struct vk_rmv_token *)second;
+ if (first_token->timestamp < second_token->timestamp)
+ return -1;
+ else if (first_token->timestamp > second_token->timestamp)
+ return 1;
+ return 0;
+}
+
+enum rmt_format {
+ RMT_FORMAT_UNDEFINED,
+ RMT_FORMAT_R1_UNORM,
+ RMT_FORMAT_R1_USCALED,
+ RMT_FORMAT_R4G4_UNORM,
+ RMT_FORMAT_R4G4_USCALED,
+ RMT_FORMAT_L4A4_UNORM,
+ RMT_FORMAT_R4G4B4A4_UNORM,
+ RMT_FORMAT_R4G4B4A4_USCALED,
+ RMT_FORMAT_R5G6B5_UNORM,
+ RMT_FORMAT_R5G6B5_USCALED,
+ RMT_FORMAT_R5G5B5A1_UNORM,
+ RMT_FORMAT_R5G5B5A1_USCALED,
+ RMT_FORMAT_R1G5B5A5_UNORM,
+ RMT_FORMAT_R1G5B5A5_USCALED,
+ RMT_FORMAT_R8_XNORM,
+ RMT_FORMAT_R8_SNORM,
+ RMT_FORMAT_R8_USCALED,
+ RMT_FORMAT_R8_SSCALED,
+ RMT_FORMAT_R8_UINT,
+ RMT_FORMAT_R8_SINT,
+ RMT_FORMAT_R8_SRGB,
+ RMT_FORMAT_A8_UNORM,
+ RMT_FORMAT_L8_UNORM,
+ RMT_FORMAT_P8_UINT,
+ RMT_FORMAT_R8G8_UNORM,
+ RMT_FORMAT_R8G8_SNORM,
+ RMT_FORMAT_R8G8_USCALED,
+ RMT_FORMAT_R8G8_SSCALED,
+ RMT_FORMAT_R8G8_UINT,
+ RMT_FORMAT_R8G8_SINT,
+ RMT_FORMAT_R8G8_SRGB,
+ RMT_FORMAT_L8A8_UNORM,
+ RMT_FORMAT_R8G8B8A8_UNORM,
+ RMT_FORMAT_R8G8B8A8_SNORM,
+ RMT_FORMAT_R8G8B8A8_USCALED,
+ RMT_FORMAT_R8G8B8A8_SSCALED,
+ RMT_FORMAT_R8G8B8A8_UINT,
+ RMT_FORMAT_R8G8B8A8_SINT,
+ RMT_FORMAT_R8G8B8A8_SRGB,
+ RMT_FORMAT_U8V8_SNORM_L8W8_UNORM,
+ RMT_FORMAT_R10G11B11_FLOAT,
+ RMT_FORMAT_R11G11B10_FLOAT,
+ RMT_FORMAT_R10G10B10A2_UNORM,
+ RMT_FORMAT_R10G10B10A2_SNORM,
+ RMT_FORMAT_R10G10B10A2_USCALED,
+ RMT_FORMAT_R10G10B10A2_SSCALED,
+ RMT_FORMAT_R10G10B10A2_UINT,
+ RMT_FORMAT_R10G10B10A2_SINT,
+ RMT_FORMAT_R10G10B10A2_BIAS_UNORM,
+ RMT_FORMAT_U10V10W10_SNORMA2_UNORM,
+ RMT_FORMAT_R16_UNORM,
+ RMT_FORMAT_R16_SNORM,
+ RMT_FORMAT_R16_USCALED,
+ RMT_FORMAT_R16_SSCALED,
+ RMT_FORMAT_R16_UINT,
+ RMT_FORMAT_R16_SINT,
+ RMT_FORMAT_R16_FLOAT,
+ RMT_FORMAT_L16_UNORM,
+ RMT_FORMAT_R16G16_UNORM,
+ RMT_FORMAT_R16G16_SNORM,
+ RMT_FORMAT_R16G16_USCALED,
+ RMT_FORMAT_R16G16_SSCALED,
+ RMT_FORMAT_R16G16_UINT,
+ RMT_FORMAT_R16G16_SINT,
+ RMT_FORMAT_R16G16_FLOAT,
+ RMT_FORMAT_R16G16B16A16_UNORM,
+ RMT_FORMAT_R16G16B16A16_SNORM,
+ RMT_FORMAT_R16G16B16A16_USCALED,
+ RMT_FORMAT_R16G16B16A16_SSCALED,
+ RMT_FORMAT_R16G16B16A16_UINT,
+ RMT_FORMAT_R16G16B16A16_SINT,
+ RMT_FORMAT_R16G16B16A16_FLOAT,
+ RMT_FORMAT_R32_UINT,
+ RMT_FORMAT_R32_SINT,
+ RMT_FORMAT_R32_FLOAT,
+ RMT_FORMAT_R32G32_UINT,
+ RMT_FORMAT_R32G32_SINT,
+ RMT_FORMAT_R32G32_FLOAT,
+ RMT_FORMAT_R32G32B32_UINT,
+ RMT_FORMAT_R32G32B32_SINT,
+ RMT_FORMAT_R32G32B32_FLOAT,
+ RMT_FORMAT_R32G32B32A32_UINT,
+ RMT_FORMAT_R32G32B32A32_SINT,
+ RMT_FORMAT_R32G32B32A32_FLOAT,
+ RMT_FORMAT_D16_UNORM_S8_UINT,
+ RMT_FORMAT_D32_UNORM_S8_UINT,
+ RMT_FORMAT_R9G9B9E5_FLOAT,
+ RMT_FORMAT_BC1_UNORM,
+ RMT_FORMAT_BC1_SRGB,
+ RMT_FORMAT_BC2_UNORM,
+ RMT_FORMAT_BC2_SRGB,
+ RMT_FORMAT_BC3_UNORM,
+ RMT_FORMAT_BC3_SRGB,
+ RMT_FORMAT_BC4_UNORM,
+ RMT_FORMAT_BC4_SRGB,
+ RMT_FORMAT_BC5_UNORM,
+ RMT_FORMAT_BC5_SRGB,
+ RMT_FORMAT_BC6_UNORM,
+ RMT_FORMAT_BC6_SRGB,
+ RMT_FORMAT_BC7_UNORM,
+ RMT_FORMAT_BC7_SRGB,
+ RMT_FORMAT_ETC2_R8G8B8_UNORM,
+ RMT_FORMAT_ETC2_R8G8B8_SRGB,
+ RMT_FORMAT_ETC2_R8G8B8A1_UNORM,
+ RMT_FORMAT_ETC2_R8G8B8A1_SRGB,
+ RMT_FORMAT_ETC2_R8G8B8A8_UNORM,
+ RMT_FORMAT_ETC2_R8G8B8A8_SRGB,
+ RMT_FORMAT_ETC2_R11_UNORM,
+ RMT_FORMAT_ETC2_R11_SNORM,
+ RMT_FORMAT_ETC2_R11G11_UNORM,
+ RMT_FORMAT_ETC2_R11G11_SNORM,
+ RMT_FORMAT_ASTCLD_R4X4_UNORM,
+ RMT_FORMAT_ASTCLD_R4X4_SRGB,
+ RMT_FORMAT_ASTCLD_R5X4_UNORM,
+ RMT_FORMAT_ASTCLD_R5X4_SRGB,
+ RMT_FORMAT_ASTCLD_R5X5_UNORM,
+ RMT_FORMAT_ASTCLD_R5X5_SRGB,
+ RMT_FORMAT_ASTCLD_R6X5_UNORM,
+ RMT_FORMAT_ASTCLD_R6X5_SRGB,
+ RMT_FORMAT_ASTCLD_R6X6_UNORM,
+ RMT_FORMAT_ASTCLD_R6X6_SRGB,
+ RMT_FORMAT_ASTCLD_R8X5_UNORM,
+ RMT_FORMAT_ASTCLD_R8X5_SRGB,
+ RMT_FORMAT_ASTCLD_R8X6_UNORM,
+ RMT_FORMAT_ASTCLD_R8X6_SRGB,
+ RMT_FORMAT_ASTCLD_R8X8_UNORM,
+ RMT_FORMAT_ASTCLD_R8X8_SRGB,
+ RMT_FORMAT_ASTCLD_R10X5_UNORM,
+ RMT_FORMAT_ASTCLD_R10X5_SRGB,
+ RMT_FORMAT_ASTCLD_R10X6_UNORM,
+ RMT_FORMAT_ASTCLD_R10X6_SRGB,
+ RMT_FORMAT_ASTCLD_R10X8_UNORM,
+ RMT_FORMAT_ASTCLD_R10X10_UNORM,
+ RMT_FORMAT_ASTCLD_R12X10_UNORM,
+ RMT_FORMAT_ASTCLD_R12X10_SRGB,
+ RMT_FORMAT_ASTCLD_R12X12_UNORM,
+ RMT_FORMAT_ASTCLD_R12X12_SRGB,
+ RMT_FORMAT_ASTCHD_R4x4_FLOAT,
+ RMT_FORMAT_ASTCHD_R5x4_FLOAT,
+ RMT_FORMAT_ASTCHD_R5x5_FLOAT,
+ RMT_FORMAT_ASTCHD_R6x5_FLOAT,
+ RMT_FORMAT_ASTCHD_R6x6_FLOAT,
+ RMT_FORMAT_ASTCHD_R8x5_FLOAT,
+ RMT_FORMAT_ASTCHD_R8x6_FLOAT,
+ RMT_FORMAT_ASTCHD_R8x8_FLOAT,
+ RMT_FORMAT_ASTCHD_R10x5_FLOAT,
+ RMT_FORMAT_ASTCHD_R10x6_FLOAT,
+ RMT_FORMAT_ASTCHD_R10x8_FLOAT,
+ RMT_FORMAT_ASTCHD_R10x10_FLOAT,
+ RMT_FORMAT_ASTCHD_R12x10_FLOAT,
+ RMT_FORMAT_ASTCHD_R12x12_FLOAT,
+ RMT_FORMAT_R8G8B8G8_UNORM,
+ RMT_FORMAT_R8G8B8G8_USCALED,
+ RMT_FORMAT_G8R8G8B8_UNORM,
+ RMT_FORMAT_G8R8G8B8_USCALED,
+ RMT_FORMAT_AYUV,
+ RMT_FORMAT_UYVY,
+ RMT_FORMAT_VYUY,
+ RMT_FORMAT_YUY2,
+ RMT_FORMAT_YVY2,
+ RMT_FORMAT_YV12,
+ RMT_FORMAT_NV11,
+ RMT_FORMAT_NV12,
+ RMT_FORMAT_NV21,
+ RMT_FORMAT_P016,
+ RMT_FORMAT_P010,
+};
+
+enum rmt_swizzle {
+ RMT_SWIZZLE_ZERO,
+ RMT_SWIZZLE_ONE,
+ RMT_SWIZZLE_R,
+ RMT_SWIZZLE_G,
+ RMT_SWIZZLE_B,
+ RMT_SWIZZLE_A,
+};
+
+static inline enum rmt_format
+vk_to_rmt_format(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_R8_UNORM:
+ return RMT_FORMAT_A8_UNORM;
+ case VK_FORMAT_R8_SNORM:
+ return RMT_FORMAT_R8_SNORM;
+ case VK_FORMAT_R8_USCALED:
+ return RMT_FORMAT_R8_USCALED;
+ case VK_FORMAT_R8_SSCALED:
+ return RMT_FORMAT_R8_SSCALED;
+ case VK_FORMAT_R8_UINT:
+ return RMT_FORMAT_R8_UINT;
+ case VK_FORMAT_R8_SINT:
+ return RMT_FORMAT_R8_SINT;
+ case VK_FORMAT_R8_SRGB:
+ return RMT_FORMAT_R8_SRGB;
+ case VK_FORMAT_R8G8_UNORM:
+ return RMT_FORMAT_R8G8_UNORM;
+ case VK_FORMAT_R8G8_SNORM:
+ return RMT_FORMAT_R8G8_SNORM;
+ case VK_FORMAT_R8G8_USCALED:
+ return RMT_FORMAT_R8G8_USCALED;
+ case VK_FORMAT_R8G8_SSCALED:
+ return RMT_FORMAT_R8G8_SSCALED;
+ case VK_FORMAT_R8G8_UINT:
+ return RMT_FORMAT_R8G8_UINT;
+ case VK_FORMAT_R8G8_SINT:
+ return RMT_FORMAT_R8G8_SINT;
+ case VK_FORMAT_R8G8_SRGB:
+ return RMT_FORMAT_R8G8_SRGB;
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+ return RMT_FORMAT_R8G8B8A8_UNORM;
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ case VK_FORMAT_B8G8R8A8_SNORM:
+ case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+ return RMT_FORMAT_R8G8B8A8_SNORM;
+ case VK_FORMAT_R8G8B8A8_USCALED:
+ case VK_FORMAT_B8G8R8A8_USCALED:
+ case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+ return RMT_FORMAT_R8G8B8A8_USCALED;
+ case VK_FORMAT_R8G8B8A8_SSCALED:
+ case VK_FORMAT_B8G8R8A8_SSCALED:
+ case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+ return RMT_FORMAT_R8G8B8A8_SSCALED;
+ case VK_FORMAT_R8G8B8A8_UINT:
+ case VK_FORMAT_B8G8R8A8_UINT:
+ case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+ return RMT_FORMAT_R8G8B8A8_UINT;
+ case VK_FORMAT_R8G8B8A8_SINT:
+ case VK_FORMAT_B8G8R8A8_SINT:
+ case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+ return RMT_FORMAT_R8G8B8A8_SINT;
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+ return RMT_FORMAT_R8G8B8A8_SRGB;
+ case VK_FORMAT_R16_UNORM:
+ return RMT_FORMAT_R16_UNORM;
+ case VK_FORMAT_R16_SNORM:
+ return RMT_FORMAT_R16_SNORM;
+ case VK_FORMAT_R16_USCALED:
+ return RMT_FORMAT_R16_USCALED;
+ case VK_FORMAT_R16_SSCALED:
+ return RMT_FORMAT_R16_SSCALED;
+ case VK_FORMAT_R16_UINT:
+ return RMT_FORMAT_R16_UINT;
+ case VK_FORMAT_R16_SINT:
+ return RMT_FORMAT_R16_SINT;
+ case VK_FORMAT_R16G16_UNORM:
+ return RMT_FORMAT_R16G16_UNORM;
+ case VK_FORMAT_R16G16_SNORM:
+ return RMT_FORMAT_R16G16_SNORM;
+ case VK_FORMAT_R16G16_USCALED:
+ return RMT_FORMAT_R16G16_USCALED;
+ case VK_FORMAT_R16G16_SSCALED:
+ return RMT_FORMAT_R16G16_SSCALED;
+ case VK_FORMAT_R16G16_UINT:
+ return RMT_FORMAT_R16G16_UINT;
+ case VK_FORMAT_R16G16_SINT:
+ return RMT_FORMAT_R16G16_SINT;
+ case VK_FORMAT_R16G16_SFLOAT:
+ return RMT_FORMAT_R16G16_FLOAT;
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ return RMT_FORMAT_R16G16B16A16_UNORM;
+ case VK_FORMAT_R16G16B16A16_SNORM:
+ return RMT_FORMAT_R16G16B16A16_SNORM;
+ case VK_FORMAT_R16G16B16A16_USCALED:
+ return RMT_FORMAT_R16G16B16A16_USCALED;
+ case VK_FORMAT_R16G16B16A16_SSCALED:
+ return RMT_FORMAT_R16G16B16A16_SSCALED;
+ case VK_FORMAT_R16G16B16A16_UINT:
+ return RMT_FORMAT_R16G16B16A16_UINT;
+ case VK_FORMAT_R16G16B16A16_SINT:
+ return RMT_FORMAT_R16G16B16A16_SINT;
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ return RMT_FORMAT_R16G16B16A16_FLOAT;
+ case VK_FORMAT_R32_UINT:
+ return RMT_FORMAT_R32_UINT;
+ case VK_FORMAT_R32_SINT:
+ return RMT_FORMAT_R32_SINT;
+ case VK_FORMAT_R32_SFLOAT:
+ return RMT_FORMAT_R32_FLOAT;
+ case VK_FORMAT_R32G32_UINT:
+ return RMT_FORMAT_R32G32_UINT;
+ case VK_FORMAT_R32G32_SINT:
+ return RMT_FORMAT_R32G32_SINT;
+ case VK_FORMAT_R32G32_SFLOAT:
+ return RMT_FORMAT_R32G32_FLOAT;
+ case VK_FORMAT_R32G32B32_UINT:
+ return RMT_FORMAT_R32G32B32_UINT;
+ case VK_FORMAT_R32G32B32_SINT:
+ return RMT_FORMAT_R32G32B32_SINT;
+ case VK_FORMAT_R32G32B32_SFLOAT:
+ return RMT_FORMAT_R32G32B32_FLOAT;
+ case VK_FORMAT_R32G32B32A32_UINT:
+ return RMT_FORMAT_R32G32B32A32_UINT;
+ case VK_FORMAT_R32G32B32A32_SINT:
+ return RMT_FORMAT_R32G32B32A32_SINT;
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
+ return RMT_FORMAT_R32G32B32A32_FLOAT;
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ return RMT_FORMAT_D16_UNORM_S8_UINT;
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return RMT_FORMAT_D32_UNORM_S8_UINT;
+ case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ return RMT_FORMAT_BC1_UNORM;
+ case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ return RMT_FORMAT_BC1_SRGB;
+ case VK_FORMAT_BC2_UNORM_BLOCK:
+ return RMT_FORMAT_BC2_UNORM;
+ case VK_FORMAT_BC2_SRGB_BLOCK:
+ return RMT_FORMAT_BC2_SRGB;
+ case VK_FORMAT_BC3_UNORM_BLOCK:
+ return RMT_FORMAT_BC3_UNORM;
+ case VK_FORMAT_BC3_SRGB_BLOCK:
+ return RMT_FORMAT_BC3_SRGB;
+ case VK_FORMAT_BC4_UNORM_BLOCK:
+ return RMT_FORMAT_BC4_UNORM;
+ case VK_FORMAT_BC5_UNORM_BLOCK:
+ return RMT_FORMAT_BC5_UNORM;
+ case VK_FORMAT_BC7_UNORM_BLOCK:
+ return RMT_FORMAT_BC7_UNORM;
+ case VK_FORMAT_BC7_SRGB_BLOCK:
+ return RMT_FORMAT_BC7_SRGB;
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ return RMT_FORMAT_ETC2_R8G8B8_UNORM;
+ case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ return RMT_FORMAT_ETC2_R8G8B8_SRGB;
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ return RMT_FORMAT_ETC2_R8G8B8A1_UNORM;
+ case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ return RMT_FORMAT_ETC2_R8G8B8A1_SRGB;
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ return RMT_FORMAT_ETC2_R8G8B8A8_UNORM;
+ case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ return RMT_FORMAT_ETC2_R8G8B8A8_SRGB;
+ case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R4X4_UNORM;
+ case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R4X4_SRGB;
+ case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R5X4_UNORM;
+ case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R5X4_SRGB;
+ case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R5X5_UNORM;
+ case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R5X5_SRGB;
+ case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R6X5_UNORM;
+ case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R6X5_SRGB;
+ case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R6X6_UNORM;
+ case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R6X6_SRGB;
+ case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R8X5_UNORM;
+ case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R8X5_SRGB;
+ case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R8X6_UNORM;
+ case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R8X6_SRGB;
+ case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R8X8_UNORM;
+ case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R8X8_SRGB;
+ case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R10X5_UNORM;
+ case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R10X5_SRGB;
+ case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R10X6_UNORM;
+ case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R10X6_SRGB;
+ case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R10X8_UNORM;
+ case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R10X10_UNORM;
+ case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R12X10_UNORM;
+ case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R12X10_SRGB;
+ case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ return RMT_FORMAT_ASTCLD_R12X12_UNORM;
+ case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ return RMT_FORMAT_ASTCLD_R12X12_SRGB;
+ case VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R4x4_FLOAT;
+ case VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R5x4_FLOAT;
+ case VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R5x5_FLOAT;
+ case VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R6x5_FLOAT;
+ case VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R6x6_FLOAT;
+ case VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R8x5_FLOAT;
+ case VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R8x6_FLOAT;
+ case VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R8x8_FLOAT;
+ case VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R10x5_FLOAT;
+ case VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R10x6_FLOAT;
+ case VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R10x8_FLOAT;
+ case VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R10x10_FLOAT;
+ case VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R12x10_FLOAT;
+ case VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK:
+ return RMT_FORMAT_ASTCHD_R12x12_FLOAT;
+ default:
+ return RMT_FORMAT_UNDEFINED;
+ }
+}
+
+static void
+rmt_format_to_swizzle(VkFormat format, enum rmt_swizzle *swizzles)
+{
+ const struct util_format_description *description =
+ util_format_description(vk_format_to_pipe_format(format));
+ for (unsigned i = 0; i < 4; ++i) {
+ switch (description->swizzle[i]) {
+ case PIPE_SWIZZLE_X:
+ swizzles[i] = RMT_SWIZZLE_R;
+ break;
+ case PIPE_SWIZZLE_Y:
+ swizzles[i] = RMT_SWIZZLE_G;
+ break;
+ case PIPE_SWIZZLE_Z:
+ swizzles[i] = RMT_SWIZZLE_B;
+ break;
+ case PIPE_SWIZZLE_W:
+ swizzles[i] = RMT_SWIZZLE_A;
+ break;
+ case PIPE_SWIZZLE_0:
+ case PIPE_SWIZZLE_NONE:
+ swizzles[i] = RMT_SWIZZLE_ZERO;
+ break;
+ case PIPE_SWIZZLE_1:
+ swizzles[i] = RMT_SWIZZLE_ONE;
+ break;
+ }
+ }
+}
+
+#define RMT_FILE_MAGIC_NUMBER 0x494e494d
+#define RMT_FILE_VERSION_MAJOR 1
+#define RMT_FILE_VERSION_MINOR 0
+#define RMT_FILE_ADAPTER_NAME_MAX_SIZE 128
+
+enum rmt_heap_type {
+ RMT_HEAP_TYPE_LOCAL, /* DEVICE_LOCAL | HOST_VISIBLE */
+ RMT_HEAP_TYPE_INVISIBLE, /* DEVICE_LOCAL */
+ RMT_HEAP_TYPE_SYSTEM, /* HOST_VISIBLE | HOST_COHERENT */
+ RMT_HEAP_TYPE_NONE,
+ RMT_HEAP_TYPE_UNKNOWN = -1,
+};
+
+enum rmt_file_chunk_type {
+ RMT_FILE_CHUNK_TYPE_ASIC_INFO, /* Seems to be unused in RMV */
+ RMT_FILE_CHUNK_TYPE_API_INFO,
+ RMT_FILE_CHUNK_TYPE_SYSTEM_INFO,
+ RMT_FILE_CHUNK_TYPE_RMT_DATA,
+ RMT_FILE_CHUNK_TYPE_SEGMENT_INFO,
+ RMT_FILE_CHUNK_TYPE_PROCESS_START,
+ RMT_FILE_CHUNK_TYPE_SNAPSHOT_INFO,
+ RMT_FILE_CHUNK_TYPE_ADAPTER_INFO,
+};
+
+/**
+ * RMT API info.
+ */
+enum rmt_api_type {
+ RMT_API_TYPE_DIRECTX_12,
+ RMT_API_TYPE_VULKAN,
+ RMT_API_TYPE_GENERIC,
+ RMT_API_TYPE_OPENCL,
+};
+
+struct rmt_file_chunk_id {
+ enum rmt_file_chunk_type type : 8;
+ int32_t index : 8;
+ int32_t reserved : 16;
+};
+
+struct rmt_file_chunk_header {
+ struct rmt_file_chunk_id chunk_id;
+ uint16_t minor_version;
+ uint16_t major_version;
+ int32_t size_in_bytes;
+ int32_t padding;
+};
+
+struct rmt_file_header_flags {
+ union {
+ struct {
+ int32_t reserved : 32;
+ };
+
+ uint32_t value;
+ };
+};
+
+struct rmt_file_header {
+ uint32_t magic_number;
+ uint32_t version_major;
+ uint32_t version_minor;
+ struct rmt_file_header_flags flags;
+ int32_t chunk_offset;
+ int32_t second;
+ int32_t minute;
+ int32_t hour;
+ int32_t day_in_month;
+ int32_t month;
+ int32_t year;
+ int32_t day_in_week;
+ int32_t day_in_year;
+ int32_t is_daylight_savings;
+};
+
+static_assert(sizeof(struct rmt_file_header) == 56, "rmt_file_header doesn't match RMV spec");
+
+static void
+rmt_fill_header(struct rmt_file_header *header)
+{
+ struct tm *timep, result;
+ time_t raw_time;
+
+ header->magic_number = RMT_FILE_MAGIC_NUMBER;
+ header->version_major = RMT_FILE_VERSION_MAJOR;
+ header->version_minor = RMT_FILE_VERSION_MINOR;
+ header->flags.value = 0;
+ header->chunk_offset = sizeof(*header);
+
+ time(&raw_time);
+ timep = os_localtime(&raw_time, &result);
+
+ header->second = timep->tm_sec;
+ header->minute = timep->tm_min;
+ header->hour = timep->tm_hour;
+ header->day_in_month = timep->tm_mday;
+ header->month = timep->tm_mon;
+ header->year = timep->tm_year;
+ header->day_in_week = timep->tm_wday;
+ header->day_in_year = timep->tm_yday;
+ header->is_daylight_savings = timep->tm_isdst;
+}
+
+/*
+ * RMT data.
+ */
+struct rmt_file_chunk_rmt_data {
+ struct rmt_file_chunk_header header;
+ uint64_t process_id;
+ uint64_t thread_id;
+};
+
+static_assert(sizeof(struct rmt_file_chunk_rmt_data) == 32,
+ "rmt_file_chunk_rmt_data doesn't match RMV spec");
+
+static void
+rmt_fill_chunk_rmt_data(size_t token_stream_size, struct rmt_file_chunk_rmt_data *chunk)
+{
+ chunk->header.chunk_id.type = RMT_FILE_CHUNK_TYPE_RMT_DATA;
+ chunk->header.chunk_id.index = 0;
+ chunk->header.major_version = 1;
+ chunk->header.minor_version = 6;
+ chunk->header.size_in_bytes = sizeof(*chunk) + token_stream_size;
+
+ chunk->process_id = (uint64_t)getpid();
+}
+
+/*
+ * RMT System info. Equivalent to SQTT CPU info.
+ */
+struct rmt_file_chunk_system_info {
+ struct rmt_file_chunk_header header;
+ uint32_t vendor_id[4];
+ uint32_t processor_brand[12];
+ uint32_t reserved[2];
+ uint64_t cpu_timestamp_freq;
+ uint32_t clock_speed;
+ uint32_t num_logical_cores;
+ uint32_t num_physical_cores;
+ uint32_t system_ram_size;
+};
+
+static_assert(sizeof(struct rmt_file_chunk_system_info) == 112,
+ "rmt_file_chunk_system_info doesn't match RMV spec");
+
+/* same as vk_sqtt_fill_cpu_info. TODO: Share with ac_rgp.c */
+static void
+rmt_fill_chunk_system_info(struct rmt_file_chunk_system_info *chunk)
+{
+ uint32_t cpu_clock_speed_total = 0;
+ uint64_t system_ram_size = 0;
+ char line[1024];
+ FILE *f;
+
+ chunk->header.chunk_id.type = RMT_FILE_CHUNK_TYPE_SYSTEM_INFO;
+ chunk->header.chunk_id.index = 0;
+ chunk->header.major_version = 0;
+ chunk->header.minor_version = 0;
+ chunk->header.size_in_bytes = sizeof(*chunk);
+
+ /* For some reason, RMV allocates scratch data based on the
+ * maximum timestamp in clock ticks. A tick of 1ns produces extremely
+ * large timestamps, which causes RMV to run out of memory. Therefore,
+ * all timestamps are translated as if the clock ran at 1 MHz. */
+ chunk->cpu_timestamp_freq = 1 * 1000000;
+
+ strncpy((char *)chunk->vendor_id, "Unknown", sizeof(chunk->vendor_id));
+ strncpy((char *)chunk->processor_brand, "Unknown", sizeof(chunk->processor_brand));
+ chunk->clock_speed = 0;
+ chunk->num_logical_cores = 0;
+ chunk->num_physical_cores = 0;
+ chunk->system_ram_size = 0;
+ if (os_get_total_physical_memory(&system_ram_size))
+ chunk->system_ram_size = system_ram_size / (1024 * 1024);
+
+ /* Parse cpuinfo to get more detailed information. */
+ f = fopen("/proc/cpuinfo", "r");
+ if (!f)
+ return;
+
+ while (fgets(line, sizeof(line), f)) {
+ char *str;
+
+ /* Parse vendor name. */
+ str = strstr(line, "vendor_id");
+ if (str) {
+ char *ptr = (char *)chunk->vendor_id;
+ char *v = strtok(str, ":");
+ v = strtok(NULL, ":");
+ strncpy(ptr, v + 1, sizeof(chunk->vendor_id) - 1);
+ ptr[sizeof(chunk->vendor_id) - 1] = '\0';
+ }
+
+ /* Parse processor name. */
+ str = strstr(line, "model name");
+ if (str) {
+ char *ptr = (char *)chunk->processor_brand;
+ char *v = strtok(str, ":");
+ v = strtok(NULL, ":");
+ strncpy(ptr, v + 1, sizeof(chunk->processor_brand) - 1);
+ ptr[sizeof(chunk->processor_brand) - 1] = '\0';
+ }
+
+ /* Parse the current CPU clock speed for each core. */
+ str = strstr(line, "cpu MHz");
+ if (str) {
+ uint32_t v = 0;
+ if (sscanf(str, "cpu MHz : %d", &v) == 1)
+ cpu_clock_speed_total += v;
+ }
+
+ /* Parse the number of logical cores. */
+ str = strstr(line, "siblings");
+ if (str) {
+ uint32_t v = 0;
+ if (sscanf(str, "siblings : %d", &v) == 1)
+ chunk->num_logical_cores = v;
+ }
+
+ /* Parse the number of physical cores. */
+ str = strstr(line, "cpu cores");
+ if (str) {
+ uint32_t v = 0;
+ if (sscanf(str, "cpu cores : %d", &v) == 1)
+ chunk->num_physical_cores = v;
+ }
+ }
+
+ if (chunk->num_logical_cores)
+ chunk->clock_speed = cpu_clock_speed_total / chunk->num_logical_cores;
+
+ fclose(f);
+}
+
+/*
+ * RMT Segment info.
+ */
+struct rmt_file_chunk_segment_info {
+ struct rmt_file_chunk_header header;
+ uint64_t base_address;
+ uint64_t size;
+ enum rmt_heap_type heap_type;
+ int32_t memory_index;
+};
+
+static_assert(sizeof(struct rmt_file_chunk_segment_info) == 40,
+ "rmt_file_chunk_segment_info doesn't match RMV spec");
+
+static void
+rmt_fill_chunk_segment_info(struct vk_memory_trace_data *data, struct vk_rmv_device_info *info,
+ struct rmt_file_chunk_segment_info *chunk, int32_t index)
+{
+ chunk->header.chunk_id.type = RMT_FILE_CHUNK_TYPE_SEGMENT_INFO;
+ chunk->header.chunk_id.index = index;
+ chunk->header.major_version = 0;
+ chunk->header.minor_version = 0;
+ chunk->header.size_in_bytes = sizeof(*chunk);
+
+ chunk->memory_index = index;
+ chunk->heap_type = (enum rmt_heap_type)index;
+ chunk->base_address = info->memory_infos[index].physical_base_address;
+ chunk->size = info->memory_infos[index].size;
+}
+
+/*
+ * RMT PCIe adapter info
+ */
+struct rmt_file_chunk_adapter_info {
+ struct rmt_file_chunk_header header;
+ char name[RMT_FILE_ADAPTER_NAME_MAX_SIZE];
+ uint32_t pcie_family_id;
+ uint32_t pcie_revision_id;
+ uint32_t device_id;
+ uint32_t minimum_engine_clock;
+ uint32_t maximum_engine_clock;
+ uint32_t memory_type;
+ uint32_t memory_operations_per_clock;
+ uint32_t memory_bus_width;
+ uint32_t memory_bandwidth;
+ uint32_t minimum_memory_clock;
+ uint32_t maximum_memory_clock;
+};
+
+static_assert(sizeof(struct rmt_file_chunk_adapter_info) == 188,
+ "rmt_file_chunk_adapter_info doesn't match RMV spec");
+
+static void
+rmt_fill_chunk_adapter_info(struct vk_rmv_device_info *info,
+ struct rmt_file_chunk_adapter_info *chunk)
+{
+ chunk->header.chunk_id.type = RMT_FILE_CHUNK_TYPE_ADAPTER_INFO;
+ chunk->header.chunk_id.index = 0;
+ chunk->header.major_version = 0;
+ chunk->header.minor_version = 0;
+ chunk->header.size_in_bytes = sizeof(*chunk);
+
+ memcpy(chunk->name, info->device_name, RMT_FILE_ADAPTER_NAME_MAX_SIZE);
+ chunk->pcie_family_id = info->pcie_family_id;
+ chunk->pcie_revision_id = info->pcie_revision_id;
+ chunk->device_id = info->pcie_device_id;
+ chunk->minimum_engine_clock = info->minimum_shader_clock;
+ chunk->maximum_engine_clock = info->maximum_shader_clock;
+ chunk->memory_type = info->vram_type;
+ chunk->memory_operations_per_clock = info->vram_operations_per_clock;
+
+ chunk->memory_bus_width = info->vram_bus_width;
+ chunk->minimum_memory_clock = info->minimum_memory_clock;
+ chunk->maximum_memory_clock = info->maximum_memory_clock;
+ /* Convert bandwidth from GB/s to MiB/s */
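+ /* (e.g. 256 GB/s -> (256 * 1000^3) / 1024^2 = 244140 MiB/s) */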
+ chunk->memory_bandwidth =
+ ((uint64_t)info->vram_bandwidth * 1000ULL * 1000ULL * 1000ULL) / (1024ULL * 1024ULL);
+}
+
+/*
+ * RMT snapshot info
+ */
+struct rmt_file_chunk_snapshot_info {
+ struct rmt_file_chunk_header header;
+ uint64_t snapshot_time;
+ int32_t name_length;
+ int32_t padding;
+ /* The name follows this struct. */
+ /* After the name, a stream of tokens is written. */
+};
+
+static_assert(sizeof(struct rmt_file_chunk_snapshot_info) == 32,
+ "rmt_file_chunk_snapshot_info doesn't match RMV spec");
+
+static void
+rmt_fill_chunk_snapshot_info(uint64_t timestamp, int32_t name_length,
+ struct rmt_file_chunk_snapshot_info *chunk)
+{
+ chunk->header.chunk_id.type = RMT_FILE_CHUNK_TYPE_SNAPSHOT_INFO;
+ chunk->header.chunk_id.index = 0;
+ chunk->header.major_version = 1;
+ chunk->header.minor_version = 6;
+ chunk->header.size_in_bytes = sizeof(*chunk) + name_length;
+
+ chunk->snapshot_time = timestamp;
+ chunk->name_length = name_length;
+}
+
+/*
+ * RMT stream tokens
+ */
+
+enum rmt_token_type {
+ RMT_TOKEN_TYPE_TIMESTAMP,
+ RMT_TOKEN_TYPE_RESERVED0,
+ RMT_TOKEN_TYPE_RESERVED1,
+ RMT_TOKEN_TYPE_PAGE_TABLE_UPDATE,
+ RMT_TOKEN_TYPE_USERDATA,
+ RMT_TOKEN_TYPE_MISC,
+ RMT_TOKEN_TYPE_RESOURCE_REFERENCE,
+ RMT_TOKEN_TYPE_RESOURCE_BIND,
+ RMT_TOKEN_TYPE_PROCESS_EVENT,
+ RMT_TOKEN_TYPE_PAGE_REFERENCE,
+ RMT_TOKEN_TYPE_CPU_MAP,
+ RMT_TOKEN_TYPE_VIRTUAL_FREE,
+ RMT_TOKEN_TYPE_VIRTUAL_ALLOCATE,
+ RMT_TOKEN_TYPE_RESOURCE_CREATE,
+ RMT_TOKEN_TYPE_TIME_DELTA,
+ RMT_TOKEN_TYPE_RESOURCE_DESTROY,
+};
+
+static enum rmt_token_type
+token_type_to_rmt(enum vk_rmv_token_type type)
+{
+ switch (type) {
+ case VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE:
+ return RMT_TOKEN_TYPE_PAGE_TABLE_UPDATE;
+ case VK_RMV_TOKEN_TYPE_USERDATA:
+ return RMT_TOKEN_TYPE_USERDATA;
+ case VK_RMV_TOKEN_TYPE_MISC:
+ return RMT_TOKEN_TYPE_MISC;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_REFERENCE:
+ return RMT_TOKEN_TYPE_RESOURCE_REFERENCE;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_BIND:
+ return RMT_TOKEN_TYPE_RESOURCE_BIND;
+ case VK_RMV_TOKEN_TYPE_CPU_MAP:
+ return RMT_TOKEN_TYPE_CPU_MAP;
+ case VK_RMV_TOKEN_TYPE_VIRTUAL_FREE:
+ return RMT_TOKEN_TYPE_VIRTUAL_FREE;
+ case VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE:
+ return RMT_TOKEN_TYPE_VIRTUAL_ALLOCATE;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_CREATE:
+ return RMT_TOKEN_TYPE_RESOURCE_CREATE;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY:
+ return RMT_TOKEN_TYPE_RESOURCE_DESTROY;
+ default:
+ unreachable("invalid token type");
+ }
+}
+
+enum rmt_descriptor_type {
+ RMT_DESCRIPTOR_TYPE_CSV_SRV_UAV,
+ RMT_DESCRIPTOR_TYPE_SAMPLER,
+ RMT_DESCRIPTOR_TYPE_RTV,
+ RMT_DESCRIPTOR_TYPE_DSV,
+ RMT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ RMT_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ RMT_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ RMT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ RMT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
+ RMT_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ RMT_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ RMT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
+ RMT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
+ RMT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
+ RMT_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK,
+ RMT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE,
+ RMT_DESCRIPTOR_TYPE_INVALID = 0x7FFF,
+};
+
+static enum rmt_descriptor_type
+vk_to_rmt_descriptor_type(VkDescriptorType type)
+{
+ switch (type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ return RMT_DESCRIPTOR_TYPE_SAMPLER;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return RMT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ return RMT_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ return RMT_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ return RMT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ return RMT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ return RMT_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ return RMT_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ return RMT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ return RMT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ return RMT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
+ return RMT_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK;
+ case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
+ return RMT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE;
+ default:
+ /* This is reachable; the error should be handled by the caller. */
+ return RMT_DESCRIPTOR_TYPE_INVALID;
+ }
+}
+
+static uint32_t
+rmt_valid_pool_size_count(struct vk_rmv_descriptor_pool_description *description)
+{
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < description->pool_size_count; ++i) {
+ enum rmt_descriptor_type rmt_type =
+ vk_to_rmt_descriptor_type(description->pool_sizes[i].type);
+ if (rmt_type == RMT_DESCRIPTOR_TYPE_INVALID)
+ /* Unknown descriptor type, skip */
+ continue;
+ ++count;
+ }
+ return count;
+}
+
+enum rmt_resource_owner_type {
+ RMT_RESOURCE_OWNER_TYPE_APPLICATION,
+ RMT_RESOURCE_OWNER_TYPE_PAL,
+ RMT_RESOURCE_OWNER_TYPE_CLIENT_DRIVER,
+ RMT_RESOURCE_OWNER_TYPE_KMD,
+};
+
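+/* Packs 'data' into the bit range [first_bit, last_bit] of dst, splitting the
+ * write when it crosses a uint64_t boundary. For example, writing a 4-bit
+ * value at first_bit = 62 stores bits 62-63 in dst[0] and the remaining two
+ * bits in bits 0-1 of dst[1]. */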
+static void
+rmt_file_write_bits(uint64_t *dst, uint64_t data, unsigned first_bit, unsigned last_bit)
+{
+ unsigned index = first_bit / 64;
+ unsigned shift = first_bit % 64;
+
+ /* Data crosses a uint64_t boundary; split the write. */
+ if (index != last_bit / 64) {
+ unsigned first_part_size = 64 - shift;
+ rmt_file_write_bits(dst, data & ((1ULL << first_part_size) - 1ULL), first_bit,
+ index * 64 + 63);
+ rmt_file_write_bits(dst, data >> first_part_size, (index + 1) * 64, last_bit);
+ } else {
+ assert(data <= (1ULL << (uint64_t)(last_bit - first_bit + 1ULL)) - 1ULL);
+ dst[index] |= data << shift;
+ }
+}
+
+static void
+rmt_file_write_token_bits(uint64_t *dst, uint64_t data, unsigned first_bit, unsigned last_bit)
+{
+ rmt_file_write_bits(dst, data, first_bit - 8, last_bit - 8);
+}
+
+static enum rmt_heap_type
+rmt_file_domain_to_heap_type(enum vk_rmv_kernel_memory_domain domain, bool has_cpu_access)
+{
+ switch (domain) {
+ case VK_RMV_KERNEL_MEMORY_DOMAIN_CPU:
+ case VK_RMV_KERNEL_MEMORY_DOMAIN_GTT:
+ return RMT_HEAP_TYPE_SYSTEM;
+ case VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM:
+ return has_cpu_access ? RMT_HEAP_TYPE_LOCAL : RMT_HEAP_TYPE_INVISIBLE;
+ default:
+ unreachable("invalid domain");
+ }
+}
+
+/*
+ * Write helpers for stream tokens
+ */
+
+/* The timestamp frequency, in clock units / second.
+ * Currently set to 1MHz. */
+#define RMT_TIMESTAMP_FREQUENCY (1 * 1000000)
+/* Factor needed to convert nanosecond timestamps as returned by os_time_get_nano
+ * to RMV timestamps */
+#define RMT_TIMESTAMP_DIVISOR (1000000000L / RMT_TIMESTAMP_FREQUENCY)
+
+static void
+rmt_dump_timestamp(struct vk_rmv_timestamp_token *token, FILE *output)
+{
+ uint64_t data[2] = {0};
+ rmt_file_write_bits(data, RMT_TOKEN_TYPE_TIMESTAMP, 0, 3);
+ /* RMT stores clock ticks divided by 32 */
+ rmt_file_write_bits(data, token->value / 32, 4, 63);
+ rmt_file_write_bits(data, RMT_TIMESTAMP_FREQUENCY, 64, 89);
+ fwrite(data, 12, 1, output);
+}
+
+static void
+rmt_dump_time_delta(uint64_t delta, FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_bits(&data, RMT_TOKEN_TYPE_TIME_DELTA, 0, 3);
+ rmt_file_write_bits(&data, 7, 4, 7); /* no. of delta bytes */
+ rmt_file_write_bits(&data, delta, 8, 63);
+ fwrite(&data, 8, 1, output);
+}
+
+static void
+rmt_dump_event_resource(struct vk_rmv_event_description *description, FILE *output)
+{
+ /* 8 bits of flags are the only thing in the payload */
+ fwrite(&description->flags, 1, 1, output);
+}
+
+static void
+rmt_dump_border_color_palette_resource(struct vk_rmv_border_color_palette_description *description,
+ FILE *output)
+{
+ /* no. of entries is the only thing in the payload */
+ fwrite(&description->num_entries, 1, 1, output);
+}
+
+enum rmt_page_size {
+ RMT_PAGE_SIZE_UNMAPPED,
+ RMT_PAGE_SIZE_4_KB,
+ RMT_PAGE_SIZE_64_KB,
+ RMT_PAGE_SIZE_256_KB,
+ RMT_PAGE_SIZE_1_MB,
+ RMT_PAGE_SIZE_2_MB,
+};
+
+static enum rmt_page_size
+rmt_size_to_page_size(uint32_t size)
+{
+ switch (size) {
+ case 4096:
+ return RMT_PAGE_SIZE_4_KB;
+ case 65536:
+ return RMT_PAGE_SIZE_64_KB;
+ case 262144:
+ return RMT_PAGE_SIZE_256_KB;
+ case 1048576:
+ return RMT_PAGE_SIZE_1_MB;
+ case 2097152:
+ return RMT_PAGE_SIZE_2_MB;
+ default:
+ unreachable("invalid page size");
+ }
+}
+
+static void
+rmt_dump_heap_resource(struct vk_rmv_heap_description *description, FILE *output)
+{
+ uint64_t data[2] = {0};
+ rmt_file_write_bits(data, description->alloc_flags, 0, 3);
+ rmt_file_write_bits(data, description->size, 4, 68);
+ rmt_file_write_bits(data, rmt_size_to_page_size(description->alignment), 69, 73);
+ rmt_file_write_bits(data, description->heap_index, 74, 77);
+ fwrite(data, 10, 1, output);
+}
+
+enum rmt_buffer_usage_flags {
+ RMT_BUFFER_USAGE_FLAGS_TRANSFER_SOURCE = 1 << 0,
+ RMT_BUFFER_USAGE_FLAGS_TRANSFER_DESTINATION = 1 << 1,
+ RMT_BUFFER_USAGE_FLAGS_UNIFORM_TEXEL_BUFFER = 1 << 2,
+ RMT_BUFFER_USAGE_FLAGS_STORAGE_TEXEL_BUFFER = 1 << 3,
+ RMT_BUFFER_USAGE_FLAGS_UNIFORM_BUFFER = 1 << 4,
+ RMT_BUFFER_USAGE_FLAGS_STORAGE_BUFFER = 1 << 5,
+ RMT_BUFFER_USAGE_FLAGS_INDEX_BUFFER = 1 << 6,
+ RMT_BUFFER_USAGE_FLAGS_VERTEX_BUFFER = 1 << 7,
+ RMT_BUFFER_USAGE_FLAGS_INDIRECT_BUFFER = 1 << 8,
+ RMT_BUFFER_USAGE_FLAGS_TRANSFORM_FEEDBACK_BUFFER = 1 << 9,
+ RMT_BUFFER_USAGE_FLAGS_TRANSFORM_FEEDBACK_COUNTER_BUFFER = 1 << 10,
+ RMT_BUFFER_USAGE_FLAGS_CONDITIONAL_RENDERING = 1 << 11,
+ RMT_BUFFER_USAGE_FLAGS_RAY_TRACING = 1 << 12,
+ RMT_BUFFER_USAGE_FLAGS_SHADER_DEVICE_ADDRESS = 1 << 13,
+};
+
+static void
+rmt_dump_buffer_resource(struct vk_rmv_buffer_description *description, FILE *output)
+{
+ /* Vulkan and RMT usage flags are bit-identical up to and including the indirect buffer bit, so they can be copied directly */
+ uint32_t usage_flags =
+ description->usage_flags & ((VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT << 1) - 1);
+
+ if (description->usage_flags & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT)
+ usage_flags |= RMT_BUFFER_USAGE_FLAGS_TRANSFORM_FEEDBACK_BUFFER;
+ if (description->usage_flags & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT)
+ usage_flags |= RMT_BUFFER_USAGE_FLAGS_TRANSFORM_FEEDBACK_COUNTER_BUFFER;
+ if (description->usage_flags & VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT)
+ usage_flags |= RMT_BUFFER_USAGE_FLAGS_CONDITIONAL_RENDERING;
+ if (description->usage_flags &
+ (VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR |
+ VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR |
+ VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR))
+ usage_flags |= RMT_BUFFER_USAGE_FLAGS_RAY_TRACING;
+ if (description->usage_flags & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)
+ usage_flags |= RMT_BUFFER_USAGE_FLAGS_SHADER_DEVICE_ADDRESS;
+
+ uint64_t data[2] = {0};
+ rmt_file_write_bits(data, description->create_flags, 0, 7);
+ rmt_file_write_bits(data, usage_flags, 8, 23);
+ rmt_file_write_bits(data, description->size, 24, 87);
+ fwrite(data, 11, 1, output);
+}
+
+enum rmt_tiling {
+ RMT_TILING_LINEAR,
+ RMT_TILING_OPTIMAL,
+ RMT_TILING_SWIZZLED,
+};
+
+enum rmt_tiling_optimization_mode {
+ RMT_TILING_OPTIMIZATION_MODE_BALANCED,
+ RMT_TILING_OPTIMIZATION_MODE_SPACE,
+ RMT_TILING_OPTIMIZATION_MODE_SPEED,
+};
+
+enum rmt_metadata_mode {
+ RMT_METADATA_MODE_DEFAULT,
+ RMT_METADATA_MODE_OPTIMIZE_TEX_PREFETCH,
+ RMT_METADATA_MODE_DISABLE,
+};
+
+enum rmt_image_create_flags {
+ RMT_IMAGE_CREATE_INVARIANT = 1 << 0,
+ RMT_IMAGE_CREATE_CLONEABLE = 1 << 1,
+ RMT_IMAGE_CREATE_SHAREABLE = 1 << 2,
+ RMT_IMAGE_CREATE_FLIPPABLE = 1 << 3,
+ RMT_IMAGE_CREATE_STEREO = 1 << 4,
+ RMT_IMAGE_CREATE_CUBEMAP = 1 << 5,
+ RMT_IMAGE_CREATE_PRT = 1 << 6,
+};
+
+enum rmt_image_usage_flags {
+ RMT_IMAGE_USAGE_SHADER_READ = 1 << 0,
+ RMT_IMAGE_USAGE_SHADER_WRITE = 1 << 1,
+ RMT_IMAGE_USAGE_RESOLVE_SRC = 1 << 2,
+ RMT_IMAGE_USAGE_RESOLVE_DST = 1 << 3,
+ RMT_IMAGE_USAGE_COLOR_TARGET = 1 << 4,
+ RMT_IMAGE_USAGE_DEPTH_STENCIL = 1 << 5,
+};
+
+static void
+rmt_dump_image_resource(struct vk_rmv_image_description *description, FILE *output)
+{
+ uint64_t data[5] = {0};
+
+ enum rmt_tiling tiling;
+ switch (description->tiling) {
+ case VK_IMAGE_TILING_LINEAR:
+ tiling = RMT_TILING_LINEAR;
+ break;
+ case VK_IMAGE_TILING_OPTIMAL:
+ case VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT:
+ tiling = RMT_TILING_OPTIMAL;
+ break;
+ default:
+ unreachable("invalid image tiling");
+ }
+
+ uint32_t create_flags = 0;
+ if (description->create_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
+ create_flags |= RMT_IMAGE_CREATE_CUBEMAP;
+ if (description->create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
+ create_flags |= RMT_IMAGE_CREATE_PRT;
+
+ uint32_t usage_flags = 0;
+ if (description->usage_flags & VK_IMAGE_USAGE_SAMPLED_BIT ||
+ description->usage_flags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
+ usage_flags |= RMT_IMAGE_USAGE_SHADER_READ;
+ if (description->usage_flags & VK_IMAGE_USAGE_STORAGE_BIT)
+ usage_flags |= RMT_IMAGE_USAGE_SHADER_READ | RMT_IMAGE_USAGE_SHADER_WRITE;
+ if (description->usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
+ usage_flags |= RMT_IMAGE_USAGE_COLOR_TARGET;
+ if (description->usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
+ usage_flags |= RMT_IMAGE_USAGE_DEPTH_STENCIL;
+ if (description->usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)
+ usage_flags |= RMT_IMAGE_USAGE_RESOLVE_SRC;
+ if (description->usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)
+ usage_flags |= RMT_IMAGE_USAGE_RESOLVE_DST;
+
+ enum rmt_swizzle swizzles[4] = {RMT_SWIZZLE_ZERO, RMT_SWIZZLE_ZERO, RMT_SWIZZLE_ZERO,
+ RMT_SWIZZLE_ZERO};
+ rmt_format_to_swizzle(description->format, swizzles);
+
+ rmt_file_write_bits(data, create_flags, 0, 19);
+ rmt_file_write_bits(data, usage_flags, 20, 34);
+ rmt_file_write_bits(data, description->type, 35, 36);
+ rmt_file_write_bits(data, description->extent.width - 1, 37, 50);
+ rmt_file_write_bits(data, description->extent.height - 1, 51, 64);
+ rmt_file_write_bits(data, description->extent.depth - 1, 65, 78);
+ rmt_file_write_bits(data, swizzles[0], 79, 81);
+ rmt_file_write_bits(data, swizzles[1], 82, 84);
+ rmt_file_write_bits(data, swizzles[2], 85, 87);
+ rmt_file_write_bits(data, swizzles[3], 88, 90);
+ rmt_file_write_bits(data, vk_to_rmt_format(description->format), 91, 98);
+ rmt_file_write_bits(data, description->num_mips, 99, 102);
+ rmt_file_write_bits(data, description->num_slices - 1, 103, 113);
+ rmt_file_write_bits(data, description->log2_samples, 114, 116);
+ rmt_file_write_bits(data, description->log2_storage_samples, 117, 118);
+ rmt_file_write_bits(data, tiling, 119, 120);
+ rmt_file_write_bits(data, RMT_TILING_OPTIMIZATION_MODE_BALANCED, 121, 122);
+ rmt_file_write_bits(data, RMT_METADATA_MODE_DEFAULT, 123, 124);
+ rmt_file_write_bits(data, description->alignment_log2, 125, 129);
+ rmt_file_write_bits(data, description->presentable, 130, 130);
+ rmt_file_write_bits(data, description->size, 131, 162);
+ rmt_file_write_bits(data, description->metadata_offset, 163, 194);
+ rmt_file_write_bits(data, description->metadata_size, 195, 226);
+ rmt_file_write_bits(data, description->metadata_header_offset, 227, 258);
+ rmt_file_write_bits(data, description->metadata_header_size, 259, 290);
+ rmt_file_write_bits(data, description->image_alignment_log2, 291, 295);
+ rmt_file_write_bits(data, description->metadata_alignment_log2, 296, 300);
+ /* metadata header alignment */
+ rmt_file_write_bits(data, description->metadata_alignment_log2, 301, 305);
+ /* is fullscreen presentable */
+ rmt_file_write_bits(data, description->presentable, 306, 306);
+ fwrite(data, 39, 1, output);
+}
+
+enum rmt_query_pool_type {
+ RMT_QUERY_POOL_TYPE_OCCLUSION,
+ RMT_QUERY_POOL_TYPE_PIPELINE,
+ RMT_QUERY_POOL_TYPE_STREAMOUT
+};
+
+static void
+rmt_dump_query_pool_resource(struct vk_rmv_query_pool_description *description, FILE *output)
+{
+ enum rmt_query_pool_type pool_type;
+ switch (description->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ pool_type = RMT_QUERY_POOL_TYPE_OCCLUSION;
+ break;
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+ pool_type = RMT_QUERY_POOL_TYPE_PIPELINE;
+ break;
+ case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
+ pool_type = RMT_QUERY_POOL_TYPE_STREAMOUT;
+ break;
+ default:
+ unreachable("invalid query pool type");
+ break;
+ }
+
+ uint64_t data = 0;
+ rmt_file_write_bits(&data, pool_type, 0, 1);
+ rmt_file_write_bits(&data, description->has_cpu_access, 2, 2);
+ fwrite(&data, 1, 1, output);
+}
+
+enum rmt_pipeline_flags {
+ RMT_PIPELINE_FLAG_INTERNAL = (1 << 0),
+ RMT_PIPELINE_FLAG_OVERRIDE_GPU_HEAP = (1 << 1),
+};
+
+enum rmt_pipeline_stage_flags {
+ RMT_PIPELINE_STAGE_FRAGMENT = 1 << 0,
+ RMT_PIPELINE_STAGE_TESS_CONTROL = 1 << 1,
+ RMT_PIPELINE_STAGE_TESS_EVAL = 1 << 2,
+ RMT_PIPELINE_STAGE_VERTEX = 1 << 3,
+ RMT_PIPELINE_STAGE_GEOMETRY = 1 << 4,
+ RMT_PIPELINE_STAGE_COMPUTE = 1 << 5,
+ RMT_PIPELINE_STAGE_TASK = 1 << 6,
+ RMT_PIPELINE_STAGE_MESH = 1 << 7
+};
+
+static void
+rmt_dump_pipeline_resource(struct vk_rmv_pipeline_description *description, FILE *output)
+{
+ uint64_t data[3] = {0};
+
+ enum rmt_pipeline_flags flags = 0;
+ if (description->is_internal)
+ flags |= RMT_PIPELINE_FLAG_INTERNAL;
+
+ enum rmt_pipeline_stage_flags stage_flags = 0;
+ if (description->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
+ stage_flags |= RMT_PIPELINE_STAGE_FRAGMENT;
+ if (description->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
+ stage_flags |= RMT_PIPELINE_STAGE_TESS_CONTROL;
+ if (description->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
+ stage_flags |= RMT_PIPELINE_STAGE_TESS_EVAL;
+ if (description->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
+ stage_flags |= RMT_PIPELINE_STAGE_VERTEX;
+ if (description->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
+ stage_flags |= RMT_PIPELINE_STAGE_GEOMETRY;
+ if (description->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT ||
+ description->shader_stages & VK_SHADER_STAGE_RAYGEN_BIT_KHR ||
+ description->shader_stages & VK_SHADER_STAGE_INTERSECTION_BIT_KHR ||
+ description->shader_stages & VK_SHADER_STAGE_ANY_HIT_BIT_KHR ||
+ description->shader_stages & VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR ||
+ description->shader_stages & VK_SHADER_STAGE_MISS_BIT_KHR ||
+ description->shader_stages & VK_SHADER_STAGE_CALLABLE_BIT_KHR)
+ stage_flags |= RMT_PIPELINE_STAGE_COMPUTE;
+ if (description->shader_stages & VK_SHADER_STAGE_TASK_BIT_EXT)
+ stage_flags |= RMT_PIPELINE_STAGE_TASK;
+ if (description->shader_stages & VK_SHADER_STAGE_MESH_BIT_EXT)
+ stage_flags |= RMT_PIPELINE_STAGE_MESH;
+
+ rmt_file_write_bits(data, flags, 0, 7);
+ rmt_file_write_bits(data, description->hash_hi, 8, 71);
+ rmt_file_write_bits(data, description->hash_lo, 72, 135);
+ rmt_file_write_bits(data, stage_flags, 136, 143);
+ rmt_file_write_bits(data, description->is_ngg, 144, 144);
+ fwrite(data, 19, 1, output);
+}
+
+static void
+rmt_dump_descriptor_pool_resource(struct vk_rmv_descriptor_pool_description *description,
+ FILE *output)
+{
+ uint64_t data = 0;
+ /* TODO: figure out a better way of handling descriptor counts > 65535 */
+ rmt_file_write_bits(&data, MIN2(description->max_sets, 65535), 0, 15);
+ rmt_file_write_bits(&data, rmt_valid_pool_size_count(description), 16, 23);
+ fwrite(&data, 3, 1, output);
+
+ for (uint32_t i = 0; i < description->pool_size_count; ++i) {
+ data = 0;
+ enum rmt_descriptor_type rmt_type =
+ vk_to_rmt_descriptor_type(description->pool_sizes[i].type);
+ if (rmt_type == RMT_DESCRIPTOR_TYPE_INVALID)
+ /* Unknown descriptor type, skip */
+ continue;
+ rmt_file_write_bits(&data, rmt_type, 0, 15);
+ rmt_file_write_bits(&data, MIN2(description->pool_sizes[i].descriptorCount, 65535), 16, 31);
+ fwrite(&data, 4, 1, output);
+ }
+}
+
+static void
+rmt_dump_command_buffer_resource(struct vk_rmv_command_buffer_description *description,
+ FILE *output)
+{
+ uint64_t data[6] = {0};
+ rmt_file_write_bits(data, 0, 0, 3); /* flags */
+ /* heap for executable commands */
+ rmt_file_write_bits(data, rmt_file_domain_to_heap_type(description->preferred_domain, true), 4,
+ 7);
+ /* executable command allocation size */
+ rmt_file_write_bits(data, description->executable_size, 8, 63);
+ /* executable command size usable by command buffers */
+ rmt_file_write_bits(data, description->app_available_executable_size, 64, 119);
+ /* heap for embedded data */
+ rmt_file_write_bits(data, rmt_file_domain_to_heap_type(description->preferred_domain, true), 120,
+ 123);
+ /* embedded data allocation size */
+ rmt_file_write_bits(data, description->embedded_data_size, 124, 179);
+ /* embedded data size usable by command buffers */
+ rmt_file_write_bits(data, description->app_available_embedded_data_size, 180, 235);
+ /* heap for scratch data */
+ rmt_file_write_bits(data, rmt_file_domain_to_heap_type(description->preferred_domain, true), 236,
+ 239);
+ /* scratch data allocation size */
+ rmt_file_write_bits(data, description->scratch_size, 240, 295);
+ /* scratch data size usable by command buffers */
+ rmt_file_write_bits(data, description->app_available_scratch_size, 296, 351);
+
+ fwrite(data, 44, 1, output);
+}
+
+static void
+rmt_dump_misc_internal_resource(struct vk_rmv_misc_internal_description *description,
+ FILE *output)
+{
+ /* The 8-bit type enum (currently always zero) is the only thing in the payload */
+ fwrite(&description->type, 1, 1, output);
+}
+
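+/* A RESOURCE_CREATE token consists of the common payload written below
+ * (resource id, owner type, resource type) followed immediately by one of the
+ * type-specific payloads emitted by the rmt_dump_*_resource() helpers above.
+ */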
+static void
+rmt_dump_resource_create(struct vk_rmv_resource_create_token *token, FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_token_bits(&data, token->resource_id, 8, 39);
+ rmt_file_write_token_bits(&data,
+ token->is_driver_internal ? RMT_RESOURCE_OWNER_TYPE_CLIENT_DRIVER
+ : RMT_RESOURCE_OWNER_TYPE_APPLICATION,
+ 40, 41);
+ rmt_file_write_token_bits(&data, token->type, 48, 53);
+ fwrite(&data, 6, 1, output);
+
+ switch (token->type) {
+ case VK_RMV_RESOURCE_TYPE_GPU_EVENT:
+ rmt_dump_event_resource(&token->event, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_BORDER_COLOR_PALETTE:
+ rmt_dump_border_color_palette_resource(&token->border_color_palette, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_HEAP:
+ rmt_dump_heap_resource(&token->heap, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_BUFFER:
+ rmt_dump_buffer_resource(&token->buffer, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_IMAGE:
+ rmt_dump_image_resource(&token->image, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_QUERY_HEAP:
+ rmt_dump_query_pool_resource(&token->query_pool, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_PIPELINE:
+ rmt_dump_pipeline_resource(&token->pipeline, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_DESCRIPTOR_POOL:
+ rmt_dump_descriptor_pool_resource(&token->descriptor_pool, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_COMMAND_ALLOCATOR:
+ rmt_dump_command_buffer_resource(&token->command_buffer, output);
+ break;
+ case VK_RMV_RESOURCE_TYPE_MISC_INTERNAL:
+ rmt_dump_misc_internal_resource(&token->misc_internal, output);
+ break;
+ default:
+ unreachable("invalid resource type");
+ }
+}
+
+static void
+rmt_dump_resource_bind(struct vk_rmv_resource_bind_token *token, FILE *output)
+{
+ uint64_t data[3] = {0};
+ rmt_file_write_token_bits(data, token->address & 0xFFFFFFFFFFFF, 8, 55);
+ rmt_file_write_token_bits(data, token->size, 56, 99);
+ rmt_file_write_token_bits(data, token->is_system_memory, 100, 100);
+ rmt_file_write_token_bits(data, token->resource_id, 104, 135);
+ fwrite(data, 16, 1, output);
+}
+
+static void
+rmt_dump_resource_reference(struct vk_rmv_resource_reference_token *token,
+ FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_token_bits(&data, token->residency_removed, 8, 8);
+ rmt_file_write_token_bits(&data, token->virtual_address & 0xFFFFFFFFFFFF, 9, 56);
+ fwrite(&data, 7, 1, output);
+}
+
+static void
+rmt_dump_resource_destroy(struct vk_rmv_resource_destroy_token *token, FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_token_bits(&data, token->resource_id, 8, 39);
+ fwrite(&data, 4, 1, output);
+}
+
+enum rmt_virtual_allocation_owner_type {
+ RMT_VIRTUAL_ALLOCATION_OWNER_TYPE_APPLICATION,
+ RMT_VIRTUAL_ALLOCATION_OWNER_TYPE_PAL,
+ RMT_VIRTUAL_ALLOCATION_OWNER_TYPE_CLIENT_DRIVER,
+ RMT_VIRTUAL_ALLOCATION_OWNER_TYPE_KERNEL_DRIVER
+};
+
+static void
+rmt_dump_virtual_alloc(struct vk_rmv_virtual_allocate_token *token, FILE *output)
+{
+ uint64_t data[2] = {0};
+ rmt_file_write_token_bits(data, token->page_count - 1, 8, 31);
+ rmt_file_write_token_bits(data,
+ token->is_driver_internal
+ ? RMT_VIRTUAL_ALLOCATION_OWNER_TYPE_CLIENT_DRIVER
+ : RMT_VIRTUAL_ALLOCATION_OWNER_TYPE_APPLICATION,
+ 32, 33);
+ rmt_file_write_token_bits(data, token->address & 0xFFFFFFFFFFFF, 34, 81);
+ if (token->preferred_domains) {
+ rmt_file_write_token_bits(
+ data, rmt_file_domain_to_heap_type(token->preferred_domains, !token->is_in_invisible_vram),
+ 82, 83);
+ /* num. of heap types */
+ rmt_file_write_token_bits(data, 1, 90, 92);
+ } else
+ rmt_file_write_token_bits(data, 0, 90, 92);
+ fwrite(data, 11, 1, output);
+}
+
+static void
+rmt_dump_virtual_free(struct vk_rmv_virtual_free_token *token, FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_token_bits(&data, token->address & 0xFFFFFFFFFFFF, 8, 56);
+ fwrite(&data, 6, 1, output);
+}
+
+enum rmt_page_table_controller {
+ RMT_PAGE_TABLE_CONTROLLER_OS,
+ RMT_PAGE_TABLE_CONTROLLER_KMD,
+};
+
+static void
+rmt_dump_page_table_update(struct vk_rmv_page_table_update_token *token,
+ FILE *output)
+{
+ uint64_t virtual_page_idx = (token->virtual_address / 4096);
+ uint64_t physical_page_idx = (token->physical_address / 4096);
+
+ enum rmt_page_size page_size = rmt_size_to_page_size(token->page_size);
+
+ uint64_t data[3] = {0};
+ rmt_file_write_token_bits(data, virtual_page_idx & 0xFFFFFFFFF, 8, 43);
+ rmt_file_write_token_bits(data, physical_page_idx & 0xFFFFFFFFF, 44, 79);
+ rmt_file_write_token_bits(data, token->page_count, 80, 99);
+ rmt_file_write_token_bits(data, page_size, 100, 102);
+ rmt_file_write_token_bits(data, token->is_unmap, 103, 103);
+ rmt_file_write_token_bits(data, token->pid, 104, 135);
+ rmt_file_write_token_bits(data, token->type, 136, 137);
+ rmt_file_write_token_bits(data, RMT_PAGE_TABLE_CONTROLLER_KMD, 138, 138);
+ fwrite(data, 17, 1, output);
+}
+
+enum rmt_userdata_type {
+ RMT_USERDATA_TYPE_NAME,
+ RMT_USERDATA_TYPE_SNAPSHOT,
+ RMT_USERDATA_TYPE_BINARY,
+ RMT_USERDATA_TYPE_RESERVED,
+ RMT_USERDATA_TYPE_CORRELATION,
+ RMT_USERDATA_TYPE_MARK_IMPLICIT_RESOURCE,
+};
+
+static void
+rmt_dump_userdata(struct vk_rmv_userdata_token *token, FILE *output)
+{
+ uint64_t data = 0;
+ /* userdata type */
+ rmt_file_write_token_bits(&data, RMT_USERDATA_TYPE_NAME, 8, 11);
+ /* size of userdata payload */
+ rmt_file_write_token_bits(&data, strlen(token->name) + sizeof(uint32_t) + 1, 12, 23);
+
+ fwrite(&data, 3, 1, output);
+ fwrite(token->name, 1, strlen(token->name) + 1, output);
+ fwrite(&token->resource_id, sizeof(uint32_t), 1, output);
+}
+
+static void
+rmt_dump_misc(struct vk_rmv_misc_token *token, FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_token_bits(&data, token->type, 8, 11);
+ fwrite(&data, 1, 1, output);
+}
+
+static void
+rmt_dump_cpu_map(struct vk_rmv_cpu_map_token *token, FILE *output)
+{
+ uint64_t data = 0;
+ rmt_file_write_token_bits(&data, token->address & 0xFFFFFFFFFFFF, 8, 55);
+ rmt_file_write_token_bits(&data, token->unmapped, 56, 56);
+ fwrite(&data, 7, 1, output);
+}
+
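+/* Overall file layout produced here: RMT file header, system info chunk,
+ * three segment info chunks (presumably one per heap type), adapter info
+ * chunk, then a single RMT data chunk whose size is only known after the
+ * token stream has been written and is therefore patched in afterwards.
+ */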
+static void
+rmt_dump_data(struct vk_memory_trace_data *data, FILE *output)
+{
+ struct rmt_file_header header = {0};
+ struct rmt_file_chunk_system_info system_info_chunk = {0};
+ struct rmt_file_chunk_adapter_info adapter_info_chunk = {0};
+ struct rmt_file_chunk_rmt_data data_chunk = {0};
+
+ /* RMT header */
+ rmt_fill_header(&header);
+ fwrite(&header, sizeof(header), 1, output);
+
+ /* System info */
+ rmt_fill_chunk_system_info(&system_info_chunk);
+ fwrite(&system_info_chunk, sizeof(system_info_chunk), 1, output);
+
+ /* Segment info */
+ for (int32_t i = 0; i < 3; ++i) {
+ struct rmt_file_chunk_segment_info segment_info_chunk = {0};
+
+ rmt_fill_chunk_segment_info(data, &data->device_info, &segment_info_chunk, i);
+ fwrite(&segment_info_chunk, sizeof(segment_info_chunk), 1, output);
+ }
+
+ /* Adapter info */
+ rmt_fill_chunk_adapter_info(&data->device_info, &adapter_info_chunk);
+ fwrite(&adapter_info_chunk, sizeof(adapter_info_chunk), 1, output);
+
+ long chunk_start = ftell(output);
+ /* Write a dummy data chunk to reserve space */
+ fwrite(&data_chunk, sizeof(data_chunk), 1, output);
+
+ qsort(data->tokens.data, util_dynarray_num_elements(&data->tokens, struct vk_rmv_token),
+ sizeof(struct vk_rmv_token), vk_rmv_token_compare);
+
+ uint64_t current_timestamp = 0;
+ if (util_dynarray_num_elements(&data->tokens, struct vk_rmv_token))
+ current_timestamp =
+ util_dynarray_element(&data->tokens, struct vk_rmv_token, 0)->timestamp / RMT_TIMESTAMP_DIVISOR;
+
+ long stream_start = ftell(output);
+
+ struct vk_rmv_timestamp_token timestamp_token;
+ timestamp_token.value = 0;
+ rmt_dump_timestamp(&timestamp_token, output);
+
+ util_dynarray_foreach (&data->tokens, struct vk_rmv_token, token) {
+ /* Modify the token's timestamp only temporarily, so that a later trace still sees the original value */
+ uint64_t old_timestamp = token->timestamp;
+ /* adjust timestamp to 1 MHz, see rmt_fill_chunk_system_info */
+ token->timestamp /= RMT_TIMESTAMP_DIVISOR;
+
+ int64_t delta = token->timestamp - current_timestamp;
+
+ /* Time values are stored divided by 32 */
+ delta /= 32;
+
+ /*
+ * Each token can hold up to 4 bits of time delta. If the delta doesn't
+ * fit in 4 bits, an additional token containing more space for the delta
+ * has to be emitted.
+ */
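+ /* Example: a delta of 40 (1280 clock units before the divide by 32 above)
+ * does not fit in 4 bits, so a TIME_DELTA token is emitted and the header's
+ * delta field is left at zero. */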
+ if (delta > 0xF) {
+ rmt_dump_time_delta(delta, output);
+ delta = 0;
+ }
+
+ uint64_t token_header = 0;
+ rmt_file_write_bits(&token_header, token_type_to_rmt(token->type), 0, 3);
+ rmt_file_write_bits(&token_header, delta, 4, 7);
+ fwrite(&token_header, 1, 1, output);
+
+ switch (token->type) {
+ case VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE:
+ rmt_dump_virtual_alloc(&token->data.virtual_allocate, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_VIRTUAL_FREE:
+ rmt_dump_virtual_free(&token->data.virtual_free, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE:
+ rmt_dump_page_table_update(&token->data.page_table_update, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_CREATE:
+ rmt_dump_resource_create(&token->data.resource_create, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY:
+ rmt_dump_resource_destroy(&token->data.resource_destroy, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_BIND:
+ rmt_dump_resource_bind(&token->data.resource_bind, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_RESOURCE_REFERENCE:
+ rmt_dump_resource_reference(&token->data.resource_reference, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_USERDATA:
+ rmt_dump_userdata(&token->data.userdata, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_MISC:
+ rmt_dump_misc(&token->data.misc, output);
+ break;
+ case VK_RMV_TOKEN_TYPE_CPU_MAP:
+ rmt_dump_cpu_map(&token->data.cpu_map, output);
+ break;
+ default:
+ unreachable("invalid token type");
+ }
+
+ current_timestamp = token->timestamp;
+ token->timestamp = old_timestamp;
+ }
+ long stream_end = ftell(output);
+
+ /* Go back and write the correct chunk data. */
+ fseek(output, chunk_start, SEEK_SET);
+ rmt_fill_chunk_rmt_data(stream_end - stream_start, &data_chunk);
+ fwrite(&data_chunk, sizeof(data_chunk), 1, output);
+}
+
+int
+vk_dump_rmv_capture(struct vk_memory_trace_data *data)
+{
+ char filename[2048];
+ struct tm now;
+ FILE *f;
+
+ time_t t = time(NULL);
+ now = *localtime(&t);
+
+ snprintf(filename, sizeof(filename), "/tmp/%s_%04d.%02d.%02d_%02d.%02d.%02d.rmv",
+ util_get_process_name(), 1900 + now.tm_year, now.tm_mon + 1, now.tm_mday, now.tm_hour,
+ now.tm_min, now.tm_sec);
+
+ f = fopen(filename, "wb");
+ if (!f)
+ return -1;
+
+ rmt_dump_data(data, f);
+
+ fprintf(stderr, "RMV capture saved to '%s'\n", filename);
+
+ fclose(f);
+ return 0;
+}
diff --git a/src/vulkan/runtime/rmv/vk_rmv_tokens.h b/src/vulkan/runtime/rmv/vk_rmv_tokens.h
new file mode 100644
index 00000000000..e16998b1184
--- /dev/null
+++ b/src/vulkan/runtime/rmv/vk_rmv_tokens.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright © 2022 Friedrich Vock
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_RMV_TOKENS_H
+#define VK_RMV_TOKENS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "util/os_time.h"
+#include <vulkan/vulkan_core.h>
+
+/*
+ * Implemented types of tokens.
+ */
+enum vk_rmv_token_type {
+ VK_RMV_TOKEN_TYPE_USERDATA,
+ VK_RMV_TOKEN_TYPE_MISC,
+ VK_RMV_TOKEN_TYPE_RESOURCE_BIND,
+ VK_RMV_TOKEN_TYPE_RESOURCE_REFERENCE,
+ VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE,
+ VK_RMV_TOKEN_TYPE_CPU_MAP,
+ VK_RMV_TOKEN_TYPE_VIRTUAL_FREE,
+ VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE,
+ VK_RMV_TOKEN_TYPE_RESOURCE_CREATE,
+ VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY
+};
+
+/*
+ * The type of miscellaneous event reported through a MISC token.
+ */
+enum vk_rmv_misc_event_type {
+ VK_RMV_MISC_EVENT_TYPE_SUBMIT_GRAPHICS,
+ VK_RMV_MISC_EVENT_TYPE_SUBMIT_COMPUTE,
+ VK_RMV_MISC_EVENT_TYPE_SUBMIT_COPY,
+ VK_RMV_MISC_EVENT_TYPE_PRESENT,
+ VK_RMV_MISC_EVENT_TYPE_INVALIDATE_RANGES,
+ VK_RMV_MISC_EVENT_TYPE_FLUSH_MAPPED_RANGE,
+ VK_RMV_MISC_EVENT_TYPE_TRIM_MEMORY
+};
+
+enum vk_rmv_resource_type {
+ VK_RMV_RESOURCE_TYPE_IMAGE,
+ VK_RMV_RESOURCE_TYPE_BUFFER,
+ VK_RMV_RESOURCE_TYPE_GPU_EVENT,
+ VK_RMV_RESOURCE_TYPE_BORDER_COLOR_PALETTE,
+ VK_RMV_RESOURCE_TYPE_INDIRECT_CMD_GENERATOR,
+ VK_RMV_RESOURCE_TYPE_MOTION_ESTIMATOR,
+ VK_RMV_RESOURCE_TYPE_PERF_EXPERIMENT,
+ VK_RMV_RESOURCE_TYPE_QUERY_HEAP,
+ VK_RMV_RESOURCE_TYPE_VIDEO_DECODER,
+ VK_RMV_RESOURCE_TYPE_VIDEO_ENCODER,
+ VK_RMV_RESOURCE_TYPE_TIMESTAMP,
+ VK_RMV_RESOURCE_TYPE_HEAP,
+ VK_RMV_RESOURCE_TYPE_PIPELINE,
+ VK_RMV_RESOURCE_TYPE_DESCRIPTOR_HEAP,
+ VK_RMV_RESOURCE_TYPE_DESCRIPTOR_POOL,
+ VK_RMV_RESOURCE_TYPE_COMMAND_ALLOCATOR,
+ VK_RMV_RESOURCE_TYPE_MISC_INTERNAL
+};
+
+/*
+ * Token data for all types of tokens.
+ */
+
+struct vk_rmv_timestamp_token {
+ uint64_t value;
+};
+
+struct vk_rmv_userdata_token {
+ char *name;
+ uint32_t resource_id;
+};
+
+struct vk_rmv_misc_token {
+ enum vk_rmv_misc_event_type type;
+};
+
+struct vk_rmv_resource_bind_token {
+ uint64_t address;
+ uint64_t size;
+ bool is_system_memory;
+ uint32_t resource_id;
+};
+
+struct vk_rmv_resource_reference_token {
+ uint64_t virtual_address;
+ bool residency_removed;
+};
+
+enum vk_rmv_page_table_update_type {
+ VK_RMV_PAGE_TABLE_UPDATE_TYPE_DISCARD,
+ VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
+ VK_RMV_PAGE_TABLE_UPDATE_TYPE_TRANSFER
+};
+
+struct vk_rmv_page_table_update_token {
+ uint64_t virtual_address;
+ uint64_t physical_address;
+ uint64_t page_count;
+ uint32_t page_size;
+ int pid;
+ bool is_unmap;
+ enum vk_rmv_page_table_update_type type;
+};
+
+struct vk_rmv_cpu_map_token {
+ uint64_t address;
+ bool unmapped;
+};
+
+struct vk_rmv_virtual_free_token {
+ uint64_t address;
+};
+
+enum vk_rmv_kernel_memory_domain {
+ VK_RMV_KERNEL_MEMORY_DOMAIN_CPU = 0x1,
+ VK_RMV_KERNEL_MEMORY_DOMAIN_GTT = 0x2,
+ VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM = 0x4
+};
+
+struct vk_rmv_virtual_allocate_token {
+ uint32_t page_count;
+ bool is_driver_internal;
+ bool is_in_invisible_vram;
+ uint64_t address;
+ enum vk_rmv_kernel_memory_domain preferred_domains;
+};
+
+struct vk_rmv_image_description {
+ VkImageCreateFlags create_flags;
+ VkImageUsageFlags usage_flags;
+ VkImageType type;
+ VkExtent3D extent;
+ VkFormat format;
+ uint32_t num_mips;
+ uint32_t num_slices;
+ VkImageTiling tiling;
+
+ uint32_t log2_samples;
+ uint32_t log2_storage_samples;
+
+ uint32_t alignment_log2;
+ uint32_t metadata_alignment_log2;
+ uint32_t image_alignment_log2;
+
+ uint64_t size;
+ uint64_t metadata_size;
+ uint64_t metadata_header_size;
+
+ uint64_t metadata_offset;
+ uint64_t metadata_header_offset;
+
+ bool presentable;
+};
+
+struct vk_rmv_event_description {
+ VkEventCreateFlags flags;
+};
+
+struct vk_rmv_border_color_palette_description {
+ uint8_t num_entries;
+};
+
+struct vk_rmv_buffer_description {
+ VkBufferCreateFlags create_flags;
+ VkBufferUsageFlags usage_flags;
+ uint64_t size;
+};
+
+struct vk_rmv_query_pool_description {
+ VkQueryType type;
+ bool has_cpu_access;
+};
+
+/* The heap description refers to a VkDeviceMemory resource. */
+struct vk_rmv_heap_description {
+ VkMemoryAllocateFlags alloc_flags;
+ uint64_t size;
+ uint32_t alignment;
+ uint32_t heap_index;
+};
+
+struct vk_rmv_pipeline_description {
+ bool is_internal;
+ uint64_t hash_lo;
+ uint64_t hash_hi;
+ VkShaderStageFlags shader_stages;
+ bool is_ngg;
+};
+
+struct vk_rmv_descriptor_pool_description {
+ uint32_t max_sets;
+ uint32_t pool_size_count;
+ VkDescriptorPoolSize *pool_sizes;
+};
+
+struct vk_rmv_command_buffer_description {
+ enum vk_rmv_kernel_memory_domain preferred_domain;
+ uint64_t executable_size;
+ uint64_t app_available_executable_size;
+ uint64_t embedded_data_size;
+ uint64_t app_available_embedded_data_size;
+ uint64_t scratch_size;
+ uint64_t app_available_scratch_size;
+};
+
+enum vk_rmv_misc_internal_type {
+ VK_RMV_MISC_INTERNAL_TYPE_PADDING,
+};
+
+struct vk_rmv_misc_internal_description {
+ enum vk_rmv_misc_internal_type type;
+};
+
+struct vk_rmv_resource_create_token {
+ uint32_t resource_id;
+ bool is_driver_internal;
+ enum vk_rmv_resource_type type;
+ union {
+ struct vk_rmv_event_description event;
+ struct vk_rmv_border_color_palette_description border_color_palette;
+ struct vk_rmv_image_description image;
+ struct vk_rmv_buffer_description buffer;
+ struct vk_rmv_query_pool_description query_pool;
+ struct vk_rmv_heap_description heap;
+ struct vk_rmv_pipeline_description pipeline;
+ struct vk_rmv_descriptor_pool_description descriptor_pool;
+ struct vk_rmv_command_buffer_description command_buffer;
+ struct vk_rmv_misc_internal_description misc_internal;
+ };
+};
+
+struct vk_rmv_resource_destroy_token {
+ uint32_t resource_id;
+};
+
+struct vk_rmv_token {
+ enum vk_rmv_token_type type;
+ uint64_t timestamp;
+ union {
+ struct vk_rmv_timestamp_token timestamp;
+ struct vk_rmv_userdata_token userdata;
+ struct vk_rmv_misc_token misc;
+ struct vk_rmv_resource_bind_token resource_bind;
+ struct vk_rmv_resource_reference_token resource_reference;
+ struct vk_rmv_page_table_update_token page_table_update;
+ struct vk_rmv_cpu_map_token cpu_map;
+ struct vk_rmv_virtual_free_token virtual_free;
+ struct vk_rmv_virtual_allocate_token virtual_allocate;
+ struct vk_rmv_resource_create_token resource_create;
+ struct vk_rmv_resource_destroy_token resource_destroy;
+ } data;
+};
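+
+/* Minimal usage sketch (illustrative, not part of this header): a driver
+ * could record a resource-destroy token into a vk_memory_trace_data token
+ * array before dumping the capture, e.g.:
+ *
+ *    struct vk_rmv_token token = {
+ *       .type = VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY,
+ *       .timestamp = os_time_get_nano(),
+ *       .data.resource_destroy = { .resource_id = id },
+ *    };
+ *    util_dynarray_append(&trace_data->tokens, struct vk_rmv_token, token);
+ */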
+
+static inline size_t
+vk_rmv_token_size_from_type(enum vk_rmv_token_type type)
+{
+ switch (type) {
+ case VK_RMV_TOKEN_TYPE_USERDATA:
+ return sizeof(struct vk_rmv_userdata_token);
+ case VK_RMV_TOKEN_TYPE_MISC:
+ return sizeof(struct vk_rmv_misc_token);
+ case VK_RMV_TOKEN_TYPE_RESOURCE_BIND:
+ return sizeof(struct vk_rmv_resource_bind_token);
+ case VK_RMV_TOKEN_TYPE_RESOURCE_REFERENCE:
+ return sizeof(struct vk_rmv_resource_reference_token);
+ case VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE:
+ return sizeof(struct vk_rmv_page_table_update_token);
+ case VK_RMV_TOKEN_TYPE_CPU_MAP:
+ return sizeof(struct vk_rmv_cpu_map_token);
+ case VK_RMV_TOKEN_TYPE_VIRTUAL_FREE:
+ return sizeof(struct vk_rmv_virtual_free_token);
+ case VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE:
+ return sizeof(struct vk_rmv_virtual_allocate_token);
+ case VK_RMV_TOKEN_TYPE_RESOURCE_CREATE:
+ return sizeof(struct vk_rmv_resource_create_token);
+ case VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY:
+ return sizeof(struct vk_rmv_resource_destroy_token);
+ default:
+ unreachable("invalid token type");
+ }
+}
+
+#endif
diff --git a/src/vulkan/runtime/vk_acceleration_structure.c b/src/vulkan/runtime/vk_acceleration_structure.c
new file mode 100644
index 00000000000..074b94ea85c
--- /dev/null
+++ b/src/vulkan/runtime/vk_acceleration_structure.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2021 Bas Nieuwenhuizen
+ * Copyright © 2023 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_acceleration_structure.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+
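+/* The acceleration structure's device address is simply the device address of
+ * its backing buffer plus the offset it was created at within that buffer.
+ */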
+VkDeviceAddress
+vk_acceleration_structure_get_va(struct vk_acceleration_structure *accel_struct)
+{
+ VkBufferDeviceAddressInfo info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+ .buffer = accel_struct->buffer,
+ };
+
+ VkDeviceAddress base_addr = accel_struct->base.device->dispatch_table.GetBufferDeviceAddress(
+ vk_device_to_handle(accel_struct->base.device), &info);
+
+ return base_addr + accel_struct->offset;
+}
+
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateAccelerationStructureKHR(VkDevice _device,
+ const VkAccelerationStructureCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkAccelerationStructureKHR *pAccelerationStructure)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ struct vk_acceleration_structure *accel_struct = vk_object_alloc(
+ device, pAllocator, sizeof(struct vk_acceleration_structure),
+ VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR);
+
+ if (!accel_struct)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ accel_struct->buffer = pCreateInfo->buffer;
+ accel_struct->offset = pCreateInfo->offset;
+ accel_struct->size = pCreateInfo->size;
+
+ if (pCreateInfo->deviceAddress &&
+ vk_acceleration_structure_get_va(accel_struct) != pCreateInfo->deviceAddress) {
+ /* Don't leak the object we just allocated on failure */
+ vk_object_free(device, pAllocator, accel_struct);
+ return vk_error(device, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS);
+ }
+
+ *pAccelerationStructure = vk_acceleration_structure_to_handle(accel_struct);
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyAccelerationStructureKHR(VkDevice _device,
+ VkAccelerationStructureKHR accelerationStructure,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, accelerationStructure);
+
+ if (!accel_struct)
+ return;
+
+ vk_object_free(device, pAllocator, accel_struct);
+}
+
+VKAPI_ATTR VkDeviceAddress VKAPI_CALL
+vk_common_GetAccelerationStructureDeviceAddressKHR(
+ VkDevice _device, const VkAccelerationStructureDeviceAddressInfoKHR *pInfo)
+{
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfo->accelerationStructure);
+ return vk_acceleration_structure_get_va(accel_struct);
+}
diff --git a/src/vulkan/wsi/wsi_common_win32.h b/src/vulkan/runtime/vk_acceleration_structure.h
index 661ba9dea80..bcc2eff4660 100644
--- a/src/vulkan/wsi/wsi_common_win32.h
+++ b/src/vulkan/runtime/vk_acceleration_structure.h
@@ -1,5 +1,6 @@
/*
- * Copyright © 2015 Intel Corporation
+ * Copyright © 2021 Bas Nieuwenhuizen
+ * Copyright © 2023 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,18 +21,23 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef WSI_COMMON_WIN32_H
-#define WSI_COMMON_WIN32_H
-#include "wsi_common.h"
-#include <vulkan/vulkan_win32.h>
+#ifndef VK_ACCELERATION_STRUCTURE_H
+#define VK_ACCELERATION_STRUCTURE_H
-VkBool32
-wsi_win32_get_presentation_support(struct wsi_device *wsi_device);
+#include "vk_object.h"
+
+struct vk_acceleration_structure {
+ struct vk_object_base base;
+
+ VkBuffer buffer;
+ uint64_t offset;
+ uint64_t size;
+};
+
+VkDeviceAddress vk_acceleration_structure_get_va(struct vk_acceleration_structure *accel_struct);
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_acceleration_structure, base, VkAccelerationStructureKHR,
+ VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR)
-VkResult
-wsi_create_win32_surface(VkInstance instance,
- const VkAllocationCallbacks *pAllocator,
- const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface);
#endif
diff --git a/src/vulkan/runtime/vk_android.c b/src/vulkan/runtime/vk_android.c
new file mode 100644
index 00000000000..df4efae1b5f
--- /dev/null
+++ b/src/vulkan/runtime/vk_android.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_android.h"
+
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_image.h"
+#include "vk_log.h"
+#include "vk_queue.h"
+#include "vk_util.h"
+
+#include "util/libsync.h"
+
+#include <hardware/gralloc.h>
+
+#if ANDROID_API_LEVEL >= 26
+#include <hardware/gralloc1.h>
+#endif
+
+#include <unistd.h>
+
+#if ANDROID_API_LEVEL >= 26
+#include <vndk/hardware_buffer.h>
+
+/* From the Android hardware_buffer.h header:
+ *
+ * "The buffer will be written to by the GPU as a framebuffer attachment.
+ *
+ * Note that the name of this flag is somewhat misleading: it does not
+ * imply that the buffer contains a color format. A buffer with depth or
+ * stencil format that will be used as a framebuffer attachment should
+ * also have this flag. Use the equivalent flag
+ * AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER to avoid this confusion."
+ *
+ * The flag was renamed from COLOR_OUTPUT to FRAMEBUFFER at Android API
+ * version 29.
+ */
+#if ANDROID_API_LEVEL < 29
+#define AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT
+#endif
+
+/* Convert an AHB format to a VkFormat, based on the "AHardwareBuffer Format
+ * Equivalence" table in the Vulkan spec.
+ *
+ * Note that this only covers a subset of AHB formats defined in NDK. Drivers
+ * can support more AHB formats, including private ones.
+ */
+VkFormat
+vk_ahb_format_to_image_format(uint32_t ahb_format)
+{
+ switch (ahb_format) {
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ return VK_FORMAT_R8G8B8_UNORM;
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ return VK_FORMAT_R5G6B5_UNORM_PACK16;
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ return VK_FORMAT_R16G16B16A16_SFLOAT;
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+ case AHARDWAREBUFFER_FORMAT_D16_UNORM:
+ return VK_FORMAT_D16_UNORM;
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM:
+ return VK_FORMAT_X8_D24_UNORM_PACK32;
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT:
+ return VK_FORMAT_D24_UNORM_S8_UINT;
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
+ return VK_FORMAT_D32_SFLOAT;
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT:
+ return VK_FORMAT_D32_SFLOAT_S8_UINT;
+ case AHARDWAREBUFFER_FORMAT_S8_UINT:
+ return VK_FORMAT_S8_UINT;
+ default:
+ return VK_FORMAT_UNDEFINED;
+ }
+}
+
+/* Convert a VkFormat to an AHB format, based on the "AHardwareBuffer Format
+ * Equivalence" table in the Vulkan spec.
+ *
+ * Note that this only covers a subset of AHB formats defined in NDK. Drivers
+ * can support more AHB formats, including private ones.
+ */
+uint32_t
+vk_image_format_to_ahb_format(VkFormat vk_format)
+{
+ switch (vk_format) {
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ return AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+ case VK_FORMAT_R8G8B8_UNORM:
+ return AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM;
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ return AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ return AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT;
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ return AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM;
+ case VK_FORMAT_D16_UNORM:
+ return AHARDWAREBUFFER_FORMAT_D16_UNORM;
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ return AHARDWAREBUFFER_FORMAT_D24_UNORM;
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ return AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT;
+ case VK_FORMAT_D32_SFLOAT:
+ return AHARDWAREBUFFER_FORMAT_D32_FLOAT;
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT;
+ case VK_FORMAT_S8_UINT:
+ return AHARDWAREBUFFER_FORMAT_S8_UINT;
+ default:
+ return 0;
+ }
+}
+
+/* Construct an AHB usage mask from the image create/usage bits; see the
+ * 'AHardwareBuffer Usage Equivalence' table in the Vulkan spec.
+ */
+uint64_t
+vk_image_usage_to_ahb_usage(const VkImageCreateFlags vk_create,
+ const VkImageUsageFlags vk_usage)
+{
+ uint64_t ahb_usage = 0;
+ if (vk_usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
+ ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
+
+ if (vk_usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT))
+ ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER;
+
+ if (vk_usage & VK_IMAGE_USAGE_STORAGE_BIT)
+ ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
+
+ if (vk_create & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
+ ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP;
+
+ if (vk_create & VK_IMAGE_CREATE_PROTECTED_BIT)
+ ahb_usage |= AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
+
+ /* No usage bits set - set at least one GPU usage. */
+ if (ahb_usage == 0)
+ ahb_usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
+
+ return ahb_usage;
+}
+
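+/* Allocate an AHardwareBuffer matching the Vulkan allocation: for dedicated
+ * image allocations the AHB mirrors the image's extent, layers, format and
+ * usage; otherwise a BLOB-format buffer is allocated whose width is the
+ * requested allocationSize in bytes.
+ */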
+struct AHardwareBuffer *
+vk_alloc_ahardware_buffer(const VkMemoryAllocateInfo *pAllocateInfo)
+{
+ const VkMemoryDedicatedAllocateInfo *dedicated_info =
+ vk_find_struct_const(pAllocateInfo->pNext,
+ MEMORY_DEDICATED_ALLOCATE_INFO);
+
+ uint32_t w = 0;
+ uint32_t h = 1;
+ uint32_t layers = 1;
+ uint32_t format = 0;
+ uint64_t usage = 0;
+
+ /* If the caller passed dedicated allocation info, size the AHB from the image. */
+ if (dedicated_info && dedicated_info->image) {
+ VK_FROM_HANDLE(vk_image, image, dedicated_info->image);
+
+ if (!image->ahb_format)
+ return NULL;
+
+ w = image->extent.width;
+ h = image->extent.height;
+ layers = image->array_layers;
+ format = image->ahb_format;
+ usage = vk_image_usage_to_ahb_usage(image->create_flags,
+ image->usage);
+ } else {
+ /* AHB export allocation for VkBuffer requires a valid allocationSize */
+ assert(pAllocateInfo->allocationSize);
+ w = pAllocateInfo->allocationSize;
+ format = AHARDWAREBUFFER_FORMAT_BLOB;
+ usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER |
+ AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+ }
+
+ struct AHardwareBuffer_Desc desc = {
+ .width = w,
+ .height = h,
+ .layers = layers,
+ .format = format,
+ .usage = usage,
+ };
+
+ struct AHardwareBuffer *ahb;
+ if (AHardwareBuffer_allocate(&desc, &ahb) != 0)
+ return NULL;
+
+ return ahb;
+}
+#endif /* ANDROID_API_LEVEL >= 26 */
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_AcquireImageANDROID(VkDevice _device,
+ VkImage image,
+ int nativeFenceFd,
+ VkSemaphore semaphore,
+ VkFence fence)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VkResult result = VK_SUCCESS;
+
+ /* From https://source.android.com/devices/graphics/implement-vulkan :
+ *
+ * "The driver takes ownership of the fence file descriptor and closes
+ * the fence file descriptor when no longer needed. The driver must do
+ * so even if neither a semaphore or fence object is provided, or even
+ * if vkAcquireImageANDROID fails and returns an error."
+ *
+ * The Vulkan spec for VkImportFence/SemaphoreFdKHR(), however, requires
+ * the file descriptor to be left alone on failure.
+ */
+ int semaphore_fd = -1, fence_fd = -1;
+ if (nativeFenceFd >= 0) {
+ if (semaphore != VK_NULL_HANDLE && fence != VK_NULL_HANDLE) {
+ /* We have both so we have to import the sync file twice. One of
+ * them needs to be a dup.
+ */
+ semaphore_fd = nativeFenceFd;
+ fence_fd = dup(nativeFenceFd);
+ if (fence_fd < 0) {
+ VkResult err = (errno == EMFILE) ? VK_ERROR_TOO_MANY_OBJECTS :
+ VK_ERROR_OUT_OF_HOST_MEMORY;
+ close(nativeFenceFd);
+ return vk_error(device, err);
+ }
+ } else if (semaphore != VK_NULL_HANDLE) {
+ semaphore_fd = nativeFenceFd;
+ } else if (fence != VK_NULL_HANDLE) {
+ fence_fd = nativeFenceFd;
+ } else {
+ /* Nothing to import into, so we have to close the file descriptor */
+ close(nativeFenceFd);
+ }
+ }
+
+ if (semaphore != VK_NULL_HANDLE) {
+ const VkImportSemaphoreFdInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
+ .semaphore = semaphore,
+ .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ .fd = semaphore_fd,
+ };
+ result = device->dispatch_table.ImportSemaphoreFdKHR(_device, &info);
+ if (result == VK_SUCCESS)
+ semaphore_fd = -1; /* The driver took ownership */
+ }
+
+ if (result == VK_SUCCESS && fence != VK_NULL_HANDLE) {
+ const VkImportFenceFdInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR,
+ .fence = fence,
+ .flags = VK_FENCE_IMPORT_TEMPORARY_BIT,
+ .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
+ .fd = fence_fd,
+ };
+ result = device->dispatch_table.ImportFenceFdKHR(_device, &info);
+ if (result == VK_SUCCESS)
+ fence_fd = -1; /* The driver took ownership */
+ }
+
+ if (semaphore_fd >= 0)
+ close(semaphore_fd);
+ if (fence_fd >= 0)
+ close(fence_fd);
+
+ return result;
+}
+
+static VkResult
+vk_anb_semaphore_init_once(struct vk_queue *queue, struct vk_device *device)
+{
+ if (queue->anb_semaphore != VK_NULL_HANDLE)
+ return VK_SUCCESS;
+
+ const VkExportSemaphoreCreateInfo export_info = {
+ .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
+ .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ };
+ const VkSemaphoreCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = &export_info,
+ };
+ return device->dispatch_table.CreateSemaphore(vk_device_to_handle(device),
+ &create_info, NULL,
+ &queue->anb_semaphore);
+}
+
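+/* Implement vkQueueSignalReleaseImageANDROID by submitting an empty batch
+ * that waits on the caller's semaphores and signals a per-queue semaphore
+ * created with sync-fd export support, then exporting that semaphore as the
+ * native fence fd expected by the Android window system.
+ */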
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_QueueSignalReleaseImageANDROID(VkQueue _queue,
+ uint32_t waitSemaphoreCount,
+ const VkSemaphore *pWaitSemaphores,
+ VkImage image,
+ int *pNativeFenceFd)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+ struct vk_device *device = queue->base.device;
+ VkResult result = VK_SUCCESS;
+
+ STACK_ARRAY(VkPipelineStageFlags, stage_flags, MAX2(1, waitSemaphoreCount));
+ for (uint32_t i = 0; i < MAX2(1, waitSemaphoreCount); i++)
+ stage_flags[i] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+
+ result = vk_anb_semaphore_init_once(queue, device);
+ if (result != VK_SUCCESS) {
+ STACK_ARRAY_FINISH(stage_flags);
+ return result;
+ }
+
+ const VkSubmitInfo submit_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .waitSemaphoreCount = waitSemaphoreCount,
+ .pWaitSemaphores = pWaitSemaphores,
+ .pWaitDstStageMask = stage_flags,
+ .signalSemaphoreCount = 1,
+ .pSignalSemaphores = &queue->anb_semaphore,
+ };
+ result = device->dispatch_table.QueueSubmit(_queue, 1, &submit_info,
+ VK_NULL_HANDLE);
+ STACK_ARRAY_FINISH(stage_flags);
+ if (result != VK_SUCCESS)
+ return result;
+
+ const VkSemaphoreGetFdInfoKHR get_fd = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ .semaphore = queue->anb_semaphore,
+ };
+ return device->dispatch_table.GetSemaphoreFdKHR(vk_device_to_handle(device),
+ &get_fd, pNativeFenceFd);
+}
diff --git a/src/vulkan/runtime/vk_android.h b/src/vulkan/runtime/vk_android.h
new file mode 100644
index 00000000000..496b6c54751
--- /dev/null
+++ b/src/vulkan/runtime/vk_android.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2023 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_ANDROID_H
+#define VK_ANDROID_H
+
+#include "vulkan/vulkan_core.h"
+
+#include "util/detect_os.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26
+
+VkFormat vk_ahb_format_to_image_format(uint32_t ahb_format);
+
+uint32_t vk_image_format_to_ahb_format(VkFormat vk_format);
+
+uint64_t vk_image_usage_to_ahb_usage(const VkImageCreateFlags vk_create,
+ const VkImageUsageFlags vk_usage);
+
+struct AHardwareBuffer *
+vk_alloc_ahardware_buffer(const VkMemoryAllocateInfo *pAllocateInfo);
+
+#else /* DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26 */
+
+static inline VkFormat
+vk_ahb_format_to_image_format(uint32_t ahb_format)
+{
+ return VK_FORMAT_UNDEFINED;
+}
+
+static inline uint32_t
+vk_image_format_to_ahb_format(VkFormat vk_format)
+{
+ return 0;
+}
+
+static inline uint64_t
+vk_image_usage_to_ahb_usage(const VkImageCreateFlags vk_create,
+ const VkImageUsageFlags vk_usage)
+{
+ return 0;
+}
+
+static inline struct AHardwareBuffer *
+vk_alloc_ahardware_buffer(const VkMemoryAllocateInfo *pAllocateInfo)
+{
+ return NULL;
+}
+
+#endif /* DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_ANDROID_H */
diff --git a/src/vulkan/runtime/vk_blend.c b/src/vulkan/runtime/vk_blend.c
new file mode 100644
index 00000000000..b7253bb0ea7
--- /dev/null
+++ b/src/vulkan/runtime/vk_blend.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2023 Valve Corporation
+ * Copyright 2021 Collabora Ltd.
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "vk_blend.h"
+#include "util/macros.h"
+
+enum pipe_logicop
+vk_logic_op_to_pipe(VkLogicOp in)
+{
+ switch (in) {
+ case VK_LOGIC_OP_CLEAR:
+ return PIPE_LOGICOP_CLEAR;
+ case VK_LOGIC_OP_AND:
+ return PIPE_LOGICOP_AND;
+ case VK_LOGIC_OP_AND_REVERSE:
+ return PIPE_LOGICOP_AND_REVERSE;
+ case VK_LOGIC_OP_COPY:
+ return PIPE_LOGICOP_COPY;
+ case VK_LOGIC_OP_AND_INVERTED:
+ return PIPE_LOGICOP_AND_INVERTED;
+ case VK_LOGIC_OP_NO_OP:
+ return PIPE_LOGICOP_NOOP;
+ case VK_LOGIC_OP_XOR:
+ return PIPE_LOGICOP_XOR;
+ case VK_LOGIC_OP_OR:
+ return PIPE_LOGICOP_OR;
+ case VK_LOGIC_OP_NOR:
+ return PIPE_LOGICOP_NOR;
+ case VK_LOGIC_OP_EQUIVALENT:
+ return PIPE_LOGICOP_EQUIV;
+ case VK_LOGIC_OP_INVERT:
+ return PIPE_LOGICOP_INVERT;
+ case VK_LOGIC_OP_OR_REVERSE:
+ return PIPE_LOGICOP_OR_REVERSE;
+ case VK_LOGIC_OP_COPY_INVERTED:
+ return PIPE_LOGICOP_COPY_INVERTED;
+ case VK_LOGIC_OP_OR_INVERTED:
+ return PIPE_LOGICOP_OR_INVERTED;
+ case VK_LOGIC_OP_NAND:
+ return PIPE_LOGICOP_NAND;
+ case VK_LOGIC_OP_SET:
+ return PIPE_LOGICOP_SET;
+ default:
+ unreachable("Invalid logicop");
+ }
+}
+
+enum pipe_blend_func
+vk_blend_op_to_pipe(VkBlendOp in)
+{
+ switch (in) {
+ case VK_BLEND_OP_ADD:
+ return PIPE_BLEND_ADD;
+ case VK_BLEND_OP_SUBTRACT:
+ return PIPE_BLEND_SUBTRACT;
+ case VK_BLEND_OP_REVERSE_SUBTRACT:
+ return PIPE_BLEND_REVERSE_SUBTRACT;
+ case VK_BLEND_OP_MIN:
+ return PIPE_BLEND_MIN;
+ case VK_BLEND_OP_MAX:
+ return PIPE_BLEND_MAX;
+ default:
+ unreachable("Invalid blend op");
+ }
+}
+
+enum pipe_blendfactor
+vk_blend_factor_to_pipe(enum VkBlendFactor vk_factor)
+{
+ switch (vk_factor) {
+ case VK_BLEND_FACTOR_ZERO:
+ return PIPE_BLENDFACTOR_ZERO;
+ case VK_BLEND_FACTOR_ONE:
+ return PIPE_BLENDFACTOR_ONE;
+ case VK_BLEND_FACTOR_SRC_COLOR:
+ return PIPE_BLENDFACTOR_SRC_COLOR;
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+ return PIPE_BLENDFACTOR_INV_SRC_COLOR;
+ case VK_BLEND_FACTOR_DST_COLOR:
+ return PIPE_BLENDFACTOR_DST_COLOR;
+ case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+ return PIPE_BLENDFACTOR_INV_DST_COLOR;
+ case VK_BLEND_FACTOR_SRC_ALPHA:
+ return PIPE_BLENDFACTOR_SRC_ALPHA;
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+ return PIPE_BLENDFACTOR_INV_SRC_ALPHA;
+ case VK_BLEND_FACTOR_DST_ALPHA:
+ return PIPE_BLENDFACTOR_DST_ALPHA;
+ case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+ return PIPE_BLENDFACTOR_INV_DST_ALPHA;
+ case VK_BLEND_FACTOR_CONSTANT_COLOR:
+ return PIPE_BLENDFACTOR_CONST_COLOR;
+ case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+ return PIPE_BLENDFACTOR_INV_CONST_COLOR;
+ case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+ return PIPE_BLENDFACTOR_CONST_ALPHA;
+ case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+ return PIPE_BLENDFACTOR_INV_CONST_ALPHA;
+ case VK_BLEND_FACTOR_SRC1_COLOR:
+ return PIPE_BLENDFACTOR_SRC1_COLOR;
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
+ return PIPE_BLENDFACTOR_INV_SRC1_COLOR;
+ case VK_BLEND_FACTOR_SRC1_ALPHA:
+ return PIPE_BLENDFACTOR_SRC1_ALPHA;
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
+ return PIPE_BLENDFACTOR_INV_SRC1_ALPHA;
+ case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+ return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
+ default:
+ unreachable("Invalid blend factor");
+ }
+}
diff --git a/src/vulkan/runtime/vk_blend.h b/src/vulkan/runtime/vk_blend.h
new file mode 100644
index 00000000000..fb50c17eddc
--- /dev/null
+++ b/src/vulkan/runtime/vk_blend.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2023 Valve Corporation
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef VK_BLEND_H
+#define VK_BLEND_H
+
+#include <stdbool.h>
+#include "util/blend.h"
+#include "vulkan/vulkan_core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum pipe_logicop vk_logic_op_to_pipe(VkLogicOp in);
+enum pipe_blend_func vk_blend_op_to_pipe(VkBlendOp in);
+enum pipe_blendfactor vk_blend_factor_to_pipe(VkBlendFactor in);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/vulkan/runtime/vk_buffer.c b/src/vulkan/runtime/vk_buffer.c
new file mode 100644
index 00000000000..023aafd2177
--- /dev/null
+++ b/src/vulkan/runtime/vk_buffer.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright © 2022 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_buffer.h"
+
+#include "vk_common_entrypoints.h"
+#include "vk_alloc.h"
+#include "vk_device.h"
+#include "vk_util.h"
+
+void
+vk_buffer_init(struct vk_device *device,
+ struct vk_buffer *buffer,
+ const VkBufferCreateInfo *pCreateInfo)
+{
+ vk_object_base_init(device, &buffer->base, VK_OBJECT_TYPE_BUFFER);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
+ assert(pCreateInfo->size > 0);
+
+ buffer->create_flags = pCreateInfo->flags;
+ buffer->size = pCreateInfo->size;
+ buffer->usage = pCreateInfo->usage;
+
+ const VkBufferUsageFlags2CreateInfoKHR *usage2_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR);
+ if (usage2_info != NULL)
+ buffer->usage = usage2_info->usage;
+}
+
+void *
+vk_buffer_create(struct vk_device *device,
+ const VkBufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_buffer *buffer =
+ vk_zalloc2(&device->alloc, alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (buffer == NULL)
+ return NULL;
+
+ vk_buffer_init(device, buffer, pCreateInfo);
+
+ return buffer;
+}
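+
+/* Editorial sketch, not part of the original patch: drivers are expected to
+ * embed struct vk_buffer at the start of their own buffer type and pass the
+ * full size to vk_buffer_create(); the names below are assumptions:
+ *
+ *    struct drv_buffer {
+ *       struct vk_buffer vk;
+ *       uint64_t gpu_addr;
+ *    };
+ *
+ *    struct drv_buffer *buf =
+ *       vk_buffer_create(&dev->vk, pCreateInfo, pAllocator,
+ *                        sizeof(struct drv_buffer));
+ *
+ * vk_buffer_destroy() then frees the whole driver object in one go.
+ */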
+
+void
+vk_buffer_finish(struct vk_buffer *buffer)
+{
+ vk_object_base_finish(&buffer->base);
+}
+
+void
+vk_buffer_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_buffer *buffer)
+{
+ vk_object_free(device, alloc, buffer);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetBufferMemoryRequirements(VkDevice _device,
+ VkBuffer buffer,
+ VkMemoryRequirements *pMemoryRequirements)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ VkBufferMemoryRequirementsInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
+ .buffer = buffer,
+ };
+ VkMemoryRequirements2 reqs = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+ };
+ device->dispatch_table.GetBufferMemoryRequirements2(_device, &info, &reqs);
+
+ *pMemoryRequirements = reqs.memoryRequirements;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetBufferMemoryRequirements2(VkDevice _device,
+ const VkBufferMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_buffer, buffer, pInfo->buffer);
+
+ VkBufferCreateInfo pCreateInfo = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .pNext = NULL,
+ .usage = buffer->usage,
+ .size = buffer->size,
+ .flags = buffer->create_flags,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .queueFamilyIndexCount = 0,
+ .pQueueFamilyIndices = NULL,
+ };
+ VkDeviceBufferMemoryRequirements info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
+ .pNext = NULL,
+ .pCreateInfo = &pCreateInfo,
+ };
+
+ device->dispatch_table.GetDeviceBufferMemoryRequirements(_device, &info, pMemoryRequirements);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_BindBufferMemory(VkDevice _device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ VkBindBufferMemoryInfo bind = {
+ .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
+ .buffer = buffer,
+ .memory = memory,
+ .memoryOffset = memoryOffset,
+ };
+
+ return device->dispatch_table.BindBufferMemory2(_device, 1, &bind);
+}
diff --git a/src/vulkan/runtime/vk_buffer.h b/src/vulkan/runtime/vk_buffer.h
new file mode 100644
index 00000000000..0bd9b19b6fb
--- /dev/null
+++ b/src/vulkan/runtime/vk_buffer.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2022 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_BUFFER_H
+#define VK_BUFFER_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_buffer {
+ struct vk_object_base base;
+
+ /** VkBufferCreateInfo::flags */
+ VkBufferCreateFlags create_flags;
+
+ /** VkBufferCreateInfo::size */
+ VkDeviceSize size;
+
+ /** VkBufferCreateInfo::usage or VkBufferUsageFlags2CreateInfoKHR::usage */
+ VkBufferUsageFlags2KHR usage;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_buffer, base, VkBuffer,
+ VK_OBJECT_TYPE_BUFFER);
+
+void vk_buffer_init(struct vk_device *device,
+ struct vk_buffer *buffer,
+ const VkBufferCreateInfo *pCreateInfo);
+void vk_buffer_finish(struct vk_buffer *buffer);
+
+void *vk_buffer_create(struct vk_device *device,
+ const VkBufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+void vk_buffer_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_buffer *buffer);
+
+static inline uint64_t
+vk_buffer_range(const struct vk_buffer *buffer,
+ uint64_t offset, uint64_t range)
+{
+ assert(offset <= buffer->size);
+ if (range == VK_WHOLE_SIZE) {
+ return buffer->size - offset;
+ } else {
+ assert(range + offset >= range);
+ assert(range + offset <= buffer->size);
+ return range;
+ }
+}
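+
+/* Worked example (editorial, assumed values): for a 256-byte buffer,
+ * vk_buffer_range(buf, 64, VK_WHOLE_SIZE) returns 192, while
+ * vk_buffer_range(buf, 64, 128) only validates the bounds and returns 128.
+ */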
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_BUFFER_H */
diff --git a/src/vulkan/runtime/vk_buffer_view.c b/src/vulkan/runtime/vk_buffer_view.c
new file mode 100644
index 00000000000..cea62d7de90
--- /dev/null
+++ b/src/vulkan/runtime/vk_buffer_view.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright © 2022 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_buffer_view.h"
+
+#include "vk_alloc.h"
+#include "vk_buffer.h"
+#include "vk_device.h"
+#include "vk_format.h"
+
+void
+vk_buffer_view_init(struct vk_device *device,
+ struct vk_buffer_view *buffer_view,
+ const VkBufferViewCreateInfo *pCreateInfo)
+{
+ VK_FROM_HANDLE(vk_buffer, buffer, pCreateInfo->buffer);
+
+ vk_object_base_init(device, &buffer_view->base, VK_OBJECT_TYPE_BUFFER_VIEW);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
+ assert(pCreateInfo->flags == 0);
+ assert(pCreateInfo->range > 0);
+
+ buffer_view->buffer = buffer;
+ buffer_view->format = pCreateInfo->format;
+ buffer_view->offset = pCreateInfo->offset;
+ buffer_view->range = vk_buffer_range(buffer, pCreateInfo->offset,
+ pCreateInfo->range);
+ buffer_view->elements = buffer_view->range /
+ vk_format_get_blocksize(buffer_view->format);
+}
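+
+/* Worked example (editorial, assumed values and ignoring alignment valid
+ * usage): a view with VK_FORMAT_R32G32B32A32_SFLOAT (16-byte texels) whose
+ * resolved range is 260 bytes ends up with elements = 260 / 16 = 16, i.e.
+ * the trailing partial texel is dropped.
+ */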
+
+void *
+vk_buffer_view_create(struct vk_device *device,
+ const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_buffer_view *buffer_view;
+
+ buffer_view = vk_zalloc2(&device->alloc, alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!buffer_view)
+ return NULL;
+
+ vk_buffer_view_init(device, buffer_view, pCreateInfo);
+
+ return buffer_view;
+}
+
+void
+vk_buffer_view_finish(struct vk_buffer_view *buffer_view)
+{
+ vk_object_base_finish(&buffer_view->base);
+}
+
+void
+vk_buffer_view_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_buffer_view *buffer_view)
+{
+ vk_object_free(device, alloc, buffer_view);
+}
diff --git a/src/vulkan/runtime/vk_buffer_view.h b/src/vulkan/runtime/vk_buffer_view.h
new file mode 100644
index 00000000000..aa9f4270175
--- /dev/null
+++ b/src/vulkan/runtime/vk_buffer_view.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright © 2022 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_BUFFER_VIEW_H
+#define VK_BUFFER_VIEW_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_buffer_view {
+ struct vk_object_base base;
+
+ /** VkBufferViewCreateInfo::buffer */
+ struct vk_buffer *buffer;
+
+ /** VkBufferViewCreateInfo::format */
+ VkFormat format;
+
+ /** VkBufferViewCreateInfo::offset */
+ VkDeviceSize offset;
+
+ /** VkBufferViewCreateInfo::range
+ *
+ * This is asserted to be in-range for the attached buffer and will never
+ * be VK_WHOLE_SIZE.
+ */
+ VkDeviceSize range;
+
+ /* Number of elements in the buffer view. This is the range divided by
+ * the format's texel block size, rounded down.
+ */
+ VkDeviceSize elements;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_buffer_view, base, VkBufferView,
+ VK_OBJECT_TYPE_BUFFER_VIEW);
+
+void vk_buffer_view_init(struct vk_device *device,
+ struct vk_buffer_view *buffer_view,
+ const VkBufferViewCreateInfo *pCreateInfo);
+void vk_buffer_view_finish(struct vk_buffer_view *buffer_view);
+void *vk_buffer_view_create(struct vk_device *device,
+ const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+void vk_buffer_view_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_buffer_view *buffer_view);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_BUFFER_VIEW_H */
diff --git a/src/vulkan/util/vk_cmd_copy.c b/src/vulkan/runtime/vk_cmd_copy.c
index 9fcb0c23d6e..5b4ef8a28d2 100644
--- a/src/vulkan/util/vk_cmd_copy.c
+++ b/src/vulkan/runtime/vk_cmd_copy.c
@@ -37,26 +37,26 @@ vk_common_CmdCopyBuffer(VkCommandBuffer commandBuffer,
*/
struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
- STACK_ARRAY(VkBufferCopy2KHR, region2s, regionCount);
+ STACK_ARRAY(VkBufferCopy2, region2s, regionCount);
for (uint32_t r = 0; r < regionCount; r++) {
- region2s[r] = (VkBufferCopy2KHR) {
- .sType = VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR,
+ region2s[r] = (VkBufferCopy2) {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_COPY_2,
.srcOffset = pRegions[r].srcOffset,
.dstOffset = pRegions[r].dstOffset,
.size = pRegions[r].size,
};
}
- VkCopyBufferInfo2KHR info = {
- .sType = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR,
+ VkCopyBufferInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
.srcBuffer = srcBuffer,
.dstBuffer = dstBuffer,
.regionCount = regionCount,
.pRegions = region2s,
};
- disp->device->dispatch_table.CmdCopyBuffer2KHR(commandBuffer, &info);
+ disp->device->dispatch_table.CmdCopyBuffer2(commandBuffer, &info);
STACK_ARRAY_FINISH(region2s);
}
@@ -75,11 +75,11 @@ vk_common_CmdCopyImage(VkCommandBuffer commandBuffer,
*/
struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
- STACK_ARRAY(VkImageCopy2KHR, region2s, regionCount);
+ STACK_ARRAY(VkImageCopy2, region2s, regionCount);
for (uint32_t r = 0; r < regionCount; r++) {
- region2s[r] = (VkImageCopy2KHR) {
- .sType = VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR,
+ region2s[r] = (VkImageCopy2) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_COPY_2,
.srcSubresource = pRegions[r].srcSubresource,
.srcOffset = pRegions[r].srcOffset,
.dstSubresource = pRegions[r].dstSubresource,
@@ -88,8 +88,8 @@ vk_common_CmdCopyImage(VkCommandBuffer commandBuffer,
};
}
- VkCopyImageInfo2KHR info = {
- .sType = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR,
+ VkCopyImageInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2,
.srcImage = srcImage,
.srcImageLayout = srcImageLayout,
.dstImage = dstImage,
@@ -98,7 +98,7 @@ vk_common_CmdCopyImage(VkCommandBuffer commandBuffer,
.pRegions = region2s,
};
- disp->device->dispatch_table.CmdCopyImage2KHR(commandBuffer, &info);
+ disp->device->dispatch_table.CmdCopyImage2(commandBuffer, &info);
STACK_ARRAY_FINISH(region2s);
}
@@ -116,11 +116,11 @@ vk_common_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
*/
struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
- STACK_ARRAY(VkBufferImageCopy2KHR, region2s, regionCount);
+ STACK_ARRAY(VkBufferImageCopy2, region2s, regionCount);
for (uint32_t r = 0; r < regionCount; r++) {
- region2s[r] = (VkBufferImageCopy2KHR) {
- .sType = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR,
+ region2s[r] = (VkBufferImageCopy2) {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
.bufferOffset = pRegions[r].bufferOffset,
.bufferRowLength = pRegions[r].bufferRowLength,
.bufferImageHeight = pRegions[r].bufferImageHeight,
@@ -130,8 +130,8 @@ vk_common_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
};
}
- VkCopyBufferToImageInfo2KHR info = {
- .sType = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR,
+ VkCopyBufferToImageInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2,
.srcBuffer = srcBuffer,
.dstImage = dstImage,
.dstImageLayout = dstImageLayout,
@@ -139,7 +139,7 @@ vk_common_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
.pRegions = region2s,
};
- disp->device->dispatch_table.CmdCopyBufferToImage2KHR(commandBuffer, &info);
+ disp->device->dispatch_table.CmdCopyBufferToImage2(commandBuffer, &info);
STACK_ARRAY_FINISH(region2s);
}
@@ -157,11 +157,11 @@ vk_common_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
*/
struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
- STACK_ARRAY(VkBufferImageCopy2KHR, region2s, regionCount);
+ STACK_ARRAY(VkBufferImageCopy2, region2s, regionCount);
for (uint32_t r = 0; r < regionCount; r++) {
- region2s[r] = (VkBufferImageCopy2KHR) {
- .sType = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR,
+ region2s[r] = (VkBufferImageCopy2) {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
.bufferOffset = pRegions[r].bufferOffset,
.bufferRowLength = pRegions[r].bufferRowLength,
.bufferImageHeight = pRegions[r].bufferImageHeight,
@@ -171,8 +171,8 @@ vk_common_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
};
}
- VkCopyImageToBufferInfo2KHR info = {
- .sType = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR,
+ VkCopyImageToBufferInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2,
.srcImage = srcImage,
.srcImageLayout = srcImageLayout,
.dstBuffer = dstBuffer,
@@ -180,7 +180,7 @@ vk_common_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
.pRegions = region2s,
};
- disp->device->dispatch_table.CmdCopyImageToBuffer2KHR(commandBuffer, &info);
+ disp->device->dispatch_table.CmdCopyImageToBuffer2(commandBuffer, &info);
STACK_ARRAY_FINISH(region2s);
}
@@ -200,11 +200,11 @@ vk_common_CmdBlitImage(VkCommandBuffer commandBuffer,
*/
struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
- STACK_ARRAY(VkImageBlit2KHR, region2s, regionCount);
+ STACK_ARRAY(VkImageBlit2, region2s, regionCount);
for (uint32_t r = 0; r < regionCount; r++) {
- region2s[r] = (VkImageBlit2KHR) {
- .sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR,
+ region2s[r] = (VkImageBlit2) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
.srcSubresource = pRegions[r].srcSubresource,
.srcOffsets = {
pRegions[r].srcOffsets[0],
@@ -218,8 +218,8 @@ vk_common_CmdBlitImage(VkCommandBuffer commandBuffer,
};
}
- VkBlitImageInfo2KHR info = {
- .sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR,
+ VkBlitImageInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
.srcImage = srcImage,
.srcImageLayout = srcImageLayout,
.dstImage = dstImage,
@@ -229,7 +229,7 @@ vk_common_CmdBlitImage(VkCommandBuffer commandBuffer,
.filter = filter,
};
- disp->device->dispatch_table.CmdBlitImage2KHR(commandBuffer, &info);
+ disp->device->dispatch_table.CmdBlitImage2(commandBuffer, &info);
STACK_ARRAY_FINISH(region2s);
}
@@ -248,11 +248,11 @@ vk_common_CmdResolveImage(VkCommandBuffer commandBuffer,
*/
struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
- STACK_ARRAY(VkImageResolve2KHR, region2s, regionCount);
+ STACK_ARRAY(VkImageResolve2, region2s, regionCount);
for (uint32_t r = 0; r < regionCount; r++) {
- region2s[r] = (VkImageResolve2KHR) {
- .sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR,
+ region2s[r] = (VkImageResolve2) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
.srcSubresource = pRegions[r].srcSubresource,
.srcOffset = pRegions[r].srcOffset,
.dstSubresource = pRegions[r].dstSubresource,
@@ -261,8 +261,8 @@ vk_common_CmdResolveImage(VkCommandBuffer commandBuffer,
};
}
- VkResolveImageInfo2KHR info = {
- .sType = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR,
+ VkResolveImageInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
.srcImage = srcImage,
.srcImageLayout = srcImageLayout,
.dstImage = dstImage,
@@ -271,7 +271,7 @@ vk_common_CmdResolveImage(VkCommandBuffer commandBuffer,
.pRegions = region2s,
};
- disp->device->dispatch_table.CmdResolveImage2KHR(commandBuffer, &info);
+ disp->device->dispatch_table.CmdResolveImage2(commandBuffer, &info);
STACK_ARRAY_FINISH(region2s);
}
diff --git a/src/vulkan/runtime/vk_cmd_enqueue.c b/src/vulkan/runtime/vk_cmd_enqueue.c
new file mode 100644
index 00000000000..31ea5589d67
--- /dev/null
+++ b/src/vulkan/runtime/vk_cmd_enqueue.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright © 2019 Red Hat.
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_alloc.h"
+#include "vk_cmd_enqueue_entrypoints.h"
+#include "vk_command_buffer.h"
+#include "vk_device.h"
+#include "vk_pipeline_layout.h"
+#include "vk_util.h"
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_CmdDrawMultiEXT(VkCommandBuffer commandBuffer,
+ uint32_t drawCount,
+ const VkMultiDrawInfoEXT *pVertexInfo,
+ uint32_t instanceCount,
+ uint32_t firstInstance,
+ uint32_t stride)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ struct vk_cmd_queue_entry *cmd =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd)
+ return;
+
+ cmd->type = VK_CMD_DRAW_MULTI_EXT;
+ list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
+
+ cmd->u.draw_multi_ext.draw_count = drawCount;
+ if (pVertexInfo) {
+ unsigned i = 0;
+ cmd->u.draw_multi_ext.vertex_info =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(*cmd->u.draw_multi_ext.vertex_info) * drawCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ vk_foreach_multi_draw(draw, i, pVertexInfo, drawCount, stride) {
+ memcpy(&cmd->u.draw_multi_ext.vertex_info[i], draw,
+ sizeof(*cmd->u.draw_multi_ext.vertex_info));
+ }
+ }
+ cmd->u.draw_multi_ext.instance_count = instanceCount;
+ cmd->u.draw_multi_ext.first_instance = firstInstance;
+ cmd->u.draw_multi_ext.stride = stride;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer,
+ uint32_t drawCount,
+ const VkMultiDrawIndexedInfoEXT *pIndexInfo,
+ uint32_t instanceCount,
+ uint32_t firstInstance,
+ uint32_t stride,
+ const int32_t *pVertexOffset)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ struct vk_cmd_queue_entry *cmd =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd)
+ return;
+
+ cmd->type = VK_CMD_DRAW_MULTI_INDEXED_EXT;
+ list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
+
+ cmd->u.draw_multi_indexed_ext.draw_count = drawCount;
+
+ if (pIndexInfo) {
+ unsigned i = 0;
+ cmd->u.draw_multi_indexed_ext.index_info =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(*cmd->u.draw_multi_indexed_ext.index_info) * drawCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
+ cmd->u.draw_multi_indexed_ext.index_info[i].firstIndex = draw->firstIndex;
+ cmd->u.draw_multi_indexed_ext.index_info[i].indexCount = draw->indexCount;
+ if (pVertexOffset == NULL)
+ cmd->u.draw_multi_indexed_ext.index_info[i].vertexOffset = draw->vertexOffset;
+ }
+ }
+
+ cmd->u.draw_multi_indexed_ext.instance_count = instanceCount;
+ cmd->u.draw_multi_indexed_ext.first_instance = firstInstance;
+ cmd->u.draw_multi_indexed_ext.stride = stride;
+
+ if (pVertexOffset) {
+ cmd->u.draw_multi_indexed_ext.vertex_offset =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ memcpy(cmd->u.draw_multi_indexed_ext.vertex_offset, pVertexOffset,
+ sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset));
+ }
+}
+
+static void
+push_descriptors_set_free(struct vk_cmd_queue *queue,
+ struct vk_cmd_queue_entry *cmd)
+{
+ struct vk_cmd_push_descriptor_set_khr *pds = &cmd->u.push_descriptor_set_khr;
+ for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
+ VkWriteDescriptorSet *entry = &pds->descriptor_writes[i];
+ switch (entry->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ vk_free(queue->alloc, (void *)entry->pImageInfo);
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ vk_free(queue->alloc, (void *)entry->pTexelBufferView);
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ default:
+ vk_free(queue->alloc, (void *)entry->pBufferInfo);
+ break;
+ }
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_cmd_push_descriptor_set_khr *pds;
+
+ struct vk_cmd_queue_entry *cmd =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd)
+ return;
+
+ pds = &cmd->u.push_descriptor_set_khr;
+
+ cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_KHR;
+ cmd->driver_free_cb = push_descriptors_set_free;
+ list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
+
+ pds->pipeline_bind_point = pipelineBindPoint;
+ pds->layout = layout;
+ pds->set = set;
+ pds->descriptor_write_count = descriptorWriteCount;
+
+ if (pDescriptorWrites) {
+ pds->descriptor_writes =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(*pds->descriptor_writes) * descriptorWriteCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ memcpy(pds->descriptor_writes,
+ pDescriptorWrites,
+ sizeof(*pds->descriptor_writes) * descriptorWriteCount);
+
+ for (unsigned i = 0; i < descriptorWriteCount; i++) {
+ switch (pds->descriptor_writes[i].descriptorType) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ pds->descriptor_writes[i].pImageInfo =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ memcpy((VkDescriptorImageInfo *)pds->descriptor_writes[i].pImageInfo,
+ pDescriptorWrites[i].pImageInfo,
+ sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount);
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ pds->descriptor_writes[i].pTexelBufferView =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ memcpy((VkBufferView *)pds->descriptor_writes[i].pTexelBufferView,
+ pDescriptorWrites[i].pTexelBufferView,
+ sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount);
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ default:
+ pds->descriptor_writes[i].pBufferInfo =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ memcpy((VkDescriptorBufferInfo *)pds->descriptor_writes[i].pBufferInfo,
+ pDescriptorWrites[i].pBufferInfo,
+ sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount);
+ break;
+ }
+ }
+ }
+}
+
+static void
+unref_pipeline_layout(struct vk_cmd_queue *queue,
+ struct vk_cmd_queue_entry *cmd)
+{
+ struct vk_command_buffer *cmd_buffer =
+ container_of(queue, struct vk_command_buffer, cmd_queue);
+ VK_FROM_HANDLE(vk_pipeline_layout, layout,
+ cmd->u.bind_descriptor_sets.layout);
+
+ assert(cmd->type == VK_CMD_BIND_DESCRIPTOR_SETS);
+
+ vk_pipeline_layout_unref(cmd_buffer->base.device, layout);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t *pDynamicOffsets)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ struct vk_cmd_queue_entry *cmd =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd)
+ return;
+
+ cmd->type = VK_CMD_BIND_DESCRIPTOR_SETS;
+ list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
+
+ /* We need to hold a reference to the pipeline layout as long as this
+ * command is in the queue. Otherwise, it may get deleted out from under
+ * us before the command is replayed.
+ */
+ vk_pipeline_layout_ref(vk_pipeline_layout_from_handle(layout));
+ cmd->u.bind_descriptor_sets.layout = layout;
+ cmd->driver_free_cb = unref_pipeline_layout;
+
+ cmd->u.bind_descriptor_sets.pipeline_bind_point = pipelineBindPoint;
+ cmd->u.bind_descriptor_sets.first_set = firstSet;
+ cmd->u.bind_descriptor_sets.descriptor_set_count = descriptorSetCount;
+ if (pDescriptorSets) {
+ cmd->u.bind_descriptor_sets.descriptor_sets =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ memcpy(cmd->u.bind_descriptor_sets.descriptor_sets, pDescriptorSets,
+ sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount);
+ }
+ cmd->u.bind_descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
+ if (pDynamicOffsets) {
+ cmd->u.bind_descriptor_sets.dynamic_offsets =
+ vk_zalloc(cmd_buffer->cmd_queue.alloc,
+ sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ memcpy(cmd->u.bind_descriptor_sets.dynamic_offsets, pDynamicOffsets,
+ sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount);
+ }
+}
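+
+/* Editorial note, not part of the original patch: the hand-rolled
+ * entrypoints in this file follow the same pattern as the generated ones in
+ * vk_cmd_queue.c -- allocate a vk_cmd_queue_entry from the queue's
+ * allocator, deep-copy every pointer argument, and register a
+ * driver_free_cb when extra cleanup is needed once the queue is reset or
+ * finished. A minimal sketch of such a callback (placeholder names):
+ *
+ *    static void
+ *    free_my_payload(struct vk_cmd_queue *queue,
+ *                    struct vk_cmd_queue_entry *cmd)
+ *    {
+ *       vk_free(queue->alloc, cmd->u.some_command.copied_array);
+ *    }
+ */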
+
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static void
+dispatch_graph_amdx_free(struct vk_cmd_queue *queue, struct vk_cmd_queue_entry *cmd)
+{
+ VkDispatchGraphCountInfoAMDX *count_info = cmd->u.dispatch_graph_amdx.count_info;
+ void *infos = (void *)count_info->infos.hostAddress;
+
+ for (uint32_t i = 0; i < count_info->count; i++) {
+ VkDispatchGraphInfoAMDX *info = (void *)((const uint8_t *)infos + i * count_info->stride);
+ vk_free(queue->alloc, (void *)info->payloads.hostAddress);
+ }
+
+ vk_free(queue->alloc, infos);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_CmdDispatchGraphAMDX(VkCommandBuffer commandBuffer, VkDeviceAddress scratch,
+ const VkDispatchGraphCountInfoAMDX *pCountInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ if (vk_command_buffer_has_error(cmd_buffer))
+ return;
+
+ VkResult result = VK_SUCCESS;
+ const VkAllocationCallbacks *alloc = cmd_buffer->cmd_queue.alloc;
+
+ struct vk_cmd_queue_entry *cmd =
+ vk_zalloc(alloc, sizeof(struct vk_cmd_queue_entry), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto err;
+ }
+
+ cmd->type = VK_CMD_DISPATCH_GRAPH_AMDX;
+ cmd->driver_free_cb = dispatch_graph_amdx_free;
+
+ cmd->u.dispatch_graph_amdx.scratch = scratch;
+
+ cmd->u.dispatch_graph_amdx.count_info =
+ vk_zalloc(alloc, sizeof(VkDispatchGraphCountInfoAMDX), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (cmd->u.dispatch_graph_amdx.count_info == NULL) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto err;
+ }
+
+ memcpy((void *)cmd->u.dispatch_graph_amdx.count_info, pCountInfo,
+ sizeof(VkDispatchGraphCountInfoAMDX));
+
+ uint32_t infos_size = pCountInfo->count * pCountInfo->stride;
+ void *infos = vk_zalloc(alloc, infos_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ cmd->u.dispatch_graph_amdx.count_info->infos.hostAddress = infos;
+ memcpy(infos, pCountInfo->infos.hostAddress, infos_size);
+
+ for (uint32_t i = 0; i < pCountInfo->count; i++) {
+ VkDispatchGraphInfoAMDX *info = (void *)((const uint8_t *)infos + i * pCountInfo->stride);
+
+ uint32_t payloads_size = info->payloadCount * info->payloadStride;
+ void *dst_payload = vk_zalloc(alloc, payloads_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ memcpy(dst_payload, info->payloads.hostAddress, payloads_size);
+ info->payloads.hostAddress = dst_payload;
+ }
+
+ list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
+ goto finish;
+err:
+ if (cmd) {
+ /* Free the copied infos and payloads before the entry itself to avoid
+ * a use-after-free; skip the callback if count_info was never allocated.
+ */
+ if (cmd->u.dispatch_graph_amdx.count_info)
+ dispatch_graph_amdx_free(&cmd_buffer->cmd_queue, cmd);
+ vk_free(alloc, cmd);
+ }
+
+finish:
+ if (unlikely(result != VK_SUCCESS))
+ vk_command_buffer_set_error(cmd_buffer, result);
+}
+#endif
+
+static void
+vk_cmd_build_acceleration_structures_khr_free(struct vk_cmd_queue *queue,
+ struct vk_cmd_queue_entry *cmd)
+{
+ struct vk_cmd_build_acceleration_structures_khr *build =
+ &cmd->u.build_acceleration_structures_khr;
+
+ for (uint32_t i = 0; i < build->info_count; i++) {
+ vk_free(queue->alloc, (void *)build->infos[i].pGeometries);
+ vk_free(queue->alloc, (void *)build->pp_build_range_infos[i]);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_CmdBuildAccelerationStructuresKHR(
+ VkCommandBuffer commandBuffer, uint32_t infoCount,
+ const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
+ const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ if (vk_command_buffer_has_error(cmd_buffer))
+ return;
+
+ struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;
+
+ struct vk_cmd_queue_entry *cmd =
+ vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[VK_CMD_BUILD_ACCELERATION_STRUCTURES_KHR], 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd)
+ goto err;
+
+ cmd->type = VK_CMD_BUILD_ACCELERATION_STRUCTURES_KHR;
+ cmd->driver_free_cb = vk_cmd_build_acceleration_structures_khr_free;
+
+ struct vk_cmd_build_acceleration_structures_khr *build =
+ &cmd->u.build_acceleration_structures_khr;
+
+ build->info_count = infoCount;
+ if (pInfos) {
+ build->infos = vk_zalloc(queue->alloc, sizeof(*build->infos) * infoCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!build->infos)
+ goto err;
+
+ memcpy((VkAccelerationStructureBuildGeometryInfoKHR *)build->infos, pInfos,
+ sizeof(*build->infos) * (infoCount));
+
+ for (uint32_t i = 0; i < infoCount; i++) {
+ uint32_t geometries_size =
+ build->infos[i].geometryCount * sizeof(VkAccelerationStructureGeometryKHR);
+ VkAccelerationStructureGeometryKHR *geometries =
+ vk_zalloc(queue->alloc, geometries_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!geometries)
+ goto err;
+
+ if (pInfos[i].pGeometries) {
+ memcpy(geometries, pInfos[i].pGeometries, geometries_size);
+ } else {
+ for (uint32_t j = 0; j < build->infos[i].geometryCount; j++)
+ memcpy(&geometries[j], pInfos[i].ppGeometries[j], sizeof(VkAccelerationStructureGeometryKHR));
+ }
+
+ build->infos[i].pGeometries = geometries;
+ }
+ }
+ if (ppBuildRangeInfos) {
+ build->pp_build_range_infos =
+ vk_zalloc(queue->alloc, sizeof(*build->pp_build_range_infos) * infoCount, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!build->pp_build_range_infos)
+ goto err;
+
+ VkAccelerationStructureBuildRangeInfoKHR **pp_build_range_infos =
+ (void *)build->pp_build_range_infos;
+
+ for (uint32_t i = 0; i < infoCount; i++) {
+ uint32_t build_range_size =
+ build->infos[i].geometryCount * sizeof(VkAccelerationStructureBuildRangeInfoKHR);
+ VkAccelerationStructureBuildRangeInfoKHR *p_build_range_infos =
+ vk_zalloc(queue->alloc, build_range_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!p_build_range_infos)
+ goto err;
+
+ memcpy(p_build_range_infos, ppBuildRangeInfos[i], build_range_size);
+
+ pp_build_range_infos[i] = p_build_range_infos;
+ }
+ }
+
+ list_addtail(&cmd->cmd_link, &queue->cmds);
+ return;
+
+err:
+ if (cmd)
+ vk_cmd_build_acceleration_structures_khr_free(queue, cmd);
+
+ vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
+}
diff --git a/src/vulkan/runtime/vk_command_buffer.c b/src/vulkan/runtime/vk_command_buffer.c
new file mode 100644
index 00000000000..f678d9bc0a1
--- /dev/null
+++ b/src/vulkan/runtime/vk_command_buffer.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_command_buffer.h"
+
+#include "vk_command_pool.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+
+VkResult
+vk_command_buffer_init(struct vk_command_pool *pool,
+ struct vk_command_buffer *command_buffer,
+ const struct vk_command_buffer_ops *ops,
+ VkCommandBufferLevel level)
+{
+ memset(command_buffer, 0, sizeof(*command_buffer));
+ vk_object_base_init(pool->base.device, &command_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
+ command_buffer->pool = pool;
+ command_buffer->level = level;
+ command_buffer->ops = ops;
+ vk_dynamic_graphics_state_init(&command_buffer->dynamic_graphics_state);
+ command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_INITIAL;
+ command_buffer->record_result = VK_SUCCESS;
+ vk_cmd_queue_init(&command_buffer->cmd_queue, &pool->alloc);
+ vk_meta_object_list_init(&command_buffer->meta_objects);
+ util_dynarray_init(&command_buffer->labels, NULL);
+ command_buffer->region_begin = true;
+
+ list_add(&command_buffer->pool_link, &pool->command_buffers);
+
+ return VK_SUCCESS;
+}
+
+void
+vk_command_buffer_reset(struct vk_command_buffer *command_buffer)
+{
+ vk_dynamic_graphics_state_clear(&command_buffer->dynamic_graphics_state);
+ command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_INITIAL;
+ command_buffer->record_result = VK_SUCCESS;
+ vk_command_buffer_reset_render_pass(command_buffer);
+ vk_cmd_queue_reset(&command_buffer->cmd_queue);
+ vk_meta_object_list_reset(command_buffer->base.device,
+ &command_buffer->meta_objects);
+ util_dynarray_clear(&command_buffer->labels);
+ command_buffer->region_begin = true;
+}
+
+void
+vk_command_buffer_begin(struct vk_command_buffer *command_buffer,
+ const VkCommandBufferBeginInfo *pBeginInfo)
+{
+ if (command_buffer->state != MESA_VK_COMMAND_BUFFER_STATE_INITIAL &&
+ command_buffer->ops->reset != NULL)
+ command_buffer->ops->reset(command_buffer, 0);
+
+ command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_RECORDING;
+}
+
+VkResult
+vk_command_buffer_end(struct vk_command_buffer *command_buffer)
+{
+ assert(command_buffer->state == MESA_VK_COMMAND_BUFFER_STATE_RECORDING);
+
+ if (vk_command_buffer_has_error(command_buffer))
+ command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_INVALID;
+ else
+ command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_EXECUTABLE;
+
+ return vk_command_buffer_get_record_result(command_buffer);
+}
+
+void
+vk_command_buffer_finish(struct vk_command_buffer *command_buffer)
+{
+ list_del(&command_buffer->pool_link);
+ vk_command_buffer_reset_render_pass(command_buffer);
+ vk_cmd_queue_finish(&command_buffer->cmd_queue);
+ util_dynarray_fini(&command_buffer->labels);
+ vk_meta_object_list_finish(command_buffer->base.device,
+ &command_buffer->meta_objects);
+ vk_object_base_finish(&command_buffer->base);
+}
+
+void
+vk_command_buffer_recycle(struct vk_command_buffer *cmd_buffer)
+{
+ /* Reset, returning resources to the pool. The command buffer object
+ * itself will be recycled but, if the driver supports returning other
+ * resources such as batch buffers to the pool, it should do so so they're
+ * not tied up in recycled command buffer objects.
+ */
+ cmd_buffer->ops->reset(cmd_buffer,
+ VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
+
+ vk_object_base_recycle(&cmd_buffer->base);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_ResetCommandBuffer(VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ if (cmd_buffer->state != MESA_VK_COMMAND_BUFFER_STATE_INITIAL)
+ cmd_buffer->ops->reset(cmd_buffer, flags);
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdExecuteCommands(VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers)
+{
+ VK_FROM_HANDLE(vk_command_buffer, primary, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ primary->base.device->command_dispatch_table;
+
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ VK_FROM_HANDLE(vk_command_buffer, secondary, pCommandBuffers[i]);
+
+ vk_cmd_queue_execute(&secondary->cmd_queue, commandBuffer, disp);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer *pBuffers,
+ const VkDeviceSize *pOffsets)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdBindVertexBuffers2(commandBuffer, firstBinding, bindingCount,
+ pBuffers, pOffsets, NULL, NULL);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBindIndexBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdBindIndexBuffer2KHR(commandBuffer, buffer, offset,
+ VK_WHOLE_SIZE, indexType);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdDispatch(VkCommandBuffer commandBuffer,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdDispatchBase(commandBuffer, 0, 0, 0,
+ groupCountX, groupCountY, groupCountZ);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
+{
+ /* Nothing to do here since we only support a single device */
+ assert(deviceMask == 0x1);
+}
+
+VkShaderStageFlags
+vk_shader_stages_from_bind_point(VkPipelineBindPoint pipelineBindPoint)
+{
+ switch (pipelineBindPoint) {
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ case VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX:
+ return VK_SHADER_STAGE_COMPUTE_BIT | MESA_VK_SHADER_STAGE_WORKGRAPH_HACK_BIT_FIXME;
+#endif
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ return VK_SHADER_STAGE_COMPUTE_BIT;
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ return VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_TASK_BIT_EXT | VK_SHADER_STAGE_MESH_BIT_EXT;
+ case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR:
+ return VK_SHADER_STAGE_RAYGEN_BIT_KHR |
+ VK_SHADER_STAGE_ANY_HIT_BIT_KHR |
+ VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR |
+ VK_SHADER_STAGE_MISS_BIT_KHR |
+ VK_SHADER_STAGE_INTERSECTION_BIT_KHR |
+ VK_SHADER_STAGE_CALLABLE_BIT_KHR;
+ default:
+ unreachable("unknown bind point!");
+ }
+ return 0;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBindDescriptorSets(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets)
+{
+ const VkBindDescriptorSetsInfoKHR two = {
+ .sType = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR,
+ .stageFlags = vk_shader_stages_from_bind_point(pipelineBindPoint),
+ .layout = layout,
+ .firstSet = firstSet,
+ .descriptorSetCount = descriptorSetCount,
+ .pDescriptorSets = pDescriptorSets,
+ .dynamicOffsetCount = dynamicOffsetCount,
+ .pDynamicOffsets = pDynamicOffsets
+ };
+
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdBindDescriptorSets2KHR(commandBuffer, &two);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdPushConstants(
+ VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues)
+{
+ const VkPushConstantsInfoKHR two = {
+ .sType = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR,
+ .layout = layout,
+ .stageFlags = stageFlags,
+ .offset = offset,
+ .size = size,
+ .pValues = pValues,
+ };
+
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdPushConstants2KHR(commandBuffer, &two);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdPushDescriptorSetKHR(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites)
+{
+ const VkPushDescriptorSetInfoKHR two = {
+ .sType = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR,
+ .stageFlags = vk_shader_stages_from_bind_point(pipelineBindPoint),
+ .layout = layout,
+ .set = set,
+ .descriptorWriteCount = descriptorWriteCount,
+ .pDescriptorWrites = pDescriptorWrites,
+ };
+
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdPushDescriptorSet2KHR(commandBuffer, &two);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdPushDescriptorSetWithTemplateKHR(
+ VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ VkPipelineLayout layout,
+ uint32_t set,
+ const void* pData)
+{
+ const VkPushDescriptorSetWithTemplateInfoKHR two = {
+ .sType = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR,
+ .descriptorUpdateTemplate = descriptorUpdateTemplate,
+ .layout = layout,
+ .set = set,
+ .pData = pData,
+ };
+
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdPushDescriptorSetWithTemplate2KHR(commandBuffer, &two);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDescriptorBufferOffsetsEXT(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const uint32_t* pBufferIndices,
+ const VkDeviceSize* pOffsets)
+{
+ const VkSetDescriptorBufferOffsetsInfoEXT two = {
+ .sType = VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT,
+ .stageFlags = vk_shader_stages_from_bind_point(pipelineBindPoint),
+ .layout = layout,
+ .firstSet = firstSet,
+ .setCount = setCount,
+ .pBufferIndices = pBufferIndices,
+ .pOffsets = pOffsets
+ };
+
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdSetDescriptorBufferOffsets2EXT(commandBuffer, &two);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBindDescriptorBufferEmbeddedSamplersEXT(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t set)
+{
+ const VkBindDescriptorBufferEmbeddedSamplersInfoEXT two = {
+ .sType = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT,
+ .stageFlags = vk_shader_stages_from_bind_point(pipelineBindPoint),
+ .layout = layout,
+ .set = set
+ };
+
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdBindDescriptorBufferEmbeddedSamplers2EXT(commandBuffer, &two);
+}
diff --git a/src/vulkan/runtime/vk_command_buffer.h b/src/vulkan/runtime/vk_command_buffer.h
new file mode 100644
index 00000000000..5ff51bbf578
--- /dev/null
+++ b/src/vulkan/runtime/vk_command_buffer.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_COMMAND_BUFFER_H
+#define VK_COMMAND_BUFFER_H
+
+#include "vk_cmd_queue.h"
+#include "vk_graphics_state.h"
+#include "vk_log.h"
+#include "vk_meta.h"
+#include "vk_object.h"
+#include "util/list.h"
+#include "util/u_dynarray.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_command_pool;
+struct vk_framebuffer;
+struct vk_image_view;
+struct vk_render_pass;
+
+struct vk_attachment_view_state {
+ VkImageLayout layout;
+ VkImageLayout stencil_layout;
+ const VkSampleLocationsInfoEXT *sample_locations;
+};
+
+struct vk_attachment_state {
+ struct vk_image_view *image_view;
+
+ /** A running tally of which views have been loaded */
+ uint32_t views_loaded;
+
+ /** Per-view state */
+ struct vk_attachment_view_state views[MESA_VK_MAX_MULTIVIEW_VIEW_COUNT];
+
+ /** VkRenderPassBeginInfo::pClearValues[i] */
+ VkClearValue clear_value;
+};
+
+/** Command buffer ops */
+struct vk_command_buffer_ops {
+ /** Creates a command buffer
+ *
+ * Used by the common command pool implementation. This function MUST
+ * call `vk_command_buffer_init()`. The level requested by
+ * `vk_common_AllocateCommandBuffers()` is passed in, but the driver must
+ * not rely on it until `vkBeginCommandBuffer()` time.
+ */
+ VkResult (*create)(struct vk_command_pool *, VkCommandBufferLevel,
+ struct vk_command_buffer **);
+
+ /** Resets the command buffer
+ *
+ * Used by the common command pool implementation. This function MUST
+ * call `vk_command_buffer_reset()`. Unlike `vkResetCommandBuffer()`,
+ * this function does not have a return value because it may be called on
+ * destruction paths.
+ */
+ void (*reset)(struct vk_command_buffer *, VkCommandBufferResetFlags);
+
+ /** Destroys the command buffer
+ *
+ * Used by the common command pool implementation. This function MUST
+ * call `vk_command_buffer_finish()`.
+ */
+ void (*destroy)(struct vk_command_buffer *);
+};
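+
+/* Editorial sketch (assumed driver names, not part of this patch) of how a
+ * driver typically fills this out:
+ *
+ *    const struct vk_command_buffer_ops drv_cmd_buffer_ops = {
+ *       .create  = drv_cmd_buffer_create,   // calls vk_command_buffer_init()
+ *       .reset   = drv_cmd_buffer_reset,    // calls vk_command_buffer_reset()
+ *       .destroy = drv_cmd_buffer_destroy,  // calls vk_command_buffer_finish()
+ *    };
+ */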
+
+enum mesa_vk_command_buffer_state {
+ MESA_VK_COMMAND_BUFFER_STATE_INVALID,
+ MESA_VK_COMMAND_BUFFER_STATE_INITIAL,
+ MESA_VK_COMMAND_BUFFER_STATE_RECORDING,
+ MESA_VK_COMMAND_BUFFER_STATE_EXECUTABLE,
+ MESA_VK_COMMAND_BUFFER_STATE_PENDING,
+};
+
+/* this needs spec fixes */
+#define MESA_VK_SHADER_STAGE_WORKGRAPH_HACK_BIT_FIXME (1<<30)
+VkShaderStageFlags vk_shader_stages_from_bind_point(VkPipelineBindPoint pipelineBindPoint);
+
+struct vk_command_buffer {
+ struct vk_object_base base;
+
+ struct vk_command_pool *pool;
+
+ /** VkCommandBufferAllocateInfo::level */
+ VkCommandBufferLevel level;
+
+ const struct vk_command_buffer_ops *ops;
+
+ struct vk_dynamic_graphics_state dynamic_graphics_state;
+
+ /** State of the command buffer */
+ enum mesa_vk_command_buffer_state state;
+
+ /** Command buffer recording error state. */
+ VkResult record_result;
+
+ /** Link in vk_command_pool::command_buffers if pool != NULL */
+ struct list_head pool_link;
+
+ /** Command list for emulated secondary command buffers */
+ struct vk_cmd_queue cmd_queue;
+
+ /** Object list for meta objects */
+ struct vk_meta_object_list meta_objects;
+
+ /**
+ * VK_EXT_debug_utils
+ *
+ * The next two fields represent debug labels storage.
+ *
+ * VK_EXT_debug_utils spec requires that upon triggering a debug message
+ * with a command buffer attached to it, all "active" labels will also be
+ * provided to the callback. The spec describes two distinct ways of
+ * attaching a debug label to the command buffer: opening a label region
+ * and inserting a single label.
+ *
+ * Label region is active between the corresponding `*BeginDebugUtilsLabel`
+ * and `*EndDebugUtilsLabel` calls. The spec doesn't mention any limits on
+ * nestedness of label regions. This implementation assumes that there
+ * aren't any.
+ *
+ * The spec, however, doesn't explain the lifetime of a label submitted by
+ * an `*InsertDebugUtilsLabel` call. The LunarG whitepaper [1] (pp 12-15)
+ * provides a more detailed explanation along with some examples. According
+ * to those, such a label remains active until the next `*DebugUtilsLabel`
+ * call. This means that there can be no more than one such label at a
+ * time.
+ *
+ * ``labels`` contains all active labels at this point in order of
+ * submission. ``region_begin`` denotes whether the most recent label
+ * opens a new region. If ``labels`` is empty, ``region_begin`` must be
+ * true.
+ *
+ * Anytime we modify labels, we first check for ``region_begin``. If it's
+ * false, it means that the most recent label was submitted by
+ * `*InsertDebugUtilsLabel` and we need to remove it before doing anything
+ * else.
+ *
+ * See the discussion here:
+ * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10318#note_1061317
+ *
+ * [1] https://www.lunarg.com/wp-content/uploads/2018/05/Vulkan-Debug-Utils_05_18_v1.pdf
+ */
+ struct util_dynarray labels;
+ bool region_begin;
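+ /* Illustrative sequence (editorial example, not from the spec):
+ * Begin("frame") -> labels = {"frame"}, region_begin = true;
+ * Insert("draw") -> labels = {"frame", "draw"}, region_begin = false;
+ * a following Begin("blit") first drops "draw", then appends "blit" with
+ * region_begin = true.
+ */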
+
+ struct vk_render_pass *render_pass;
+ uint32_t subpass_idx;
+ struct vk_framebuffer *framebuffer;
+ VkRect2D render_area;
+
+ /**
+ * True if we are currently inside a CmdPipelineBarrier() inserted by the
+ * runtime's vk_render_pass.c
+ */
+ bool runtime_rp_barrier;
+
+ /* This uses the same trick as STACK_ARRAY */
+ struct vk_attachment_state *attachments;
+ struct vk_attachment_state _attachments[8];
+
+ VkRenderPassSampleLocationsBeginInfoEXT *pass_sample_locations;
+
+ /**
+ * Bitmask of shader stages bound via a vk_pipeline since the last call to
+ * vkBindShadersEXT().
+ *
+ * Used by the common vk_pipeline implementation
+ */
+ VkShaderStageFlags pipeline_shader_stages;
+};
+
+VK_DEFINE_HANDLE_CASTS(vk_command_buffer, base, VkCommandBuffer,
+ VK_OBJECT_TYPE_COMMAND_BUFFER)
+
+VkResult MUST_CHECK
+vk_command_buffer_init(struct vk_command_pool *pool,
+ struct vk_command_buffer *command_buffer,
+ const struct vk_command_buffer_ops *ops,
+ VkCommandBufferLevel level);
+
+void
+vk_command_buffer_reset_render_pass(struct vk_command_buffer *cmd_buffer);
+
+void
+vk_command_buffer_reset(struct vk_command_buffer *command_buffer);
+
+void
+vk_command_buffer_recycle(struct vk_command_buffer *command_buffer);
+
+void
+vk_command_buffer_begin(struct vk_command_buffer *command_buffer,
+ const VkCommandBufferBeginInfo *pBeginInfo);
+
+VkResult
+vk_command_buffer_end(struct vk_command_buffer *command_buffer);
+
+void
+vk_command_buffer_finish(struct vk_command_buffer *command_buffer);
+
+static inline VkResult
+__vk_command_buffer_set_error(struct vk_command_buffer *command_buffer,
+ VkResult error, const char *file, int line)
+{
+ assert(error != VK_SUCCESS);
+ error = __vk_errorf(command_buffer, error, file, line, NULL);
+ if (command_buffer->record_result == VK_SUCCESS)
+ command_buffer->record_result = error;
+ return error;
+}
+
+#define vk_command_buffer_set_error(command_buffer, error) \
+ __vk_command_buffer_set_error(command_buffer, error, __FILE__, __LINE__)
+
+static inline VkResult
+vk_command_buffer_get_record_result(struct vk_command_buffer *command_buffer)
+{
+ return command_buffer->record_result;
+}
+
+#define vk_command_buffer_has_error(command_buffer) \
+ unlikely((command_buffer)->record_result != VK_SUCCESS)
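
A minimal sketch of how a driver is expected to use this error state; the
drv_* names are hypothetical:

   static void
   drv_cmd_emit_packet(struct vk_command_buffer *cmd, size_t size)
   {
      void *space = malloc(size);   /* stand-in for a driver allocator */
      if (space == NULL) {
         /* Recording continues, but the first error sticks in record_result;
          * the driver can return it from its vkEndCommandBuffer path via
          * vk_command_buffer_get_record_result().
          */
         vk_command_buffer_set_error(cmd, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }
      /* ... emit into space ... */
   }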
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_COMMAND_BUFFER_H */
diff --git a/src/vulkan/runtime/vk_command_pool.c b/src/vulkan/runtime/vk_command_pool.c
new file mode 100644
index 00000000000..4481a52da80
--- /dev/null
+++ b/src/vulkan/runtime/vk_command_pool.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ * Copyright © 2022 Collabora, Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_command_pool.h"
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+
+static bool
+should_recycle_command_buffers(struct vk_device *device)
+{
+ /* They have to be using the common allocation implementation, otherwise
+ * the recycled command buffers will never actually get re-used
+ */
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ if (disp->AllocateCommandBuffers != vk_common_AllocateCommandBuffers)
+ return false;
+
+ /* We need to be able to reset command buffers */
+ if (device->command_buffer_ops->reset == NULL)
+ return false;
+
+ return true;
+}
+
+VkResult MUST_CHECK
+vk_command_pool_init(struct vk_device *device,
+ struct vk_command_pool *pool,
+ const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator)
+{
+ memset(pool, 0, sizeof(*pool));
+ vk_object_base_init(device, &pool->base,
+ VK_OBJECT_TYPE_COMMAND_POOL);
+
+ pool->flags = pCreateInfo->flags;
+ pool->queue_family_index = pCreateInfo->queueFamilyIndex;
+ pool->alloc = pAllocator ? *pAllocator : device->alloc;
+ pool->command_buffer_ops = device->command_buffer_ops;
+ pool->recycle_command_buffers = should_recycle_command_buffers(device);
+ list_inithead(&pool->command_buffers);
+ list_inithead(&pool->free_command_buffers);
+
+ return VK_SUCCESS;
+}
+
+void
+vk_command_pool_finish(struct vk_command_pool *pool)
+{
+ list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
+ &pool->command_buffers, pool_link) {
+ cmd_buffer->ops->destroy(cmd_buffer);
+ }
+ assert(list_is_empty(&pool->command_buffers));
+
+ list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
+ &pool->free_command_buffers, pool_link) {
+ cmd_buffer->ops->destroy(cmd_buffer);
+ }
+ assert(list_is_empty(&pool->free_command_buffers));
+
+ vk_object_base_finish(&pool->base);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateCommandPool(VkDevice _device,
+ const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkCommandPool *pCommandPool)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_command_pool *pool;
+ VkResult result;
+
+ pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pool == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ result = vk_command_pool_init(device, pool, pCreateInfo, pAllocator);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_free2(&device->alloc, pAllocator, pool);
+ return result;
+ }
+
+ *pCommandPool = vk_command_pool_to_handle(pool);
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyCommandPool(VkDevice _device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
+
+ if (pool == NULL)
+ return;
+
+ vk_command_pool_finish(pool);
+ vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_ResetCommandPool(VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags)
+{
+ VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
+ const struct vk_device_dispatch_table *disp =
+ &pool->base.device->dispatch_table;
+
+#define COPY_FLAG(flag) \
+ if (flags & VK_COMMAND_POOL_RESET_##flag) \
+ cb_flags |= VK_COMMAND_BUFFER_RESET_##flag
+
+ VkCommandBufferResetFlags cb_flags = 0;
+ COPY_FLAG(RELEASE_RESOURCES_BIT);
+
+#undef COPY_FLAG
+
+ list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
+ &pool->command_buffers, pool_link) {
+ VkResult result =
+ disp->ResetCommandBuffer(vk_command_buffer_to_handle(cmd_buffer),
+ cb_flags);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+static void
+vk_command_buffer_recycle_or_destroy(struct vk_command_pool *pool,
+ struct vk_command_buffer *cmd_buffer)
+{
+ assert(pool == cmd_buffer->pool);
+
+ if (pool->recycle_command_buffers) {
+ vk_command_buffer_recycle(cmd_buffer);
+
+ list_del(&cmd_buffer->pool_link);
+ list_add(&cmd_buffer->pool_link, &pool->free_command_buffers);
+ } else {
+ cmd_buffer->ops->destroy(cmd_buffer);
+ }
+}
+
+static struct vk_command_buffer *
+vk_command_pool_find_free(struct vk_command_pool *pool)
+{
+ if (list_is_empty(&pool->free_command_buffers))
+ return NULL;
+
+ struct vk_command_buffer *cmd_buffer =
+ list_first_entry(&pool->free_command_buffers,
+ struct vk_command_buffer, pool_link);
+
+ list_del(&cmd_buffer->pool_link);
+ list_addtail(&cmd_buffer->pool_link, &pool->command_buffers);
+
+ return cmd_buffer;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_AllocateCommandBuffers(VkDevice device,
+ const VkCommandBufferAllocateInfo *pAllocateInfo,
+ VkCommandBuffer *pCommandBuffers)
+{
+ VK_FROM_HANDLE(vk_command_pool, pool, pAllocateInfo->commandPool);
+ VkResult result;
+ uint32_t i;
+
+ assert(device == vk_device_to_handle(pool->base.device));
+
+ for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
+ struct vk_command_buffer *cmd_buffer = vk_command_pool_find_free(pool);
+ if (cmd_buffer == NULL) {
+ result = pool->command_buffer_ops->create(pool, pAllocateInfo->level, &cmd_buffer);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+ }
+
+ cmd_buffer->level = pAllocateInfo->level;
+
+ pCommandBuffers[i] = vk_command_buffer_to_handle(cmd_buffer);
+ }
+
+ return VK_SUCCESS;
+
+fail:
+ while (i--) {
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);
+ vk_command_buffer_recycle_or_destroy(pool, cmd_buffer);
+ }
+ for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
+ pCommandBuffers[i] = VK_NULL_HANDLE;
+
+ return result;
+}
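
For context, a driver opts into this allocation path by filling out a
vk_command_buffer_ops vtable and pointing vk_device::command_buffer_ops at it.
A hedged sketch with hypothetical drv_* callbacks; note that
should_recycle_command_buffers() above only enables recycling when .reset is
non-NULL and this common entrypoint is in use:

   static const struct vk_command_buffer_ops drv_cmd_buffer_ops = {
      .create  = drv_cmd_buffer_create,   /* allocate + vk_command_buffer_init() */
      .reset   = drv_cmd_buffer_reset,    /* resets driver state for reuse */
      .destroy = drv_cmd_buffer_destroy,  /* vk_command_buffer_finish() + free */
   };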
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_FreeCommandBuffers(VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers)
+{
+ VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
+
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);
+
+ if (cmd_buffer == NULL)
+ continue;
+
+ vk_command_buffer_recycle_or_destroy(pool, cmd_buffer);
+ }
+}
+
+void
+vk_command_pool_trim(struct vk_command_pool *pool,
+ VkCommandPoolTrimFlags flags)
+{
+ list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
+ &pool->free_command_buffers, pool_link) {
+ cmd_buffer->ops->destroy(cmd_buffer);
+ }
+ assert(list_is_empty(&pool->free_command_buffers));
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_TrimCommandPool(VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlags flags)
+{
+ VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
+
+ vk_command_pool_trim(pool, flags);
+}
diff --git a/src/vulkan/runtime/vk_command_pool.h b/src/vulkan/runtime/vk_command_pool.h
new file mode 100644
index 00000000000..104cfab7265
--- /dev/null
+++ b/src/vulkan/runtime/vk_command_pool.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright © 2022 Collabora, Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_COMMAND_POOL_H
+#define VK_COMMAND_POOL_H
+
+#include "vk_object.h"
+#include "util/list.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Base object for implementing VkCommandPool */
+struct vk_command_pool {
+ struct vk_object_base base;
+
+ /** VkCommandPoolCreateInfo::flags */
+ VkCommandPoolCreateFlags flags;
+
+ /** VkCommandPoolCreateInfo::queueFamilyIndex */
+ uint32_t queue_family_index;
+
+ /** Allocator passed to vkCreateCommandPool() */
+ VkAllocationCallbacks alloc;
+
+ /** Command buffer vtable for command buffers allocated from this pool */
+ const struct vk_command_buffer_ops *command_buffer_ops;
+
+ /** True if we should recycle command buffers */
+ bool recycle_command_buffers;
+
+ /** List of all command buffers */
+ struct list_head command_buffers;
+
+ /** List of freed command buffers for trimming. */
+ struct list_head free_command_buffers;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_command_pool, base, VkCommandPool,
+ VK_OBJECT_TYPE_COMMAND_POOL);
+
+/** Initialize a vk_command_pool
+ *
+ * :param device: |in| The Vulkan device
+ * :param pool: |out| The command pool to initialize
+ * :param pCreateInfo: |in| VkCommandPoolCreateInfo pointer passed to
+ * `vkCreateCommandPool()`
+ * :param pAllocator: |in| Allocation callbacks passed to
+ * `vkCreateCommandPool()`
+ */
+VkResult MUST_CHECK
+vk_command_pool_init(struct vk_device *device,
+ struct vk_command_pool *pool,
+ const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator);
+
+/** Tear down a vk_command_pool
+ *
+ * :param pool: |inout| The command pool to tear down
+ */
+void
+vk_command_pool_finish(struct vk_command_pool *pool);
+
+/** Trim a vk_command_pool
+ *
+ * This discards any resources that may be cached by the common
+ * vk_command_pool code. For driver-implemented command pools, drivers should
+ * call this function inside their `vkTrimCommandPool()` implementation. This
+ * should be called before doing any driver-specific trimming in case it ends
+ * up returning driver-internal resources to the pool.
+ *
+ * :param pool: |inout| The command pool to trim
+ * :param flags: |in| Flags controlling the trim operation
+ */
+void
+vk_command_pool_trim(struct vk_command_pool *pool,
+ VkCommandPoolTrimFlags flags);
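
A sketch of the intended call ordering inside a driver's own entrypoint; the
drv_* names and the cached-resource helper are hypothetical:

   VKAPI_ATTR void VKAPI_CALL
   drv_TrimCommandPool(VkDevice device, VkCommandPool commandPool,
                       VkCommandPoolTrimFlags flags)
   {
      VK_FROM_HANDLE(vk_command_pool, pool, commandPool);

      /* Common code first: destroys recycled command buffers so anything
       * they hold can flow back into driver-side pool caches.
       */
      vk_command_pool_trim(pool, flags);

      /* Then driver-specific trimming (hypothetical helper). */
      drv_pool_free_cached_bos(pool);
   }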
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_COMMAND_POOL_H */
diff --git a/src/vulkan/util/vk_debug_report.c b/src/vulkan/runtime/vk_debug_report.c
index db89e3d6f5d..6712ba6d1c8 100644
--- a/src/vulkan/util/vk_debug_report.c
+++ b/src/vulkan/runtime/vk_debug_report.c
@@ -58,8 +58,8 @@ vk_common_CreateDebugReportCallbackEXT(VkInstance _instance,
if (!cb)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- vk_object_base_init(NULL, &cb->base,
- VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT);
+ vk_object_base_instance_init(instance, &cb->base,
+ VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT);
cb->flags = pCreateInfo->flags;
cb->callback = pCreateInfo->pfnCallback;
@@ -150,8 +150,9 @@ vk_debug_report(struct vk_instance *instance,
const char* pLayerPrefix,
const char *pMessage)
{
- VkDebugReportObjectTypeEXT object_type =
+ VkObjectType object_type =
object ? object->type : VK_OBJECT_TYPE_UNKNOWN;
- debug_report(instance, flags, object_type, (uint64_t)(uintptr_t)object,
- location, messageCode, pLayerPrefix, pMessage);
+ debug_report(instance, flags, (VkDebugReportObjectTypeEXT)object_type,
+ (uint64_t)(uintptr_t)object, location, messageCode,
+ pLayerPrefix, pMessage);
}
diff --git a/src/vulkan/util/vk_debug_report.h b/src/vulkan/runtime/vk_debug_report.h
index ca208bb5f26..ca208bb5f26 100644
--- a/src/vulkan/util/vk_debug_report.h
+++ b/src/vulkan/runtime/vk_debug_report.h
diff --git a/src/vulkan/runtime/vk_debug_utils.c b/src/vulkan/runtime/vk_debug_utils.c
new file mode 100644
index 00000000000..2c083ab8937
--- /dev/null
+++ b/src/vulkan/runtime/vk_debug_utils.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_debug_utils.h"
+
+#include "vk_common_entrypoints.h"
+#include "vk_command_buffer.h"
+#include "vk_device.h"
+#include "vk_queue.h"
+#include "vk_object.h"
+#include "vk_alloc.h"
+#include "vk_util.h"
+#include "stdarg.h"
+#include "util/u_dynarray.h"
+
+void
+vk_debug_message(struct vk_instance *instance,
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT types,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData)
+{
+ mtx_lock(&instance->debug_utils.callbacks_mutex);
+
+ list_for_each_entry(struct vk_debug_utils_messenger, messenger,
+ &instance->debug_utils.callbacks, link) {
+ if ((messenger->severity & severity) &&
+ (messenger->type & types))
+ messenger->callback(severity, types, pCallbackData, messenger->data);
+ }
+
+ mtx_unlock(&instance->debug_utils.callbacks_mutex);
+}
+
+/* This function is intended to be used by drivers to report a message to
+ * the special messenger(s) provided in the pNext chain while creating an
+ * instance. It is only meant to be used during vkCreateInstance or
+ * vkDestroyInstance calls.
+ */
+void
+vk_debug_message_instance(struct vk_instance *instance,
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT types,
+ const char *pMessageIdName,
+ int32_t messageIdNumber,
+ const char *pMessage)
+{
+ if (list_is_empty(&instance->debug_utils.instance_callbacks))
+ return;
+
+ const VkDebugUtilsMessengerCallbackDataEXT cbData = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT,
+ .pMessageIdName = pMessageIdName,
+ .messageIdNumber = messageIdNumber,
+ .pMessage = pMessage,
+ };
+
+ list_for_each_entry(struct vk_debug_utils_messenger, messenger,
+ &instance->debug_utils.instance_callbacks, link) {
+ if ((messenger->severity & severity) &&
+ (messenger->type & types))
+ messenger->callback(severity, types, &cbData, messenger->data);
+ }
+}
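
For reference, the instance_callbacks list used above is populated from
messenger create-infos that the application chains off
VkInstanceCreateInfo::pNext, per VK_EXT_debug_utils. A minimal
application-side sketch (my_callback is hypothetical):

   VkDebugUtilsMessengerCreateInfoEXT messenger_info = {
      .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
      .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
      .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
      .pfnUserCallback = my_callback,
   };
   const VkInstanceCreateInfo instance_info = {
      .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
      .pNext = &messenger_info,
      /* ... */
   };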
+
+void
+vk_address_binding_report(struct vk_instance *instance,
+ struct vk_object_base *object,
+ uint64_t base_address,
+ uint64_t size,
+ VkDeviceAddressBindingTypeEXT type)
+{
+ if (list_is_empty(&instance->debug_utils.callbacks))
+ return;
+
+ VkDeviceAddressBindingCallbackDataEXT addr_binding = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_ADDRESS_BINDING_CALLBACK_DATA_EXT,
+ .flags = object->client_visible ? 0 : VK_DEVICE_ADDRESS_BINDING_INTERNAL_OBJECT_BIT_EXT,
+ .baseAddress = base_address,
+ .size = size,
+ .bindingType = type,
+ };
+
+ VkDebugUtilsObjectNameInfoEXT object_name_info = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
+ .pNext = NULL,
+ .objectType = object->type,
+ .objectHandle = (uint64_t)(uintptr_t)object,
+ .pObjectName = object->object_name,
+ };
+
+ VkDebugUtilsMessengerCallbackDataEXT cb_data = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT,
+ .pNext = &addr_binding,
+ .objectCount = 1,
+ .pObjects = &object_name_info,
+ };
+
+ vk_debug_message(instance, VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_DEVICE_ADDRESS_BINDING_BIT_EXT,
+ &cb_data);
+}
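
A hedged sketch of the driver side of VK_EXT_device_address_binding_report:
when a driver binds or unbinds GPU memory, it can forward the range here. The
buf->addr and buf->size fields below are hypothetical driver state:

   vk_address_binding_report(device->physical->instance, &buf->base,
                             buf->addr, buf->size,
                             VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT);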
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateDebugUtilsMessengerEXT(
+ VkInstance _instance,
+ const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDebugUtilsMessengerEXT *pMessenger)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+
+ struct vk_debug_utils_messenger *messenger =
+ vk_alloc2(&instance->alloc, pAllocator,
+ sizeof(struct vk_debug_utils_messenger), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (!messenger)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ if (pAllocator)
+ messenger->alloc = *pAllocator;
+ else
+ messenger->alloc = instance->alloc;
+
+ vk_object_base_init(NULL, &messenger->base,
+ VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT);
+
+ messenger->severity = pCreateInfo->messageSeverity;
+ messenger->type = pCreateInfo->messageType;
+ messenger->callback = pCreateInfo->pfnUserCallback;
+ messenger->data = pCreateInfo->pUserData;
+
+ mtx_lock(&instance->debug_utils.callbacks_mutex);
+ list_addtail(&messenger->link, &instance->debug_utils.callbacks);
+ mtx_unlock(&instance->debug_utils.callbacks_mutex);
+
+ *pMessenger = vk_debug_utils_messenger_to_handle(messenger);
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_SubmitDebugUtilsMessageEXT(
+ VkInstance _instance,
+ VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageTypes,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+
+ vk_debug_message(instance, messageSeverity, messageTypes, pCallbackData);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyDebugUtilsMessengerEXT(
+ VkInstance _instance,
+ VkDebugUtilsMessengerEXT _messenger,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ VK_FROM_HANDLE(vk_debug_utils_messenger, messenger, _messenger);
+
+ if (messenger == NULL)
+ return;
+
+ mtx_lock(&instance->debug_utils.callbacks_mutex);
+ list_del(&messenger->link);
+ mtx_unlock(&instance->debug_utils.callbacks_mutex);
+
+ vk_object_base_finish(&messenger->base);
+ vk_free2(&instance->alloc, pAllocator, messenger);
+}
+
+static VkResult
+vk_common_set_object_name_locked(
+ struct vk_device *device,
+ const VkDebugUtilsObjectNameInfoEXT *pNameInfo)
+{
+ if (unlikely(device->swapchain_name == NULL)) {
+ /* Even though VkSwapchain/Surface are non-dispatchable objects, we know
+ * a priori that these are actually pointers so we can use
+ * the pointer hash table for them.
+ */
+ device->swapchain_name = _mesa_pointer_hash_table_create(NULL);
+ if (device->swapchain_name == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ char *object_name = vk_strdup(&device->alloc, pNameInfo->pObjectName,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (object_name == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ struct hash_entry *entry =
+ _mesa_hash_table_search(device->swapchain_name,
+ (void *)(uintptr_t)pNameInfo->objectHandle);
+ if (unlikely(entry == NULL)) {
+ entry = _mesa_hash_table_insert(device->swapchain_name,
+ (void *)(uintptr_t)pNameInfo->objectHandle,
+ object_name);
+ if (entry == NULL) {
+ vk_free(&device->alloc, object_name);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ } else {
+ vk_free(&device->alloc, entry->data);
+ entry->data = object_name;
+ }
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_DebugMarkerSetObjectNameEXT(
+ VkDevice _device,
+ const VkDebugMarkerObjectNameInfoEXT *pNameInfo)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT);
+
+ VkObjectType object_type;
+ switch (pNameInfo->objectType) {
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT:
+ object_type = VK_OBJECT_TYPE_SURFACE_KHR;
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
+ object_type = VK_OBJECT_TYPE_SWAPCHAIN_KHR;
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT:
+ object_type = VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT;
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT:
+ object_type = VK_OBJECT_TYPE_DISPLAY_KHR;
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT:
+ object_type = VK_OBJECT_TYPE_DISPLAY_MODE_KHR;
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT:
+ object_type = VK_OBJECT_TYPE_VALIDATION_CACHE_EXT;
+ break;
+ default:
+ object_type = (VkObjectType)pNameInfo->objectType;
+ break;
+ }
+
+ VkDebugUtilsObjectNameInfoEXT name_info = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
+ .objectType = object_type,
+ .objectHandle = pNameInfo->object,
+ .pObjectName = pNameInfo->pObjectName,
+ };
+
+ return device->dispatch_table.SetDebugUtilsObjectNameEXT(_device, &name_info);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_SetDebugUtilsObjectNameEXT(
+ VkDevice _device,
+ const VkDebugUtilsObjectNameInfoEXT *pNameInfo)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+#if DETECT_OS_ANDROID
+ if (pNameInfo->objectType == VK_OBJECT_TYPE_SWAPCHAIN_KHR ||
+ pNameInfo->objectType == VK_OBJECT_TYPE_SURFACE_KHR) {
+#else
+ if (pNameInfo->objectType == VK_OBJECT_TYPE_SURFACE_KHR) {
+#endif
+ mtx_lock(&device->swapchain_name_mtx);
+ VkResult res = vk_common_set_object_name_locked(device, pNameInfo);
+ mtx_unlock(&device->swapchain_name_mtx);
+ return res;
+ }
+
+ struct vk_object_base *object =
+ vk_object_base_from_u64_handle(pNameInfo->objectHandle,
+ pNameInfo->objectType);
+
+ assert(object->device != NULL || object->instance != NULL);
+ VkAllocationCallbacks *alloc = object->device != NULL ?
+ &object->device->alloc : &object->instance->alloc;
+ if (object->object_name) {
+ vk_free(alloc, object->object_name);
+ object->object_name = NULL;
+ }
+ object->object_name = vk_strdup(alloc, pNameInfo->pObjectName,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!object->object_name)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_SetDebugUtilsObjectTagEXT(
+ VkDevice _device,
+ const VkDebugUtilsObjectTagInfoEXT *pTagInfo)
+{
+ /* no-op */
+ return VK_SUCCESS;
+}
+
+static void
+vk_common_append_debug_label(struct vk_device *device,
+ struct util_dynarray *labels,
+ const VkDebugUtilsLabelEXT *pLabelInfo)
+{
+ util_dynarray_append(labels, VkDebugUtilsLabelEXT, *pLabelInfo);
+ VkDebugUtilsLabelEXT *current_label =
+ util_dynarray_top_ptr(labels, VkDebugUtilsLabelEXT);
+ current_label->pLabelName =
+ vk_strdup(&device->alloc, current_label->pLabelName,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+}
+
+static void
+vk_common_pop_debug_label(struct vk_device *device,
+ struct util_dynarray *labels)
+{
+ if (labels->size == 0)
+ return;
+
+ VkDebugUtilsLabelEXT previous_label =
+ util_dynarray_pop(labels, VkDebugUtilsLabelEXT);
+ vk_free(&device->alloc, (void *)previous_label.pLabelName);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBeginDebugUtilsLabelEXT(
+ VkCommandBuffer _commandBuffer,
+ const VkDebugUtilsLabelEXT *pLabelInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, command_buffer, _commandBuffer);
+
+ /* If the latest label was submitted by CmdInsertDebugUtilsLabelEXT, we
+ * should remove it first.
+ */
+ if (!command_buffer->region_begin) {
+ vk_common_pop_debug_label(command_buffer->base.device,
+ &command_buffer->labels);
+ }
+
+ vk_common_append_debug_label(command_buffer->base.device,
+ &command_buffer->labels,
+ pLabelInfo);
+ command_buffer->region_begin = true;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdEndDebugUtilsLabelEXT(VkCommandBuffer _commandBuffer)
+{
+ VK_FROM_HANDLE(vk_command_buffer, command_buffer, _commandBuffer);
+
+ /* If the latest label was submitted by CmdInsertDebugUtilsLabelEXT, we
+ * should remove it first.
+ */
+ if (!command_buffer->region_begin) {
+ vk_common_pop_debug_label(command_buffer->base.device,
+ &command_buffer->labels);
+ }
+
+ vk_common_pop_debug_label(command_buffer->base.device,
+ &command_buffer->labels);
+ command_buffer->region_begin = true;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdInsertDebugUtilsLabelEXT(
+ VkCommandBuffer _commandBuffer,
+ const VkDebugUtilsLabelEXT *pLabelInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, command_buffer, _commandBuffer);
+
+ /* If the latest label was submitted by CmdInsertDebugUtilsLabelEXT, we
+ * should remove it first.
+ */
+ if (!command_buffer->region_begin) {
+ vk_common_pop_debug_label(command_buffer->base.device,
+ &command_buffer->labels);
+ }
+
+ vk_common_append_debug_label(command_buffer->base.device,
+ &command_buffer->labels,
+ pLabelInfo);
+ command_buffer->region_begin = false;
+}
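
To illustrate the bookkeeping across the three entrypoints above (each quoted
string stands for a VkDebugUtilsLabelEXT whose pLabelName is that string):

   CmdBeginDebugUtilsLabelEXT(cb, "frame")    /* labels = ["frame"],           region_begin = true  */
   CmdInsertDebugUtilsLabelEXT(cb, "draw 1")  /* labels = ["frame", "draw 1"], region_begin = false */
   CmdInsertDebugUtilsLabelEXT(cb, "draw 2")  /* labels = ["frame", "draw 2"], region_begin = false */
   CmdEndDebugUtilsLabelEXT(cb)               /* labels = [],                  region_begin = true  */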
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_QueueBeginDebugUtilsLabelEXT(
+ VkQueue _queue,
+ const VkDebugUtilsLabelEXT *pLabelInfo)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+
+ /* If the latest label was submitted by QueueInsertDebugUtilsLabelEXT, we
+ * should remove it first.
+ */
+ if (!queue->region_begin)
+ vk_common_pop_debug_label(queue->base.device, &queue->labels);
+
+ vk_common_append_debug_label(queue->base.device,
+ &queue->labels,
+ pLabelInfo);
+ queue->region_begin = true;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_QueueEndDebugUtilsLabelEXT(VkQueue _queue)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+
+ /* If the latest label was submitted by QueueInsertDebugUtilsLabelEXT, we
+ * should remove it first.
+ */
+ if (!queue->region_begin)
+ vk_common_pop_debug_label(queue->base.device, &queue->labels);
+
+ vk_common_pop_debug_label(queue->base.device, &queue->labels);
+ queue->region_begin = true;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_QueueInsertDebugUtilsLabelEXT(
+ VkQueue _queue,
+ const VkDebugUtilsLabelEXT *pLabelInfo)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+
+ /* If the latest label was submitted by QueueInsertDebugUtilsLabelEXT, we
+ * should remove it first.
+ */
+ if (!queue->region_begin)
+ vk_common_pop_debug_label(queue->base.device, &queue->labels);
+
+ vk_common_append_debug_label(queue->base.device,
+ &queue->labels,
+ pLabelInfo);
+ queue->region_begin = false;
+}
diff --git a/src/vulkan/runtime/vk_debug_utils.h b/src/vulkan/runtime/vk_debug_utils.h
new file mode 100644
index 00000000000..7f27be47461
--- /dev/null
+++ b/src/vulkan/runtime/vk_debug_utils.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_DEBUG_UTILS_H
+#define VK_DEBUG_UTILS_H
+
+#include "vk_instance.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_debug_utils_messenger {
+ struct vk_object_base base;
+ VkAllocationCallbacks alloc;
+
+ struct list_head link;
+
+ VkDebugUtilsMessageSeverityFlagsEXT severity;
+ VkDebugUtilsMessageTypeFlagsEXT type;
+ PFN_vkDebugUtilsMessengerCallbackEXT callback;
+ void *data;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_debug_utils_messenger, base,
+ VkDebugUtilsMessengerEXT,
+ VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT)
+
+void
+vk_debug_message(struct vk_instance *instance,
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT types,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData);
+
+void
+vk_debug_message_instance(struct vk_instance *instance,
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT types,
+ const char *pMessageIdName,
+ int32_t messageIdNumber,
+ const char *pMessage);
+
+void
+vk_address_binding_report(struct vk_instance *instance,
+ struct vk_object_base *object,
+ uint64_t base_address,
+ uint64_t size,
+ VkDeviceAddressBindingTypeEXT type);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_DEBUG_UTILS_H */
diff --git a/src/vulkan/util/vk_deferred_operation.c b/src/vulkan/runtime/vk_deferred_operation.c
index a9f6e0d269b..a9f6e0d269b 100644
--- a/src/vulkan/util/vk_deferred_operation.c
+++ b/src/vulkan/runtime/vk_deferred_operation.c
diff --git a/src/vulkan/util/vk_deferred_operation.h b/src/vulkan/runtime/vk_deferred_operation.h
index 588db8085f2..588db8085f2 100644
--- a/src/vulkan/util/vk_deferred_operation.h
+++ b/src/vulkan/runtime/vk_deferred_operation.h
diff --git a/src/vulkan/runtime/vk_descriptor_set_layout.c b/src/vulkan/runtime/vk_descriptor_set_layout.c
new file mode 100644
index 00000000000..9e657bec4c5
--- /dev/null
+++ b/src/vulkan/runtime/vk_descriptor_set_layout.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_descriptor_set_layout.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+
+static void
+vk_descriptor_set_layout_init(struct vk_device *device,
+ struct vk_descriptor_set_layout *layout)
+{
+ vk_object_base_init(device, &layout->base,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
+
+ layout->ref_cnt = 1;
+ layout->destroy = vk_descriptor_set_layout_destroy;
+}
+
+void *
+vk_descriptor_set_layout_zalloc(struct vk_device *device, size_t size)
+{
+ /* Because we're reference counting and lifetimes may not be what the
+ * client expects, these have to be allocated off the device and not as
+ * their own object.
+ */
+ struct vk_descriptor_set_layout *layout =
+ vk_zalloc(&device->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (!layout)
+ return NULL;
+
+ vk_descriptor_set_layout_init(device, layout);
+
+ return layout;
+}
+
+void *
+vk_descriptor_set_layout_multizalloc(struct vk_device *device,
+ struct vk_multialloc *ma)
+{
+ /* Because we're reference counting and lifetimes may not be what the
+ * client expects, these have to be allocated off the device and not as
+ * their own object.
+ */
+ struct vk_descriptor_set_layout *layout =
+ vk_multialloc_zalloc(ma, &device->alloc,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (!layout)
+ return NULL;
+
+ vk_descriptor_set_layout_init(device, layout);
+
+ return layout;
+}
+
+void
+vk_descriptor_set_layout_destroy(struct vk_device *device,
+ struct vk_descriptor_set_layout *layout)
+{
+ vk_object_free(device, NULL, layout);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyDescriptorSetLayout(VkDevice _device,
+ VkDescriptorSetLayout descriptorSetLayout,
+ UNUSED const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_descriptor_set_layout, layout, descriptorSetLayout);
+
+ if (layout == NULL)
+ return;
+
+ vk_descriptor_set_layout_unref(device, layout);
+}
diff --git a/src/vulkan/runtime/vk_descriptor_set_layout.h b/src/vulkan/runtime/vk_descriptor_set_layout.h
new file mode 100644
index 00000000000..b01f30157e4
--- /dev/null
+++ b/src/vulkan/runtime/vk_descriptor_set_layout.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_DESCRIPTOR_SET_LAYOUT_H
+#define VK_DESCRIPTOR_SET_LAYOUT_H
+
+#include "vk_object.h"
+
+#include "util/mesa-blake3.h"
+#include "util/u_atomic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_descriptor_set_layout {
+ struct vk_object_base base;
+
+ /* BLAKE3 hash of the descriptor set layout. This is used by the common
+ * pipeline code to properly cache shaders, including handling pipeline
+ * layouts. It must be populated by the driver or you risk pipeline cache
+ * collisions.
+ */
+ blake3_hash blake3;
+
+ void (*destroy)(struct vk_device *device,
+ struct vk_descriptor_set_layout *layout);
+
+ /** Reference count
+ *
+ * It's often necessary to store a pointer to the descriptor set layout in
+ * the descriptor so that any entrypoint which has access to a descriptor
+ * set also has the layout. While layouts are often passed into various
+ * entrypoints, they're notably missing from vkUpdateDescriptorSets(). In
+ * order to implement descriptor writes, you either need to stash a pointer
+ * to the descriptor set layout in the descriptor set or you need to copy
+ * all of the relevant information. Storing a pointer is a lot cheaper.
+ *
+ * Because descriptor set layout lifetimes and descriptor set lifetimes are
+ * not guaranteed to coincide, we have to reference count if we're going to
+ * do this.
+ */
+ uint32_t ref_cnt;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_descriptor_set_layout, base,
+ VkDescriptorSetLayout,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
+
+void *vk_descriptor_set_layout_zalloc(struct vk_device *device, size_t size);
+
+void *vk_descriptor_set_layout_multizalloc(struct vk_device *device,
+ struct vk_multialloc *ma);
+
+void vk_descriptor_set_layout_destroy(struct vk_device *device,
+ struct vk_descriptor_set_layout *layout);
+
+static inline struct vk_descriptor_set_layout *
+vk_descriptor_set_layout_ref(struct vk_descriptor_set_layout *layout)
+{
+ assert(layout && layout->ref_cnt >= 1);
+ p_atomic_inc(&layout->ref_cnt);
+ return layout;
+}
+
+static inline void
+vk_descriptor_set_layout_unref(struct vk_device *device,
+ struct vk_descriptor_set_layout *layout)
+{
+ assert(layout && layout->ref_cnt >= 1);
+ if (p_atomic_dec_zero(&layout->ref_cnt))
+ layout->destroy(device, layout);
+}
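
A minimal sketch of the pattern this reference count exists for; the
drv_descriptor_set type is hypothetical:

   struct drv_descriptor_set {
      struct vk_object_base base;
      struct vk_descriptor_set_layout *layout;
      /* ... descriptor storage ... */
   };

   /* at vkAllocateDescriptorSets() time */
   set->layout = vk_descriptor_set_layout_ref(layout);

   /* at vkFreeDescriptorSets() / pool-reset time */
   vk_descriptor_set_layout_unref(device, set->layout);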
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_DESCRIPTOR_SET_LAYOUT_H */
+
diff --git a/src/vulkan/runtime/vk_descriptor_update_template.c b/src/vulkan/runtime/vk_descriptor_update_template.c
new file mode 100644
index 00000000000..2d4ff52f25e
--- /dev/null
+++ b/src/vulkan/runtime/vk_descriptor_update_template.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ * Copyright © 2022 Collabora, Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_descriptor_update_template.h"
+
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateDescriptorUpdateTemplate(VkDevice _device,
+ const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_descriptor_update_template *template;
+
+ uint32_t entry_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; i++) {
+ if (pCreateInfo->pDescriptorUpdateEntries[i].descriptorCount > 0)
+ entry_count++;
+ }
+
+ size_t size = sizeof(*template) + entry_count * sizeof(template->entries[0]);
+ template = vk_object_alloc(device, pAllocator, size,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);
+ if (template == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ template->type = pCreateInfo->templateType;
+ template->bind_point = pCreateInfo->pipelineBindPoint;
+
+ if (template->type == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)
+ template->set = pCreateInfo->set;
+
+ uint32_t entry_idx = 0;
+ template->entry_count = entry_count;
+ for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; i++) {
+ const VkDescriptorUpdateTemplateEntry *pEntry =
+ &pCreateInfo->pDescriptorUpdateEntries[i];
+
+ if (pEntry->descriptorCount == 0)
+ continue;
+
+ template->entries[entry_idx++] = (struct vk_descriptor_template_entry) {
+ .type = pEntry->descriptorType,
+ .binding = pEntry->dstBinding,
+ .array_element = pEntry->dstArrayElement,
+ .array_count = pEntry->descriptorCount,
+ .offset = pEntry->offset,
+ .stride = pEntry->stride,
+ };
+ }
+ assert(entry_idx == entry_count);
+
+ *pDescriptorUpdateTemplate =
+ vk_descriptor_update_template_to_handle(template);
+
+ return VK_SUCCESS;
+}
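
A hedged sketch of how a driver's vkUpdateDescriptorSetWithTemplate
implementation would typically consume the flattened entries, applying
offset/stride to the user-provided pData; drv_write_descriptor() is a
hypothetical placeholder:

   for (uint32_t i = 0; i < template->entry_count; i++) {
      const struct vk_descriptor_template_entry *e = &template->entries[i];
      for (uint32_t j = 0; j < e->array_count; j++) {
         const void *src = (const char *)pData + e->offset + j * e->stride;
         drv_write_descriptor(set, e->binding, e->array_element + j,
                              e->type, src);
      }
   }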
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyDescriptorUpdateTemplate(VkDevice _device,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_descriptor_update_template, template,
+ descriptorUpdateTemplate);
+
+ if (!template)
+ return;
+
+ vk_object_free(device, pAllocator, template);
+}
diff --git a/src/vulkan/runtime/vk_descriptor_update_template.h b/src/vulkan/runtime/vk_descriptor_update_template.h
new file mode 100644
index 00000000000..ee4ccdf3006
--- /dev/null
+++ b/src/vulkan/runtime/vk_descriptor_update_template.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ * Copyright © 2022 Collabora, Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_DESCRIPTOR_UPDATE_TEMPLATE_H
+#define VK_DESCRIPTOR_UPDATE_TEMPLATE_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_descriptor_template_entry {
+ /** VkDescriptorUpdateTemplateEntry::descriptorType */
+ VkDescriptorType type;
+
+ /** VkDescriptorUpdateTemplateEntry::dstBinding */
+ uint32_t binding;
+
+ /** VkDescriptorUpdateTemplateEntry::dstArrayElement */
+ uint32_t array_element;
+
+ /** VkDescriptorUpdateTemplateEntry::descriptorCount */
+ uint32_t array_count;
+
+ /** VkDescriptorUpdateTemplateEntry::offset
+ *
+ * Offset into the user provided data */
+ size_t offset;
+
+ /** VkDescriptorUpdateTemplateEntry::stride
+ *
+ * Stride between elements into the user provided data
+ */
+ size_t stride;
+};
+
+struct vk_descriptor_update_template {
+ struct vk_object_base base;
+
+ /** VkDescriptorUpdateTemplateCreateInfo::templateType */
+ VkDescriptorUpdateTemplateType type;
+
+ /** VkDescriptorUpdateTemplateCreateInfo::pipelineBindPoint */
+ VkPipelineBindPoint bind_point;
+
+ /** VkDescriptorUpdateTemplateCreateInfo::set
+ *
+ * The descriptor set this template corresponds to. This value is only
+ * valid if the template was created with the templateType
+ * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
+ */
+ uint8_t set;
+
+ /** VkDescriptorUpdateTemplateCreateInfo::descriptorUpdateEntryCount */
+ uint32_t entry_count;
+
+ /** Entries of the template */
+ struct vk_descriptor_template_entry entries[0];
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_descriptor_update_template, base,
+ VkDescriptorUpdateTemplate,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_DESCRIPTOR_UPDATE_TEMPLATE_H */
diff --git a/src/vulkan/util/vk_descriptors.c b/src/vulkan/runtime/vk_descriptors.c
index 7aee7e82124..ff79db9b390 100644
--- a/src/vulkan/util/vk_descriptors.c
+++ b/src/vulkan/runtime/vk_descriptors.c
@@ -25,6 +25,7 @@
#include <stdlib.h>
#include <string.h>
#include "vk_descriptors.h"
+#include "vk_common_entrypoints.h"
#include "util/macros.h"
static int
@@ -54,3 +55,49 @@ vk_create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned
return VK_SUCCESS;
}
+
+/*
+ * For drivers that don't have mutable state in buffers, images, image views, or
+ * samplers, there's no need to save/restore anything to get the same
+ * descriptor back as long as the user uses the same GPU virtual address. In
+ * this case, the following EXT_descriptor_buffer functions are trivial.
+ */
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetBufferOpaqueCaptureDescriptorDataEXT(VkDevice device,
+ const VkBufferCaptureDescriptorDataInfoEXT *pInfo,
+ void *pData)
+{
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetImageOpaqueCaptureDescriptorDataEXT(VkDevice device,
+ const VkImageCaptureDescriptorDataInfoEXT *pInfo,
+ void *pData)
+{
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetImageViewOpaqueCaptureDescriptorDataEXT(VkDevice device,
+ const VkImageViewCaptureDescriptorDataInfoEXT *pInfo,
+ void *pData)
+{
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetSamplerOpaqueCaptureDescriptorDataEXT(VkDevice _device,
+ const VkSamplerCaptureDescriptorDataInfoEXT *pInfo,
+ void *pData)
+{
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetAccelerationStructureOpaqueCaptureDescriptorDataEXT(VkDevice device,
+ const VkAccelerationStructureCaptureDescriptorDataInfoEXT *pInfo,
+ void *pData)
+{
+ return VK_SUCCESS;
+}
diff --git a/src/vulkan/util/vk_descriptors.h b/src/vulkan/runtime/vk_descriptors.h
index a37eae8a741..220787ece37 100644
--- a/src/vulkan/util/vk_descriptors.h
+++ b/src/vulkan/runtime/vk_descriptors.h
@@ -29,7 +29,7 @@
extern "C" {
#endif
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
VkResult
vk_create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned count,
diff --git a/src/vulkan/runtime/vk_device.c b/src/vulkan/runtime/vk_device.c
new file mode 100644
index 00000000000..31cb331e9ab
--- /dev/null
+++ b/src/vulkan/runtime/vk_device.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_device.h"
+
+#include "vk_common_entrypoints.h"
+#include "vk_instance.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_queue.h"
+#include "vk_sync.h"
+#include "vk_sync_timeline.h"
+#include "vk_util.h"
+#include "util/u_debug.h"
+#include "util/hash_table.h"
+#include "util/perf/cpu_trace.h"
+#include "util/ralloc.h"
+
+static enum vk_device_timeline_mode
+get_timeline_mode(struct vk_physical_device *physical_device)
+{
+ if (physical_device->supported_sync_types == NULL)
+ return VK_DEVICE_TIMELINE_MODE_NONE;
+
+ const struct vk_sync_type *timeline_type = NULL;
+ for (const struct vk_sync_type *const *t =
+ physical_device->supported_sync_types; *t; t++) {
+ if ((*t)->features & VK_SYNC_FEATURE_TIMELINE) {
+ /* We can only have one timeline mode */
+ assert(timeline_type == NULL);
+ timeline_type = *t;
+ }
+ }
+
+ if (timeline_type == NULL)
+ return VK_DEVICE_TIMELINE_MODE_NONE;
+
+ if (vk_sync_type_is_vk_sync_timeline(timeline_type))
+ return VK_DEVICE_TIMELINE_MODE_EMULATED;
+
+ if (timeline_type->features & VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL)
+ return VK_DEVICE_TIMELINE_MODE_NATIVE;
+
+ /* For assisted mode, we require a few additional things of all sync types
+ * which may be used as semaphores.
+ */
+ for (const struct vk_sync_type *const *t =
+ physical_device->supported_sync_types; *t; t++) {
+ if ((*t)->features & VK_SYNC_FEATURE_GPU_WAIT) {
+ assert((*t)->features & VK_SYNC_FEATURE_WAIT_PENDING);
+ if ((*t)->features & VK_SYNC_FEATURE_BINARY)
+ assert((*t)->features & VK_SYNC_FEATURE_CPU_RESET);
+ }
+ }
+
+ return VK_DEVICE_TIMELINE_MODE_ASSISTED;
+}
+
+static void
+collect_enabled_features(struct vk_device *device,
+ const VkDeviceCreateInfo *pCreateInfo)
+{
+ if (pCreateInfo->pEnabledFeatures)
+ vk_set_physical_device_features_1_0(&device->enabled_features, pCreateInfo->pEnabledFeatures);
+ vk_set_physical_device_features(&device->enabled_features, pCreateInfo->pNext);
+}
+
+VkResult
+vk_device_init(struct vk_device *device,
+ struct vk_physical_device *physical_device,
+ const struct vk_device_dispatch_table *dispatch_table,
+ const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc)
+{
+ memset(device, 0, sizeof(*device));
+ vk_object_base_init(device, &device->base, VK_OBJECT_TYPE_DEVICE);
+ if (alloc != NULL)
+ device->alloc = *alloc;
+ else
+ device->alloc = physical_device->instance->alloc;
+
+ device->physical = physical_device;
+
+ if (dispatch_table) {
+ device->dispatch_table = *dispatch_table;
+
+ /* Add common entrypoints without overwriting driver-provided ones. */
+ vk_device_dispatch_table_from_entrypoints(
+ &device->dispatch_table, &vk_common_device_entrypoints, false);
+ }
+
+ for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+ int idx;
+ for (idx = 0; idx < VK_DEVICE_EXTENSION_COUNT; idx++) {
+ if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
+ vk_device_extensions[idx].extensionName) == 0)
+ break;
+ }
+
+ if (idx >= VK_DEVICE_EXTENSION_COUNT)
+ return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
+ "%s not supported",
+ pCreateInfo->ppEnabledExtensionNames[i]);
+
+ if (!physical_device->supported_extensions.extensions[idx])
+ return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
+ "%s not supported",
+ pCreateInfo->ppEnabledExtensionNames[i]);
+
+#ifdef ANDROID_STRICT
+ if (!vk_android_allowed_device_extensions.extensions[idx])
+ return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
+ "%s not supported",
+ pCreateInfo->ppEnabledExtensionNames[i]);
+#endif
+
+ device->enabled_extensions.extensions[idx] = true;
+ }
+
+ VkResult result =
+ vk_physical_device_check_device_features(physical_device,
+ pCreateInfo);
+ if (result != VK_SUCCESS)
+ return result;
+
+ collect_enabled_features(device, pCreateInfo);
+
+ p_atomic_set(&device->private_data_next_index, 0);
+
+ list_inithead(&device->queues);
+
+ device->drm_fd = -1;
+ device->mem_cache = NULL;
+
+ device->timeline_mode = get_timeline_mode(physical_device);
+
+ switch (device->timeline_mode) {
+ case VK_DEVICE_TIMELINE_MODE_NONE:
+ case VK_DEVICE_TIMELINE_MODE_NATIVE:
+ device->submit_mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
+ break;
+
+ case VK_DEVICE_TIMELINE_MODE_EMULATED:
+ device->submit_mode = VK_QUEUE_SUBMIT_MODE_DEFERRED;
+ break;
+
+ case VK_DEVICE_TIMELINE_MODE_ASSISTED:
+ if (debug_get_bool_option("MESA_VK_ENABLE_SUBMIT_THREAD", false)) {
+ device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED;
+ } else {
+ device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
+ }
+ break;
+
+ default:
+ unreachable("Invalid timeline mode");
+ }
+
+#if DETECT_OS_ANDROID
+ mtx_init(&device->swapchain_private_mtx, mtx_plain);
+ device->swapchain_private = NULL;
+#endif /* DETECT_OS_ANDROID */
+
+ simple_mtx_init(&device->trace_mtx, mtx_plain);
+
+ return VK_SUCCESS;
+}
+
+void
+vk_device_finish(struct vk_device *device)
+{
+ /* Drivers should tear down their own queues */
+ assert(list_is_empty(&device->queues));
+
+ vk_memory_trace_finish(device);
+
+#if DETECT_OS_ANDROID
+ if (device->swapchain_private) {
+ hash_table_foreach(device->swapchain_private, entry)
+ util_sparse_array_finish(entry->data);
+ ralloc_free(device->swapchain_private);
+ }
+#endif /* DETECT_OS_ANDROID */
+
+ simple_mtx_destroy(&device->trace_mtx);
+
+ vk_object_base_finish(&device->base);
+}
+
+void
+vk_device_enable_threaded_submit(struct vk_device *device)
+{
+ /* This must be called before any queues are created */
+ assert(list_is_empty(&device->queues));
+
+ /* In order to use threaded submit, we need every sync type that can be
+ * used as a wait fence for vkQueueSubmit() to support WAIT_PENDING.
+ * It's required for cross-thread/process submit re-ordering.
+ */
+ for (const struct vk_sync_type *const *t =
+ device->physical->supported_sync_types; *t; t++) {
+ if ((*t)->features & VK_SYNC_FEATURE_GPU_WAIT)
+ assert((*t)->features & VK_SYNC_FEATURE_WAIT_PENDING);
+ }
+
+ /* Any binary vk_sync types which will be used as permanent semaphore
+ * payloads also need to support vk_sync_type::move, but that's a lot
+ * harder to assert since it only applies to permanent semaphore payloads.
+ */
+
+ if (device->submit_mode != VK_QUEUE_SUBMIT_MODE_THREADED)
+ device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
+}
+
+VkResult
+vk_device_flush(struct vk_device *device)
+{
+ if (device->submit_mode != VK_QUEUE_SUBMIT_MODE_DEFERRED)
+ return VK_SUCCESS;
+
+ bool progress;
+ do {
+ progress = false;
+
+ vk_foreach_queue(queue, device) {
+ uint32_t queue_submit_count;
+ VkResult result = vk_queue_flush(queue, &queue_submit_count);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ if (queue_submit_count)
+ progress = true;
+ }
+ } while (progress);
+
+ return VK_SUCCESS;
+}
+
+static const char *
+timeline_mode_str(struct vk_device *device)
+{
+ switch (device->timeline_mode) {
+#define CASE(X) case VK_DEVICE_TIMELINE_MODE_##X: return #X;
+ CASE(NONE)
+ CASE(EMULATED)
+ CASE(ASSISTED)
+ CASE(NATIVE)
+#undef CASE
+ default: return "UNKNOWN";
+ }
+}
+
+void
+_vk_device_report_lost(struct vk_device *device)
+{
+ assert(p_atomic_read(&device->_lost.lost) > 0);
+
+ device->_lost.reported = true;
+
+ vk_foreach_queue(queue, device) {
+ if (queue->_lost.lost) {
+ __vk_errorf(queue, VK_ERROR_DEVICE_LOST,
+ queue->_lost.error_file, queue->_lost.error_line,
+ "%s", queue->_lost.error_msg);
+ }
+ }
+
+ vk_logd(VK_LOG_OBJS(device), "Timeline mode is %s.",
+ timeline_mode_str(device));
+}
+
+VkResult
+_vk_device_set_lost(struct vk_device *device,
+ const char *file, int line,
+ const char *msg, ...)
+{
+ /* This flushes out any per-queue device lost messages */
+ if (vk_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
+ p_atomic_inc(&device->_lost.lost);
+ device->_lost.reported = true;
+
+ va_list ap;
+ va_start(ap, msg);
+ __vk_errorv(device, VK_ERROR_DEVICE_LOST, file, line, msg, ap);
+ va_end(ap);
+
+ vk_logd(VK_LOG_OBJS(device), "Timeline mode is %s.",
+ timeline_mode_str(device));
+
+ if (debug_get_bool_option("MESA_VK_ABORT_ON_DEVICE_LOSS", false))
+ abort();
+
+ return VK_ERROR_DEVICE_LOST;
+}
+
+PFN_vkVoidFunction
+vk_device_get_proc_addr(const struct vk_device *device,
+ const char *name)
+{
+ if (device == NULL || name == NULL)
+ return NULL;
+
+ struct vk_instance *instance = device->physical->instance;
+ return vk_device_dispatch_table_get_if_supported(&device->dispatch_table,
+ name,
+ instance->app_info.api_version,
+ &instance->enabled_extensions,
+ &device->enabled_extensions);
+}
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
+vk_common_GetDeviceProcAddr(VkDevice _device,
+ const char *pName)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ return vk_device_get_proc_addr(device, pName);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetDeviceQueue(VkDevice _device,
+ uint32_t queueFamilyIndex,
+ uint32_t queueIndex,
+ VkQueue *pQueue)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ const VkDeviceQueueInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
+ .pNext = NULL,
+ /* flags = 0 because (Vulkan spec 1.2.170 - vkGetDeviceQueue):
+ *
+ * "vkGetDeviceQueue must only be used to get queues that were
+ * created with the flags parameter of VkDeviceQueueCreateInfo set
+ * to zero. To get queues that were created with a non-zero flags
+ * parameter use vkGetDeviceQueue2."
+ */
+ .flags = 0,
+ .queueFamilyIndex = queueFamilyIndex,
+ .queueIndex = queueIndex,
+ };
+
+ device->dispatch_table.GetDeviceQueue2(_device, &info, pQueue);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetDeviceQueue2(VkDevice _device,
+ const VkDeviceQueueInfo2 *pQueueInfo,
+ VkQueue *pQueue)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ struct vk_queue *queue = NULL;
+ vk_foreach_queue(iter, device) {
+ if (iter->queue_family_index == pQueueInfo->queueFamilyIndex &&
+ iter->index_in_family == pQueueInfo->queueIndex) {
+ queue = iter;
+ break;
+ }
+ }
+
+ /* From the Vulkan 1.1.70 spec:
+ *
+ * "The queue returned by vkGetDeviceQueue2 must have the same flags
+ * value from this structure as that used at device creation time in a
+ * VkDeviceQueueCreateInfo instance. If no matching flags were specified
+ * at device creation time then pQueue will return VK_NULL_HANDLE."
+ */
+ if (queue && queue->flags == pQueueInfo->flags)
+ *pQueue = vk_queue_to_handle(queue);
+ else
+ *pQueue = VK_NULL_HANDLE;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_MapMemory(VkDevice _device,
+ VkDeviceMemory memory,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ void **ppData)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ const VkMemoryMapInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
+ .flags = flags,
+ .memory = memory,
+ .offset = offset,
+ .size = size,
+ };
+
+ return device->dispatch_table.MapMemory2KHR(_device, &info, ppData);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_UnmapMemory(VkDevice _device,
+ VkDeviceMemory memory)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ ASSERTED VkResult result;
+
+ const VkMemoryUnmapInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
+ .memory = memory,
+ };
+
+ result = device->dispatch_table.UnmapMemory2KHR(_device, &info);
+ assert(result == VK_SUCCESS);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetDeviceGroupPeerMemoryFeatures(
+ VkDevice device,
+ uint32_t heapIndex,
+ uint32_t localDeviceIndex,
+ uint32_t remoteDeviceIndex,
+ VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
+{
+ assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
+ *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
+ VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
+ VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
+ VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetImageMemoryRequirements(VkDevice _device,
+ VkImage image,
+ VkMemoryRequirements *pMemoryRequirements)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ VkImageMemoryRequirementsInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+ .image = image,
+ };
+ VkMemoryRequirements2 reqs = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+ };
+ device->dispatch_table.GetImageMemoryRequirements2(_device, &info, &reqs);
+
+ *pMemoryRequirements = reqs.memoryRequirements;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_BindImageMemory(VkDevice _device,
+ VkImage image,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ VkBindImageMemoryInfo bind = {
+ .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
+ .image = image,
+ .memory = memory,
+ .memoryOffset = memoryOffset,
+ };
+
+ return device->dispatch_table.BindImageMemory2(_device, 1, &bind);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetImageSparseMemoryRequirements(VkDevice _device,
+ VkImage image,
+ uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ VkImageSparseMemoryRequirementsInfo2 info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
+ .image = image,
+ };
+
+ if (!pSparseMemoryRequirements) {
+ device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
+ &info,
+ pSparseMemoryRequirementCount,
+ NULL);
+ return;
+ }
+
+ STACK_ARRAY(VkSparseImageMemoryRequirements2, mem_reqs2, *pSparseMemoryRequirementCount);
+
+ for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i) {
+ mem_reqs2[i].sType = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2;
+ mem_reqs2[i].pNext = NULL;
+ }
+
+ device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
+ &info,
+ pSparseMemoryRequirementCount,
+ mem_reqs2);
+
+ for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i)
+ pSparseMemoryRequirements[i] = mem_reqs2[i].memoryRequirements;
+
+ STACK_ARRAY_FINISH(mem_reqs2);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_DeviceWaitIdle(VkDevice _device)
+{
+ MESA_TRACE_FUNC();
+
+ VK_FROM_HANDLE(vk_device, device, _device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+
+ vk_foreach_queue(queue, device) {
+ VkResult result = disp->QueueWaitIdle(vk_queue_to_handle(queue));
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+#ifndef _WIN32
+
+uint64_t
+vk_clock_gettime(clockid_t clock_id)
+{
+ struct timespec current;
+ int ret;
+
+ ret = clock_gettime(clock_id, &current);
+#ifdef CLOCK_MONOTONIC_RAW
+ if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
+ ret = clock_gettime(CLOCK_MONOTONIC, &current);
+#endif
+ if (ret < 0)
+ return 0;
+
+ return (uint64_t)current.tv_sec * 1000000000ULL + current.tv_nsec;
+}
+
+#endif //!_WIN32
+
+#define CORE_RENAMED_PROPERTY(ext_property, core_property) \
+ memcpy(&properties->ext_property, &core->core_property, sizeof(core->core_property))
+
+#define CORE_PROPERTY(property) CORE_RENAMED_PROPERTY(property, property)
+
+bool
+vk_get_physical_device_core_1_1_property_ext(struct VkBaseOutStructure *ext,
+ const VkPhysicalDeviceVulkan11Properties *core)
+{
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
+ VkPhysicalDeviceIDProperties *properties = (void *)ext;
+ CORE_PROPERTY(deviceUUID);
+ CORE_PROPERTY(driverUUID);
+ CORE_PROPERTY(deviceLUID);
+ CORE_PROPERTY(deviceNodeMask);
+ CORE_PROPERTY(deviceLUIDValid);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
+ VkPhysicalDeviceMaintenance3Properties *properties = (void *)ext;
+ CORE_PROPERTY(maxPerSetDescriptors);
+ CORE_PROPERTY(maxMemoryAllocationSize);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
+ VkPhysicalDeviceMultiviewProperties *properties = (void *)ext;
+ CORE_PROPERTY(maxMultiviewViewCount);
+ CORE_PROPERTY(maxMultiviewInstanceIndex);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
+ VkPhysicalDevicePointClippingProperties *properties = (void *) ext;
+ CORE_PROPERTY(pointClippingBehavior);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
+ VkPhysicalDeviceProtectedMemoryProperties *properties = (void *)ext;
+ CORE_PROPERTY(protectedNoFault);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
+ VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;
+ CORE_PROPERTY(subgroupSize);
+ CORE_RENAMED_PROPERTY(supportedStages,
+ subgroupSupportedStages);
+ CORE_RENAMED_PROPERTY(supportedOperations,
+ subgroupSupportedOperations);
+ CORE_RENAMED_PROPERTY(quadOperationsInAllStages,
+ subgroupQuadOperationsInAllStages);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+ vk_copy_struct_guts(ext, (void *)core, sizeof(*core));
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool
+vk_get_physical_device_core_1_2_property_ext(struct VkBaseOutStructure *ext,
+ const VkPhysicalDeviceVulkan12Properties *core)
+{
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES: {
+ VkPhysicalDeviceDepthStencilResolveProperties *properties = (void *)ext;
+ CORE_PROPERTY(supportedDepthResolveModes);
+ CORE_PROPERTY(supportedStencilResolveModes);
+ CORE_PROPERTY(independentResolveNone);
+ CORE_PROPERTY(independentResolve);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES: {
+ VkPhysicalDeviceDescriptorIndexingProperties *properties = (void *)ext;
+ CORE_PROPERTY(maxUpdateAfterBindDescriptorsInAllPools);
+ CORE_PROPERTY(shaderUniformBufferArrayNonUniformIndexingNative);
+ CORE_PROPERTY(shaderSampledImageArrayNonUniformIndexingNative);
+ CORE_PROPERTY(shaderStorageBufferArrayNonUniformIndexingNative);
+ CORE_PROPERTY(shaderStorageImageArrayNonUniformIndexingNative);
+ CORE_PROPERTY(shaderInputAttachmentArrayNonUniformIndexingNative);
+ CORE_PROPERTY(robustBufferAccessUpdateAfterBind);
+ CORE_PROPERTY(quadDivergentImplicitLod);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindSamplers);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindUniformBuffers);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindStorageBuffers);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindSampledImages);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindStorageImages);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindInputAttachments);
+ CORE_PROPERTY(maxPerStageUpdateAfterBindResources);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindSamplers);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindUniformBuffers);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindStorageBuffers);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindSampledImages);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindStorageImages);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindInputAttachments);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES: {
+ VkPhysicalDeviceDriverProperties *properties = (void *) ext;
+ CORE_PROPERTY(driverID);
+ CORE_PROPERTY(driverName);
+ CORE_PROPERTY(driverInfo);
+ CORE_PROPERTY(conformanceVersion);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
+ VkPhysicalDeviceSamplerFilterMinmaxProperties *properties = (void *)ext;
+ CORE_PROPERTY(filterMinmaxImageComponentMapping);
+ CORE_PROPERTY(filterMinmaxSingleComponentFormats);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES : {
+ VkPhysicalDeviceFloatControlsProperties *properties = (void *)ext;
+ CORE_PROPERTY(denormBehaviorIndependence);
+ CORE_PROPERTY(roundingModeIndependence);
+ CORE_PROPERTY(shaderDenormFlushToZeroFloat16);
+ CORE_PROPERTY(shaderDenormPreserveFloat16);
+ CORE_PROPERTY(shaderRoundingModeRTEFloat16);
+ CORE_PROPERTY(shaderRoundingModeRTZFloat16);
+ CORE_PROPERTY(shaderSignedZeroInfNanPreserveFloat16);
+ CORE_PROPERTY(shaderDenormFlushToZeroFloat32);
+ CORE_PROPERTY(shaderDenormPreserveFloat32);
+ CORE_PROPERTY(shaderRoundingModeRTEFloat32);
+ CORE_PROPERTY(shaderRoundingModeRTZFloat32);
+ CORE_PROPERTY(shaderSignedZeroInfNanPreserveFloat32);
+ CORE_PROPERTY(shaderDenormFlushToZeroFloat64);
+ CORE_PROPERTY(shaderDenormPreserveFloat64);
+ CORE_PROPERTY(shaderRoundingModeRTEFloat64);
+ CORE_PROPERTY(shaderRoundingModeRTZFloat64);
+ CORE_PROPERTY(shaderSignedZeroInfNanPreserveFloat64);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES: {
+ VkPhysicalDeviceTimelineSemaphoreProperties *properties = (void *) ext;
+ CORE_PROPERTY(maxTimelineSemaphoreValueDifference);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+ vk_copy_struct_guts(ext, (void *)core, sizeof(*core));
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool
+vk_get_physical_device_core_1_3_property_ext(struct VkBaseOutStructure *ext,
+ const VkPhysicalDeviceVulkan13Properties *core)
+{
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES: {
+ VkPhysicalDeviceInlineUniformBlockProperties *properties = (void *)ext;
+ CORE_PROPERTY(maxInlineUniformBlockSize);
+ CORE_PROPERTY(maxPerStageDescriptorInlineUniformBlocks);
+ CORE_PROPERTY(maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
+ CORE_PROPERTY(maxDescriptorSetInlineUniformBlocks);
+ CORE_PROPERTY(maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES: {
+ VkPhysicalDeviceMaintenance4Properties *properties = (void *)ext;
+ CORE_PROPERTY(maxBufferSize);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES: {
+ VkPhysicalDeviceShaderIntegerDotProductProperties *properties = (void *)ext;
+
+#define IDP_PROPERTY(x) CORE_PROPERTY(integerDotProduct##x)
+ IDP_PROPERTY(8BitUnsignedAccelerated);
+ IDP_PROPERTY(8BitSignedAccelerated);
+ IDP_PROPERTY(8BitMixedSignednessAccelerated);
+ IDP_PROPERTY(4x8BitPackedUnsignedAccelerated);
+ IDP_PROPERTY(4x8BitPackedSignedAccelerated);
+ IDP_PROPERTY(4x8BitPackedMixedSignednessAccelerated);
+ IDP_PROPERTY(16BitUnsignedAccelerated);
+ IDP_PROPERTY(16BitSignedAccelerated);
+ IDP_PROPERTY(16BitMixedSignednessAccelerated);
+ IDP_PROPERTY(32BitUnsignedAccelerated);
+ IDP_PROPERTY(32BitSignedAccelerated);
+ IDP_PROPERTY(32BitMixedSignednessAccelerated);
+ IDP_PROPERTY(64BitUnsignedAccelerated);
+ IDP_PROPERTY(64BitSignedAccelerated);
+ IDP_PROPERTY(64BitMixedSignednessAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating8BitUnsignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating8BitSignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating8BitMixedSignednessAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating4x8BitPackedUnsignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating4x8BitPackedSignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating4x8BitPackedMixedSignednessAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating16BitUnsignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating16BitSignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating16BitMixedSignednessAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating32BitUnsignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating32BitSignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating32BitMixedSignednessAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating64BitUnsignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating64BitSignedAccelerated);
+ IDP_PROPERTY(AccumulatingSaturating64BitMixedSignednessAccelerated);
+#undef IDP_PROPERTY
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES: {
+ VkPhysicalDeviceSubgroupSizeControlProperties *properties = (void *)ext;
+ CORE_PROPERTY(minSubgroupSize);
+ CORE_PROPERTY(maxSubgroupSize);
+ CORE_PROPERTY(maxComputeWorkgroupSubgroups);
+ CORE_PROPERTY(requiredSubgroupSizeStages);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES: {
+ VkPhysicalDeviceTexelBufferAlignmentProperties *properties = (void *)ext;
+ CORE_PROPERTY(storageTexelBufferOffsetAlignmentBytes);
+ CORE_PROPERTY(storageTexelBufferOffsetSingleTexelAlignment);
+ CORE_PROPERTY(uniformTexelBufferOffsetAlignmentBytes);
+ CORE_PROPERTY(uniformTexelBufferOffsetSingleTexelAlignment);
+ return true;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES:
+ vk_copy_struct_guts(ext, (void *)core, sizeof(*core));
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+#undef CORE_RENAMED_PROPERTY
+#undef CORE_PROPERTY
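+
+/* A sketch of how a driver's vkGetPhysicalDeviceProperties2 is expected to
+ * consume the helpers above; "core_1_x" are the driver's pre-filled
+ * VkPhysicalDeviceVulkanXXProperties and the "drv"/"pdevice" names are
+ * hypothetical:
+ *
+ *    vk_foreach_struct(ext, pProperties->pNext) {
+ *       if (vk_get_physical_device_core_1_1_property_ext(ext, &core_1_1))
+ *          continue;
+ *       if (vk_get_physical_device_core_1_2_property_ext(ext, &core_1_2))
+ *          continue;
+ *       if (vk_get_physical_device_core_1_3_property_ext(ext, &core_1_3))
+ *          continue;
+ *
+ *       drv_get_physical_device_property_ext(pdevice, ext);
+ *    }
+ */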
+
diff --git a/src/vulkan/runtime/vk_device.h b/src/vulkan/runtime/vk_device.h
new file mode 100644
index 00000000000..37e56771062
--- /dev/null
+++ b/src/vulkan/runtime/vk_device.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_DEVICE_H
+#define VK_DEVICE_H
+
+#include "rmv/vk_rmv_common.h"
+#include "vk_dispatch_table.h"
+#include "vk_extensions.h"
+#include "vk_object.h"
+#include "vk_physical_device_features.h"
+
+#include "util/list.h"
+#include "util/simple_mtx.h"
+#include "util/u_atomic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_command_buffer_ops;
+struct vk_device_shader_ops;
+struct vk_sync;
+
+enum vk_queue_submit_mode {
+ /** Submits happen immediately
+ *
+ * `vkQueueSubmit()` and `vkQueueBindSparse()` call
+ * ``vk_queue::driver_submit`` directly for all submits and the last call to
+ * ``vk_queue::driver_submit`` will have completed by the time
+ * `vkQueueSubmit()` or `vkQueueBindSparse()` return.
+ */
+ VK_QUEUE_SUBMIT_MODE_IMMEDIATE,
+
+ /** Submits may be deferred until a future `vk_queue_flush()`
+ *
+ * Submits are added to the queue and `vk_queue_flush()` is called.
+ * However, any submits with unsatisfied dependencies will be left on the
+ * queue until a future `vk_queue_flush()` call. This is used for
+ * implementing emulated timeline semaphores without threading.
+ */
+ VK_QUEUE_SUBMIT_MODE_DEFERRED,
+
+ /** Submits will be added to the queue and handled later by a thread
+ *
+ * This places additional requirements on the vk_sync types used by the
+ * driver:
+ *
+ * 1. All `vk_sync` types which support `VK_SYNC_FEATURE_GPU_WAIT` also
+ * support `VK_SYNC_FEATURE_WAIT_PENDING` so that the threads can
+ * sort out when a given submit has all its dependencies resolved.
+ *
+ * 2. All binary `vk_sync` types which support `VK_SYNC_FEATURE_GPU_WAIT`
+ * also support `VK_SYNC_FEATURE_CPU_RESET` so we can reset
+ * semaphores after waiting on them.
+ *
+ * 3. All vk_sync types used as permanent payloads of semaphores support
+ * ``vk_sync_type::move`` so that it can move the pending signal into a
+ * temporary vk_sync and reset the semaphore.
+ *
+    * This is required for shared timeline semaphores, where we need to handle
+    * wait-before-signal by threading in the driver if we ever see an
+    * unresolved dependency.
+ */
+ VK_QUEUE_SUBMIT_MODE_THREADED,
+
+ /** Threaded but only if we need it to resolve dependencies
+ *
+ * This imposes all the same requirements on `vk_sync` types as
+ * `VK_QUEUE_SUBMIT_MODE_THREADED`.
+ */
+ VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND,
+};
+
+/** Base struct for VkDevice */
+struct vk_device {
+ struct vk_object_base base;
+
+ /** Allocator used to create this device
+ *
+ * This is used as a fall-back for when a NULL pAllocator is passed into a
+ * device-level create function such as vkCreateImage().
+ */
+ VkAllocationCallbacks alloc;
+
+ /** Pointer to the physical device */
+ struct vk_physical_device *physical;
+
+ /** Table of enabled extensions */
+ struct vk_device_extension_table enabled_extensions;
+
+ /** Table of enabled features */
+ struct vk_features enabled_features;
+
+ /** Device-level dispatch table */
+ struct vk_device_dispatch_table dispatch_table;
+
+ /** Command dispatch table
+ *
+ * This is used for emulated secondary command buffer support. To use
+ * emulated (trace/replay) secondary command buffers:
+ *
+ * 1. Provide your "real" command buffer dispatch table here. Because
+ * this doesn't get populated by vk_device_init(), the driver will have
+ * to add the vk_common entrypoints to this table itself.
+ *
+ * 2. Add vk_enqueue_unless_primary_device_entrypoint_table to your device
+ * level dispatch table.
+ */
+ const struct vk_device_dispatch_table *command_dispatch_table;
+
+ /** Command buffer vtable when using the common command pool */
+ const struct vk_command_buffer_ops *command_buffer_ops;
+
+ /** Shader vtable for VK_EXT_shader_object and common pipelines */
+ const struct vk_device_shader_ops *shader_ops;
+
+ /** Driver provided callback for capturing traces
+ *
+ * Triggers for this callback are:
+ * - Keyboard input (F12)
+ * - Creation of a trigger file
+ * - Reaching the trace frame
+ */
+ VkResult (*capture_trace)(VkQueue queue);
+
+ uint32_t current_frame;
+ bool trace_hotkey_trigger;
+ simple_mtx_t trace_mtx;
+
+ /* For VK_EXT_private_data */
+ uint32_t private_data_next_index;
+
+ struct list_head queues;
+
+ struct {
+ int lost;
+ bool reported;
+ } _lost;
+
+ /** Checks the status of this device
+ *
+ * This is expected to return either VK_SUCCESS or VK_ERROR_DEVICE_LOST.
+ * It is called before ``vk_queue::driver_submit`` and after every non-trivial
+ * wait operation to ensure the device is still around. This gives the
+ * driver a hook to ask the kernel if its device is still valid. If the
+ * kernel says the device has been lost, it MUST call vk_device_set_lost().
+ *
+ * This function may be called from any thread at any time.
+ */
+ VkResult (*check_status)(struct vk_device *device);
+
+ /** Creates a vk_sync that wraps a memory object
+ *
+ * This is always a one-shot object so it need not track any additional
+ * state. Since it's intended for synchronizing between processes using
+ * implicit synchronization mechanisms, no such tracking would be valid
+ * anyway.
+ *
+ * If `signal_memory` is set, the resulting vk_sync will be used to signal
+    * the memory object from a queue via ``vk_queue_submit::signals``. The common
+ * code guarantees that, by the time vkQueueSubmit() returns, the signal
+ * operation has been submitted to the kernel via the driver's
+ * ``vk_queue::driver_submit`` hook. This means that any vkQueueSubmit() call
+ * which needs implicit synchronization may block.
+ *
+ * If `signal_memory` is not set, it can be assumed that memory object
+ * already has a signal operation pending from some other process and we
+ * need only wait on it.
+ */
+ VkResult (*create_sync_for_memory)(struct vk_device *device,
+ VkDeviceMemory memory,
+ bool signal_memory,
+ struct vk_sync **sync_out);
+
+ /* Set by vk_device_set_drm_fd() */
+ int drm_fd;
+
+ /** Implicit pipeline cache, or NULL */
+ struct vk_pipeline_cache *mem_cache;
+
+ /** An enum describing how timeline semaphores work */
+ enum vk_device_timeline_mode {
+ /** Timeline semaphores are not supported */
+ VK_DEVICE_TIMELINE_MODE_NONE,
+
+ /** Timeline semaphores are emulated with vk_timeline
+ *
+ * In this mode, timeline semaphores are emulated using vk_timeline
+ * which is a collection of binary semaphores, one per time point.
+ * These timeline semaphores cannot be shared because the data structure
+ * exists entirely in userspace. These timelines are virtually
+ * invisible to the driver; all it sees are the binary vk_syncs, one per
+ * time point.
+ *
+ * To handle wait-before-signal, we place all vk_queue_submits in the
+ * queue's submit list in vkQueueSubmit() and call vk_device_flush() at
+ * key points such as the end of vkQueueSubmit() and vkSemaphoreSignal().
+ * This ensures that, as soon as a given submit's dependencies are fully
+ * resolvable, it gets submitted to the driver.
+ */
+ VK_DEVICE_TIMELINE_MODE_EMULATED,
+
+ /** Timeline semaphores are a kernel-assisted emulation
+ *
+ * In this mode, timeline semaphores are still technically an emulation
+ * in the sense that they don't support wait-before-signal natively.
+ * Instead, all GPU-waitable objects support a CPU wait-for-pending
+ * operation which lets the userspace driver wait until a given event
+ * on the (possibly shared) vk_sync is pending. The event is "pending"
+ * if a job has been submitted to the kernel (possibly from a different
+       * process) which will signal it. In vkQueueSubmit(), we use this wait
+ * mode to detect waits which are not yet pending and, the first time we
+ * do, spawn a thread to manage the queue. That thread waits for each
+ * submit's waits to all be pending before submitting to the driver
+ * queue.
+ *
+ * We have to be a bit more careful about a few things in this mode.
+ * In particular, we can never assume that any given wait operation is
+ * pending. For instance, when we go to export a sync file from a
+ * binary semaphore, we need to first wait for it to be pending. The
+ * spec guarantees that the vast majority of these waits return almost
+ * immediately, but we do need to insert them for correctness.
+ */
+ VK_DEVICE_TIMELINE_MODE_ASSISTED,
+
+ /** Timeline semaphores are 100% native
+ *
+ * In this mode, wait-before-signal is natively supported by the
+ * underlying timeline implementation. We can submit-and-forget and
+ * assume that dependencies will get resolved for us by the kernel.
+ * Currently, this isn't supported by any Linux primitives.
+ */
+ VK_DEVICE_TIMELINE_MODE_NATIVE,
+ } timeline_mode;
+
+ /** Per-device submit mode
+ *
+ * This represents the device-wide submit strategy which may be different
+ * from the per-queue submit mode. See vk_queue.submit.mode for more
+ * details.
+ */
+ enum vk_queue_submit_mode submit_mode;
+
+ struct vk_memory_trace_data memory_trace_data;
+
+ mtx_t swapchain_private_mtx;
+ struct hash_table *swapchain_private;
+ mtx_t swapchain_name_mtx;
+ struct hash_table *swapchain_name;
+};
+
+VK_DEFINE_HANDLE_CASTS(vk_device, base, VkDevice,
+ VK_OBJECT_TYPE_DEVICE);
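+
+/* Drivers typically embed vk_device at the start of their own device struct
+ * so the handle casts above and container_of() both work. A minimal sketch
+ * (hypothetical "drv" driver):
+ *
+ *    struct drv_device {
+ *       struct vk_device vk;
+ *       int fd;
+ *    };
+ *
+ *    VK_FROM_HANDLE(vk_device, dev, _device);
+ *    struct drv_device *drv_dev = container_of(dev, struct drv_device, vk);
+ */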
+
+/** Initialize a vk_device
+ *
+ * Along with initializing the data structures in `vk_device`, this function
+ * checks that every extension specified by
+ * ``VkDeviceCreateInfo::ppEnabledExtensionNames`` is actually supported by
+ * the physical device and returns `VK_ERROR_EXTENSION_NOT_PRESENT` if an
+ * unsupported extension is requested. It also checks all the feature structs
+ * chained into the `pCreateInfo->pNext` chain against the features returned
+ * by `vkGetPhysicalDeviceFeatures2` and returns
+ * `VK_ERROR_FEATURE_NOT_PRESENT` if an unsupported feature is requested.
+ *
+ * :param device: |out| The device to initialize
+ * :param physical_device: |in| The physical device
+ * :param dispatch_table: |in| Device-level dispatch table
+ * :param pCreateInfo: |in| VkDeviceCreateInfo pointer passed to
+ * `vkCreateDevice()`
+ * :param alloc: |in| Allocation callbacks passed to
+ * `vkCreateDevice()`
+ */
+VkResult MUST_CHECK
+vk_device_init(struct vk_device *device,
+ struct vk_physical_device *physical_device,
+ const struct vk_device_dispatch_table *dispatch_table,
+ const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc);
+
+static inline void
+vk_device_set_drm_fd(struct vk_device *device, int drm_fd)
+{
+ device->drm_fd = drm_fd;
+}
+
+/** Tears down a vk_device
+ *
+ * :param device: |out| The device to tear down
+ */
+void
+vk_device_finish(struct vk_device *device);
+
+/** Enables threaded submit on this device
+ *
+ * This doesn't ensure that threaded submit will be used. It just disables
+ * the deferred submit option for emulated timeline semaphores and forces them
+ * to always use the threaded path. It also asserts that the vk_sync types
+ * used by the driver are compatible with threaded submit.
+ *
+ * This must be called before any queues are created.
+ */
+void vk_device_enable_threaded_submit(struct vk_device *device);
+
+static inline bool
+vk_device_supports_threaded_submit(const struct vk_device *device)
+{
+ return device->submit_mode == VK_QUEUE_SUBMIT_MODE_THREADED ||
+ device->submit_mode == VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
+}
+
+VkResult vk_device_flush(struct vk_device *device);
+
+VkResult PRINTFLIKE(4, 5)
+_vk_device_set_lost(struct vk_device *device,
+ const char *file, int line,
+ const char *msg, ...);
+
+#define vk_device_set_lost(device, ...) \
+ _vk_device_set_lost(device, __FILE__, __LINE__, __VA_ARGS__)
+
+void _vk_device_report_lost(struct vk_device *device);
+
+static inline bool
+vk_device_is_lost_no_report(struct vk_device *device)
+{
+ return p_atomic_read(&device->_lost.lost) > 0;
+}
+
+static inline bool
+vk_device_is_lost(struct vk_device *device)
+{
+ int lost = vk_device_is_lost_no_report(device);
+ if (unlikely(lost && !device->_lost.reported))
+ _vk_device_report_lost(device);
+ return lost;
+}
+
+static inline VkResult
+vk_device_check_status(struct vk_device *device)
+{
+ if (vk_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
+ if (!device->check_status)
+ return VK_SUCCESS;
+
+ VkResult result = device->check_status(device);
+
+ assert(result == VK_SUCCESS || result == VK_ERROR_DEVICE_LOST);
+ if (result == VK_ERROR_DEVICE_LOST)
+ assert(vk_device_is_lost_no_report(device));
+
+ return result;
+}
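+
+/* A minimal sketch of a driver-provided check_status hook, assuming a
+ * hypothetical drv_kernel_reports_hang() query; on loss it must go through
+ * vk_device_set_lost() so the lost state is recorded and logged:
+ *
+ *    static VkResult
+ *    drv_check_status(struct vk_device *device)
+ *    {
+ *       if (drv_kernel_reports_hang(device))
+ *          return vk_device_set_lost(device, "GPU hang detected by kernel");
+ *
+ *       return VK_SUCCESS;
+ *    }
+ */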
+
+#ifndef _WIN32
+
+uint64_t
+vk_clock_gettime(clockid_t clock_id);
+
+static inline uint64_t
+vk_time_max_deviation(uint64_t begin, uint64_t end, uint64_t max_clock_period)
+{
+ /*
+ * The maximum deviation is the sum of the interval over which we
+ * perform the sampling and the maximum period of any sampled
+ * clock. That's because the maximum skew between any two sampled
+ * clock edges is when the sampled clock with the largest period is
+ * sampled at the end of that period but right at the beginning of the
+ * sampling interval and some other clock is sampled right at the
+ * beginning of its sampling period and right at the end of the
+ * sampling interval. Let's assume the GPU has the longest clock
+ * period and that the application is sampling GPU and monotonic:
+ *
+ * s e
+ * w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e f
+ * Raw -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
+ *
+ * g
+ * 0 1 2 3
+ * GPU -----_____-----_____-----_____-----_____
+ *
+ * m
+ * x y z 0 1 2 3 4 5 6 7 8 9 a b c
+ * Monotonic -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
+ *
+ * Interval <----------------->
+ * Deviation <-------------------------->
+ *
+ * s = read(raw) 2
+ * g = read(GPU) 1
+ * m = read(monotonic) 2
+ * e = read(raw) b
+ *
+ * We round the sample interval up by one tick to cover sampling error
+ * in the interval clock
+    * in the interval clock.
+
+ uint64_t sample_interval = end - begin + 1;
+
+ return sample_interval + max_clock_period;
+}
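+
+/* A minimal sketch of how the two helpers above combine in a driver's
+ * vkGetCalibratedTimestampsEXT, assuming a hypothetical
+ * drv_read_gpu_timestamp() and a known gpu_tick_ns clock period:
+ *
+ *    uint64_t begin = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
+ *    uint64_t gpu_ts = drv_read_gpu_timestamp(device);
+ *    uint64_t end = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
+ *
+ *    *pMaxDeviation = vk_time_max_deviation(begin, end, gpu_tick_ns);
+ */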
+
+#endif //!_WIN32
+
+PFN_vkVoidFunction
+vk_device_get_proc_addr(const struct vk_device *device,
+ const char *name);
+
+bool vk_get_physical_device_core_1_1_property_ext(struct VkBaseOutStructure *ext,
+ const VkPhysicalDeviceVulkan11Properties *core);
+bool vk_get_physical_device_core_1_2_property_ext(struct VkBaseOutStructure *ext,
+ const VkPhysicalDeviceVulkan12Properties *core);
+bool vk_get_physical_device_core_1_3_property_ext(struct VkBaseOutStructure *ext,
+ const VkPhysicalDeviceVulkan13Properties *core);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_DEVICE_H */
diff --git a/src/vulkan/runtime/vk_device_memory.c b/src/vulkan/runtime/vk_device_memory.c
new file mode 100644
index 00000000000..e0a742e198e
--- /dev/null
+++ b/src/vulkan/runtime/vk_device_memory.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright © 2023 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_device_memory.h"
+
+#include "vk_android.h"
+#include "vk_common_entrypoints.h"
+#include "vk_util.h"
+
+#if DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26
+#include <vndk/hardware_buffer.h>
+#endif
+
+void *
+vk_device_memory_create(struct vk_device *device,
+ const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_device_memory *mem =
+ vk_object_zalloc(device, alloc, size, VK_OBJECT_TYPE_DEVICE_MEMORY);
+ if (mem == NULL)
+ return NULL;
+
+ assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+
+ mem->size = pAllocateInfo->allocationSize;
+ mem->memory_type_index = pAllocateInfo->memoryTypeIndex;
+
+ vk_foreach_struct_const(ext, pAllocateInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO: {
+ const VkExportMemoryAllocateInfo *export_info = (void *)ext;
+ mem->export_handle_types = export_info->handleTypes;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: {
+#if DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26
+ const VkImportAndroidHardwareBufferInfoANDROID *ahb_info = (void *)ext;
+
+ assert(mem->import_handle_type == 0);
+ mem->import_handle_type =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
+
+ /* From the Vulkan 1.3.242 spec:
+ *
+ * "If the vkAllocateMemory command succeeds, the implementation
+ * must acquire a reference to the imported hardware buffer, which
+ * it must release when the device memory object is freed. If the
+ * command fails, the implementation must not retain a
+ * reference."
+ *
+ * We assume that if the driver fails to create its memory object,
+ * it will call vk_device_memory_destroy which will delete our
+ * reference.
+ */
+ AHardwareBuffer_acquire(ahb_info->buffer);
+ mem->ahardware_buffer = ahb_info->buffer;
+ break;
+#else
+ unreachable("AHardwareBuffer import requires Android >= 26");
+#endif /* DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26 */
+ }
+
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR: {
+ const VkImportMemoryFdInfoKHR *fd_info = (void *)ext;
+ if (fd_info->handleType) {
+ assert(fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
+ fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ assert(mem->import_handle_type == 0);
+ mem->import_handle_type = fd_info->handleType;
+ }
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT: {
+ const VkImportMemoryHostPointerInfoEXT *host_ptr_info = (void *)ext;
+ if (host_ptr_info->handleType) {
+ assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT ||
+ host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT);
+
+ assert(mem->import_handle_type == 0);
+ mem->import_handle_type = host_ptr_info->handleType;
+ mem->host_ptr = host_ptr_info->pHostPointer;
+ }
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR: {
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ const VkImportMemoryWin32HandleInfoKHR *w32h_info = (void *)ext;
+ if (w32h_info->handleType) {
+ assert(w32h_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
+ w32h_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT ||
+ w32h_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT ||
+ w32h_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT ||
+ w32h_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT ||
+ w32h_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT);
+ assert(mem->import_handle_type == 0);
+ mem->import_handle_type = w32h_info->handleType;
+ }
+ break;
+#else
+ unreachable("Win32 platform support disabled");
+#endif
+ }
+
+ case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO: {
+ const VkMemoryAllocateFlagsInfo *flags_info = (void *)ext;
+ mem->alloc_flags = flags_info->flags;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ /* From the Vulkan Specification 1.3.261:
+ *
+ * VUID-VkMemoryAllocateInfo-allocationSize-07897
+ *
+ * "If the parameters do not define an import or export operation,
+ * allocationSize must be greater than 0."
+ */
+ if (!mem->import_handle_type && !mem->export_handle_types)
+ assert(pAllocateInfo->allocationSize > 0);
+
+ /* From the Vulkan Specification 1.3.261:
+ *
+ * VUID-VkMemoryAllocateInfo-allocationSize-07899
+ *
+ * "If the parameters define an export operation and the handle type is
+ * not VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID,
+ * allocationSize must be greater than 0."
+ */
+ if (mem->export_handle_types &&
+ mem->export_handle_types !=
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
+ assert(pAllocateInfo->allocationSize > 0);
+
+ if ((mem->export_handle_types &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) &&
+ mem->ahardware_buffer == NULL) {
+ /* If we need to be able to export an Android hardware buffer but none
+ * is provided as an import, create a new one.
+ */
+ mem->ahardware_buffer = vk_alloc_ahardware_buffer(pAllocateInfo);
+ if (mem->ahardware_buffer == NULL) {
+ vk_device_memory_destroy(device, alloc, mem);
+ return NULL;
+ }
+ }
+
+ return mem;
+}
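+
+/* A sketch of the expected call pattern from a driver's vkAllocateMemory;
+ * the "drv" names are hypothetical and assume struct drv_device_memory embeds
+ * struct vk_device_memory as its first member named "vk":
+ *
+ *    struct drv_device_memory *mem =
+ *       vk_device_memory_create(&dev->vk, pAllocateInfo, pAllocator,
+ *                               sizeof(*mem));
+ *    if (mem == NULL)
+ *       return VK_ERROR_OUT_OF_HOST_MEMORY;
+ *
+ *    (allocate or import the BO here, honoring mem->vk.import_handle_type,
+ *     mem->vk.host_ptr and mem->vk.ahardware_buffer)
+ *
+ *    *pMemory = vk_device_memory_to_handle(&mem->vk);
+ *    return VK_SUCCESS;
+ */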
+
+void
+vk_device_memory_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_device_memory *mem)
+{
+
+#if DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26
+ if (mem->ahardware_buffer)
+ AHardwareBuffer_release(mem->ahardware_buffer);
+#endif /* DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26 */
+
+ vk_object_free(device, alloc, mem);
+}
+
+#if DETECT_OS_ANDROID && ANDROID_API_LEVEL >= 26
+VkResult
+vk_common_GetMemoryAndroidHardwareBufferANDROID(
+ VkDevice _device,
+ const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
+ struct AHardwareBuffer **pBuffer)
+{
+ VK_FROM_HANDLE(vk_device_memory, mem, pInfo->memory);
+
+ /* Some quotes from Vulkan spec:
+ *
+ * "If the device memory was created by importing an Android hardware
+ * buffer, vkGetMemoryAndroidHardwareBufferANDROID must return that same
+ * Android hardware buffer object."
+ *
+ * "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
+ * must have been included in VkExportMemoryAllocateInfo::handleTypes
+ * when memory was created."
+ */
+ if (mem->ahardware_buffer) {
+ *pBuffer = mem->ahardware_buffer;
+ /* Increase refcount. */
+ AHardwareBuffer_acquire(*pBuffer);
+ return VK_SUCCESS;
+ }
+
+ return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+}
+#endif
diff --git a/src/vulkan/runtime/vk_device_memory.h b/src/vulkan/runtime/vk_device_memory.h
new file mode 100644
index 00000000000..6e490172011
--- /dev/null
+++ b/src/vulkan/runtime/vk_device_memory.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright © 2023 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_DEVICE_MEMORY_H
+#define VK_DEVICE_MEMORY_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct AHardwareBuffer;
+
+struct vk_device_memory {
+ struct vk_object_base base;
+
+ /* VkMemoryAllocateFlagsInfo::flags */
+ VkMemoryAllocateFlags alloc_flags;
+
+ /* VkMemoryAllocateInfo::allocationSize */
+ VkDeviceSize size;
+
+ /* VkMemoryAllocateInfo::memoryTypeIndex */
+ uint32_t memory_type_index;
+
+ /* Import handle type (if any) */
+ VkExternalMemoryHandleTypeFlags import_handle_type;
+
+ /* VkExportMemoryAllocateInfo::handleTypes */
+ VkExternalMemoryHandleTypeFlags export_handle_types;
+
+ /* VkImportMemoryHostPointerInfoEXT::pHostPointer */
+ void *host_ptr;
+
+ /* VkImportAndroidHardwareBufferInfoANDROID::buffer */
+ struct AHardwareBuffer *ahardware_buffer;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_device_memory, base, VkDeviceMemory,
+ VK_OBJECT_TYPE_DEVICE_MEMORY);
+
+void *vk_device_memory_create(struct vk_device *device,
+ const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+void vk_device_memory_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_device_memory *mem);
+
+static inline uint64_t
+vk_device_memory_range(const struct vk_device_memory *mem,
+ uint64_t offset, uint64_t range)
+{
+ assert(offset <= mem->size);
+ if (range == VK_WHOLE_SIZE) {
+ return mem->size - offset;
+ } else {
+ assert(range + offset >= range);
+ assert(range + offset <= mem->size);
+ return range;
+ }
+}
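+
+/* For example, resolving a VkMemoryMapInfoKHR range where size may be
+ * VK_WHOLE_SIZE (sketch; "mem" is the struct vk_device_memory):
+ *
+ *    uint64_t map_size =
+ *       vk_device_memory_range(mem, pMemoryMapInfo->offset,
+ *                              pMemoryMapInfo->size);
+ */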
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_DEVICE_MEMORY_H */
diff --git a/src/vulkan/runtime/vk_drm_syncobj.c b/src/vulkan/runtime/vk_drm_syncobj.c
new file mode 100644
index 00000000000..38da5e123cb
--- /dev/null
+++ b/src/vulkan/runtime/vk_drm_syncobj.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_drm_syncobj.h"
+
+#include <sched.h>
+#include <xf86drm.h>
+
+#include "drm-uapi/drm.h"
+
+#include "util/os_time.h"
+
+#include "vk_device.h"
+#include "vk_log.h"
+#include "vk_util.h"
+
+static struct vk_drm_syncobj *
+to_drm_syncobj(struct vk_sync *sync)
+{
+ assert(vk_sync_type_is_drm_syncobj(sync->type));
+ return container_of(sync, struct vk_drm_syncobj, base);
+}
+
+static VkResult
+vk_drm_syncobj_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ uint32_t flags = 0;
+ if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value)
+ flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
+
+ assert(device->drm_fd >= 0);
+ int err = drmSyncobjCreate(device->drm_fd, flags, &sobj->syncobj);
+ if (err < 0) {
+ return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
+ "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
+ }
+
+ if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) {
+ err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj,
+ &initial_value, 1);
+ if (err < 0) {
+ vk_drm_syncobj_finish(device, sync);
+ return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
+                          "DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL failed: %m");
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+vk_drm_syncobj_finish(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
+ assert(err == 0);
+}
+
+static VkResult
+vk_drm_syncobj_signal(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t value)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ int err;
+ if (sync->flags & VK_SYNC_IS_TIMELINE)
+ err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj, &value, 1);
+ else
+ err = drmSyncobjSignal(device->drm_fd, &sobj->syncobj, 1);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_SIGNAL failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_get_value(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t *value)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ int err = drmSyncobjQuery(device->drm_fd, &sobj->syncobj, value, 1);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_QUERY failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_reset(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ int err = drmSyncobjReset(device->drm_fd, &sobj->syncobj, 1);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_RESET failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+sync_has_sync_file(struct vk_device *device, struct vk_sync *sync)
+{
+ uint32_t handle = to_drm_syncobj(sync)->syncobj;
+
+ int fd = -1;
+ int err = drmSyncobjExportSyncFile(device->drm_fd, handle, &fd);
+ if (!err) {
+ close(fd);
+ return VK_SUCCESS;
+ }
+
+ /* On the off chance the sync_file export repeatedly fails for some
+ * unexpected reason, we want to ensure this function will return success
+ * eventually. Do a zero-time syncobj wait if the export failed.
+ */
+ err = drmSyncobjWait(device->drm_fd, &handle, 1, 0 /* timeout */,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ NULL /* first_signaled */);
+ if (!err) {
+ return VK_SUCCESS;
+ } else if (errno == ETIME) {
+ return VK_TIMEOUT;
+ } else {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_WAIT failed: %m");
+ }
+}
+
+static VkResult
+spin_wait_for_sync_file(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ if (wait_flags & VK_SYNC_WAIT_ANY) {
+ while (1) {
+ for (uint32_t i = 0; i < wait_count; i++) {
+ VkResult result = sync_has_sync_file(device, waits[i].sync);
+ if (result != VK_TIMEOUT)
+ return result;
+ }
+
+ if (os_time_get_nano() >= abs_timeout_ns)
+ return VK_TIMEOUT;
+
+ sched_yield();
+ }
+ } else {
+ for (uint32_t i = 0; i < wait_count; i++) {
+ while (1) {
+ VkResult result = sync_has_sync_file(device, waits[i].sync);
+ if (result != VK_TIMEOUT)
+ return result;
+
+ if (os_time_get_nano() >= abs_timeout_ns)
+ return VK_TIMEOUT;
+
+ sched_yield();
+ }
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_wait_many(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ if ((wait_flags & VK_SYNC_WAIT_PENDING) &&
+ !(waits[0].sync->type->features & VK_SYNC_FEATURE_TIMELINE)) {
+ /* Sadly, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE was never implemented
+       * for drivers that don't support timelines. Instead, we have to spin,
+       * repeatedly trying to export a sync_file from each syncobj
+       * (DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD) until it succeeds.
+ */
+ return spin_wait_for_sync_file(device, wait_count, waits,
+ wait_flags, abs_timeout_ns);
+ }
+
+ /* Syncobj timeouts are signed */
+ abs_timeout_ns = MIN2(abs_timeout_ns, (uint64_t)INT64_MAX);
+
+ STACK_ARRAY(uint32_t, handles, wait_count);
+ STACK_ARRAY(uint64_t, wait_values, wait_count);
+
+ uint32_t j = 0;
+ bool has_timeline = false;
+ for (uint32_t i = 0; i < wait_count; i++) {
+ /* The syncobj API doesn't like wait values of 0 but it's safe to skip
+ * them because a wait for 0 is a no-op.
+ */
+ if (waits[i].sync->flags & VK_SYNC_IS_TIMELINE) {
+ if (waits[i].wait_value == 0)
+ continue;
+
+ has_timeline = true;
+ }
+
+ handles[j] = to_drm_syncobj(waits[i].sync)->syncobj;
+ wait_values[j] = waits[i].wait_value;
+ j++;
+ }
+ assert(j <= wait_count);
+ wait_count = j;
+
+ uint32_t syncobj_wait_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ if (!(wait_flags & VK_SYNC_WAIT_ANY))
+ syncobj_wait_flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
+
+ assert(device->drm_fd >= 0);
+ int err;
+ if (wait_count == 0) {
+ err = 0;
+ } else if (wait_flags & VK_SYNC_WAIT_PENDING) {
+ /* We always use a timeline wait for WAIT_PENDING, even for binary
+ * syncobjs because the non-timeline wait doesn't support
+ * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE.
+ */
+ err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values,
+ wait_count, abs_timeout_ns,
+ syncobj_wait_flags |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
+ NULL /* first_signaled */);
+ } else if (has_timeline) {
+ err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values,
+ wait_count, abs_timeout_ns,
+ syncobj_wait_flags,
+ NULL /* first_signaled */);
+ } else {
+ err = drmSyncobjWait(device->drm_fd, handles,
+ wait_count, abs_timeout_ns,
+ syncobj_wait_flags,
+ NULL /* first_signaled */);
+ }
+
+ STACK_ARRAY_FINISH(handles);
+ STACK_ARRAY_FINISH(wait_values);
+
+ if (err && errno == ETIME) {
+ return VK_TIMEOUT;
+ } else if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_WAIT failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_import_opaque_fd(struct vk_device *device,
+ struct vk_sync *sync,
+ int fd)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ uint32_t new_handle;
+ int err = drmSyncobjFDToHandle(device->drm_fd, fd, &new_handle);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
+ }
+
+ err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
+ assert(!err);
+
+ sobj->syncobj = new_handle;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_export_opaque_fd(struct vk_device *device,
+ struct vk_sync *sync,
+ int *fd)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ int err = drmSyncobjHandleToFD(device->drm_fd, sobj->syncobj, fd);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_import_sync_file(struct vk_device *device,
+ struct vk_sync *sync,
+ int sync_file)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ int err = drmSyncobjImportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_export_sync_file(struct vk_device *device,
+ struct vk_sync *sync,
+ int *sync_file)
+{
+ struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+ assert(device->drm_fd >= 0);
+ int err = drmSyncobjExportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
+ if (err) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_drm_syncobj_move(struct vk_device *device,
+ struct vk_sync *dst,
+ struct vk_sync *src)
+{
+ struct vk_drm_syncobj *dst_sobj = to_drm_syncobj(dst);
+ struct vk_drm_syncobj *src_sobj = to_drm_syncobj(src);
+ VkResult result;
+
+ if (!(dst->flags & VK_SYNC_IS_SHARED) &&
+ !(src->flags & VK_SYNC_IS_SHARED)) {
+ result = vk_drm_syncobj_reset(device, dst);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ uint32_t tmp = dst_sobj->syncobj;
+ dst_sobj->syncobj = src_sobj->syncobj;
+ src_sobj->syncobj = tmp;
+
+ return VK_SUCCESS;
+ } else {
+ int fd;
+ result = vk_drm_syncobj_export_sync_file(device, src, &fd);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = vk_drm_syncobj_import_sync_file(device, dst, fd);
+ if (fd >= 0)
+ close(fd);
+ if (result != VK_SUCCESS)
+ return result;
+
+ return vk_drm_syncobj_reset(device, src);
+ }
+}
+
+struct vk_sync_type
+vk_drm_syncobj_get_type(int drm_fd)
+{
+ uint32_t syncobj = 0;
+ int err = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
+ if (err < 0)
+ return (struct vk_sync_type) { .features = 0 };
+
+ struct vk_sync_type type = {
+ .size = sizeof(struct vk_drm_syncobj),
+ .features = VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_GPU_WAIT |
+ VK_SYNC_FEATURE_CPU_RESET |
+ VK_SYNC_FEATURE_CPU_SIGNAL |
+ VK_SYNC_FEATURE_WAIT_PENDING,
+ .init = vk_drm_syncobj_init,
+ .finish = vk_drm_syncobj_finish,
+ .signal = vk_drm_syncobj_signal,
+ .reset = vk_drm_syncobj_reset,
+ .move = vk_drm_syncobj_move,
+ .import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
+ .export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
+ .import_sync_file = vk_drm_syncobj_import_sync_file,
+ .export_sync_file = vk_drm_syncobj_export_sync_file,
+ };
+
+ err = drmSyncobjWait(drm_fd, &syncobj, 1, 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
+ NULL /* first_signaled */);
+ if (err == 0) {
+ type.wait_many = vk_drm_syncobj_wait_many;
+ type.features |= VK_SYNC_FEATURE_CPU_WAIT |
+ VK_SYNC_FEATURE_WAIT_ANY;
+ }
+
+ uint64_t cap;
+ err = drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap);
+ if (err == 0 && cap != 0) {
+ type.get_value = vk_drm_syncobj_get_value;
+ type.features |= VK_SYNC_FEATURE_TIMELINE;
+ }
+
+ err = drmSyncobjDestroy(drm_fd, syncobj);
+ assert(err == 0);
+
+ return type;
+}
diff --git a/src/vulkan/util/vk_physical_device.h b/src/vulkan/runtime/vk_drm_syncobj.h
index fea39ae9d17..d4987f403da 100644
--- a/src/vulkan/util/vk_physical_device.h
+++ b/src/vulkan/runtime/vk_drm_syncobj.h
@@ -20,40 +20,44 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef VK_PHYSICAL_DEVICE_H
-#define VK_PHYSICAL_DEVICE_H
+#ifndef VK_DRM_SYNCOBJ_H
+#define VK_DRM_SYNCOBJ_H
-#include "vk_dispatch_table.h"
-#include "vk_extensions.h"
-#include "vk_object.h"
+#include "vk_sync.h"
+
+#include "util/macros.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct vk_physical_device {
- struct vk_object_base base;
- struct vk_instance *instance;
+struct vk_drm_syncobj {
+ struct vk_sync base;
+ uint32_t syncobj;
+};
- struct vk_device_extension_table supported_extensions;
+void vk_drm_syncobj_finish(struct vk_device *device,
+ struct vk_sync *sync);
- struct vk_physical_device_dispatch_table dispatch_table;
-};
+static inline bool
+vk_sync_type_is_drm_syncobj(const struct vk_sync_type *type)
+{
+ return type->finish == vk_drm_syncobj_finish;
+}
-VK_DEFINE_HANDLE_CASTS(vk_physical_device, base, VkPhysicalDevice,
- VK_OBJECT_TYPE_PHYSICAL_DEVICE)
+static inline struct vk_drm_syncobj *
+vk_sync_as_drm_syncobj(struct vk_sync *sync)
+{
+ if (!vk_sync_type_is_drm_syncobj(sync->type))
+ return NULL;
-VkResult MUST_CHECK
-vk_physical_device_init(struct vk_physical_device *physical_device,
- struct vk_instance *instance,
- const struct vk_device_extension_table *supported_extensions,
- const struct vk_physical_device_dispatch_table *dispatch_table);
+ return container_of(sync, struct vk_drm_syncobj, base);
+}
-void
-vk_physical_device_finish(struct vk_physical_device *physical_device);
+struct vk_sync_type vk_drm_syncobj_get_type(int drm_fd);
#ifdef __cplusplus
}
#endif
-#endif /* VK_PHYSICAL_DEVICE_H */
+#endif /* VK_DRM_SYNCOBJ_H */
diff --git a/src/vulkan/runtime/vk_fence.c b/src/vulkan/runtime/vk_fence.c
new file mode 100644
index 00000000000..77cb5a3a47f
--- /dev/null
+++ b/src/vulkan/runtime/vk_fence.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_fence.h"
+
+#include "util/os_time.h"
+#include "util/perf/cpu_trace.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_util.h"
+
+static VkExternalFenceHandleTypeFlags
+vk_sync_fence_import_types(const struct vk_sync_type *type)
+{
+ VkExternalFenceHandleTypeFlags handle_types = 0;
+
+ if (type->import_opaque_fd)
+ handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+ if (type->import_sync_file)
+ handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ return handle_types;
+}
+
+static VkExternalFenceHandleTypeFlags
+vk_sync_fence_export_types(const struct vk_sync_type *type)
+{
+ VkExternalFenceHandleTypeFlags handle_types = 0;
+
+ if (type->export_opaque_fd)
+ handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+ if (type->export_sync_file)
+ handle_types |= VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ return handle_types;
+}
+
+static VkExternalFenceHandleTypeFlags
+vk_sync_fence_handle_types(const struct vk_sync_type *type)
+{
+ return vk_sync_fence_export_types(type) &
+ vk_sync_fence_import_types(type);
+}
+
+static const struct vk_sync_type *
+get_fence_sync_type(struct vk_physical_device *pdevice,
+ VkExternalFenceHandleTypeFlags handle_types)
+{
+ static const enum vk_sync_features req_features =
+ VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_CPU_WAIT |
+ VK_SYNC_FEATURE_CPU_RESET;
+
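+   /* Return the first sync type that can back a VkFence: it has to provide a
+    * binary payload with CPU wait and CPU reset, and it has to support every
+    * requested external handle type for both import and export.
+    */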
+ for (const struct vk_sync_type *const *t =
+ pdevice->supported_sync_types; *t; t++) {
+ if (req_features & ~(*t)->features)
+ continue;
+
+ if (handle_types & ~vk_sync_fence_handle_types(*t))
+ continue;
+
+ return *t;
+ }
+
+ return NULL;
+}
+
+VkResult
+vk_fence_create(struct vk_device *device,
+ const VkFenceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ struct vk_fence **fence_out)
+{
+ struct vk_fence *fence;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
+
+ const VkExportFenceCreateInfo *export =
+ vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
+ VkExternalFenceHandleTypeFlags handle_types =
+ export ? export->handleTypes : 0;
+
+ const struct vk_sync_type *sync_type =
+ get_fence_sync_type(device->physical, handle_types);
+ if (sync_type == NULL) {
+ /* We should always be able to get a fence type for internal */
+ assert(get_fence_sync_type(device->physical, 0) != NULL);
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "Combination of external handle types is unsupported "
+ "for VkFence creation.");
+ }
+
+ /* Allocate a vk_fence + vk_sync implementation. Because the permanent
+ * field of vk_fence is the base field of the vk_sync implementation, we
+ * can make the 2 structures overlap.
+ */
+ size_t size = offsetof(struct vk_fence, permanent) + sync_type->size;
+ fence = vk_object_zalloc(device, pAllocator, size, VK_OBJECT_TYPE_FENCE);
+ if (fence == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ enum vk_sync_flags sync_flags = 0;
+ if (handle_types)
+ sync_flags |= VK_SYNC_IS_SHAREABLE;
+
+ bool signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;
+ VkResult result = vk_sync_init(device, &fence->permanent,
+ sync_type, sync_flags, signaled);
+ if (result != VK_SUCCESS) {
+ vk_object_free(device, pAllocator, fence);
+ return result;
+ }
+
+ *fence_out = fence;
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateFence(VkDevice _device,
+ const VkFenceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFence *pFence)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_fence *fence = NULL;
+
+ VkResult result = vk_fence_create(device, pCreateInfo, pAllocator, &fence);
+ if (result != VK_SUCCESS)
+ return result;
+
+ *pFence = vk_fence_to_handle(fence);
+
+ return VK_SUCCESS;
+}
+
+void
+vk_fence_reset_temporary(struct vk_device *device,
+ struct vk_fence *fence)
+{
+ if (fence->temporary == NULL)
+ return;
+
+ vk_sync_destroy(device, fence->temporary);
+ fence->temporary = NULL;
+}
+
+void
+vk_fence_destroy(struct vk_device *device,
+ struct vk_fence *fence,
+ const VkAllocationCallbacks *pAllocator)
+{
+ vk_fence_reset_temporary(device, fence);
+ vk_sync_finish(device, &fence->permanent);
+
+ vk_object_free(device, pAllocator, fence);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyFence(VkDevice _device,
+ VkFence _fence,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_fence, fence, _fence);
+
+ if (fence == NULL)
+ return;
+
+ vk_fence_destroy(device, fence, pAllocator);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_ResetFences(VkDevice _device,
+ uint32_t fenceCount,
+ const VkFence *pFences)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ for (uint32_t i = 0; i < fenceCount; i++) {
+ VK_FROM_HANDLE(vk_fence, fence, pFences[i]);
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "If any member of pFences currently has its payload imported with
+ * temporary permanence, that fence’s prior permanent payload is
+ * first restored. The remaining operations described therefore
+ * operate on the restored payload."
+ */
+ vk_fence_reset_temporary(device, fence);
+
+ VkResult result = vk_sync_reset(device, &fence->permanent);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetFenceStatus(VkDevice _device,
+ VkFence _fence)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_fence, fence, _fence);
+
+ if (vk_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
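+   /* A VK_SYNC_WAIT_COMPLETE wait with a zero timeout polls the active
+    * payload: VK_TIMEOUT means the fence is unsignaled and maps to
+    * VK_NOT_READY.
+    */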
+ VkResult result = vk_sync_wait(device, vk_fence_get_active_sync(fence),
+ 0 /* wait_value */,
+ VK_SYNC_WAIT_COMPLETE,
+ 0 /* abs_timeout_ns */);
+ if (result == VK_TIMEOUT)
+ return VK_NOT_READY;
+ else
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_WaitForFences(VkDevice _device,
+ uint32_t fenceCount,
+ const VkFence *pFences,
+ VkBool32 waitAll,
+ uint64_t timeout)
+{
+ MESA_TRACE_FUNC();
+
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ if (vk_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
+ if (fenceCount == 0)
+ return VK_SUCCESS;
+
+ uint64_t abs_timeout_ns = os_time_get_absolute_timeout(timeout);
+
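+   /* Build one vk_sync_wait per fence, each pointing at that fence's active
+    * payload (temporary if imported, otherwise permanent), and wait on them
+    * all at once.
+    */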
+ STACK_ARRAY(struct vk_sync_wait, waits, fenceCount);
+
+ for (uint32_t i = 0; i < fenceCount; i++) {
+ VK_FROM_HANDLE(vk_fence, fence, pFences[i]);
+ waits[i] = (struct vk_sync_wait) {
+ .sync = vk_fence_get_active_sync(fence),
+ .stage_mask = ~(VkPipelineStageFlags2)0,
+ };
+ }
+
+ enum vk_sync_wait_flags wait_flags = VK_SYNC_WAIT_COMPLETE;
+ if (!waitAll)
+ wait_flags |= VK_SYNC_WAIT_ANY;
+
+ VkResult result = vk_sync_wait_many(device, fenceCount, waits,
+ wait_flags, abs_timeout_ns);
+
+ STACK_ARRAY_FINISH(waits);
+
+ VkResult device_status = vk_device_check_status(device);
+ if (device_status != VK_SUCCESS)
+ return device_status;
+
+ return result;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetPhysicalDeviceExternalFenceProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
+ VkExternalFenceProperties *pExternalFenceProperties)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+
+ assert(pExternalFenceInfo->sType ==
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO);
+ const VkExternalFenceHandleTypeFlagBits handle_type =
+ pExternalFenceInfo->handleType;
+
+ const struct vk_sync_type *sync_type =
+ get_fence_sync_type(pdevice, handle_type);
+ if (sync_type == NULL) {
+ pExternalFenceProperties->exportFromImportedHandleTypes = 0;
+ pExternalFenceProperties->compatibleHandleTypes = 0;
+ pExternalFenceProperties->externalFenceFeatures = 0;
+ return;
+ }
+
+ VkExternalFenceHandleTypeFlagBits import =
+ vk_sync_fence_import_types(sync_type);
+ VkExternalFenceHandleTypeFlagBits export =
+ vk_sync_fence_export_types(sync_type);
+
+ if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT) {
+ const struct vk_sync_type *opaque_sync_type =
+ get_fence_sync_type(pdevice, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT);
+
+ /* If we're a different vk_sync_type than the one selected when only
+ * OPAQUE_FD is set, then we can't import/export OPAQUE_FD. Put
+ * differently, there can only be one OPAQUE_FD sync type.
+ */
+ if (sync_type != opaque_sync_type) {
+ import &= ~VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;
+ export &= ~VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT;
+ }
+ }
+
+ VkExternalFenceHandleTypeFlags compatible = import & export;
+ VkExternalFenceFeatureFlags features = 0;
+ if (handle_type & export)
+ features |= VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
+ if (handle_type & import)
+ features |= VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
+
+ pExternalFenceProperties->exportFromImportedHandleTypes = export;
+ pExternalFenceProperties->compatibleHandleTypes = compatible;
+ pExternalFenceProperties->externalFenceFeatures = features;
+}
+
+#ifndef _WIN32
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_ImportFenceFdKHR(VkDevice _device,
+ const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_fence, fence, pImportFenceFdInfo->fence);
+
+ assert(pImportFenceFdInfo->sType ==
+ VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);
+
+ const int fd = pImportFenceFdInfo->fd;
+ const VkExternalFenceHandleTypeFlagBits handle_type =
+ pImportFenceFdInfo->handleType;
+
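+   /* A temporary import must not clobber the permanent payload, so create a
+    * fresh vk_sync to hold it; a permanent import goes straight into
+    * fence->permanent.
+    */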
+ struct vk_sync *temporary = NULL, *sync;
+ if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
+ const struct vk_sync_type *sync_type =
+ get_fence_sync_type(device->physical, handle_type);
+
+ VkResult result = vk_sync_create(device, sync_type, 0 /* flags */,
+ 0 /* initial_value */, &temporary);
+ if (result != VK_SUCCESS)
+ return result;
+
+ sync = temporary;
+ } else {
+ sync = &fence->permanent;
+ }
+ assert(handle_type & vk_sync_fence_handle_types(sync->type));
+
+ VkResult result;
+ switch (pImportFenceFdInfo->handleType) {
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ result = vk_sync_import_opaque_fd(device, sync, fd);
+ break;
+
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
+ result = vk_sync_import_sync_file(device, sync, fd);
+ break;
+
+ default:
+ result = vk_error(fence, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ }
+
+ if (result != VK_SUCCESS) {
+ if (temporary != NULL)
+ vk_sync_destroy(device, temporary);
+ return result;
+ }
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Importing a fence payload from a file descriptor transfers
+ * ownership of the file descriptor from the application to the
+ * Vulkan implementation. The application must not perform any
+ * operations on the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ if (fd != -1)
+ close(fd);
+
+ if (temporary) {
+ vk_fence_reset_temporary(device, fence);
+ fence->temporary = temporary;
+ }
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetFenceFdKHR(VkDevice _device,
+ const VkFenceGetFdInfoKHR *pGetFdInfo,
+ int *pFd)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_fence, fence, pGetFdInfo->fence);
+
+ assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);
+
+ struct vk_sync *sync = vk_fence_get_active_sync(fence);
+
+ VkResult result;
+ switch (pGetFdInfo->handleType) {
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ result = vk_sync_export_opaque_fd(device, sync, pFd);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ break;
+
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
+ /* There's no direct spec quote for this but the same rules as for
+ * semaphore export apply. We can't export a sync file from a fence
+ * if the fence event hasn't been submitted to the kernel yet.
+ */
+ if (vk_device_supports_threaded_submit(device)) {
+ result = vk_sync_wait(device, sync, 0,
+ VK_SYNC_WAIT_PENDING,
+ UINT64_MAX);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ result = vk_sync_export_sync_file(device, sync, pFd);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Export operations have the same transference as the specified
+ * handle type’s import operations. Additionally, exporting a fence
+ * payload to a handle with copy transference has the same side
+ * effects on the source fence’s payload as executing a fence reset
+ * operation."
+ *
+ * In other words, exporting a sync file also resets the fence. We
+ * only care about this for the permanent payload because the temporary
+ * payload will be destroyed below.
+ */
+ if (sync == &fence->permanent) {
+ result = vk_sync_reset(device, sync);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+ break;
+
+ default:
+ unreachable("Invalid fence export handle type");
+ }
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Export operations have the same transference as the specified
+ * handle type’s import operations. [...] If the fence was using a
+ * temporarily imported payload, the fence’s prior permanent payload
+ * will be restored.
+ */
+ vk_fence_reset_temporary(device, fence);
+
+ return VK_SUCCESS;
+}
+
+#endif /* !defined(_WIN32) */
diff --git a/src/vulkan/runtime/vk_fence.h b/src/vulkan/runtime/vk_fence.h
new file mode 100644
index 00000000000..12cb1ab315a
--- /dev/null
+++ b/src/vulkan/runtime/vk_fence.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_FENCE_H
+#define VK_FENCE_H
+
+#include "vk_object.h"
+#include "vk_sync.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_sync;
+
+struct vk_fence {
+ struct vk_object_base base;
+
+ /* Temporary fence state.
+ *
+ * A fence *may* have temporary state. That state is added to the fence by
+ * an import operation and is reset back to NULL when the fence is reset.
+ * A fence with temporary state cannot be signaled because the fence must
+ * already be signaled before the temporary state can be exported from the
+ * fence in the other process and imported here.
+ */
+ struct vk_sync *temporary;
+
+ /** Permanent fence state.
+ *
+ * Every fence has some form of permanent state.
+ *
+ * This field must be last
+ */
+ alignas(8) struct vk_sync permanent;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_fence, base, VkFence,
+ VK_OBJECT_TYPE_FENCE);
+
+VkResult vk_fence_create(struct vk_device *device,
+ const VkFenceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ struct vk_fence **fence_out);
+
+void vk_fence_destroy(struct vk_device *device,
+ struct vk_fence *fence,
+ const VkAllocationCallbacks *pAllocator);
+
+void vk_fence_reset_temporary(struct vk_device *device,
+ struct vk_fence *fence);
+
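+/* Returns the payload that currently backs the fence: the temporary payload
+ * if one has been imported, otherwise the permanent payload.
+ */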
+static inline struct vk_sync *
+vk_fence_get_active_sync(struct vk_fence *fence)
+{
+ return fence->temporary ? fence->temporary : &fence->permanent;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_FENCE_H */
diff --git a/src/vulkan/runtime/vk_format_info_gen.py b/src/vulkan/runtime/vk_format_info_gen.py
new file mode 100644
index 00000000000..29cb4ebe65f
--- /dev/null
+++ b/src/vulkan/runtime/vk_format_info_gen.py
@@ -0,0 +1,245 @@
+COPYRIGHT=u"""
+/* Copyright © 2022 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+import os
+import re
+from collections import namedtuple
+import xml.etree.ElementTree as et
+
+from mako.template import Template
+
+TEMPLATE_H = Template(COPYRIGHT + """\
+/* This file is generated from ${filename}, don't edit directly. */
+
+#ifndef VK_FORMAT_INFO_H
+#define VK_FORMAT_INFO_H
+
+#include <vulkan/vulkan_core.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum vk_format_class {
+ MESA_VK_FORMAT_CLASS_UNKNOWN,
+% for name in format_classes:
+ ${to_enum_name('MESA_VK_FORMAT_CLASS_', name)},
+% endfor
+};
+
+struct vk_format_class_info {
+ const VkFormat *formats;
+ uint32_t format_count;
+};
+
+const struct vk_format_class_info *
+vk_format_class_get_info(enum vk_format_class class);
+
+const struct vk_format_class_info *
+vk_format_get_class_info(VkFormat format);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+""")
+
+TEMPLATE_C = Template(COPYRIGHT + """
+/* This file is generated from ${filename}, don't edit directly. */
+
+#include "${header}"
+
+#include "util/macros.h"
+
+#include "vk_format.h"
+
+struct vk_format_info {
+ enum vk_format_class class;
+};
+
+% for id, ext in extensions.items():
+static const struct vk_format_info ext${id}_format_infos[] = {
+% for name, format in ext.formats.items():
+ [${format.offset}] = {
+ .class = ${to_enum_name('MESA_VK_FORMAT_CLASS_', format.cls)},
+ },
+% endfor
+};
+
+% endfor
+static const struct vk_format_info *
+vk_format_get_info(VkFormat format)
+{
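+   /* Formats added by extensions follow the standard Vulkan extension enum
+    * encoding: 1000000000 + (extension_number - 1) * 1000 + offset.  Core
+    * formats (values below 1000000000) are treated here as extension 0.
+    */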
+ uint32_t extnumber =
+ format < 1000000000 ? 0 : (((format % 1000000000) / 1000) + 1);
+ uint32_t offset = format % 1000;
+
+ switch (extnumber) {
+% for id, ext in extensions.items():
+ case ${id}:
+ assert(offset < ARRAY_SIZE(ext${id}_format_infos));
+ return &ext${id}_format_infos[offset];
+% endfor
+ default:
+ unreachable("Invalid extension");
+ }
+}
+
+% for clsname, cls in format_classes.items():
+% if len(cls.formats) > 0:
+static const VkFormat ${to_enum_name('MESA_VK_FORMAT_CLASS_', clsname).lower() + '_formats'}[] = {
+% for fname in cls.formats:
+ ${fname},
+% endfor
+};
+% endif
+
+% endfor
+static const struct vk_format_class_info class_infos[] = {
+% for clsname, cls in format_classes.items():
+ [${to_enum_name('MESA_VK_FORMAT_CLASS_', clsname)}] = {
+% if len(cls.formats) > 0:
+ .formats = ${to_enum_name('MESA_VK_FORMAT_CLASS_', clsname).lower() + '_formats'},
+ .format_count = ARRAY_SIZE(${to_enum_name('MESA_VK_FORMAT_CLASS_', clsname).lower() + '_formats'}),
+% else:
+ 0
+% endif
+ },
+% endfor
+};
+
+const struct vk_format_class_info *
+vk_format_class_get_info(enum vk_format_class class)
+{
+ assert(class < ARRAY_SIZE(class_infos));
+ return &class_infos[class];
+}
+
+const struct vk_format_class_info *
+vk_format_get_class_info(VkFormat format)
+{
+ const struct vk_format_info *format_info = vk_format_get_info(format);
+ return &class_infos[format_info->class];
+}
+""")
+
+def to_enum_name(prefix, name):
+ return "%s" % prefix + re.sub('([^A-Za-z0-9_])', '_', name).upper()
+
+Format = namedtuple('Format', ['name', 'cls', 'ext', 'offset'])
+FormatClass = namedtuple('FormatClass', ['name', 'formats'])
+Extension = namedtuple('Extension', ['id', 'formats'])
+
+def get_formats(doc):
+ """Extract the formats from the registry."""
+ formats = {}
+
+ for fmt in doc.findall('./formats/format'):
+ xpath = './/enum[@name="{}"]'.format(fmt.attrib['name'])
+ enum = doc.find(xpath)
+ ext = None
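+        # Core formats carry a literal 'value'.  Extension formats either
+        # name their extension with 'extnumber' or inherit it from the
+        # enclosing <extension> element, which we find by walking up the
+        # tree with successive '/..' xpath steps.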
+ if 'extends' in enum.attrib:
+ assert(enum.attrib['extends'] == 'VkFormat')
+ if 'extnumber' in enum.attrib:
+ ext = int(enum.attrib['extnumber'])
+ else:
+ xpath = xpath + '/..'
+ parent = doc.find(xpath)
+ while parent != None and ext == None:
+ if parent.tag == 'extension':
+ assert('number' in parent.attrib)
+ ext = parent.attrib['number']
+ xpath = xpath + '/..'
+ parent = doc.find(xpath)
+ offset = int(enum.attrib['offset'])
+ else:
+ ext = 0
+ offset = int(enum.attrib['value'])
+
+ assert(ext != None)
+ format = Format(fmt.attrib['name'], fmt.attrib['class'], ext, offset)
+ formats[format.name] = format
+
+ return formats
+
+def get_formats_from_xml(xml_files):
+ formats = {}
+
+ for filename in xml_files:
+ doc = et.parse(filename)
+ formats.update(get_formats(doc))
+
+ return formats
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--out-c', required=True, help='Output C file.')
+ parser.add_argument('--out-h', required=True, help='Output H file.')
+ parser.add_argument('--xml',
+ help='Vulkan API XML file.',
+ required=True, action='append', dest='xml_files')
+ args = parser.parse_args()
+
+ formats = get_formats_from_xml(args.xml_files)
+ classes = {}
+ extensions = {}
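+    # Group every format by its format class (for the class -> format tables)
+    # and by the extension that introduced it (for the per-extension info
+    # arrays indexed by enum offset).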
+ for n, f in formats.items():
+ if f.cls not in classes:
+ classes[f.cls] = FormatClass(f.cls, {})
+ classes[f.cls].formats[f.name] = f
+ if f.ext not in extensions:
+            extensions[f.ext] = Extension(f.ext, {})
+ extensions[f.ext].formats[f.name] = f
+
+ assert os.path.dirname(args.out_c) == os.path.dirname(args.out_h)
+
+ environment = {
+ 'header': os.path.basename(args.out_h),
+ 'formats': formats,
+ 'format_classes': classes,
+ 'extensions': extensions,
+ 'filename': os.path.basename(__file__),
+ 'to_enum_name': to_enum_name,
+ }
+
+ try:
+ with open(args.out_h, 'w', encoding='utf-8') as f:
+ guard = os.path.basename(args.out_h).replace('.', '_').upper()
+ f.write(TEMPLATE_H.render(guard=guard, **environment))
+ with open(args.out_c, 'w', encoding='utf-8') as f:
+ f.write(TEMPLATE_C.render(**environment))
+ except Exception:
+ # In the event there's an error, this imports some helpers from mako
+ # to print a useful stack trace and prints it, then exits with
+ # status 1, if python is run with debug; otherwise it just raises
+ # the exception
+ import sys
+ from mako import exceptions
+ print(exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/src/vulkan/runtime/vk_framebuffer.c b/src/vulkan/runtime/vk_framebuffer.c
new file mode 100644
index 00000000000..f28dce1ffb1
--- /dev/null
+++ b/src/vulkan/runtime/vk_framebuffer.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_framebuffer.h"
+
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateFramebuffer(VkDevice _device,
+ const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFramebuffer *pFramebuffer)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_framebuffer *framebuffer;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
+
+ size_t size = sizeof(*framebuffer);
+
+ /* VK_KHR_imageless_framebuffer extension says:
+ *
+ * If flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT,
+ * parameter pAttachments is ignored.
+ */
+ if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT))
+ size += sizeof(VkImageView) * pCreateInfo->attachmentCount;
+
+ framebuffer = vk_object_alloc(device, pAllocator, size,
+ VK_OBJECT_TYPE_FRAMEBUFFER);
+ if (framebuffer == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ framebuffer->flags = pCreateInfo->flags;
+ framebuffer->width = pCreateInfo->width;
+ framebuffer->height = pCreateInfo->height;
+ framebuffer->layers = pCreateInfo->layers;
+
+ if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) {
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++)
+ framebuffer->attachments[i] = pCreateInfo->pAttachments[i];
+ framebuffer->attachment_count = pCreateInfo->attachmentCount;
+ }
+
+ *pFramebuffer = vk_framebuffer_to_handle(framebuffer);
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyFramebuffer(VkDevice _device,
+ VkFramebuffer _framebuffer,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_framebuffer, framebuffer, _framebuffer);
+
+ if (!framebuffer)
+ return;
+
+ vk_object_free(device, pAllocator, framebuffer);
+}
diff --git a/src/vulkan/wsi/wsi_common_x11.h b/src/vulkan/runtime/vk_framebuffer.h
index e4b1e94a8c8..a0f4b61a797 100644
--- a/src/vulkan/wsi/wsi_common_x11.h
+++ b/src/vulkan/runtime/vk_framebuffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2015 Intel Corporation
+ * Copyright © 2021 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,22 +20,35 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef WSI_COMMON_X11_H
-#define WSI_COMMON_X11_H
+#ifndef VK_FRAMEBUFFER_H
+#define VK_FRAMEBUFFER_H
-#include "wsi_common.h"
+#include "vk_object.h"
-VkBool32 wsi_get_physical_device_xcb_presentation_support(
- struct wsi_device *wsi_device,
- uint32_t queueFamilyIndex,
- xcb_connection_t* connection,
- xcb_visualid_t visual_id);
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_framebuffer {
+ struct vk_object_base base;
+
+ /** VkFramebufferCreateInfo::flags */
+ VkFramebufferCreateFlags flags;
+
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
-VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
- const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface);
+ uint32_t attachment_count;
+ VkImageView attachments[];
+};
-VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
- const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface);
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_framebuffer, base, VkFramebuffer,
+ VK_OBJECT_TYPE_FRAMEBUFFER)
+
+#ifdef __cplusplus
+}
#endif
+
+#endif /* VK_FRAMEBUFFER_H */
+
diff --git a/src/vulkan/runtime/vk_graphics_state.c b/src/vulkan/runtime/vk_graphics_state.c
new file mode 100644
index 00000000000..3f875a33d50
--- /dev/null
+++ b/src/vulkan/runtime/vk_graphics_state.c
@@ -0,0 +1,3280 @@
+#include "vk_graphics_state.h"
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+#include "vk_pipeline.h"
+#include "vk_render_pass.h"
+#include "vk_standard_sample_locations.h"
+#include "vk_util.h"
+
+#include <assert.h>
+
+enum mesa_vk_graphics_state_groups {
+ MESA_VK_GRAPHICS_STATE_VERTEX_INPUT_BIT = (1 << 0),
+ MESA_VK_GRAPHICS_STATE_INPUT_ASSEMBLY_BIT = (1 << 1),
+ MESA_VK_GRAPHICS_STATE_TESSELLATION_BIT = (1 << 2),
+ MESA_VK_GRAPHICS_STATE_VIEWPORT_BIT = (1 << 3),
+ MESA_VK_GRAPHICS_STATE_DISCARD_RECTANGLES_BIT = (1 << 4),
+ MESA_VK_GRAPHICS_STATE_RASTERIZATION_BIT = (1 << 5),
+ MESA_VK_GRAPHICS_STATE_FRAGMENT_SHADING_RATE_BIT = (1 << 6),
+ MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT = (1 << 7),
+ MESA_VK_GRAPHICS_STATE_DEPTH_STENCIL_BIT = (1 << 8),
+ MESA_VK_GRAPHICS_STATE_COLOR_BLEND_BIT = (1 << 9),
+ MESA_VK_GRAPHICS_STATE_INPUT_ATTACHMENT_MAP_BIT = (1 << 10),
+ MESA_VK_GRAPHICS_STATE_COLOR_ATTACHMENT_MAP_BIT = (1 << 11),
+ MESA_VK_GRAPHICS_STATE_RENDER_PASS_BIT = (1 << 12),
+};
+
+static void
+clear_all_dynamic_state(BITSET_WORD *dynamic)
+{
+ /* Clear the whole array so there are no undefined bits at the top */
+ memset(dynamic, 0, sizeof(*dynamic) *
+ BITSET_WORDS(MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX));
+}
+
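+/* Expand a mask of state groups into the individual per-state dynamic bits;
+ * fully_dynamic_state_groups() below performs the reverse mapping.
+ */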
+static void
+get_dynamic_state_groups(BITSET_WORD *dynamic,
+ enum mesa_vk_graphics_state_groups groups)
+{
+ clear_all_dynamic_state(dynamic);
+
+ if (groups & MESA_VK_GRAPHICS_STATE_VERTEX_INPUT_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VI);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VI_BINDINGS_VALID);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VI_BINDING_STRIDES);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_INPUT_ASSEMBLY_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_TESSELLATION_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_TS_PATCH_CONTROL_POINTS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_VIEWPORT_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VP_VIEWPORT_COUNT);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VP_VIEWPORTS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VP_SCISSOR_COUNT);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VP_SCISSORS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_DISCARD_RECTANGLES_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DR_RECTANGLES);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DR_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DR_MODE);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_RASTERIZATION_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_RASTERIZER_DISCARD_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_DEPTH_CLAMP_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_DEPTH_CLIP_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_POLYGON_MODE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_CULL_MODE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_FRONT_FACE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_CONSERVATIVE_MODE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_RASTERIZATION_ORDER_AMD);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_PROVOKING_VERTEX);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_RASTERIZATION_STREAM);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_LINE_WIDTH);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_LINE_MODE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_LINE_STIPPLE_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RS_LINE_STIPPLE);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_FRAGMENT_SHADING_RATE_BIT)
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_FSR);
+
+ if (groups & MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_MASK);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_COVERAGE_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_ONE_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_DEPTH_STENCIL_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_BOUNDS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_OP);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_COLOR_BLEND_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_LOGIC_OP_ENABLE);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_LOGIC_OP);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_ATTACHMENT_COUNT);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_BLEND_ENABLES);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_WRITE_MASKS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS);
+ }
+
+ if (groups & MESA_VK_GRAPHICS_STATE_COLOR_ATTACHMENT_MAP_BIT)
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_COLOR_ATTACHMENT_MAP);
+
+ if (groups & MESA_VK_GRAPHICS_STATE_INPUT_ATTACHMENT_MAP_BIT)
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP);
+
+ if (groups & MESA_VK_GRAPHICS_STATE_RENDER_PASS_BIT) {
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_RP_ATTACHMENTS);
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_ATTACHMENT_FEEDBACK_LOOP_ENABLE);
+ }
+}
+
+static enum mesa_vk_graphics_state_groups
+fully_dynamic_state_groups(const BITSET_WORD *dynamic)
+{
+ enum mesa_vk_graphics_state_groups groups = 0;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_VI) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_VI_BINDING_STRIDES) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_VI_BINDINGS_VALID))
+ groups |= MESA_VK_GRAPHICS_STATE_VERTEX_INPUT_BIT;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_TS_PATCH_CONTROL_POINTS) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN))
+ groups |= MESA_VK_GRAPHICS_STATE_TESSELLATION_BIT;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_FSR))
+ groups |= MESA_VK_GRAPHICS_STATE_FRAGMENT_SHADING_RATE_BIT;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_ENABLE) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_BOUNDS) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_OP) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE))
+ groups |= MESA_VK_GRAPHICS_STATE_DEPTH_STENCIL_BIT;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_LOGIC_OP_ENABLE) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_LOGIC_OP) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_ATTACHMENT_COUNT) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_BLEND_ENABLES) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_WRITE_MASKS) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS))
+ groups |= MESA_VK_GRAPHICS_STATE_COLOR_BLEND_BIT;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_COLOR_ATTACHMENT_MAP))
+ groups |= MESA_VK_GRAPHICS_STATE_COLOR_ATTACHMENT_MAP_BIT;
+
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP))
+ groups |= MESA_VK_GRAPHICS_STATE_INPUT_ATTACHMENT_MAP_BIT;
+
+ return groups;
+}
+
+static void
+validate_dynamic_state_groups(const BITSET_WORD *dynamic,
+ enum mesa_vk_graphics_state_groups groups)
+{
+#ifndef NDEBUG
+ BITSET_DECLARE(all_dynamic, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ get_dynamic_state_groups(all_dynamic, groups);
+
+ for (uint32_t w = 0; w < ARRAY_SIZE(all_dynamic); w++)
+ assert(!(dynamic[w] & ~all_dynamic[w]));
+#endif
+}
+
+void
+vk_get_dynamic_graphics_states(BITSET_WORD *dynamic,
+ const VkPipelineDynamicStateCreateInfo *info)
+{
+ clear_all_dynamic_state(dynamic);
+
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * "pDynamicState is a pointer to a VkPipelineDynamicStateCreateInfo
+ * structure defining which properties of the pipeline state object are
+ * dynamic and can be changed independently of the pipeline state. This
+ * can be NULL, which means no state in the pipeline is considered
+ * dynamic."
+ */
+ if (info == NULL)
+ return;
+
+#define CASE(VK, MESA) \
+ case VK_DYNAMIC_STATE_##VK: \
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_##MESA); \
+ break;
+
+#define CASE2(VK, MESA1, MESA2) \
+ case VK_DYNAMIC_STATE_##VK: \
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_##MESA1); \
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_##MESA2); \
+ break;
+
+#define CASE3(VK, MESA1, MESA2, MESA3) \
+ case VK_DYNAMIC_STATE_##VK: \
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_##MESA1); \
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_##MESA2); \
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_##MESA3); \
+ break;
+
+ for (uint32_t i = 0; i < info->dynamicStateCount; i++) {
+ switch (info->pDynamicStates[i]) {
+ CASE3(VERTEX_INPUT_EXT, VI, VI_BINDINGS_VALID, VI_BINDING_STRIDES)
+ CASE( VERTEX_INPUT_BINDING_STRIDE, VI_BINDING_STRIDES)
+ CASE( VIEWPORT, VP_VIEWPORTS)
+ CASE( SCISSOR, VP_SCISSORS)
+ CASE( LINE_WIDTH, RS_LINE_WIDTH)
+ CASE( DEPTH_BIAS, RS_DEPTH_BIAS_FACTORS)
+ CASE( BLEND_CONSTANTS, CB_BLEND_CONSTANTS)
+ CASE( DEPTH_BOUNDS, DS_DEPTH_BOUNDS_TEST_BOUNDS)
+ CASE( STENCIL_COMPARE_MASK, DS_STENCIL_COMPARE_MASK)
+ CASE( STENCIL_WRITE_MASK, DS_STENCIL_WRITE_MASK)
+ CASE( STENCIL_REFERENCE, DS_STENCIL_REFERENCE)
+ CASE( CULL_MODE, RS_CULL_MODE)
+ CASE( FRONT_FACE, RS_FRONT_FACE)
+ CASE( PRIMITIVE_TOPOLOGY, IA_PRIMITIVE_TOPOLOGY)
+ CASE2(VIEWPORT_WITH_COUNT, VP_VIEWPORT_COUNT, VP_VIEWPORTS)
+ CASE2(SCISSOR_WITH_COUNT, VP_SCISSOR_COUNT, VP_SCISSORS)
+ CASE( DEPTH_TEST_ENABLE, DS_DEPTH_TEST_ENABLE)
+ CASE( DEPTH_WRITE_ENABLE, DS_DEPTH_WRITE_ENABLE)
+ CASE( DEPTH_COMPARE_OP, DS_DEPTH_COMPARE_OP)
+ CASE( DEPTH_BOUNDS_TEST_ENABLE, DS_DEPTH_BOUNDS_TEST_ENABLE)
+ CASE( STENCIL_TEST_ENABLE, DS_STENCIL_TEST_ENABLE)
+ CASE( STENCIL_OP, DS_STENCIL_OP)
+ CASE( RASTERIZER_DISCARD_ENABLE, RS_RASTERIZER_DISCARD_ENABLE)
+ CASE( DEPTH_BIAS_ENABLE, RS_DEPTH_BIAS_ENABLE)
+ CASE( PRIMITIVE_RESTART_ENABLE, IA_PRIMITIVE_RESTART_ENABLE)
+ CASE( DISCARD_RECTANGLE_EXT, DR_RECTANGLES)
+ CASE( DISCARD_RECTANGLE_ENABLE_EXT, DR_ENABLE)
+ CASE( DISCARD_RECTANGLE_MODE_EXT, DR_MODE)
+ CASE( SAMPLE_LOCATIONS_EXT, MS_SAMPLE_LOCATIONS)
+ CASE( FRAGMENT_SHADING_RATE_KHR, FSR)
+ CASE( LINE_STIPPLE_EXT, RS_LINE_STIPPLE)
+ CASE( PATCH_CONTROL_POINTS_EXT, TS_PATCH_CONTROL_POINTS)
+ CASE( LOGIC_OP_EXT, CB_LOGIC_OP)
+ CASE( COLOR_WRITE_ENABLE_EXT, CB_COLOR_WRITE_ENABLES)
+ CASE( TESSELLATION_DOMAIN_ORIGIN_EXT, TS_DOMAIN_ORIGIN)
+ CASE( DEPTH_CLAMP_ENABLE_EXT, RS_DEPTH_CLAMP_ENABLE)
+ CASE( POLYGON_MODE_EXT, RS_POLYGON_MODE)
+ CASE( RASTERIZATION_SAMPLES_EXT, MS_RASTERIZATION_SAMPLES)
+ CASE( SAMPLE_MASK_EXT, MS_SAMPLE_MASK)
+ CASE( ALPHA_TO_COVERAGE_ENABLE_EXT, MS_ALPHA_TO_COVERAGE_ENABLE)
+ CASE( ALPHA_TO_ONE_ENABLE_EXT, MS_ALPHA_TO_ONE_ENABLE)
+ CASE( LOGIC_OP_ENABLE_EXT, CB_LOGIC_OP_ENABLE)
+ CASE( COLOR_BLEND_ENABLE_EXT, CB_BLEND_ENABLES)
+ CASE( COLOR_BLEND_EQUATION_EXT, CB_BLEND_EQUATIONS)
+ CASE( COLOR_WRITE_MASK_EXT, CB_WRITE_MASKS)
+ CASE( RASTERIZATION_STREAM_EXT, RS_RASTERIZATION_STREAM)
+ CASE( CONSERVATIVE_RASTERIZATION_MODE_EXT, RS_CONSERVATIVE_MODE)
+ CASE( DEPTH_CLIP_ENABLE_EXT, RS_DEPTH_CLIP_ENABLE)
+ CASE( SAMPLE_LOCATIONS_ENABLE_EXT, MS_SAMPLE_LOCATIONS_ENABLE)
+ CASE( PROVOKING_VERTEX_MODE_EXT, RS_PROVOKING_VERTEX)
+ CASE( LINE_RASTERIZATION_MODE_EXT, RS_LINE_MODE)
+ CASE( LINE_STIPPLE_ENABLE_EXT, RS_LINE_STIPPLE_ENABLE)
+ CASE( DEPTH_CLIP_NEGATIVE_ONE_TO_ONE_EXT, VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE)
+ CASE( ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT, ATTACHMENT_FEEDBACK_LOOP_ENABLE)
+ default:
+ unreachable("Unsupported dynamic graphics state");
+ }
+ }
+
+   /* attachmentCount is ignored if all of the states using it are dynamic.
+ *
+ * TODO: Handle advanced blending here when supported.
+ */
+ if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_BLEND_ENABLES) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS) &&
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_CB_WRITE_MASKS))
+ BITSET_SET(dynamic, MESA_VK_DYNAMIC_CB_ATTACHMENT_COUNT);
+}
+
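+/* Convenience macros for the state-init helpers below.  Both expect a
+ * BITSET_WORD array to be in scope: "dynamic" holds the pipeline's dynamic
+ * states, "needed" holds the states that should be filled into the
+ * destination vk_dynamic_graphics_state.
+ */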
+#define IS_DYNAMIC(STATE) \
+ BITSET_TEST(dynamic, MESA_VK_DYNAMIC_##STATE)
+
+#define IS_NEEDED(STATE) \
+ BITSET_TEST(needed, MESA_VK_DYNAMIC_##STATE)
+
+static void
+vk_vertex_input_state_init(struct vk_vertex_input_state *vi,
+ const BITSET_WORD *dynamic,
+ const VkPipelineVertexInputStateCreateInfo *vi_info)
+{
+ assert(!IS_DYNAMIC(VI));
+
+ memset(vi, 0, sizeof(*vi));
+ if (!vi_info)
+ return;
+
+ for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
+ const VkVertexInputBindingDescription *desc =
+ &vi_info->pVertexBindingDescriptions[i];
+
+ assert(desc->binding < MESA_VK_MAX_VERTEX_BINDINGS);
+ assert(desc->stride <= MESA_VK_MAX_VERTEX_BINDING_STRIDE);
+ assert(desc->inputRate <= 1);
+
+ const uint32_t b = desc->binding;
+ vi->bindings_valid |= BITFIELD_BIT(b);
+ vi->bindings[b].stride = desc->stride;
+ vi->bindings[b].input_rate = desc->inputRate;
+ vi->bindings[b].divisor = 1;
+ }
+
+ for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
+ const VkVertexInputAttributeDescription *desc =
+ &vi_info->pVertexAttributeDescriptions[i];
+
+ assert(desc->location < MESA_VK_MAX_VERTEX_ATTRIBUTES);
+ assert(desc->binding < MESA_VK_MAX_VERTEX_BINDINGS);
+ assert(vi->bindings_valid & BITFIELD_BIT(desc->binding));
+
+ const uint32_t a = desc->location;
+ vi->attributes_valid |= BITFIELD_BIT(a);
+ vi->attributes[a].binding = desc->binding;
+ vi->attributes[a].format = desc->format;
+ vi->attributes[a].offset = desc->offset;
+ }
+
+ const VkPipelineVertexInputDivisorStateCreateInfoKHR *vi_div_state =
+ vk_find_struct_const(vi_info->pNext,
+ PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR);
+ if (vi_div_state) {
+ for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
+ const VkVertexInputBindingDivisorDescriptionKHR *desc =
+ &vi_div_state->pVertexBindingDivisors[i];
+
+ assert(desc->binding < MESA_VK_MAX_VERTEX_BINDINGS);
+ assert(vi->bindings_valid & BITFIELD_BIT(desc->binding));
+
+ const uint32_t b = desc->binding;
+ vi->bindings[b].divisor = desc->divisor;
+ }
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_vi(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_vertex_input_state *vi)
+{
+ if (IS_NEEDED(VI))
+ *dst->vi = *vi;
+
+ if (IS_NEEDED(VI_BINDINGS_VALID))
+ dst->vi_bindings_valid = vi->bindings_valid;
+
+ if (IS_NEEDED(VI_BINDING_STRIDES)) {
+ for (uint32_t b = 0; b < MESA_VK_MAX_VERTEX_BINDINGS; b++) {
+ if (vi->bindings_valid & BITFIELD_BIT(b))
+ dst->vi_binding_strides[b] = vi->bindings[b].stride;
+ else
+ dst->vi_binding_strides[b] = 0;
+ }
+ }
+}
+
+static void
+vk_input_assembly_state_init(struct vk_input_assembly_state *ia,
+ const BITSET_WORD *dynamic,
+ const VkPipelineInputAssemblyStateCreateInfo *ia_info)
+{
+ memset(ia, 0, sizeof(*ia));
+ if (!ia_info)
+ return;
+
+ /* From the Vulkan 1.3.224 spec:
+ *
+ * "VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY specifies that the topology
+ * state in VkPipelineInputAssemblyStateCreateInfo only specifies the
+ * topology class, and the specific topology order and adjacency must be
+ * set dynamically with vkCmdSetPrimitiveTopology before any drawing
+ * commands."
+ */
+ assert(ia_info->topology <= UINT8_MAX);
+ ia->primitive_topology = ia_info->topology;
+
+ ia->primitive_restart_enable = ia_info->primitiveRestartEnable;
+}
+
+static void
+vk_dynamic_graphics_state_init_ia(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_input_assembly_state *ia)
+{
+ dst->ia = *ia;
+}
+
+static void
+vk_tessellation_state_init(struct vk_tessellation_state *ts,
+ const BITSET_WORD *dynamic,
+ const VkPipelineTessellationStateCreateInfo *ts_info)
+{
+ *ts = (struct vk_tessellation_state) {
+ .domain_origin = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+ };
+ if (!ts_info)
+ return;
+
+ if (!IS_DYNAMIC(TS_PATCH_CONTROL_POINTS)) {
+ assert(ts_info->patchControlPoints <= UINT8_MAX);
+ ts->patch_control_points = ts_info->patchControlPoints;
+ }
+
+ if (!IS_DYNAMIC(TS_DOMAIN_ORIGIN)) {
+ const VkPipelineTessellationDomainOriginStateCreateInfo *ts_do_info =
+ vk_find_struct_const(ts_info->pNext,
+ PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
+ if (ts_do_info != NULL) {
+ assert(ts_do_info->domainOrigin <= UINT8_MAX);
+ ts->domain_origin = ts_do_info->domainOrigin;
+ }
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_ts(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_tessellation_state *ts)
+{
+ dst->ts = *ts;
+}
+
+static void
+vk_viewport_state_init(struct vk_viewport_state *vp,
+ const BITSET_WORD *dynamic,
+ const VkPipelineViewportStateCreateInfo *vp_info)
+{
+ memset(vp, 0, sizeof(*vp));
+ if (!vp_info)
+ return;
+
+ if (!IS_DYNAMIC(VP_VIEWPORT_COUNT)) {
+ assert(vp_info->viewportCount <= MESA_VK_MAX_VIEWPORTS);
+ vp->viewport_count = vp_info->viewportCount;
+ }
+
+ if (!IS_DYNAMIC(VP_VIEWPORTS)) {
+ assert(!IS_DYNAMIC(VP_VIEWPORT_COUNT));
+ typed_memcpy(vp->viewports, vp_info->pViewports,
+ vp_info->viewportCount);
+ }
+
+ if (!IS_DYNAMIC(VP_SCISSOR_COUNT)) {
+ assert(vp_info->scissorCount <= MESA_VK_MAX_SCISSORS);
+ vp->scissor_count = vp_info->scissorCount;
+ }
+
+ if (!IS_DYNAMIC(VP_SCISSORS)) {
+ assert(!IS_DYNAMIC(VP_SCISSOR_COUNT));
+ typed_memcpy(vp->scissors, vp_info->pScissors,
+ vp_info->scissorCount);
+ }
+
+ if (!IS_DYNAMIC(VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE)) {
+ const VkPipelineViewportDepthClipControlCreateInfoEXT *vp_dcc_info =
+ vk_find_struct_const(vp_info->pNext,
+ PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT);
+ if (vp_dcc_info != NULL)
+ vp->depth_clip_negative_one_to_one = vp_dcc_info->negativeOneToOne;
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_vp(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_viewport_state *vp)
+{
+ dst->vp.viewport_count = vp->viewport_count;
+ if (IS_NEEDED(VP_VIEWPORTS))
+ typed_memcpy(dst->vp.viewports, vp->viewports, vp->viewport_count);
+
+ dst->vp.scissor_count = vp->scissor_count;
+ if (IS_NEEDED(VP_SCISSORS))
+ typed_memcpy(dst->vp.scissors, vp->scissors, vp->scissor_count);
+
+ dst->vp.depth_clip_negative_one_to_one = vp->depth_clip_negative_one_to_one;
+}
+
+static void
+vk_discard_rectangles_state_init(struct vk_discard_rectangles_state *dr,
+ const BITSET_WORD *dynamic,
+ const VkPipelineDiscardRectangleStateCreateInfoEXT *dr_info)
+{
+ memset(dr, 0, sizeof(*dr));
+
+ if (dr_info == NULL)
+ return;
+
+ assert(dr_info->discardRectangleCount <= MESA_VK_MAX_DISCARD_RECTANGLES);
+ dr->mode = dr_info->discardRectangleMode;
+ dr->rectangle_count = dr_info->discardRectangleCount;
+
+ if (!IS_DYNAMIC(DR_RECTANGLES)) {
+ typed_memcpy(dr->rectangles, dr_info->pDiscardRectangles,
+ dr_info->discardRectangleCount);
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_dr(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_discard_rectangles_state *dr)
+{
+ dst->dr.enable = dr->rectangle_count > 0;
+ dst->dr.mode = dr->mode;
+ dst->dr.rectangle_count = dr->rectangle_count;
+ typed_memcpy(dst->dr.rectangles, dr->rectangles, dr->rectangle_count);
+}
+
+static void
+vk_rasterization_state_init(struct vk_rasterization_state *rs,
+ const BITSET_WORD *dynamic,
+ const VkPipelineRasterizationStateCreateInfo *rs_info)
+{
+ *rs = (struct vk_rasterization_state) {
+ .rasterizer_discard_enable = false,
+ .conservative_mode = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT,
+ .extra_primitive_overestimation_size = 0.0f,
+ .rasterization_order_amd = VK_RASTERIZATION_ORDER_STRICT_AMD,
+ .provoking_vertex = VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT,
+ .line.mode = VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR,
+      .depth_clip_enable = IS_DYNAMIC(RS_DEPTH_CLAMP_ENABLE) ?
+                           VK_MESA_DEPTH_CLIP_ENABLE_NOT_CLAMP :
+                           VK_MESA_DEPTH_CLIP_ENABLE_FALSE,
+ .depth_bias.representation = VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORMAT_EXT,
+ .depth_bias.exact = false,
+ };
+ if (!rs_info)
+ return;
+
+ if (!IS_DYNAMIC(RS_RASTERIZER_DISCARD_ENABLE))
+ rs->rasterizer_discard_enable = rs_info->rasterizerDiscardEnable;
+
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * "If VkPipelineRasterizationDepthClipStateCreateInfoEXT is present in
+ * the graphics pipeline state then depth clipping is disabled if
+ * VkPipelineRasterizationDepthClipStateCreateInfoEXT::depthClipEnable
+ * is VK_FALSE. Otherwise, if
+ * VkPipelineRasterizationDepthClipStateCreateInfoEXT is not present,
+ * depth clipping is disabled when
+    *    VkPipelineRasterizationStateCreateInfo::depthClampEnable is VK_TRUE."
+ */
+ if (!IS_DYNAMIC(RS_DEPTH_CLAMP_ENABLE)) {
+ rs->depth_clamp_enable = rs_info->depthClampEnable;
+ rs->depth_clip_enable = rs_info->depthClampEnable ?
+ VK_MESA_DEPTH_CLIP_ENABLE_FALSE :
+ VK_MESA_DEPTH_CLIP_ENABLE_TRUE;
+ }
+
+ rs->polygon_mode = rs_info->polygonMode;
+
+ rs->cull_mode = rs_info->cullMode;
+ rs->front_face = rs_info->frontFace;
+ rs->depth_bias.enable = rs_info->depthBiasEnable;
+ if ((rs_info->depthBiasEnable || IS_DYNAMIC(RS_DEPTH_BIAS_ENABLE)) &&
+ !IS_DYNAMIC(RS_DEPTH_BIAS_FACTORS)) {
+ rs->depth_bias.constant = rs_info->depthBiasConstantFactor;
+ rs->depth_bias.clamp = rs_info->depthBiasClamp;
+ rs->depth_bias.slope = rs_info->depthBiasSlopeFactor;
+ }
+ rs->line.width = rs_info->lineWidth;
+
+ vk_foreach_struct_const(ext, rs_info->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT: {
+ const VkPipelineRasterizationConservativeStateCreateInfoEXT *rcs_info =
+ (const VkPipelineRasterizationConservativeStateCreateInfoEXT *)ext;
+ rs->conservative_mode = rcs_info->conservativeRasterizationMode;
+ rs->extra_primitive_overestimation_size =
+ rcs_info->extraPrimitiveOverestimationSize;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT: {
+ const VkPipelineRasterizationDepthClipStateCreateInfoEXT *rdc_info =
+ (const VkPipelineRasterizationDepthClipStateCreateInfoEXT *)ext;
+ rs->depth_clip_enable = rdc_info->depthClipEnable ?
+ VK_MESA_DEPTH_CLIP_ENABLE_TRUE :
+ VK_MESA_DEPTH_CLIP_ENABLE_FALSE;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT: {
+ const VkPipelineRasterizationLineStateCreateInfoKHR *rl_info =
+ (const VkPipelineRasterizationLineStateCreateInfoKHR *)ext;
+ rs->line.mode = rl_info->lineRasterizationMode;
+ if (!IS_DYNAMIC(RS_LINE_STIPPLE_ENABLE))
+ rs->line.stipple.enable = rl_info->stippledLineEnable;
+ if ((IS_DYNAMIC(RS_LINE_STIPPLE_ENABLE) || rs->line.stipple.enable) && !IS_DYNAMIC(RS_LINE_STIPPLE)) {
+ rs->line.stipple.factor = rl_info->lineStippleFactor;
+ rs->line.stipple.pattern = rl_info->lineStipplePattern;
+ }
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT: {
+ const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *rpv_info =
+ (const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *)ext;
+ rs->provoking_vertex = rpv_info->provokingVertexMode;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD: {
+ const VkPipelineRasterizationStateRasterizationOrderAMD *rro_info =
+ (const VkPipelineRasterizationStateRasterizationOrderAMD *)ext;
+ rs->rasterization_order_amd = rro_info->rasterizationOrder;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT: {
+ const VkPipelineRasterizationStateStreamCreateInfoEXT *rss_info =
+ (const VkPipelineRasterizationStateStreamCreateInfoEXT *)ext;
+ rs->rasterization_stream = rss_info->rasterizationStream;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_DEPTH_BIAS_REPRESENTATION_INFO_EXT: {
+ const VkDepthBiasRepresentationInfoEXT *dbr_info =
+ (const VkDepthBiasRepresentationInfoEXT *)ext;
+ if (!IS_DYNAMIC(RS_DEPTH_BIAS_FACTORS)) {
+ rs->depth_bias.representation = dbr_info->depthBiasRepresentation;
+ rs->depth_bias.exact = dbr_info->depthBiasExact;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_rs(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_rasterization_state *rs)
+{
+ dst->rs = *rs;
+}
+
+static void
+vk_fragment_shading_rate_state_init(
+ struct vk_fragment_shading_rate_state *fsr,
+ const BITSET_WORD *dynamic,
+ const VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_info)
+{
+ if (fsr_info != NULL) {
+ fsr->fragment_size = fsr_info->fragmentSize;
+ fsr->combiner_ops[0] = fsr_info->combinerOps[0];
+ fsr->combiner_ops[1] = fsr_info->combinerOps[1];
+ } else {
+ fsr->fragment_size = (VkExtent2D) { 1, 1 };
+ fsr->combiner_ops[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR;
+ fsr->combiner_ops[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR;
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_fsr(
+ struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_fragment_shading_rate_state *fsr)
+{
+ dst->fsr = *fsr;
+}
+
+static void
+vk_sample_locations_state_init(struct vk_sample_locations_state *sl,
+ const VkSampleLocationsInfoEXT *sl_info)
+{
+ sl->per_pixel = sl_info->sampleLocationsPerPixel;
+ sl->grid_size = sl_info->sampleLocationGridSize;
+
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527
+ *
+ * "sampleLocationsCount must equal sampleLocationsPerPixel *
+ * sampleLocationGridSize.width * sampleLocationGridSize.height"
+ */
+ assert(sl_info->sampleLocationsCount ==
+ sl_info->sampleLocationsPerPixel *
+ sl_info->sampleLocationGridSize.width *
+ sl_info->sampleLocationGridSize.height);
+
+ assert(sl_info->sampleLocationsCount <= MESA_VK_MAX_SAMPLE_LOCATIONS);
+ typed_memcpy(sl->locations, sl_info->pSampleLocations,
+ sl_info->sampleLocationsCount);
+}
+
+static void
+vk_multisample_state_init(struct vk_multisample_state *ms,
+ const BITSET_WORD *dynamic,
+ const VkPipelineMultisampleStateCreateInfo *ms_info)
+{
+ memset(ms, 0, sizeof(*ms));
+ if (!ms_info)
+ return;
+
+ if (!IS_DYNAMIC(MS_RASTERIZATION_SAMPLES)) {
+ assert(ms_info->rasterizationSamples <= MESA_VK_MAX_SAMPLES);
+ ms->rasterization_samples = ms_info->rasterizationSamples;
+ }
+
+ ms->sample_shading_enable = ms_info->sampleShadingEnable;
+ ms->min_sample_shading = ms_info->minSampleShading;
+
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * "If pSampleMask is NULL, it is treated as if the mask has all bits
+ * set to 1."
+ */
+ ms->sample_mask = ms_info->pSampleMask ? *ms_info->pSampleMask : ~0;
+
+ ms->alpha_to_coverage_enable = ms_info->alphaToCoverageEnable;
+ ms->alpha_to_one_enable = ms_info->alphaToOneEnable;
+
+ /* These get filled in by vk_multisample_sample_locations_state_init() */
+ ms->sample_locations_enable = false;
+ ms->sample_locations = NULL;
+}
+
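+/* Whether we need to allocate and fill a vk_sample_locations_state: only
+ * when the locations themselves are static and they are either dynamically
+ * enabled or statically enabled in sl_info.
+ */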
+static bool
+needs_sample_locations_state(
+ const BITSET_WORD *dynamic,
+ const VkPipelineSampleLocationsStateCreateInfoEXT *sl_info)
+{
+ return !IS_DYNAMIC(MS_SAMPLE_LOCATIONS) &&
+ (IS_DYNAMIC(MS_SAMPLE_LOCATIONS_ENABLE) ||
+ (sl_info != NULL && sl_info->sampleLocationsEnable));
+}
+
+static void
+vk_multisample_sample_locations_state_init(
+ struct vk_multisample_state *ms,
+ struct vk_sample_locations_state *sl,
+ const BITSET_WORD *dynamic,
+ const VkPipelineMultisampleStateCreateInfo *ms_info,
+ const VkPipelineSampleLocationsStateCreateInfoEXT *sl_info)
+{
+ ms->sample_locations_enable =
+ IS_DYNAMIC(MS_SAMPLE_LOCATIONS_ENABLE) ||
+ (sl_info != NULL && sl_info->sampleLocationsEnable);
+
+ assert(ms->sample_locations == NULL);
+ if (!IS_DYNAMIC(MS_SAMPLE_LOCATIONS)) {
+ if (ms->sample_locations_enable) {
+ vk_sample_locations_state_init(sl, &sl_info->sampleLocationsInfo);
+ ms->sample_locations = sl;
+ } else if (!IS_DYNAMIC(MS_RASTERIZATION_SAMPLES)) {
+ /* Otherwise, pre-populate with the standard sample locations. If
+ * the driver doesn't support standard sample locations, it probably
+ * doesn't support custom locations either and can completely ignore
+ * this state.
+ */
+ ms->sample_locations =
+ vk_standard_sample_locations_state(ms_info->rasterizationSamples);
+ }
+ /* In the case that the rasterization samples are dynamic we cannot
+ * pre-populate with a specific set of standard sample locations
+ */
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_ms(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_multisample_state *ms)
+{
+ dst->ms.rasterization_samples = ms->rasterization_samples;
+ dst->ms.sample_mask = ms->sample_mask;
+ dst->ms.alpha_to_coverage_enable = ms->alpha_to_coverage_enable;
+ dst->ms.alpha_to_one_enable = ms->alpha_to_one_enable;
+ dst->ms.sample_locations_enable = ms->sample_locations_enable;
+
+ if (IS_NEEDED(MS_SAMPLE_LOCATIONS) && ms->sample_locations)
+ *dst->ms.sample_locations = *ms->sample_locations;
+}
+
+static void
+vk_stencil_test_face_state_init(struct vk_stencil_test_face_state *face,
+ const VkStencilOpState *info)
+{
+ face->op.fail = info->failOp;
+ face->op.pass = info->passOp;
+ face->op.depth_fail = info->depthFailOp;
+ face->op.compare = info->compareOp;
+ face->compare_mask = info->compareMask;
+ face->write_mask = info->writeMask;
+ face->reference = info->reference;
+}
+
+static void
+vk_depth_stencil_state_init(struct vk_depth_stencil_state *ds,
+ const BITSET_WORD *dynamic,
+ const VkPipelineDepthStencilStateCreateInfo *ds_info)
+{
+ *ds = (struct vk_depth_stencil_state) {
+ .stencil.write_enable = true,
+ };
+ if (!ds_info)
+ return;
+
+ ds->depth.test_enable = ds_info->depthTestEnable;
+ ds->depth.write_enable = ds_info->depthWriteEnable;
+ ds->depth.compare_op = ds_info->depthCompareOp;
+ ds->depth.bounds_test.enable = ds_info->depthBoundsTestEnable;
+ ds->depth.bounds_test.min = ds_info->minDepthBounds;
+ ds->depth.bounds_test.max = ds_info->maxDepthBounds;
+ ds->stencil.test_enable = ds_info->stencilTestEnable;
+ vk_stencil_test_face_state_init(&ds->stencil.front, &ds_info->front);
+ vk_stencil_test_face_state_init(&ds->stencil.back, &ds_info->back);
+}
+
+static void
+vk_dynamic_graphics_state_init_ds(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_depth_stencil_state *ds)
+{
+ dst->ds = *ds;
+}
+
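+/* Simplify the stencil ops for one face given the (possibly optimized)
+ * depth compare op.  Returns true if, after simplification, this face can
+ * still modify the stencil buffer (i.e. some op other than KEEP remains).
+ */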
+static bool
+optimize_stencil_face(struct vk_stencil_test_face_state *face,
+ VkCompareOp depthCompareOp,
+ bool consider_write_mask)
+{
+ /* If compareOp is ALWAYS then the stencil test will never fail and failOp
+ * will never happen. Set failOp to KEEP in this case.
+ */
+ if (face->op.compare == VK_COMPARE_OP_ALWAYS)
+ face->op.fail = VK_STENCIL_OP_KEEP;
+
+ /* If compareOp is NEVER or depthCompareOp is NEVER then one of the depth
+ * or stencil tests will fail and passOp will never happen.
+ */
+ if (face->op.compare == VK_COMPARE_OP_NEVER ||
+ depthCompareOp == VK_COMPARE_OP_NEVER)
+ face->op.pass = VK_STENCIL_OP_KEEP;
+
+ /* If compareOp is NEVER or depthCompareOp is ALWAYS then either the
+ * stencil test will fail or the depth test will pass. In either case,
+ * depthFailOp will never happen.
+ */
+ if (face->op.compare == VK_COMPARE_OP_NEVER ||
+ depthCompareOp == VK_COMPARE_OP_ALWAYS)
+ face->op.depth_fail = VK_STENCIL_OP_KEEP;
+
+ /* If the write mask is zero, nothing will be written to the stencil buffer
+ * so it's as if all operations are KEEP.
+ */
+ if (consider_write_mask && face->write_mask == 0) {
+ face->op.pass = VK_STENCIL_OP_KEEP;
+ face->op.fail = VK_STENCIL_OP_KEEP;
+ face->op.depth_fail = VK_STENCIL_OP_KEEP;
+ }
+
+ return face->op.fail != VK_STENCIL_OP_KEEP ||
+ face->op.depth_fail != VK_STENCIL_OP_KEEP ||
+ face->op.pass != VK_STENCIL_OP_KEEP;
+}
+
+void
+vk_optimize_depth_stencil_state(struct vk_depth_stencil_state *ds,
+ VkImageAspectFlags ds_aspects,
+ bool consider_write_mask)
+{
+ /* stencil.write_enable is a dummy right now that should always be true */
+ assert(ds->stencil.write_enable);
+
+ /* From the Vulkan 1.3.221 spec:
+ *
+ * "If there is no depth attachment then the depth test is skipped."
+ */
+ if (!(ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
+ ds->depth.test_enable = false;
+
+ /* From the Vulkan 1.3.221 spec:
+ *
+ * "...or if there is no stencil attachment, the coverage mask is
+ * unmodified by this operation."
+ */
+ if (!(ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT))
+ ds->stencil.test_enable = false;
+
+ /* If the depth test is disabled, we won't be writing anything. Make sure we
+ * treat the test as always passing later on as well.
+ */
+ if (!ds->depth.test_enable) {
+ ds->depth.write_enable = false;
+ ds->depth.compare_op = VK_COMPARE_OP_ALWAYS;
+ }
+
+ /* If the stencil test is disabled, we won't be writing anything. Make sure
+ * we treat the test as always passing later on as well.
+ */
+ if (!ds->stencil.test_enable) {
+ ds->stencil.write_enable = false;
+ ds->stencil.front.op.compare = VK_COMPARE_OP_ALWAYS;
+ ds->stencil.back.op.compare = VK_COMPARE_OP_ALWAYS;
+ }
+
+ /* If the stencil test is enabled and always fails, then we will never get
+ * to the depth test so we can just disable the depth test entirely.
+ */
+ if (ds->stencil.test_enable &&
+ ds->stencil.front.op.compare == VK_COMPARE_OP_NEVER &&
+ ds->stencil.back.op.compare == VK_COMPARE_OP_NEVER) {
+ ds->depth.test_enable = false;
+ ds->depth.write_enable = false;
+ }
+
+ /* If depthCompareOp is EQUAL then the value we would be writing to the
+ * depth buffer is the same as the value that's already there so there's no
+ * point in writing it.
+ */
+ if (ds->depth.compare_op == VK_COMPARE_OP_EQUAL)
+ ds->depth.write_enable = false;
+
+ /* If the stencil ops are such that we don't actually ever modify the
+ * stencil buffer, we should disable writes.
+ */
+ if (!optimize_stencil_face(&ds->stencil.front, ds->depth.compare_op,
+ consider_write_mask) &&
+ !optimize_stencil_face(&ds->stencil.back, ds->depth.compare_op,
+ consider_write_mask))
+ ds->stencil.write_enable = false;
+
+ /* If the depth test always passes and we never write out depth, that's the
+ * same as if the depth test is disabled entirely.
+ */
+ if (ds->depth.compare_op == VK_COMPARE_OP_ALWAYS && !ds->depth.write_enable)
+ ds->depth.test_enable = false;
+
+ /* If the stencil test always passes and we never write out stencil, that's
+ * the same as if the stencil test is disabled entirely.
+ */
+ if (ds->stencil.front.op.compare == VK_COMPARE_OP_ALWAYS &&
+ ds->stencil.back.op.compare == VK_COMPARE_OP_ALWAYS &&
+ !ds->stencil.write_enable)
+ ds->stencil.test_enable = false;
+}
+
+static void
+vk_color_blend_state_init(struct vk_color_blend_state *cb,
+ const BITSET_WORD *dynamic,
+ const VkPipelineColorBlendStateCreateInfo *cb_info)
+{
+ *cb = (struct vk_color_blend_state) {
+ .color_write_enables = BITFIELD_MASK(MESA_VK_MAX_COLOR_ATTACHMENTS),
+ };
+ if (!cb_info)
+ return;
+
+ cb->logic_op_enable = cb_info->logicOpEnable;
+ cb->logic_op = cb_info->logicOp;
+
+ assert(cb_info->attachmentCount <= MESA_VK_MAX_COLOR_ATTACHMENTS);
+ cb->attachment_count = cb_info->attachmentCount;
+   /* pAttachments is ignored if all of these are dynamic */
+ bool full_dynamic = IS_DYNAMIC(CB_BLEND_ENABLES) && IS_DYNAMIC(CB_BLEND_EQUATIONS) && IS_DYNAMIC(CB_WRITE_MASKS);
+ for (uint32_t a = 0; a < cb_info->attachmentCount; a++) {
+ const VkPipelineColorBlendAttachmentState *att = full_dynamic ? NULL : &cb_info->pAttachments[a];
+
+ cb->attachments[a] = (struct vk_color_blend_attachment_state) {
+ .blend_enable = IS_DYNAMIC(CB_BLEND_ENABLES) || att->blendEnable,
+ .src_color_blend_factor = IS_DYNAMIC(CB_BLEND_EQUATIONS) ? 0 : att->srcColorBlendFactor,
+ .dst_color_blend_factor = IS_DYNAMIC(CB_BLEND_EQUATIONS) ? 0 : att->dstColorBlendFactor,
+ .src_alpha_blend_factor = IS_DYNAMIC(CB_BLEND_EQUATIONS) ? 0 : att->srcAlphaBlendFactor,
+ .dst_alpha_blend_factor = IS_DYNAMIC(CB_BLEND_EQUATIONS) ? 0 : att->dstAlphaBlendFactor,
+ .write_mask = IS_DYNAMIC(CB_WRITE_MASKS) ? 0xf : att->colorWriteMask,
+ .color_blend_op = IS_DYNAMIC(CB_BLEND_EQUATIONS) ? 0 : att->colorBlendOp,
+ .alpha_blend_op = IS_DYNAMIC(CB_BLEND_EQUATIONS) ? 0 : att->alphaBlendOp,
+ };
+ }
+
+ for (uint32_t i = 0; i < 4; i++)
+ cb->blend_constants[i] = cb_info->blendConstants[i];
+
+ const VkPipelineColorWriteCreateInfoEXT *cw_info =
+ vk_find_struct_const(cb_info->pNext, PIPELINE_COLOR_WRITE_CREATE_INFO_EXT);
+ if (!IS_DYNAMIC(CB_COLOR_WRITE_ENABLES) && cw_info != NULL) {
+ uint8_t color_write_enables = 0;
+ assert(cb_info->attachmentCount == cw_info->attachmentCount);
+ for (uint32_t a = 0; a < cw_info->attachmentCount; a++) {
+ if (cw_info->pColorWriteEnables[a])
+ color_write_enables |= BITFIELD_BIT(a);
+ }
+ cb->color_write_enables = color_write_enables;
+ } else {
+ cb->color_write_enables = BITFIELD_MASK(MESA_VK_MAX_COLOR_ATTACHMENTS);
+ }
+}
+
+static void
+vk_input_attachment_location_state_init(struct vk_input_attachment_location_state *ial,
+ const BITSET_WORD *dynamic,
+ const VkRenderingInputAttachmentIndexInfoKHR *ial_info)
+{
+ *ial = (struct vk_input_attachment_location_state) {
+ .color_map = { 0, 1, 2, 3, 4, 5, 6, 7 },
+ .depth_att = MESA_VK_ATTACHMENT_UNUSED,
+ .stencil_att = MESA_VK_ATTACHMENT_UNUSED,
+ };
+ if (!ial_info)
+ return;
+
+ for (uint32_t a = 0; a < MIN2(ial_info->colorAttachmentCount,
+ MESA_VK_MAX_COLOR_ATTACHMENTS); a++) {
+ ial->color_map[a] =
+ ial_info->pColorAttachmentInputIndices[a] == VK_ATTACHMENT_UNUSED ?
+ MESA_VK_ATTACHMENT_UNUSED : ial_info->pColorAttachmentInputIndices[a];
+ }
+ ial->depth_att = ial_info->pDepthInputAttachmentIndex != NULL ?
+ *ial_info->pDepthInputAttachmentIndex : MESA_VK_ATTACHMENT_UNUSED;
+ ial->stencil_att = ial_info->pStencilInputAttachmentIndex != NULL ?
+ *ial_info->pStencilInputAttachmentIndex : MESA_VK_ATTACHMENT_UNUSED;
+}
+
+static void
+vk_color_attachment_location_state_init(struct vk_color_attachment_location_state *cal,
+ const BITSET_WORD *dynamic,
+ const VkRenderingAttachmentLocationInfoKHR *cal_info)
+{
+ *cal = (struct vk_color_attachment_location_state) {
+ .color_map = { 0, 1, 2, 3, 4, 5, 6, 7 },
+ };
+ if (!cal_info)
+ return;
+
+ for (uint32_t a = 0; a < MIN2(cal_info->colorAttachmentCount,
+ MESA_VK_MAX_COLOR_ATTACHMENTS); a++) {
+ cal->color_map[a] =
+ cal_info->pColorAttachmentLocations[a] == VK_ATTACHMENT_UNUSED ?
+ MESA_VK_ATTACHMENT_UNUSED : cal_info->pColorAttachmentLocations[a];
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_cb(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_color_blend_state *cb)
+{
+ dst->cb.logic_op_enable = cb->logic_op_enable;
+ dst->cb.logic_op = cb->logic_op;
+ dst->cb.color_write_enables = cb->color_write_enables;
+ dst->cb.attachment_count = cb->attachment_count;
+
+ if (IS_NEEDED(CB_BLEND_ENABLES) ||
+ IS_NEEDED(CB_BLEND_EQUATIONS) ||
+ IS_NEEDED(CB_WRITE_MASKS)) {
+ typed_memcpy(dst->cb.attachments, cb->attachments, cb->attachment_count);
+ }
+
+ if (IS_NEEDED(CB_BLEND_CONSTANTS))
+ typed_memcpy(dst->cb.blend_constants, cb->blend_constants, 4);
+}
+
+static void
+vk_dynamic_graphics_state_init_ial(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_input_attachment_location_state *ial)
+{
+ if (IS_NEEDED(INPUT_ATTACHMENT_MAP)) {
+ typed_memcpy(dst->ial.color_map, ial->color_map, MESA_VK_MAX_COLOR_ATTACHMENTS);
+ dst->ial.depth_att = ial->depth_att;
+ dst->ial.stencil_att = ial->stencil_att;
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_cal(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_color_attachment_location_state *cal)
+{
+ if (IS_NEEDED(COLOR_ATTACHMENT_MAP))
+ typed_memcpy(dst->cal.color_map, cal->color_map, MESA_VK_MAX_COLOR_ATTACHMENTS);
+}
+
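+/* Accumulate the pipeline create flags we care about (fragment shading
+ * rate / fragment density map attachments and attachment feedback loops)
+ * into the pipeline state, taking them either from the driver-supplied
+ * render pass flags or from the pipeline create info, and masking them by
+ * what is valid for the library subsets being compiled.
+ */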
+static void
+vk_pipeline_flags_init(struct vk_graphics_pipeline_state *state,
+ VkPipelineCreateFlags2KHR driver_rp_flags,
+ bool has_driver_rp,
+ const VkGraphicsPipelineCreateInfo *info,
+ const BITSET_WORD *dynamic,
+ VkGraphicsPipelineLibraryFlagsEXT lib)
+{
+ VkPipelineCreateFlags2KHR valid_pipeline_flags = 0;
+ VkPipelineCreateFlags2KHR valid_renderpass_flags = 0;
+ if (lib & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
+ valid_renderpass_flags |=
+ VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR |
+ VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT;
+ valid_pipeline_flags |=
+ VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR |
+ VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT;
+ }
+ if (lib & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT) {
+ valid_renderpass_flags |=
+ VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT |
+ VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ if (!IS_DYNAMIC(ATTACHMENT_FEEDBACK_LOOP_ENABLE)) {
+ valid_pipeline_flags |=
+ VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT |
+ VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ }
+ }
+ const VkPipelineCreateFlags2KHR renderpass_flags =
+ (has_driver_rp ? driver_rp_flags :
+ vk_get_pipeline_rendering_flags(info)) & valid_renderpass_flags;
+
+ const VkPipelineCreateFlags2KHR pipeline_flags =
+ vk_graphics_pipeline_create_flags(info) & valid_pipeline_flags;
+
+ bool pipeline_feedback_loop = pipeline_flags &
+ (VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT |
+ VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT);
+
+ bool renderpass_feedback_loop = renderpass_flags &
+ (VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT |
+ VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT);
+
+ state->pipeline_flags |= renderpass_flags | pipeline_flags;
+ state->feedback_loop_not_input_only |=
+ pipeline_feedback_loop || (!has_driver_rp && renderpass_feedback_loop);
+}
+
+static void
+vk_render_pass_state_init(struct vk_render_pass_state *rp,
+ const struct vk_render_pass_state *old_rp,
+ const struct vk_render_pass_state *driver_rp,
+ const VkGraphicsPipelineCreateInfo *info,
+ VkGraphicsPipelineLibraryFlagsEXT lib)
+{
+ /* If we already have render pass state and it has attachment info, then
+ * it's complete and we don't need a new one. The one caveat here is that
+ * we may need to add in some rendering flags.
+ */
+ if (old_rp != NULL && vk_render_pass_state_has_attachment_info(old_rp)) {
+ *rp = *old_rp;
+ return;
+ }
+
+ *rp = (struct vk_render_pass_state) {
+ .depth_attachment_format = VK_FORMAT_UNDEFINED,
+ .stencil_attachment_format = VK_FORMAT_UNDEFINED,
+ };
+
+ if (info->renderPass != VK_NULL_HANDLE && driver_rp != NULL) {
+ *rp = *driver_rp;
+ return;
+ }
+
+ const VkPipelineRenderingCreateInfo *r_info =
+ vk_get_pipeline_rendering_create_info(info);
+
+ if (r_info == NULL)
+ return;
+
+ rp->view_mask = r_info->viewMask;
+
+ /* From the Vulkan 1.3.218 spec description of pre-rasterization state:
+ *
+    *    "Pre-rasterization shader state is defined by:
+ * ...
+ * * VkRenderPass and subpass parameter
+ * * The viewMask parameter of VkPipelineRenderingCreateInfo (formats
+ * are ignored)"
+ *
+ * The description of fragment shader state contains identical text.
+ *
+ * If we have a render pass then we have full information. Even if we're
+ * dynamic-rendering-only, the presence of a render pass means the
+ * rendering info came from a vk_render_pass and is therefore complete.
+ * Otherwise, all we can grab is the view mask and we have to leave the
+ * rest for later.
+ */
+ if (info->renderPass == VK_NULL_HANDLE &&
+ !(lib & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT)) {
+ rp->attachments = MESA_VK_RP_ATTACHMENT_INFO_INVALID;
+ return;
+ }
+
+ assert(r_info->colorAttachmentCount <= MESA_VK_MAX_COLOR_ATTACHMENTS);
+ rp->color_attachment_count = r_info->colorAttachmentCount;
+ for (uint32_t i = 0; i < r_info->colorAttachmentCount; i++) {
+ rp->color_attachment_formats[i] = r_info->pColorAttachmentFormats[i];
+ if (r_info->pColorAttachmentFormats[i] != VK_FORMAT_UNDEFINED)
+ rp->attachments |= MESA_VK_RP_ATTACHMENT_COLOR_BIT(i);
+ }
+
+ rp->depth_attachment_format = r_info->depthAttachmentFormat;
+ if (r_info->depthAttachmentFormat != VK_FORMAT_UNDEFINED)
+ rp->attachments |= MESA_VK_RP_ATTACHMENT_DEPTH_BIT;
+
+ rp->stencil_attachment_format = r_info->stencilAttachmentFormat;
+ if (r_info->stencilAttachmentFormat != VK_FORMAT_UNDEFINED)
+ rp->attachments |= MESA_VK_RP_ATTACHMENT_STENCIL_BIT;
+
+ const VkAttachmentSampleCountInfoAMD *asc_info =
+ vk_get_pipeline_sample_count_info_amd(info);
+ if (asc_info != NULL) {
+ assert(asc_info->colorAttachmentCount == rp->color_attachment_count);
+ for (uint32_t i = 0; i < asc_info->colorAttachmentCount; i++) {
+ rp->color_attachment_samples[i] = asc_info->pColorAttachmentSamples[i];
+ }
+
+ rp->depth_stencil_attachment_samples = asc_info->depthStencilAttachmentSamples;
+ }
+}
+
+static void
+vk_dynamic_graphics_state_init_rp(struct vk_dynamic_graphics_state *dst,
+ const BITSET_WORD *needed,
+ const struct vk_render_pass_state *rp)
+{
+ dst->rp.attachments = rp->attachments;
+}
+
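+/* X-macro describing every graphics state group: the group bit, the state
+ * struct type, and the short member name (e.g. rs, ms) used for the pointer
+ * in vk_graphics_pipeline_state and as the suffix for the per-group helpers.
+ * The functions below expand this once with a small per-group macro instead
+ * of spelling out each group by hand.
+ */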
+#define FOREACH_STATE_GROUP(f) \
+ f(MESA_VK_GRAPHICS_STATE_VERTEX_INPUT_BIT, \
+ vk_vertex_input_state, vi); \
+ f(MESA_VK_GRAPHICS_STATE_INPUT_ASSEMBLY_BIT, \
+ vk_input_assembly_state, ia); \
+ f(MESA_VK_GRAPHICS_STATE_TESSELLATION_BIT, \
+ vk_tessellation_state, ts); \
+ f(MESA_VK_GRAPHICS_STATE_VIEWPORT_BIT, \
+ vk_viewport_state, vp); \
+ f(MESA_VK_GRAPHICS_STATE_DISCARD_RECTANGLES_BIT, \
+ vk_discard_rectangles_state, dr); \
+ f(MESA_VK_GRAPHICS_STATE_RASTERIZATION_BIT, \
+ vk_rasterization_state, rs); \
+ f(MESA_VK_GRAPHICS_STATE_FRAGMENT_SHADING_RATE_BIT, \
+ vk_fragment_shading_rate_state, fsr); \
+ f(MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT, \
+ vk_multisample_state, ms); \
+ f(MESA_VK_GRAPHICS_STATE_DEPTH_STENCIL_BIT, \
+ vk_depth_stencil_state, ds); \
+ f(MESA_VK_GRAPHICS_STATE_COLOR_BLEND_BIT, \
+ vk_color_blend_state, cb); \
+ f(MESA_VK_GRAPHICS_STATE_INPUT_ATTACHMENT_MAP_BIT, \
+ vk_input_attachment_location_state, ial); \
+ f(MESA_VK_GRAPHICS_STATE_COLOR_ATTACHMENT_MAP_BIT, \
+ vk_color_attachment_location_state, cal); \
+ f(MESA_VK_GRAPHICS_STATE_RENDER_PASS_BIT, \
+ vk_render_pass_state, rp);
+
+static enum mesa_vk_graphics_state_groups
+vk_graphics_pipeline_state_groups(const struct vk_graphics_pipeline_state *state)
+{
+ /* For now, we just validate dynamic state */
+ enum mesa_vk_graphics_state_groups groups = 0;
+
+#define FILL_HAS(STATE, type, s) \
+ if (state->s != NULL) groups |= STATE
+
+ FOREACH_STATE_GROUP(FILL_HAS)
+
+#undef FILL_HAS
+
+ return groups | fully_dynamic_state_groups(state->dynamic);
+}
+
+void
+vk_graphics_pipeline_get_state(const struct vk_graphics_pipeline_state *state,
+ BITSET_WORD *set_state_out)
+{
+   /* Figure out which state groups this pipeline state actually provides */
+   enum mesa_vk_graphics_state_groups groups = 0;
+
+#define FILL_HAS(STATE, type, s) \
+ if (state->s != NULL) groups |= STATE
+
+ FOREACH_STATE_GROUP(FILL_HAS)
+
+#undef FILL_HAS
+
+ BITSET_DECLARE(set_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ get_dynamic_state_groups(set_state, groups);
+ BITSET_ANDNOT(set_state, set_state, state->dynamic);
+ memcpy(set_state_out, set_state, sizeof(set_state));
+}
+
+static void
+vk_graphics_pipeline_state_validate(const struct vk_graphics_pipeline_state *state)
+{
+#ifndef NDEBUG
+ /* For now, we just validate dynamic state */
+ enum mesa_vk_graphics_state_groups groups =
+ vk_graphics_pipeline_state_groups(state);
+ validate_dynamic_state_groups(state->dynamic, groups);
+#endif
+}
+
+static bool
+may_have_rasterization(const struct vk_graphics_pipeline_state *state,
+ const BITSET_WORD *dynamic,
+ const VkGraphicsPipelineCreateInfo *info)
+{
+ if (state->rs) {
+ /* We default rasterizer_discard_enable to false when dynamic */
+ return !state->rs->rasterizer_discard_enable;
+ } else {
+ return IS_DYNAMIC(RS_RASTERIZER_DISCARD_ENABLE) ||
+ !info->pRasterizationState->rasterizerDiscardEnable;
+ }
+}
+
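+/* Fill out a vk_graphics_pipeline_state from a VkGraphicsPipelineCreateInfo.
+ * Rough flow: work out which pipeline-library subsets are being built,
+ * translate those into the state groups we need, drop groups that are
+ * already present or fully dynamic, then allocate (from 'all' or via
+ * vk_multialloc) and initialize the remaining groups.
+ */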
+VkResult
+vk_graphics_pipeline_state_fill(const struct vk_device *device,
+ struct vk_graphics_pipeline_state *state,
+ const VkGraphicsPipelineCreateInfo *info,
+ const struct vk_render_pass_state *driver_rp,
+ VkPipelineCreateFlags2KHR driver_rp_flags,
+ struct vk_graphics_pipeline_all_state *all,
+ const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ void **alloc_ptr_out)
+{
+ vk_graphics_pipeline_state_validate(state);
+
+ BITSET_DECLARE(dynamic, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ vk_get_dynamic_graphics_states(dynamic, info->pDynamicState);
+
+ /*
+ * First, figure out which library-level shader/state groups we need
+ */
+
+ VkGraphicsPipelineLibraryFlagsEXT lib;
+ const VkGraphicsPipelineLibraryCreateInfoEXT *gpl_info =
+ vk_find_struct_const(info->pNext, GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT);
+ const VkPipelineLibraryCreateInfoKHR *lib_info =
+ vk_find_struct_const(info->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
+
+ VkPipelineCreateFlags2KHR pipeline_flags = vk_graphics_pipeline_create_flags(info);
+
+ VkShaderStageFlagBits allowed_stages;
+ if (!(pipeline_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)) {
+ allowed_stages = VK_SHADER_STAGE_ALL_GRAPHICS |
+ VK_SHADER_STAGE_TASK_BIT_EXT |
+ VK_SHADER_STAGE_MESH_BIT_EXT;
+ } else if (gpl_info) {
+ allowed_stages = 0;
+
+ /* If we're creating a pipeline library without pre-rasterization,
+ * discard all the associated stages.
+ */
+ if (gpl_info->flags &
+ VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
+ allowed_stages |= (VK_SHADER_STAGE_VERTEX_BIT |
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
+ VK_SHADER_STAGE_GEOMETRY_BIT |
+ VK_SHADER_STAGE_TASK_BIT_EXT |
+ VK_SHADER_STAGE_MESH_BIT_EXT);
+ }
+
+ /* If we're creating a pipeline library without fragment shader,
+ * discard that stage.
+ */
+ if (gpl_info->flags &
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)
+ allowed_stages |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ } else {
+ /* VkGraphicsPipelineLibraryCreateInfoEXT was omitted, flags should
+ * be assumed to be empty and therefore no shader stage should be
+ * considered.
+ */
+ allowed_stages = 0;
+ }
+
+ for (uint32_t i = 0; i < info->stageCount; i++) {
+ state->shader_stages |= info->pStages[i].stage & allowed_stages;
+ }
+
+ /* In case we return early */
+ if (alloc_ptr_out != NULL)
+ *alloc_ptr_out = NULL;
+
+ if (gpl_info) {
+ lib = gpl_info->flags;
+ } else if ((lib_info && lib_info->libraryCount > 0) ||
+ (pipeline_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)) {
+ /*
+ * From the Vulkan 1.3.210 spec:
+ * "If this structure is omitted, and either VkGraphicsPipelineCreateInfo::flags
+ * includes VK_PIPELINE_CREATE_LIBRARY_BIT_KHR or the
+ * VkGraphicsPipelineCreateInfo::pNext chain includes a
+ * VkPipelineLibraryCreateInfoKHR structure with a libraryCount greater than 0,
+ * it is as if flags is 0. Otherwise if this structure is omitted, it is as if
+ * flags includes all possible subsets of the graphics pipeline."
+ */
+ lib = 0;
+ } else {
+ /* We're building a complete pipeline. From the Vulkan 1.3.218 spec:
+ *
+ * "A complete graphics pipeline always includes pre-rasterization
+ * shader state, with other subsets included depending on that state.
+ * If the pre-rasterization shader state includes a vertex shader,
+ * then vertex input state is included in a complete graphics
+ * pipeline. If the value of
+ * VkPipelineRasterizationStateCreateInfo::rasterizerDiscardEnable in
+ * the pre-rasterization shader state is VK_FALSE or the
+ * VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE dynamic state is
+ * enabled fragment shader state and fragment output interface state
+ * is included in a complete graphics pipeline."
+ */
+ lib = VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT;
+
+ if (state->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
+ lib |= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
+
+ if (may_have_rasterization(state, dynamic, info)) {
+ lib |= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
+ lib |= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
+ }
+ }
+
+ /*
+ * Next, turn those into individual states. Among other things, this
+ * de-duplicates things like FSR and multisample state which appear in
+ * multiple library groups.
+ */
+
+ enum mesa_vk_graphics_state_groups needs = 0;
+ if (lib & VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT) {
+ needs |= MESA_VK_GRAPHICS_STATE_VERTEX_INPUT_BIT;
+ needs |= MESA_VK_GRAPHICS_STATE_INPUT_ASSEMBLY_BIT;
+ }
+
+ /* Other stuff potentially depends on this so gather it early */
+ struct vk_render_pass_state rp;
+ if (lib & (VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT |
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT |
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT)) {
+ vk_render_pass_state_init(&rp, state->rp, driver_rp, info, lib);
+
+ needs |= MESA_VK_GRAPHICS_STATE_RENDER_PASS_BIT;
+
+ /* If the old state was incomplete but the new one isn't, set state->rp
+ * to NULL so it gets replaced with the new version.
+ */
+ if (state->rp != NULL &&
+ !vk_render_pass_state_has_attachment_info(state->rp) &&
+          vk_render_pass_state_has_attachment_info(&rp))
+ state->rp = NULL;
+ }
+
+ if (lib & (VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT |
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT)) {
+ vk_pipeline_flags_init(state, driver_rp_flags, !!driver_rp, info, dynamic, lib);
+ }
+
+ if (lib & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * VUID-VkGraphicsPipelineCreateInfo-stage-02096
+ *
+ * "If the pipeline is being created with pre-rasterization shader
+ * state the stage member of one element of pStages must be either
+ * VK_SHADER_STAGE_VERTEX_BIT or VK_SHADER_STAGE_MESH_BIT_EXT"
+ */
+ assert(state->shader_stages & (VK_SHADER_STAGE_VERTEX_BIT |
+ VK_SHADER_STAGE_MESH_BIT_EXT));
+
+ if (state->shader_stages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
+ needs |= MESA_VK_GRAPHICS_STATE_TESSELLATION_BIT;
+
+ if (may_have_rasterization(state, dynamic, info))
+ needs |= MESA_VK_GRAPHICS_STATE_VIEWPORT_BIT;
+
+ needs |= MESA_VK_GRAPHICS_STATE_DISCARD_RECTANGLES_BIT;
+ needs |= MESA_VK_GRAPHICS_STATE_RASTERIZATION_BIT;
+ needs |= MESA_VK_GRAPHICS_STATE_FRAGMENT_SHADING_RATE_BIT;
+ }
+
+ if (lib & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
+ needs |= MESA_VK_GRAPHICS_STATE_FRAGMENT_SHADING_RATE_BIT;
+
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * "Fragment shader state is defined by:
+ * ...
+ * - VkPipelineMultisampleStateCreateInfo if sample shading is
+ * enabled or renderpass is not VK_NULL_HANDLE"
+ *
+ * and
+ *
+ * VUID-VkGraphicsPipelineCreateInfo-pMultisampleState-06629
+ *
+ * "If the pipeline is being created with fragment shader state
+ * pMultisampleState must be NULL or a valid pointer to a valid
+ * VkPipelineMultisampleStateCreateInfo structure"
+ *
+ * so we can reliably detect when to include it based on the
+ * pMultisampleState pointer.
+ */
+ if (info->pMultisampleState != NULL)
+ needs |= MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT;
+
+ /* From the Vulkan 1.3.218 spec:
+ *
+ * VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
+ *
+ * "If renderPass is not VK_NULL_HANDLE, the pipeline is being
+ * created with fragment shader state, and subpass uses a
+ * depth/stencil attachment, pDepthStencilState must be a valid
+ * pointer to a valid VkPipelineDepthStencilStateCreateInfo
+ * structure"
+ *
+ * VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
+ *
+ * "If renderPass is VK_NULL_HANDLE, the pipeline is being created
+ * with fragment shader state and fragment output interface state,
+ * and either of VkPipelineRenderingCreateInfo::depthAttachmentFormat
+ * or VkPipelineRenderingCreateInfo::stencilAttachmentFormat are not
+ * VK_FORMAT_UNDEFINED, pDepthStencilState must be a valid pointer to
+ * a valid VkPipelineDepthStencilStateCreateInfo structure"
+ *
+ * VUID-VkGraphicsPipelineCreateInfo-renderPass-06590
+ *
+ * "If renderPass is VK_NULL_HANDLE and the pipeline is being created
+ * with fragment shader state but not fragment output interface
+ * state, pDepthStencilState must be a valid pointer to a valid
+ * VkPipelineDepthStencilStateCreateInfo structure"
+ *
+ * In the first case, we'll have a real set of aspects in rp. In the
+ * second case, where we have both fragment shader and fragment output
+ * state, we will also have a valid set of aspects. In the third case
+ * where we only have fragment shader state and no render pass, the
+ * vk_render_pass_state will be incomplete.
+ */
+ if (!vk_render_pass_state_has_attachment_info(&rp) ||
+ (rp.attachments & (MESA_VK_RP_ATTACHMENT_DEPTH_BIT |
+ MESA_VK_RP_ATTACHMENT_STENCIL_BIT)))
+ needs |= MESA_VK_GRAPHICS_STATE_DEPTH_STENCIL_BIT;
+
+ needs |= MESA_VK_GRAPHICS_STATE_INPUT_ATTACHMENT_MAP_BIT;
+ }
+
+ if (lib & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT) {
+ if (rp.attachments & MESA_VK_RP_ATTACHMENT_ANY_COLOR_BITS)
+ needs |= MESA_VK_GRAPHICS_STATE_COLOR_BLEND_BIT;
+
+ needs |= MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT;
+
+ needs |= MESA_VK_GRAPHICS_STATE_COLOR_ATTACHMENT_MAP_BIT;
+ }
+
+ /*
+    * Next, filter off any states we already have.
+ */
+
+#define FILTER_NEEDS(STATE, type, s) \
+ if (state->s != NULL) needs &= ~STATE
+
+ FOREACH_STATE_GROUP(FILTER_NEEDS)
+
+#undef FILTER_NEEDS
+
+ /* Filter dynamic state down to just what we're adding */
+ BITSET_DECLARE(dynamic_filter, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ get_dynamic_state_groups(dynamic_filter, needs);
+
+ /* Attachment feedback loop state is part of the renderpass state in mesa
+ * because attachment feedback loops can also come from the render pass,
+ * but in Vulkan it is part of the fragment output interface. The
+ * renderpass state also exists, possibly in an incomplete state, in other
+ * stages for things like the view mask, but it does not contain the
+ * feedback loop flags. In those other stages we have to ignore
+ * VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT, even though it is
+ * part of a state group that exists in those stages.
+ */
+ if (!(lib &
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT)) {
+ BITSET_CLEAR(dynamic_filter,
+ MESA_VK_DYNAMIC_ATTACHMENT_FEEDBACK_LOOP_ENABLE);
+ }
+
+ BITSET_AND(dynamic, dynamic, dynamic_filter);
+
+ /* And add it in */
+ BITSET_OR(state->dynamic, state->dynamic, dynamic);
+
+ /*
+ * If a state is fully dynamic, we don't need to even allocate them. Do
+ * this after we've filtered dynamic state because we still want them to
+ * show up in the dynamic state but don't want the actual state.
+ */
+ needs &= ~fully_dynamic_state_groups(state->dynamic);
+
+ /* If we don't need to set up any new states, bail early */
+ if (needs == 0)
+ return VK_SUCCESS;
+
+ /*
+ * Now, ensure that we have space for each of the states we're going to
+ * fill. If all != NULL, we'll pull from that. Otherwise, we need to
+ * allocate memory.
+ */
+
+ VK_MULTIALLOC(ma);
+
+#define ENSURE_STATE_IF_NEEDED(STATE, type, s) \
+ struct type *new_##s = NULL; \
+ if (needs & STATE) { \
+ if (all == NULL) { \
+ vk_multialloc_add(&ma, &new_##s, struct type, 1); \
+ } else { \
+ new_##s = &all->s; \
+ } \
+ }
+
+ FOREACH_STATE_GROUP(ENSURE_STATE_IF_NEEDED)
+
+#undef ENSURE_STATE_IF_NEEDED
+
+ /* Sample locations are a bit special. We don't want to waste the memory
+ * for 64 floats if we don't need to. Also, we set up standard sample
+ * locations if no user-provided sample locations are available.
+ */
+ const VkPipelineSampleLocationsStateCreateInfoEXT *sl_info = NULL;
+ struct vk_sample_locations_state *new_sl = NULL;
+ if (needs & MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT) {
+ if (info->pMultisampleState)
+ sl_info = vk_find_struct_const(info->pMultisampleState->pNext,
+ PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
+ if (needs_sample_locations_state(dynamic, sl_info)) {
+ if (all == NULL) {
+ vk_multialloc_add(&ma, &new_sl, struct vk_sample_locations_state, 1);
+ } else {
+ new_sl = &all->ms_sample_locations;
+ }
+ }
+ }
+
+ /*
+ * Allocate memory, if needed
+ */
+
+ if (ma.size > 0) {
+ assert(all == NULL);
+ *alloc_ptr_out = vk_multialloc_alloc2(&ma, &device->alloc, alloc, scope);
+ if (*alloc_ptr_out == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ /*
+    * Create aliases for various input infos so we can use our FOREACH macro
+ */
+
+#define INFO_ALIAS(_State, s) \
+ const VkPipeline##_State##StateCreateInfo *s##_info = info->p##_State##State
+
+ INFO_ALIAS(VertexInput, vi);
+ INFO_ALIAS(InputAssembly, ia);
+ INFO_ALIAS(Tessellation, ts);
+ INFO_ALIAS(Viewport, vp);
+ INFO_ALIAS(Rasterization, rs);
+ INFO_ALIAS(Multisample, ms);
+ INFO_ALIAS(DepthStencil, ds);
+ INFO_ALIAS(ColorBlend, cb);
+
+#undef INFO_ALIAS
+
+ const VkPipelineDiscardRectangleStateCreateInfoEXT *dr_info =
+ vk_find_struct_const(info->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
+
+ const VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_info =
+ vk_find_struct_const(info->pNext, PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR);
+
+ const VkRenderingInputAttachmentIndexInfoKHR *ial_info =
+ vk_find_struct_const(info->pNext, RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR);
+ const VkRenderingAttachmentLocationInfoKHR *cal_info =
+ vk_find_struct_const(info->pNext, RENDERING_ATTACHMENT_LOCATION_INFO_KHR);
+
+ /*
+ * Finally, fill out all the states
+ */
+
+#define INIT_STATE_IF_NEEDED(STATE, type, s) \
+ if (needs & STATE) { \
+ type##_init(new_##s, dynamic, s##_info); \
+ state->s = new_##s; \
+ }
+
+ /* render pass state is special and we just copy it */
+#define vk_render_pass_state_init(s, d, i) *s = rp
+
+ FOREACH_STATE_GROUP(INIT_STATE_IF_NEEDED)
+
+#undef vk_render_pass_state_init
+#undef INIT_STATE_IF_NEEDED
+
+ if (needs & MESA_VK_GRAPHICS_STATE_MULTISAMPLE_BIT) {
+ vk_multisample_sample_locations_state_init(new_ms, new_sl, dynamic,
+ ms_info, sl_info);
+ }
+
+ return VK_SUCCESS;
+}
+
+#undef IS_DYNAMIC
+#undef IS_NEEDED
+
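+/* Merge pipeline-library state: dst keeps any group it already has and
+ * adopts src's group otherwise.  Render pass state is the exception, where
+ * a complete (attachment-info-carrying) src replaces an incomplete dst.
+ */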
+void
+vk_graphics_pipeline_state_merge(struct vk_graphics_pipeline_state *dst,
+ const struct vk_graphics_pipeline_state *src)
+{
+ vk_graphics_pipeline_state_validate(dst);
+ vk_graphics_pipeline_state_validate(src);
+
+ BITSET_OR(dst->dynamic, dst->dynamic, src->dynamic);
+
+ dst->shader_stages |= src->shader_stages;
+
+ dst->pipeline_flags |= src->pipeline_flags;
+ dst->feedback_loop_not_input_only |= src->feedback_loop_not_input_only;
+
+ /* Render pass state needs special care because a render pass state may be
+ * incomplete (view mask only). See vk_render_pass_state_init().
+ */
+ if (dst->rp != NULL && src->rp != NULL &&
+ !vk_render_pass_state_has_attachment_info(dst->rp) &&
+ vk_render_pass_state_has_attachment_info(src->rp))
+ dst->rp = src->rp;
+
+#define MERGE(STATE, type, state) \
+ if (dst->state == NULL && src->state != NULL) dst->state = src->state;
+
+ FOREACH_STATE_GROUP(MERGE)
+
+#undef MERGE
+}
+
+static bool
+is_group_all_dynamic(const struct vk_graphics_pipeline_state *state,
+ enum mesa_vk_graphics_state_groups group)
+{
+ /* Render pass is a bit special, because it contains always-static state
+ * (e.g. the view mask). It's never all dynamic.
+ */
+ if (group == MESA_VK_GRAPHICS_STATE_RENDER_PASS_BIT)
+ return false;
+
+ BITSET_DECLARE(group_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ BITSET_DECLARE(dynamic_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ get_dynamic_state_groups(group_state, group);
+ BITSET_AND(dynamic_state, group_state, state->dynamic);
+ return BITSET_EQUAL(dynamic_state, group_state);
+}
+
+VkResult
+vk_graphics_pipeline_state_copy(const struct vk_device *device,
+ struct vk_graphics_pipeline_state *state,
+ const struct vk_graphics_pipeline_state *old_state,
+ const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ void **alloc_ptr_out)
+{
+ vk_graphics_pipeline_state_validate(old_state);
+
+ VK_MULTIALLOC(ma);
+
+#define ENSURE_STATE_IF_NEEDED(STATE, type, s) \
+ struct type *new_##s = NULL; \
+ if (old_state->s && !is_group_all_dynamic(state, STATE)) { \
+ vk_multialloc_add(&ma, &new_##s, struct type, 1); \
+ }
+
+ FOREACH_STATE_GROUP(ENSURE_STATE_IF_NEEDED)
+
+#undef ENSURE_STATE_IF_NEEDED
+
+ /* Sample locations are a bit special. */
+ struct vk_sample_locations_state *new_sample_locations = NULL;
+ if (old_state->ms && old_state->ms->sample_locations &&
+ !BITSET_TEST(old_state->dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS)) {
+ assert(old_state->ms->sample_locations);
+ vk_multialloc_add(&ma, &new_sample_locations,
+ struct vk_sample_locations_state, 1);
+ }
+
+ if (ma.size > 0) {
+ *alloc_ptr_out = vk_multialloc_alloc2(&ma, &device->alloc, alloc, scope);
+ if (*alloc_ptr_out == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ if (new_sample_locations) {
+ *new_sample_locations = *old_state->ms->sample_locations;
+ }
+
+#define COPY_STATE_IF_NEEDED(STATE, type, s) \
+ if (new_##s) { \
+ *new_##s = *old_state->s; \
+ } \
+ state->s = new_##s;
+
+ FOREACH_STATE_GROUP(COPY_STATE_IF_NEEDED)
+
+ if (new_ms) {
+ new_ms->sample_locations = new_sample_locations;
+ }
+
+ state->shader_stages = old_state->shader_stages;
+ BITSET_COPY(state->dynamic, old_state->dynamic);
+
+#undef COPY_STATE_IF_NEEDED
+
+ state->pipeline_flags = old_state->pipeline_flags;
+ state->feedback_loop_not_input_only =
+ old_state->feedback_loop_not_input_only;
+
+ vk_graphics_pipeline_state_validate(state);
+ return VK_SUCCESS;
+}
+
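+/* Default values for dynamic state: line width 1.0, a 1x1 KEEP/KEEP fragment
+ * shading rate, depth bounds [0, 1], full stencil compare/write masks, all
+ * color writes enabled, and identity attachment maps.  Everything not listed
+ * here is zero.
+ */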
+static const struct vk_dynamic_graphics_state vk_default_dynamic_graphics_state = {
+ .rs = {
+ .line = {
+ .width = 1.0f,
+ },
+ },
+ .fsr = {
+ .fragment_size = {1u, 1u},
+ .combiner_ops = {
+ VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,
+ VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,
+ },
+ },
+ .ds = {
+ .depth = {
+ .bounds_test = {
+ .min = 0.0f,
+ .max = 1.0f,
+ },
+ },
+ .stencil = {
+ .write_enable = true,
+ .front = {
+ .compare_mask = -1,
+ .write_mask = -1,
+ },
+ .back = {
+ .compare_mask = -1,
+ .write_mask = -1,
+ },
+ },
+ },
+ .cb = {
+ .color_write_enables = 0xffu,
+ .attachment_count = MESA_VK_MAX_COLOR_ATTACHMENTS,
+ },
+ .ial = {
+ .color_map = { 0, 1, 2, 3, 4, 5, 6, 7 },
+ .depth_att = MESA_VK_ATTACHMENT_UNUSED,
+ .stencil_att = MESA_VK_ATTACHMENT_UNUSED,
+ },
+ .cal = {
+ .color_map = { 0, 1, 2, 3, 4, 5, 6, 7 },
+ },
+};
+
+void
+vk_dynamic_graphics_state_init(struct vk_dynamic_graphics_state *dyn)
+{
+ *dyn = vk_default_dynamic_graphics_state;
+}
+
+void
+vk_dynamic_graphics_state_clear(struct vk_dynamic_graphics_state *dyn)
+{
+ struct vk_vertex_input_state *vi = dyn->vi;
+ struct vk_sample_locations_state *sl = dyn->ms.sample_locations;
+
+ *dyn = vk_default_dynamic_graphics_state;
+
+ if (vi != NULL) {
+ memset(vi, 0, sizeof(*vi));
+ dyn->vi = vi;
+ }
+
+ if (sl != NULL) {
+ memset(sl, 0, sizeof(*sl));
+ dyn->ms.sample_locations = sl;
+ }
+}
+
+void
+vk_dynamic_graphics_state_fill(struct vk_dynamic_graphics_state *dyn,
+ const struct vk_graphics_pipeline_state *p)
+{
+   /* This function (and the individual vk_dynamic_graphics_state_init_*
+ * functions it calls) are a bit sloppy. Instead of checking every single
+ * bit, we just copy everything and set the bits the right way at the end
+ * based on what groups we actually had.
+ */
+ enum mesa_vk_graphics_state_groups groups = 0;
+
+ BITSET_DECLARE(needed, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ BITSET_COPY(needed, p->dynamic);
+ BITSET_NOT(needed);
+
+ /* We only want to copy these if the driver has filled out the relevant
+ * pointer in the dynamic state struct. If not, they don't support them
+ * as dynamic state and we should leave them alone.
+ */
+ if (dyn->vi == NULL)
+ BITSET_CLEAR(needed, MESA_VK_DYNAMIC_VI);
+ if (dyn->ms.sample_locations == NULL)
+ BITSET_CLEAR(needed, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS);
+
+#define INIT_DYNAMIC_STATE(STATE, type, s) \
+ if (p->s != NULL) { \
+ vk_dynamic_graphics_state_init_##s(dyn, needed, p->s); \
+ groups |= STATE; \
+ }
+
+ FOREACH_STATE_GROUP(INIT_DYNAMIC_STATE);
+
+#undef INIT_DYNAMIC_STATE
+
+ /* Feedback loop state is weird: implicit feedback loops from the
+ * renderpass and dynamically-enabled feedback loops can in theory both be
+ * enabled independently, so we can't just use one field; instead drivers
+    * have to OR the pipeline state (in
+    * vk_graphics_pipeline_state::pipeline_flags) and dynamic state. Due to
+    * this it isn't worth tracking implicit render pass flags vs. pipeline
+    * flags in the pipeline state, and we just combine the two in
+    * vk_pipeline_flags_init() and don't bother
+ * setting the dynamic state from the pipeline here, instead just making
+ * sure the dynamic state is reset to 0 when feedback loop state is static.
+ */
+ dyn->feedback_loops = 0;
+
+ get_dynamic_state_groups(dyn->set, groups);
+
+ /* Vertex input state is always included in a complete pipeline. If p->vi
+ * is NULL, that means that it has been precompiled by the driver, but we
+ * should still track vi_bindings_valid.
+ */
+ BITSET_SET(dyn->set, MESA_VK_DYNAMIC_VI_BINDINGS_VALID);
+
+ /* If the pipeline doesn't render any color attachments, we should still
+ * keep track of the fact that it writes 0 attachments, even though none of
+ * the other blend states will be initialized. Normally this would be
+ * initialized with the other blend states.
+ */
+ if (!p->rp || !(p->rp->attachments & MESA_VK_RP_ATTACHMENT_ANY_COLOR_BITS)) {
+ dyn->cb.attachment_count = 0;
+ BITSET_SET(dyn->set, MESA_VK_DYNAMIC_CB_ATTACHMENT_COUNT);
+ }
+
+ /* Mask off all but the groups we actually found */
+ BITSET_AND(dyn->set, dyn->set, needed);
+}
+
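+/* Helpers for the vk_common_CmdSet*() entrypoints below.  They only update
+ * the destination (and mark the state dirty) when the value actually changes
+ * or the state wasn't previously set.  The assert after the assignment is
+ * there to catch values that get truncated when stored in a narrower field.
+ */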
+#define SET_DYN_VALUE(dst, STATE, state, value) do { \
+ if (!BITSET_TEST((dst)->set, MESA_VK_DYNAMIC_##STATE) || \
+ (dst)->state != (value)) { \
+ (dst)->state = (value); \
+ assert((dst)->state == (value)); \
+ BITSET_SET(dst->set, MESA_VK_DYNAMIC_##STATE); \
+ BITSET_SET(dst->dirty, MESA_VK_DYNAMIC_##STATE); \
+ } \
+} while(0)
+
+#define SET_DYN_BOOL(dst, STATE, state, value) \
+ SET_DYN_VALUE(dst, STATE, state, (bool)value);
+
+#define SET_DYN_ARRAY(dst, STATE, state, start, count, src) do { \
+ assert(start + count <= ARRAY_SIZE((dst)->state)); \
+ STATIC_ASSERT(sizeof(*(dst)->state) == sizeof(*(src))); \
+ const size_t __state_size = sizeof(*(dst)->state) * (count); \
+ if (!BITSET_TEST((dst)->set, MESA_VK_DYNAMIC_##STATE) || \
+ memcmp((dst)->state + start, src, __state_size)) { \
+ memcpy((dst)->state + start, src, __state_size); \
+ BITSET_SET(dst->set, MESA_VK_DYNAMIC_##STATE); \
+ BITSET_SET(dst->dirty, MESA_VK_DYNAMIC_##STATE); \
+ } \
+} while(0)
+
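+/* Copy all dynamic state that is set in src into dst.  Values are compared
+ * before being written so dst->dirty only picks up real changes, and the
+ * loop at the end additionally dirties anything set in src but not
+ * previously set in dst.
+ */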
+void
+vk_dynamic_graphics_state_copy(struct vk_dynamic_graphics_state *dst,
+ const struct vk_dynamic_graphics_state *src)
+{
+#define IS_SET_IN_SRC(STATE) \
+ BITSET_TEST(src->set, MESA_VK_DYNAMIC_##STATE)
+
+#define COPY_MEMBER(STATE, state) \
+ SET_DYN_VALUE(dst, STATE, state, src->state)
+
+#define COPY_ARRAY(STATE, state, count) \
+ SET_DYN_ARRAY(dst, STATE, state, 0, count, src->state)
+
+#define COPY_IF_SET(STATE, state) \
+ if (IS_SET_IN_SRC(STATE)) SET_DYN_VALUE(dst, STATE, state, src->state)
+
+ if (IS_SET_IN_SRC(VI)) {
+ assert(dst->vi != NULL);
+ COPY_MEMBER(VI, vi->bindings_valid);
+ u_foreach_bit(b, src->vi->bindings_valid) {
+ COPY_MEMBER(VI, vi->bindings[b].stride);
+ COPY_MEMBER(VI, vi->bindings[b].input_rate);
+ COPY_MEMBER(VI, vi->bindings[b].divisor);
+ }
+ COPY_MEMBER(VI, vi->attributes_valid);
+ u_foreach_bit(a, src->vi->attributes_valid) {
+ COPY_MEMBER(VI, vi->attributes[a].binding);
+ COPY_MEMBER(VI, vi->attributes[a].format);
+ COPY_MEMBER(VI, vi->attributes[a].offset);
+ }
+ }
+
+ if (IS_SET_IN_SRC(VI_BINDINGS_VALID))
+ COPY_MEMBER(VI_BINDINGS_VALID, vi_bindings_valid);
+
+ if (IS_SET_IN_SRC(VI_BINDING_STRIDES)) {
+ assert(IS_SET_IN_SRC(VI_BINDINGS_VALID));
+ u_foreach_bit(a, src->vi_bindings_valid) {
+ COPY_MEMBER(VI_BINDING_STRIDES, vi_binding_strides[a]);
+ }
+ }
+
+ COPY_IF_SET(IA_PRIMITIVE_TOPOLOGY, ia.primitive_topology);
+ COPY_IF_SET(IA_PRIMITIVE_RESTART_ENABLE, ia.primitive_restart_enable);
+ COPY_IF_SET(TS_PATCH_CONTROL_POINTS, ts.patch_control_points);
+ COPY_IF_SET(TS_DOMAIN_ORIGIN, ts.domain_origin);
+
+ COPY_IF_SET(VP_VIEWPORT_COUNT, vp.viewport_count);
+ if (IS_SET_IN_SRC(VP_VIEWPORTS)) {
+ assert(IS_SET_IN_SRC(VP_VIEWPORT_COUNT));
+ COPY_ARRAY(VP_VIEWPORTS, vp.viewports, src->vp.viewport_count);
+ }
+
+ COPY_IF_SET(VP_SCISSOR_COUNT, vp.scissor_count);
+ if (IS_SET_IN_SRC(VP_SCISSORS)) {
+ assert(IS_SET_IN_SRC(VP_SCISSOR_COUNT));
+ COPY_ARRAY(VP_SCISSORS, vp.scissors, src->vp.scissor_count);
+ }
+
+ COPY_IF_SET(VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE,
+ vp.depth_clip_negative_one_to_one);
+
+ COPY_IF_SET(DR_ENABLE, dr.enable);
+ COPY_IF_SET(DR_MODE, dr.mode);
+ if (IS_SET_IN_SRC(DR_RECTANGLES)) {
+ COPY_MEMBER(DR_RECTANGLES, dr.rectangle_count);
+ COPY_ARRAY(DR_RECTANGLES, dr.rectangles, src->dr.rectangle_count);
+ }
+
+ COPY_IF_SET(RS_RASTERIZER_DISCARD_ENABLE, rs.rasterizer_discard_enable);
+ COPY_IF_SET(RS_DEPTH_CLAMP_ENABLE, rs.depth_clamp_enable);
+ COPY_IF_SET(RS_DEPTH_CLIP_ENABLE, rs.depth_clip_enable);
+ COPY_IF_SET(RS_POLYGON_MODE, rs.polygon_mode);
+ COPY_IF_SET(RS_CULL_MODE, rs.cull_mode);
+ COPY_IF_SET(RS_FRONT_FACE, rs.front_face);
+ COPY_IF_SET(RS_CONSERVATIVE_MODE, rs.conservative_mode);
+ COPY_IF_SET(RS_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE,
+ rs.extra_primitive_overestimation_size);
+ COPY_IF_SET(RS_RASTERIZATION_ORDER_AMD, rs.rasterization_order_amd);
+ COPY_IF_SET(RS_PROVOKING_VERTEX, rs.provoking_vertex);
+ COPY_IF_SET(RS_RASTERIZATION_STREAM, rs.rasterization_stream);
+ COPY_IF_SET(RS_DEPTH_BIAS_ENABLE, rs.depth_bias.enable);
+ COPY_IF_SET(RS_DEPTH_BIAS_FACTORS, rs.depth_bias.constant);
+ COPY_IF_SET(RS_DEPTH_BIAS_FACTORS, rs.depth_bias.clamp);
+ COPY_IF_SET(RS_DEPTH_BIAS_FACTORS, rs.depth_bias.slope);
+ COPY_IF_SET(RS_DEPTH_BIAS_FACTORS, rs.depth_bias.representation);
+ COPY_IF_SET(RS_DEPTH_BIAS_FACTORS, rs.depth_bias.exact);
+ COPY_IF_SET(RS_LINE_WIDTH, rs.line.width);
+ COPY_IF_SET(RS_LINE_MODE, rs.line.mode);
+ COPY_IF_SET(RS_LINE_STIPPLE_ENABLE, rs.line.stipple.enable);
+ COPY_IF_SET(RS_LINE_STIPPLE, rs.line.stipple.factor);
+ COPY_IF_SET(RS_LINE_STIPPLE, rs.line.stipple.pattern);
+
+ COPY_IF_SET(FSR, fsr.fragment_size.width);
+ COPY_IF_SET(FSR, fsr.fragment_size.height);
+ COPY_IF_SET(FSR, fsr.combiner_ops[0]);
+ COPY_IF_SET(FSR, fsr.combiner_ops[1]);
+
+ COPY_IF_SET(MS_RASTERIZATION_SAMPLES, ms.rasterization_samples);
+ COPY_IF_SET(MS_SAMPLE_MASK, ms.sample_mask);
+ COPY_IF_SET(MS_ALPHA_TO_COVERAGE_ENABLE, ms.alpha_to_coverage_enable);
+ COPY_IF_SET(MS_ALPHA_TO_ONE_ENABLE, ms.alpha_to_one_enable);
+ COPY_IF_SET(MS_SAMPLE_LOCATIONS_ENABLE, ms.sample_locations_enable);
+
+ if (IS_SET_IN_SRC(MS_SAMPLE_LOCATIONS)) {
+ assert(dst->ms.sample_locations != NULL);
+ COPY_MEMBER(MS_SAMPLE_LOCATIONS, ms.sample_locations->per_pixel);
+ COPY_MEMBER(MS_SAMPLE_LOCATIONS, ms.sample_locations->grid_size.width);
+ COPY_MEMBER(MS_SAMPLE_LOCATIONS, ms.sample_locations->grid_size.height);
+ const uint32_t sl_count = src->ms.sample_locations->per_pixel *
+ src->ms.sample_locations->grid_size.width *
+ src->ms.sample_locations->grid_size.height;
+ COPY_ARRAY(MS_SAMPLE_LOCATIONS, ms.sample_locations->locations, sl_count);
+ }
+
+ COPY_IF_SET(DS_DEPTH_TEST_ENABLE, ds.depth.test_enable);
+ COPY_IF_SET(DS_DEPTH_WRITE_ENABLE, ds.depth.write_enable);
+ COPY_IF_SET(DS_DEPTH_COMPARE_OP, ds.depth.compare_op);
+ COPY_IF_SET(DS_DEPTH_BOUNDS_TEST_ENABLE, ds.depth.bounds_test.enable);
+ if (IS_SET_IN_SRC(DS_DEPTH_BOUNDS_TEST_BOUNDS)) {
+ COPY_MEMBER(DS_DEPTH_BOUNDS_TEST_BOUNDS, ds.depth.bounds_test.min);
+ COPY_MEMBER(DS_DEPTH_BOUNDS_TEST_BOUNDS, ds.depth.bounds_test.max);
+ }
+
+ COPY_IF_SET(DS_STENCIL_TEST_ENABLE, ds.stencil.test_enable);
+ if (IS_SET_IN_SRC(DS_STENCIL_OP)) {
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.front.op.fail);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.front.op.pass);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.front.op.depth_fail);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.front.op.compare);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.back.op.fail);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.back.op.pass);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.back.op.depth_fail);
+ COPY_MEMBER(DS_STENCIL_OP, ds.stencil.back.op.compare);
+ }
+ if (IS_SET_IN_SRC(DS_STENCIL_COMPARE_MASK)) {
+ COPY_MEMBER(DS_STENCIL_COMPARE_MASK, ds.stencil.front.compare_mask);
+ COPY_MEMBER(DS_STENCIL_COMPARE_MASK, ds.stencil.back.compare_mask);
+ }
+ if (IS_SET_IN_SRC(DS_STENCIL_WRITE_MASK)) {
+ COPY_MEMBER(DS_STENCIL_WRITE_MASK, ds.stencil.front.write_mask);
+ COPY_MEMBER(DS_STENCIL_WRITE_MASK, ds.stencil.back.write_mask);
+ }
+ if (IS_SET_IN_SRC(DS_STENCIL_REFERENCE)) {
+ COPY_MEMBER(DS_STENCIL_REFERENCE, ds.stencil.front.reference);
+ COPY_MEMBER(DS_STENCIL_REFERENCE, ds.stencil.back.reference);
+ }
+
+ COPY_IF_SET(CB_LOGIC_OP_ENABLE, cb.logic_op_enable);
+ COPY_IF_SET(CB_LOGIC_OP, cb.logic_op);
+ COPY_IF_SET(CB_ATTACHMENT_COUNT, cb.attachment_count);
+ COPY_IF_SET(CB_COLOR_WRITE_ENABLES, cb.color_write_enables);
+ if (IS_SET_IN_SRC(CB_BLEND_ENABLES)) {
+ for (uint32_t a = 0; a < src->cb.attachment_count; a++)
+ COPY_MEMBER(CB_BLEND_ENABLES, cb.attachments[a].blend_enable);
+ }
+ if (IS_SET_IN_SRC(CB_BLEND_EQUATIONS)) {
+ for (uint32_t a = 0; a < src->cb.attachment_count; a++) {
+ COPY_MEMBER(CB_BLEND_EQUATIONS,
+ cb.attachments[a].src_color_blend_factor);
+ COPY_MEMBER(CB_BLEND_EQUATIONS,
+ cb.attachments[a].dst_color_blend_factor);
+ COPY_MEMBER(CB_BLEND_EQUATIONS,
+ cb.attachments[a].src_alpha_blend_factor);
+ COPY_MEMBER(CB_BLEND_EQUATIONS,
+ cb.attachments[a].dst_alpha_blend_factor);
+ COPY_MEMBER(CB_BLEND_EQUATIONS, cb.attachments[a].color_blend_op);
+ COPY_MEMBER(CB_BLEND_EQUATIONS, cb.attachments[a].alpha_blend_op);
+ }
+ }
+ if (IS_SET_IN_SRC(CB_WRITE_MASKS)) {
+ for (uint32_t a = 0; a < src->cb.attachment_count; a++)
+ COPY_MEMBER(CB_WRITE_MASKS, cb.attachments[a].write_mask);
+ }
+ if (IS_SET_IN_SRC(CB_BLEND_CONSTANTS))
+ COPY_ARRAY(CB_BLEND_CONSTANTS, cb.blend_constants, 4);
+
+ COPY_IF_SET(RP_ATTACHMENTS, rp.attachments);
+
+   if (IS_SET_IN_SRC(INPUT_ATTACHMENT_MAP)) {
+      COPY_MEMBER(INPUT_ATTACHMENT_MAP, ial.depth_att);
+      COPY_MEMBER(INPUT_ATTACHMENT_MAP, ial.stencil_att);
+      COPY_ARRAY(INPUT_ATTACHMENT_MAP, ial.color_map,
+                 MESA_VK_MAX_COLOR_ATTACHMENTS);
+   }
+
+   if (IS_SET_IN_SRC(COLOR_ATTACHMENT_MAP)) {
+      COPY_ARRAY(COLOR_ATTACHMENT_MAP, cal.color_map,
+                 MESA_VK_MAX_COLOR_ATTACHMENTS);
+   }
+
+ COPY_IF_SET(ATTACHMENT_FEEDBACK_LOOP_ENABLE, feedback_loops);
+
+#undef IS_SET_IN_SRC
+#undef MARK_DIRTY
+#undef COPY_MEMBER
+#undef COPY_ARRAY
+#undef COPY_IF_SET
+
+ for (uint32_t w = 0; w < ARRAY_SIZE(dst->dirty); w++) {
+ /* If it's in the source but isn't set in the destination at all, mark
+ * it dirty. It's possible that the default values just happen to equal
+ * the value from src.
+ */
+ dst->dirty[w] |= src->set[w] & ~dst->set[w];
+
+ /* Everything that was in the source is now in the destination */
+ dst->set[w] |= src->set[w];
+ }
+}
+
+void
+vk_cmd_set_dynamic_graphics_state(struct vk_command_buffer *cmd,
+ const struct vk_dynamic_graphics_state *state)
+{
+ vk_dynamic_graphics_state_copy(&cmd->dynamic_graphics_state, state);
+}
+
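+/* dyn->vi is dereferenced directly here: a driver that exposes
+ * VK_EXT_vertex_input_dynamic_state is expected to have pointed vi at
+ * driver-owned storage (see the comment in vk_dynamic_graphics_state_fill()
+ * above).
+ */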
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetVertexInputEXT(VkCommandBuffer commandBuffer,
+ uint32_t vertexBindingDescriptionCount,
+ const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions,
+ uint32_t vertexAttributeDescriptionCount,
+ const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ uint32_t bindings_valid = 0;
+ for (uint32_t i = 0; i < vertexBindingDescriptionCount; i++) {
+ const VkVertexInputBindingDescription2EXT *desc =
+ &pVertexBindingDescriptions[i];
+
+ assert(desc->binding < MESA_VK_MAX_VERTEX_BINDINGS);
+ assert(desc->stride <= MESA_VK_MAX_VERTEX_BINDING_STRIDE);
+ assert(desc->inputRate <= UINT8_MAX);
+
+ const uint32_t b = desc->binding;
+ bindings_valid |= BITFIELD_BIT(b);
+ dyn->vi->bindings[b].stride = desc->stride;
+ dyn->vi->bindings[b].input_rate = desc->inputRate;
+ dyn->vi->bindings[b].divisor = desc->divisor;
+
+      /* Also set vi_binding_strides in case a driver is keying off that */
+ dyn->vi_binding_strides[b] = desc->stride;
+ }
+
+ dyn->vi->bindings_valid = bindings_valid;
+ SET_DYN_VALUE(dyn, VI_BINDINGS_VALID, vi_bindings_valid, bindings_valid);
+
+ uint32_t attributes_valid = 0;
+ for (uint32_t i = 0; i < vertexAttributeDescriptionCount; i++) {
+ const VkVertexInputAttributeDescription2EXT *desc =
+ &pVertexAttributeDescriptions[i];
+
+ assert(desc->location < MESA_VK_MAX_VERTEX_ATTRIBUTES);
+ assert(desc->binding < MESA_VK_MAX_VERTEX_BINDINGS);
+ assert(bindings_valid & BITFIELD_BIT(desc->binding));
+
+ const uint32_t a = desc->location;
+ attributes_valid |= BITFIELD_BIT(a);
+ dyn->vi->attributes[a].binding = desc->binding;
+ dyn->vi->attributes[a].format = desc->format;
+ dyn->vi->attributes[a].offset = desc->offset;
+ }
+ dyn->vi->attributes_valid = attributes_valid;
+
+ BITSET_SET(dyn->set, MESA_VK_DYNAMIC_VI);
+ BITSET_SET(dyn->set, MESA_VK_DYNAMIC_VI_BINDING_STRIDES);
+ BITSET_SET(dyn->dirty, MESA_VK_DYNAMIC_VI);
+ BITSET_SET(dyn->dirty, MESA_VK_DYNAMIC_VI_BINDING_STRIDES);
+}
+
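+/* Helper for setting just the dynamic per-binding strides, e.g. from a
+ * driver's vkCmdBindVertexBuffers2 implementation where pStrides may carry
+ * dynamic stride state.
+ */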
+void
+vk_cmd_set_vertex_binding_strides(struct vk_command_buffer *cmd,
+ uint32_t first_binding,
+ uint32_t binding_count,
+ const VkDeviceSize *strides)
+{
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ for (uint32_t i = 0; i < binding_count; i++) {
+ SET_DYN_VALUE(dyn, VI_BINDING_STRIDES,
+ vi_binding_strides[first_binding + i], strides[i]);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetPrimitiveTopology(VkCommandBuffer commandBuffer,
+ VkPrimitiveTopology primitiveTopology)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, IA_PRIMITIVE_TOPOLOGY,
+ ia.primitive_topology, primitiveTopology);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetPrimitiveRestartEnable(VkCommandBuffer commandBuffer,
+ VkBool32 primitiveRestartEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, IA_PRIMITIVE_RESTART_ENABLE,
+ ia.primitive_restart_enable, primitiveRestartEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer,
+ uint32_t patchControlPoints)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, TS_PATCH_CONTROL_POINTS,
+ ts.patch_control_points, patchControlPoints);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetTessellationDomainOriginEXT(VkCommandBuffer commandBuffer,
+ VkTessellationDomainOrigin domainOrigin)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, TS_DOMAIN_ORIGIN, ts.domain_origin, domainOrigin);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetViewport(VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport *pViewports)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_ARRAY(dyn, VP_VIEWPORTS, vp.viewports,
+ firstViewport, viewportCount, pViewports);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetViewportWithCount(VkCommandBuffer commandBuffer,
+ uint32_t viewportCount,
+ const VkViewport *pViewports)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, VP_VIEWPORT_COUNT, vp.viewport_count, viewportCount);
+ SET_DYN_ARRAY(dyn, VP_VIEWPORTS, vp.viewports, 0, viewportCount, pViewports);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetScissor(VkCommandBuffer commandBuffer,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D *pScissors)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_ARRAY(dyn, VP_SCISSORS, vp.scissors,
+ firstScissor, scissorCount, pScissors);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetScissorWithCount(VkCommandBuffer commandBuffer,
+ uint32_t scissorCount,
+ const VkRect2D *pScissors)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, VP_SCISSOR_COUNT, vp.scissor_count, scissorCount);
+ SET_DYN_ARRAY(dyn, VP_SCISSORS, vp.scissors, 0, scissorCount, pScissors);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthClipNegativeOneToOneEXT(VkCommandBuffer commandBuffer,
+ VkBool32 negativeOneToOne)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE,
+ vp.depth_clip_negative_one_to_one, negativeOneToOne);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstDiscardRectangle,
+ uint32_t discardRectangleCount,
+ const VkRect2D *pDiscardRectangles)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, DR_RECTANGLES, dr.rectangle_count, discardRectangleCount);
+ SET_DYN_ARRAY(dyn, DR_RECTANGLES, dr.rectangles, firstDiscardRectangle,
+ discardRectangleCount, pDiscardRectangles);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetRasterizerDiscardEnable(VkCommandBuffer commandBuffer,
+ VkBool32 rasterizerDiscardEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, RS_RASTERIZER_DISCARD_ENABLE,
+ rs.rasterizer_discard_enable, rasterizerDiscardEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthClampEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 depthClampEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, RS_DEPTH_CLAMP_ENABLE,
+ rs.depth_clamp_enable, depthClampEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthClipEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 depthClipEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_DEPTH_CLIP_ENABLE, rs.depth_clip_enable,
+ depthClipEnable ? VK_MESA_DEPTH_CLIP_ENABLE_TRUE :
+ VK_MESA_DEPTH_CLIP_ENABLE_FALSE);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetPolygonModeEXT(VkCommandBuffer commandBuffer,
+ VkPolygonMode polygonMode)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_POLYGON_MODE, rs.polygon_mode, polygonMode);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCullMode(VkCommandBuffer commandBuffer,
+ VkCullModeFlags cullMode)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_CULL_MODE, rs.cull_mode, cullMode);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetFrontFace(VkCommandBuffer commandBuffer,
+ VkFrontFace frontFace)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_FRONT_FACE, rs.front_face, frontFace);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetConservativeRasterizationModeEXT(
+ VkCommandBuffer commandBuffer,
+ VkConservativeRasterizationModeEXT conservativeRasterizationMode)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_CONSERVATIVE_MODE, rs.conservative_mode,
+ conservativeRasterizationMode);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetExtraPrimitiveOverestimationSizeEXT(
+ VkCommandBuffer commandBuffer,
+ float extraPrimitiveOverestimationSize)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE,
+ rs.extra_primitive_overestimation_size,
+ extraPrimitiveOverestimationSize);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetProvokingVertexModeEXT(VkCommandBuffer commandBuffer,
+ VkProvokingVertexModeEXT provokingVertexMode)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_PROVOKING_VERTEX,
+ rs.provoking_vertex, provokingVertexMode);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetAttachmentFeedbackLoopEnableEXT(VkCommandBuffer commandBuffer,
+ VkImageAspectFlags aspectMask)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, ATTACHMENT_FEEDBACK_LOOP_ENABLE,
+ feedback_loops, aspectMask);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetRasterizationStreamEXT(VkCommandBuffer commandBuffer,
+ uint32_t rasterizationStream)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_RASTERIZATION_STREAM,
+ rs.rasterization_stream, rasterizationStream);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthBiasEnable(VkCommandBuffer commandBuffer,
+ VkBool32 depthBiasEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, RS_DEPTH_BIAS_ENABLE,
+ rs.depth_bias.enable, depthBiasEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthBias(VkCommandBuffer commandBuffer,
+ float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+
+ VkDepthBiasInfoEXT depth_bias_info = {
+ .sType = VK_STRUCTURE_TYPE_DEPTH_BIAS_INFO_EXT,
+ .depthBiasConstantFactor = depthBiasConstantFactor,
+ .depthBiasClamp = depthBiasClamp,
+ .depthBiasSlopeFactor = depthBiasSlopeFactor,
+ };
+
+ cmd->base.device->dispatch_table.CmdSetDepthBias2EXT(commandBuffer,
+ &depth_bias_info);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetLineWidth(VkCommandBuffer commandBuffer,
+ float lineWidth)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_LINE_WIDTH, rs.line.width, lineWidth);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetLineRasterizationModeEXT(VkCommandBuffer commandBuffer,
+ VkLineRasterizationModeKHR lineRasterizationMode)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_LINE_MODE, rs.line.mode, lineRasterizationMode);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetLineStippleEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 stippledLineEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, RS_LINE_STIPPLE_ENABLE,
+ rs.line.stipple.enable, stippledLineEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetLineStippleKHR(VkCommandBuffer commandBuffer,
+ uint32_t lineStippleFactor,
+ uint16_t lineStipplePattern)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_LINE_STIPPLE,
+ rs.line.stipple.factor, lineStippleFactor);
+ SET_DYN_VALUE(dyn, RS_LINE_STIPPLE,
+ rs.line.stipple.pattern, lineStipplePattern);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer,
+ const VkExtent2D *pFragmentSize,
+ const VkFragmentShadingRateCombinerOpKHR combinerOps[2])
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, FSR, fsr.fragment_size.width, pFragmentSize->width);
+ SET_DYN_VALUE(dyn, FSR, fsr.fragment_size.height, pFragmentSize->height);
+ SET_DYN_VALUE(dyn, FSR, fsr.combiner_ops[0], combinerOps[0]);
+ SET_DYN_VALUE(dyn, FSR, fsr.combiner_ops[1], combinerOps[1]);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetRasterizationSamplesEXT(VkCommandBuffer commandBuffer,
+ VkSampleCountFlagBits rasterizationSamples)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ assert(rasterizationSamples <= MESA_VK_MAX_SAMPLES);
+
+ SET_DYN_VALUE(dyn, MS_RASTERIZATION_SAMPLES,
+ ms.rasterization_samples, rasterizationSamples);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetSampleMaskEXT(VkCommandBuffer commandBuffer,
+ VkSampleCountFlagBits samples,
+ const VkSampleMask *pSampleMask)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ VkSampleMask sample_mask = *pSampleMask & BITFIELD_MASK(MESA_VK_MAX_SAMPLES);
+
+ SET_DYN_VALUE(dyn, MS_SAMPLE_MASK, ms.sample_mask, sample_mask);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetAlphaToCoverageEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 alphaToCoverageEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, MS_ALPHA_TO_COVERAGE_ENABLE,
+ ms.alpha_to_coverage_enable, alphaToCoverageEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetAlphaToOneEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 alphaToOneEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, MS_ALPHA_TO_ONE_ENABLE,
+ ms.alpha_to_one_enable, alphaToOneEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
+ const VkSampleLocationsInfoEXT *pSampleLocationsInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, MS_SAMPLE_LOCATIONS,
+ ms.sample_locations->per_pixel,
+ pSampleLocationsInfo->sampleLocationsPerPixel);
+ SET_DYN_VALUE(dyn, MS_SAMPLE_LOCATIONS,
+ ms.sample_locations->grid_size.width,
+ pSampleLocationsInfo->sampleLocationGridSize.width);
+ SET_DYN_VALUE(dyn, MS_SAMPLE_LOCATIONS,
+ ms.sample_locations->grid_size.height,
+ pSampleLocationsInfo->sampleLocationGridSize.height);
+
+ assert(pSampleLocationsInfo->sampleLocationsCount ==
+ pSampleLocationsInfo->sampleLocationsPerPixel *
+ pSampleLocationsInfo->sampleLocationGridSize.width *
+ pSampleLocationsInfo->sampleLocationGridSize.height);
+
+ assert(pSampleLocationsInfo->sampleLocationsCount <=
+ MESA_VK_MAX_SAMPLE_LOCATIONS);
+
+ SET_DYN_ARRAY(dyn, MS_SAMPLE_LOCATIONS,
+ ms.sample_locations->locations,
+ 0, pSampleLocationsInfo->sampleLocationsCount,
+ pSampleLocationsInfo->pSampleLocations);
+}
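+
+/* A worked example of the count check above: with
+ * sampleLocationsPerPixel = VK_SAMPLE_COUNT_4_BIT (4) and a 2x2
+ * sampleLocationGridSize, sampleLocationsCount must be 4 * 2 * 2 = 16
+ * VkSampleLocationEXT entries. */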
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetSampleLocationsEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 sampleLocationsEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, MS_SAMPLE_LOCATIONS_ENABLE,
+ ms.sample_locations_enable, sampleLocationsEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthTestEnable(VkCommandBuffer commandBuffer,
+ VkBool32 depthTestEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, DS_DEPTH_TEST_ENABLE,
+ ds.depth.test_enable, depthTestEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthWriteEnable(VkCommandBuffer commandBuffer,
+ VkBool32 depthWriteEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, DS_DEPTH_WRITE_ENABLE,
+ ds.depth.write_enable, depthWriteEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthCompareOp(VkCommandBuffer commandBuffer,
+ VkCompareOp depthCompareOp)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, DS_DEPTH_COMPARE_OP, ds.depth.compare_op,
+ depthCompareOp);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthBoundsTestEnable(VkCommandBuffer commandBuffer,
+ VkBool32 depthBoundsTestEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, DS_DEPTH_BOUNDS_TEST_ENABLE,
+ ds.depth.bounds_test.enable, depthBoundsTestEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
+ float minDepthBounds,
+ float maxDepthBounds)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, DS_DEPTH_BOUNDS_TEST_BOUNDS,
+ ds.depth.bounds_test.min, minDepthBounds);
+ SET_DYN_VALUE(dyn, DS_DEPTH_BOUNDS_TEST_BOUNDS,
+ ds.depth.bounds_test.max, maxDepthBounds);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetStencilTestEnable(VkCommandBuffer commandBuffer,
+ VkBool32 stencilTestEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, DS_STENCIL_TEST_ENABLE,
+ ds.stencil.test_enable, stencilTestEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetStencilOp(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ VkStencilOp failOp,
+ VkStencilOp passOp,
+ VkStencilOp depthFailOp,
+ VkCompareOp compareOp)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.front.op.fail, failOp);
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.front.op.pass, passOp);
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.front.op.depth_fail, depthFailOp);
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.front.op.compare, compareOp);
+ }
+
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.back.op.fail, failOp);
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.back.op.pass, passOp);
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.back.op.depth_fail, depthFailOp);
+ SET_DYN_VALUE(dyn, DS_STENCIL_OP, ds.stencil.back.op.compare, compareOp);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t compareMask)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ /* We assume 8-bit stencil always */
+ STATIC_ASSERT(sizeof(dyn->ds.stencil.front.write_mask) == 1);
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_COMPARE_MASK,
+ ds.stencil.front.compare_mask, (uint8_t)compareMask);
+ }
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_COMPARE_MASK,
+ ds.stencil.back.compare_mask, (uint8_t)compareMask);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t writeMask)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ /* We assume 8-bit stencil always */
+ STATIC_ASSERT(sizeof(dyn->ds.stencil.front.write_mask) == 1);
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_WRITE_MASK,
+ ds.stencil.front.write_mask, (uint8_t)writeMask);
+ }
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_WRITE_MASK,
+ ds.stencil.back.write_mask, (uint8_t)writeMask);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetStencilReference(VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t reference)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ /* We assume 8-bit stencil always */
+ STATIC_ASSERT(sizeof(dyn->ds.stencil.front.write_mask) == 1);
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_REFERENCE,
+ ds.stencil.front.reference, (uint8_t)reference);
+ }
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
+ SET_DYN_VALUE(dyn, DS_STENCIL_REFERENCE,
+ ds.stencil.back.reference, (uint8_t)reference);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetLogicOpEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 logicOpEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_BOOL(dyn, CB_LOGIC_OP_ENABLE, cb.logic_op_enable, logicOpEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetLogicOpEXT(VkCommandBuffer commandBuffer,
+ VkLogicOp logicOp)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, CB_LOGIC_OP, cb.logic_op, logicOp);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetColorWriteEnableEXT(VkCommandBuffer commandBuffer,
+ uint32_t attachmentCount,
+ const VkBool32 *pColorWriteEnables)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ assert(attachmentCount <= MESA_VK_MAX_COLOR_ATTACHMENTS);
+
+ uint8_t color_write_enables = 0;
+ for (uint32_t a = 0; a < attachmentCount; a++) {
+ if (pColorWriteEnables[a])
+ color_write_enables |= BITFIELD_BIT(a);
+ }
+
+ SET_DYN_VALUE(dyn, CB_COLOR_WRITE_ENABLES,
+ cb.color_write_enables, color_write_enables);
+}
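+
+/* Packing example for the function above (illustrative): attachmentCount = 3
+ * with pColorWriteEnables = { VK_TRUE, VK_FALSE, VK_TRUE } yields
+ * cb.color_write_enables = 0b101 = 0x5, i.e. writes enabled for attachments
+ * 0 and 2 only. */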
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetColorBlendEnableEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstAttachment,
+ uint32_t attachmentCount,
+ const VkBool32 *pColorBlendEnables)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ for (uint32_t i = 0; i < attachmentCount; i++) {
+ uint32_t a = firstAttachment + i;
+ assert(a < ARRAY_SIZE(dyn->cb.attachments));
+
+ SET_DYN_BOOL(dyn, CB_BLEND_ENABLES,
+ cb.attachments[a].blend_enable, pColorBlendEnables[i]);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetColorBlendEquationEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstAttachment,
+ uint32_t attachmentCount,
+ const VkColorBlendEquationEXT *pColorBlendEquations)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ for (uint32_t i = 0; i < attachmentCount; i++) {
+ uint32_t a = firstAttachment + i;
+ assert(a < ARRAY_SIZE(dyn->cb.attachments));
+
+ SET_DYN_VALUE(dyn, CB_BLEND_EQUATIONS,
+ cb.attachments[a].src_color_blend_factor,
+ pColorBlendEquations[i].srcColorBlendFactor);
+
+ SET_DYN_VALUE(dyn, CB_BLEND_EQUATIONS,
+ cb.attachments[a].dst_color_blend_factor,
+ pColorBlendEquations[i].dstColorBlendFactor);
+
+ SET_DYN_VALUE(dyn, CB_BLEND_EQUATIONS,
+ cb.attachments[a].color_blend_op,
+ pColorBlendEquations[i].colorBlendOp);
+
+ SET_DYN_VALUE(dyn, CB_BLEND_EQUATIONS,
+ cb.attachments[a].src_alpha_blend_factor,
+ pColorBlendEquations[i].srcAlphaBlendFactor);
+
+ SET_DYN_VALUE(dyn, CB_BLEND_EQUATIONS,
+ cb.attachments[a].dst_alpha_blend_factor,
+ pColorBlendEquations[i].dstAlphaBlendFactor);
+
+ SET_DYN_VALUE(dyn, CB_BLEND_EQUATIONS,
+ cb.attachments[a].alpha_blend_op,
+ pColorBlendEquations[i].alphaBlendOp);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetColorWriteMaskEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstAttachment,
+ uint32_t attachmentCount,
+ const VkColorComponentFlags *pColorWriteMasks)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ for (uint32_t i = 0; i < attachmentCount; i++) {
+ uint32_t a = firstAttachment + i;
+ assert(a < ARRAY_SIZE(dyn->cb.attachments));
+
+ SET_DYN_VALUE(dyn, CB_WRITE_MASKS,
+ cb.attachments[a].write_mask, pColorWriteMasks[i]);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
+ const float blendConstants[4])
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_ARRAY(dyn, CB_BLEND_CONSTANTS, cb.blend_constants,
+ 0, 4, blendConstants);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetColorBlendAdvancedEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstAttachment,
+ uint32_t attachmentCount,
+ const VkColorBlendAdvancedEXT* pColorBlendAdvanced)
+{
+ unreachable("VK_EXT_blend_operation_advanced unsupported");
+}
+
+void
+vk_cmd_set_cb_attachment_count(struct vk_command_buffer *cmd,
+ uint32_t attachment_count)
+{
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, CB_ATTACHMENT_COUNT, cb.attachment_count, attachment_count);
+}
+
+void
+vk_cmd_set_rp_attachments(struct vk_command_buffer *cmd,
+ enum vk_rp_attachment_flags attachments)
+{
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RP_ATTACHMENTS, rp.attachments, attachments);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDiscardRectangleEnableEXT(VkCommandBuffer commandBuffer,
+ VkBool32 discardRectangleEnable)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, DR_ENABLE, dr.enable, discardRectangleEnable);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDiscardRectangleModeEXT(VkCommandBuffer commandBuffer,
+ VkDiscardRectangleModeEXT discardRectangleMode)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, DR_MODE, dr.mode, discardRectangleMode);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetDepthBias2EXT(
+ VkCommandBuffer commandBuffer,
+ const VkDepthBiasInfoEXT* pDepthBiasInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.constant, pDepthBiasInfo->depthBiasConstantFactor);
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.clamp, pDepthBiasInfo->depthBiasClamp);
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.slope, pDepthBiasInfo->depthBiasSlopeFactor);
+
+ /** From the Vulkan 1.3.254 spec:
+ *
+ * "If pNext does not contain a VkDepthBiasRepresentationInfoEXT
+ * structure, then this command is equivalent to including a
+ * VkDepthBiasRepresentationInfoEXT with depthBiasExact set to VK_FALSE
+ * and depthBiasRepresentation set to
+ * VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORMAT_EXT."
+ */
+ const VkDepthBiasRepresentationInfoEXT *dbr_info =
+ vk_find_struct_const(pDepthBiasInfo->pNext, DEPTH_BIAS_REPRESENTATION_INFO_EXT);
+ if (dbr_info) {
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.representation, dbr_info->depthBiasRepresentation);
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.exact, dbr_info->depthBiasExact);
+ } else {
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.representation,
+ VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORMAT_EXT);
+ SET_DYN_VALUE(dyn, RS_DEPTH_BIAS_FACTORS,
+ rs.depth_bias.exact, false);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetRenderingAttachmentLocationsKHR(
+ VkCommandBuffer commandBuffer,
+ const VkRenderingAttachmentLocationInfoKHR* pLocationInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ assert(pLocationInfo->colorAttachmentCount <= MESA_VK_MAX_COLOR_ATTACHMENTS);
+ for (uint32_t i = 0; i < pLocationInfo->colorAttachmentCount; i++) {
+ uint8_t val =
+ pLocationInfo->pColorAttachmentLocations[i] == VK_ATTACHMENT_UNUSED ?
+ MESA_VK_ATTACHMENT_UNUSED : pLocationInfo->pColorAttachmentLocations[i];
+ SET_DYN_VALUE(dyn, COLOR_ATTACHMENT_MAP, cal.color_map[i], val);
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetRenderingInputAttachmentIndicesKHR(
+ VkCommandBuffer commandBuffer,
+ const VkRenderingInputAttachmentIndexInfoKHR* pLocationInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd, commandBuffer);
+ struct vk_dynamic_graphics_state *dyn = &cmd->dynamic_graphics_state;
+
+ assert(pLocationInfo->colorAttachmentCount <= MESA_VK_MAX_COLOR_ATTACHMENTS);
+ for (uint32_t i = 0; i < pLocationInfo->colorAttachmentCount; i++) {
+ uint8_t val =
+ pLocationInfo->pColorAttachmentInputIndices[i] == VK_ATTACHMENT_UNUSED ?
+ MESA_VK_ATTACHMENT_UNUSED : pLocationInfo->pColorAttachmentInputIndices[i];
+ SET_DYN_VALUE(dyn, INPUT_ATTACHMENT_MAP,
+ ial.color_map[i], val);
+ }
+
+ uint8_t depth_att =
+ (pLocationInfo->pDepthInputAttachmentIndex == NULL ||
+ *pLocationInfo->pDepthInputAttachmentIndex == VK_ATTACHMENT_UNUSED) ?
+ MESA_VK_ATTACHMENT_UNUSED : *pLocationInfo->pDepthInputAttachmentIndex;
+ uint8_t stencil_att =
+ (pLocationInfo->pStencilInputAttachmentIndex == NULL ||
+ *pLocationInfo->pStencilInputAttachmentIndex == VK_ATTACHMENT_UNUSED) ?
+ MESA_VK_ATTACHMENT_UNUSED : *pLocationInfo->pStencilInputAttachmentIndex;
+ SET_DYN_VALUE(dyn, INPUT_ATTACHMENT_MAP, ial.depth_att, depth_att);
+ SET_DYN_VALUE(dyn, INPUT_ATTACHMENT_MAP, ial.stencil_att, stencil_att);
+}
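+
+/* Mapping example for the two entry points above (illustrative): with
+ * pColorAttachmentLocations = { 1, VK_ATTACHMENT_UNUSED, 0 }, cal.color_map
+ * becomes { 1, MESA_VK_ATTACHMENT_UNUSED, 0 }.  VK_ATTACHMENT_UNUSED (~0u)
+ * is folded to the 8-bit MESA_VK_ATTACHMENT_UNUSED sentinel so it survives
+ * the uint8_t storage; the input-attachment variant does the same for
+ * ial.color_map, ial.depth_att and ial.stencil_att. */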
+
+/* These are stubs required by VK_EXT_shader_object */
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetViewportWScalingEnableNV(
+ VkCommandBuffer commandBuffer,
+ VkBool32 viewportWScalingEnable)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCoverageReductionModeNV(
+ VkCommandBuffer commandBuffer,
+ VkCoverageReductionModeNV coverageReductionMode)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCoverageToColorEnableNV(
+ VkCommandBuffer commandBuffer,
+ VkBool32 coverageToColorEnable)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCoverageToColorLocationNV(
+ VkCommandBuffer commandBuffer,
+ uint32_t coverageToColorLocation)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCoverageModulationModeNV(
+ VkCommandBuffer commandBuffer,
+ VkCoverageModulationModeNV coverageModulationMode)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCoverageModulationTableEnableNV(
+ VkCommandBuffer commandBuffer,
+ VkBool32 coverageModulationTableEnable)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetCoverageModulationTableNV(
+ VkCommandBuffer commandBuffer,
+ uint32_t coverageModulationTableCount,
+ const float* pCoverageModulationTable)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetRepresentativeFragmentTestEnableNV(
+ VkCommandBuffer commandBuffer,
+ VkBool32 representativeFragmentTestEnable)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetShadingRateImageEnableNV(
+ VkCommandBuffer commandBuffer,
+ VkBool32 shadingRateImageEnable)
+{
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetViewportSwizzleNV(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewportSwizzleNV* pViewportSwizzles)
+{
+}
+
+const char *
+vk_dynamic_graphic_state_to_str(enum mesa_vk_dynamic_graphics_state state)
+{
+#define NAME(name) \
+ case MESA_VK_DYNAMIC_##name: return #name
+
+ switch (state) {
+ NAME(VI);
+ NAME(VI_BINDINGS_VALID);
+ NAME(VI_BINDING_STRIDES);
+ NAME(IA_PRIMITIVE_TOPOLOGY);
+ NAME(IA_PRIMITIVE_RESTART_ENABLE);
+ NAME(TS_PATCH_CONTROL_POINTS);
+ NAME(TS_DOMAIN_ORIGIN);
+ NAME(VP_VIEWPORT_COUNT);
+ NAME(VP_VIEWPORTS);
+ NAME(VP_SCISSOR_COUNT);
+ NAME(VP_SCISSORS);
+ NAME(VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE);
+ NAME(DR_RECTANGLES);
+ NAME(DR_MODE);
+ NAME(DR_ENABLE);
+ NAME(RS_RASTERIZER_DISCARD_ENABLE);
+ NAME(RS_DEPTH_CLAMP_ENABLE);
+ NAME(RS_DEPTH_CLIP_ENABLE);
+ NAME(RS_POLYGON_MODE);
+ NAME(RS_CULL_MODE);
+ NAME(RS_FRONT_FACE);
+ NAME(RS_CONSERVATIVE_MODE);
+ NAME(RS_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE);
+ NAME(RS_RASTERIZATION_ORDER_AMD);
+ NAME(RS_PROVOKING_VERTEX);
+ NAME(RS_RASTERIZATION_STREAM);
+ NAME(RS_DEPTH_BIAS_ENABLE);
+ NAME(RS_DEPTH_BIAS_FACTORS);
+ NAME(RS_LINE_WIDTH);
+ NAME(RS_LINE_MODE);
+ NAME(RS_LINE_STIPPLE_ENABLE);
+ NAME(RS_LINE_STIPPLE);
+ NAME(FSR);
+ NAME(MS_RASTERIZATION_SAMPLES);
+ NAME(MS_SAMPLE_MASK);
+ NAME(MS_ALPHA_TO_COVERAGE_ENABLE);
+ NAME(MS_ALPHA_TO_ONE_ENABLE);
+ NAME(MS_SAMPLE_LOCATIONS_ENABLE);
+ NAME(MS_SAMPLE_LOCATIONS);
+ NAME(DS_DEPTH_TEST_ENABLE);
+ NAME(DS_DEPTH_WRITE_ENABLE);
+ NAME(DS_DEPTH_COMPARE_OP);
+ NAME(DS_DEPTH_BOUNDS_TEST_ENABLE);
+ NAME(DS_DEPTH_BOUNDS_TEST_BOUNDS);
+ NAME(DS_STENCIL_TEST_ENABLE);
+ NAME(DS_STENCIL_OP);
+ NAME(DS_STENCIL_COMPARE_MASK);
+ NAME(DS_STENCIL_WRITE_MASK);
+ NAME(DS_STENCIL_REFERENCE);
+ NAME(CB_LOGIC_OP_ENABLE);
+ NAME(CB_LOGIC_OP);
+ NAME(CB_ATTACHMENT_COUNT);
+ NAME(CB_COLOR_WRITE_ENABLES);
+ NAME(CB_BLEND_ENABLES);
+ NAME(CB_BLEND_EQUATIONS);
+ NAME(CB_WRITE_MASKS);
+ NAME(CB_BLEND_CONSTANTS);
+   NAME(RP_ATTACHMENTS);
+   NAME(ATTACHMENT_FEEDBACK_LOOP_ENABLE);
+   NAME(COLOR_ATTACHMENT_MAP);
+   NAME(INPUT_ATTACHMENT_MAP);
+ default: unreachable("Invalid state");
+ }
+
+#undef NAME
+}
diff --git a/src/vulkan/runtime/vk_graphics_state.h b/src/vulkan/runtime/vk_graphics_state.h
new file mode 100644
index 00000000000..9fa4bf7b638
--- /dev/null
+++ b/src/vulkan/runtime/vk_graphics_state.h
@@ -0,0 +1,1274 @@
+/*
+ * Copyright © 2022 Collabora, Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_GRAPHICS_STATE_H
+#define VK_GRAPHICS_STATE_H
+
+#include "vulkan/vulkan_core.h"
+
+#include "vk_limits.h"
+
+#include "util/bitset.h"
+#include "util/enum_operators.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_command_buffer;
+struct vk_device;
+
+/** Enumeration of all Vulkan dynamic graphics states
+ *
+ * Enumerants are named with both the abbreviation of the state group to
+ * which the state belongs and the name of the state itself. These are
+ * intended to pretty closely match the VkDynamicState enum but may not match
+ * perfectly all the time.
+ */
+enum mesa_vk_dynamic_graphics_state {
+ MESA_VK_DYNAMIC_VI,
+ MESA_VK_DYNAMIC_VI_BINDINGS_VALID,
+ MESA_VK_DYNAMIC_VI_BINDING_STRIDES,
+ MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY,
+ MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE,
+ MESA_VK_DYNAMIC_TS_PATCH_CONTROL_POINTS,
+ MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN,
+ MESA_VK_DYNAMIC_VP_VIEWPORT_COUNT,
+ MESA_VK_DYNAMIC_VP_VIEWPORTS,
+ MESA_VK_DYNAMIC_VP_SCISSOR_COUNT,
+ MESA_VK_DYNAMIC_VP_SCISSORS,
+ MESA_VK_DYNAMIC_VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE,
+ MESA_VK_DYNAMIC_DR_RECTANGLES,
+ MESA_VK_DYNAMIC_DR_MODE,
+ MESA_VK_DYNAMIC_DR_ENABLE,
+ MESA_VK_DYNAMIC_RS_RASTERIZER_DISCARD_ENABLE,
+ MESA_VK_DYNAMIC_RS_DEPTH_CLAMP_ENABLE,
+ MESA_VK_DYNAMIC_RS_DEPTH_CLIP_ENABLE,
+ MESA_VK_DYNAMIC_RS_POLYGON_MODE,
+ MESA_VK_DYNAMIC_RS_CULL_MODE,
+ MESA_VK_DYNAMIC_RS_FRONT_FACE,
+ MESA_VK_DYNAMIC_RS_CONSERVATIVE_MODE,
+ MESA_VK_DYNAMIC_RS_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE,
+ MESA_VK_DYNAMIC_RS_RASTERIZATION_ORDER_AMD,
+ MESA_VK_DYNAMIC_RS_PROVOKING_VERTEX,
+ MESA_VK_DYNAMIC_RS_RASTERIZATION_STREAM,
+ MESA_VK_DYNAMIC_RS_DEPTH_BIAS_ENABLE,
+ MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS,
+ MESA_VK_DYNAMIC_RS_LINE_WIDTH,
+ MESA_VK_DYNAMIC_RS_LINE_MODE,
+ MESA_VK_DYNAMIC_RS_LINE_STIPPLE_ENABLE,
+ MESA_VK_DYNAMIC_RS_LINE_STIPPLE,
+ MESA_VK_DYNAMIC_FSR,
+ MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES,
+ MESA_VK_DYNAMIC_MS_SAMPLE_MASK,
+ MESA_VK_DYNAMIC_MS_ALPHA_TO_COVERAGE_ENABLE,
+ MESA_VK_DYNAMIC_MS_ALPHA_TO_ONE_ENABLE,
+ MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS_ENABLE,
+ MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS,
+ MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE,
+ MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE,
+ MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP,
+ MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_ENABLE,
+ MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_BOUNDS,
+ MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE,
+ MESA_VK_DYNAMIC_DS_STENCIL_OP,
+ MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK,
+ MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK,
+ MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE,
+ MESA_VK_DYNAMIC_CB_LOGIC_OP_ENABLE,
+ MESA_VK_DYNAMIC_CB_LOGIC_OP,
+ MESA_VK_DYNAMIC_CB_ATTACHMENT_COUNT,
+ MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES,
+ MESA_VK_DYNAMIC_CB_BLEND_ENABLES,
+ MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS,
+ MESA_VK_DYNAMIC_CB_WRITE_MASKS,
+ MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS,
+ MESA_VK_DYNAMIC_RP_ATTACHMENTS,
+ MESA_VK_DYNAMIC_ATTACHMENT_FEEDBACK_LOOP_ENABLE,
+ MESA_VK_DYNAMIC_COLOR_ATTACHMENT_MAP,
+ MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP,
+
+ /* Must be left at the end */
+ MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX,
+};
+
+#define MESA_VK_ATTACHMENT_UNUSED (0xff)
+
+/** Populate a bitset with dynamic states
+ *
+ * This function maps a VkPipelineDynamicStateCreateInfo to a bitset indexed
+ * by mesa_vk_dynamic_graphics_state enumerants.
+ *
+ * :param dynamic: |out| Bitset to populate
+ * :param info: |in| VkPipelineDynamicStateCreateInfo or NULL
+ */
+void
+vk_get_dynamic_graphics_states(BITSET_WORD *dynamic,
+ const VkPipelineDynamicStateCreateInfo *info);
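+
+/* Illustrative usage (a sketch, not a prescribed pattern): during graphics
+ * pipeline creation a driver can gather the dynamic states into a local
+ * bitset and test individual enumerants, with pCreateInfo being the
+ * VkGraphicsPipelineCreateInfo:
+ *
+ *    BITSET_DECLARE(dynamic, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ *    vk_get_dynamic_graphics_states(dynamic, pCreateInfo->pDynamicState);
+ *    if (BITSET_TEST(dynamic, MESA_VK_DYNAMIC_VP_VIEWPORTS)) {
+ *       // viewports will arrive later via vkCmdSetViewport*()
+ *    }
+ */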
+
+/***/
+struct vk_vertex_binding_state {
+ /** VkVertexInputBindingDescription::stride */
+ uint16_t stride;
+
+ /** VkVertexInputBindingDescription::inputRate */
+ uint16_t input_rate;
+
+ /** VkVertexInputBindingDivisorDescriptionKHR::divisor or 1 */
+ uint32_t divisor;
+};
+
+/***/
+struct vk_vertex_attribute_state {
+ /** VkVertexInputAttributeDescription::binding */
+ uint32_t binding;
+
+ /** VkVertexInputAttributeDescription::format */
+ VkFormat format;
+
+ /** VkVertexInputAttributeDescription::offset */
+ uint32_t offset;
+};
+
+/***/
+struct vk_vertex_input_state {
+ /** Bitset of which bindings are valid, indexed by binding */
+ uint32_t bindings_valid;
+ struct vk_vertex_binding_state bindings[MESA_VK_MAX_VERTEX_BINDINGS];
+
+ /** Bitset of which attributes are valid, indexed by location */
+ uint32_t attributes_valid;
+ struct vk_vertex_attribute_state attributes[MESA_VK_MAX_VERTEX_ATTRIBUTES];
+};
+
+/***/
+struct vk_input_assembly_state {
+ /** VkPipelineInputAssemblyStateCreateInfo::topology
+ *
+    * MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY
+ */
+ uint8_t primitive_topology;
+
+ /** VkPipelineInputAssemblyStateCreateInfo::primitiveRestartEnable
+ *
+    * MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE
+ */
+ bool primitive_restart_enable;
+};
+
+/***/
+struct vk_tessellation_state {
+ /** VkPipelineTessellationStateCreateInfo::patchControlPoints
+ *
+ * MESA_VK_DYNAMIC_TS_PATCH_CONTROL_POINTS
+ */
+ uint8_t patch_control_points;
+
+ /** VkPipelineTessellationDomainOriginStateCreateInfo::domainOrigin
+ *
+ * MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN
+ */
+ uint8_t domain_origin;
+};
+
+/***/
+struct vk_viewport_state {
+ /** VkPipelineViewportDepthClipControlCreateInfoEXT::negativeOneToOne
+ */
+ bool depth_clip_negative_one_to_one;
+
+ /** VkPipelineViewportStateCreateInfo::viewportCount
+ *
+    * MESA_VK_DYNAMIC_VP_VIEWPORT_COUNT
+ */
+ uint8_t viewport_count;
+
+ /** VkPipelineViewportStateCreateInfo::scissorCount
+ *
+    * MESA_VK_DYNAMIC_VP_SCISSOR_COUNT
+ */
+ uint8_t scissor_count;
+
+ /** VkPipelineViewportStateCreateInfo::pViewports
+ *
+    * MESA_VK_DYNAMIC_VP_VIEWPORTS
+ */
+ VkViewport viewports[MESA_VK_MAX_VIEWPORTS];
+
+ /** VkPipelineViewportStateCreateInfo::pScissors
+ *
+    * MESA_VK_DYNAMIC_VP_SCISSORS
+ */
+ VkRect2D scissors[MESA_VK_MAX_SCISSORS];
+};
+
+/***/
+struct vk_discard_rectangles_state {
+ /** VkPipelineDiscardRectangleStateCreateInfoEXT::discardRectangleMode */
+ VkDiscardRectangleModeEXT mode;
+
+ /** VkPipelineDiscardRectangleStateCreateInfoEXT::discardRectangleCount */
+ uint32_t rectangle_count;
+
+ /** VkPipelineDiscardRectangleStateCreateInfoEXT::pDiscardRectangles */
+ VkRect2D rectangles[MESA_VK_MAX_DISCARD_RECTANGLES];
+};
+
+enum ENUM_PACKED vk_mesa_depth_clip_enable {
+ /** Depth clipping should be disabled */
+ VK_MESA_DEPTH_CLIP_ENABLE_FALSE = 0,
+
+ /** Depth clipping should be enabled */
+ VK_MESA_DEPTH_CLIP_ENABLE_TRUE = 1,
+
+ /** Depth clipping should be enabled iff depth clamping is disabled */
+ VK_MESA_DEPTH_CLIP_ENABLE_NOT_CLAMP,
+};
+
+/***/
+struct vk_rasterization_state {
+ /** VkPipelineRasterizationStateCreateInfo::rasterizerDiscardEnable
+ *
+ * This will be false if rasterizer discard is dynamic
+ *
+ * MESA_VK_DYNAMIC_RS_RASTERIZER_DISCARD_ENABLE
+ */
+ bool rasterizer_discard_enable;
+
+ /** VkPipelineRasterizationStateCreateInfo::depthClampEnable
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_CLAMP_ENABLE
+ */
+ bool depth_clamp_enable;
+
+ /** VkPipelineRasterizationDepthClipStateCreateInfoEXT::depthClipEnable
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_CLIP_ENABLE
+ */
+ enum vk_mesa_depth_clip_enable depth_clip_enable;
+
+ /** VkPipelineRasterizationStateCreateInfo::polygonMode
+ *
+    * MESA_VK_DYNAMIC_RS_POLYGON_MODE
+ */
+ VkPolygonMode polygon_mode;
+
+ /** VkPipelineRasterizationStateCreateInfo::cullMode
+ *
+ * MESA_VK_DYNAMIC_RS_CULL_MODE
+ */
+ VkCullModeFlags cull_mode;
+
+ /** VkPipelineRasterizationStateCreateInfo::frontFace
+ *
+ * MESA_VK_DYNAMIC_RS_FRONT_FACE
+ */
+ VkFrontFace front_face;
+
+ /** VkPipelineRasterizationConservativeStateCreateInfoEXT::conservativeRasterizationMode
+ *
+ * MESA_VK_DYNAMIC_RS_CONSERVATIVE_MODE
+ */
+ VkConservativeRasterizationModeEXT conservative_mode;
+
+ /** VkPipelineRasterizationConservativeStateCreateInfoEXT::extraPrimitiveOverestimationSize
+ *
+ * MESA_VK_DYNAMIC_RS_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE
+ */
+ float extra_primitive_overestimation_size;
+
+ /** VkPipelineRasterizationStateRasterizationOrderAMD::rasterizationOrder */
+ VkRasterizationOrderAMD rasterization_order_amd;
+
+ /** VkPipelineRasterizationProvokingVertexStateCreateInfoEXT::provokingVertexMode
+ *
+ * MESA_VK_DYNAMIC_RS_PROVOKING_VERTEX
+ */
+ VkProvokingVertexModeEXT provoking_vertex;
+
+ /** VkPipelineRasterizationStateStreamCreateInfoEXT::rasterizationStream
+ *
+ * MESA_VK_DYNAMIC_RS_RASTERIZATION_STREAM
+ */
+ uint32_t rasterization_stream;
+
+ struct {
+ /** VkPipelineRasterizationStateCreateInfo::depthBiasEnable
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_BIAS_ENABLE
+ */
+ bool enable;
+
+ /** VkPipelineRasterizationStateCreateInfo::depthBiasConstantFactor
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS
+ */
+ float constant;
+
+ /** VkPipelineRasterizationStateCreateInfo::depthBiasClamp
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS
+ */
+ float clamp;
+
+ /** VkPipelineRasterizationStateCreateInfo::depthBiasSlopeFactor
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS
+ */
+ float slope;
+
+ /** VkDepthBiasRepresentationInfoEXT::depthBiasRepresentation
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS
+ */
+ VkDepthBiasRepresentationEXT representation;
+
+ /** VkDepthBiasRepresentationInfoEXT::depthBiasExact
+ *
+ * MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS
+ */
+ bool exact;
+ } depth_bias;
+
+ struct {
+ /** VkPipelineRasterizationStateCreateInfo::lineWidth
+ *
+ * MESA_VK_DYNAMIC_RS_LINE_WIDTH
+ */
+ float width;
+
+ /** VkPipelineRasterizationLineStateCreateInfoKHR::lineRasterizationMode
+ *
+ * Will be set to VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR if
+ * VkPipelineRasterizationLineStateCreateInfoKHR is not provided.
+ *
+ * MESA_VK_DYNAMIC_RS_LINE_MODE
+ */
+ VkLineRasterizationModeKHR mode;
+
+ struct {
+ /** VkPipelineRasterizationLineStateCreateInfoKHR::stippledLineEnable
+ *
+ * MESA_VK_DYNAMIC_RS_LINE_STIPPLE_ENABLE
+ */
+ bool enable;
+
+ /** VkPipelineRasterizationLineStateCreateInfoKHR::lineStippleFactor
+ *
+ * MESA_VK_DYNAMIC_RS_LINE_STIPPLE
+ */
+ uint32_t factor;
+
+ /** VkPipelineRasterizationLineStateCreateInfoKHR::lineStipplePattern
+ *
+ * MESA_VK_DYNAMIC_RS_LINE_STIPPLE
+ */
+ uint16_t pattern;
+ } stipple;
+ } line;
+};
+
+static inline bool
+vk_rasterization_state_depth_clip_enable(const struct vk_rasterization_state *rs)
+{
+ switch (rs->depth_clip_enable) {
+ case VK_MESA_DEPTH_CLIP_ENABLE_FALSE: return false;
+ case VK_MESA_DEPTH_CLIP_ENABLE_TRUE: return true;
+ case VK_MESA_DEPTH_CLIP_ENABLE_NOT_CLAMP: return !rs->depth_clamp_enable;
+ }
+ unreachable("Invalid depth clip enable");
+}
+
+/***/
+struct vk_fragment_shading_rate_state {
+ /** VkPipelineFragmentShadingRateStateCreateInfoKHR::fragmentSize
+ *
+    * MESA_VK_DYNAMIC_FSR
+ */
+ VkExtent2D fragment_size;
+
+ /** VkPipelineFragmentShadingRateStateCreateInfoKHR::combinerOps
+ *
+    * MESA_VK_DYNAMIC_FSR
+ */
+ VkFragmentShadingRateCombinerOpKHR combiner_ops[2];
+};
+
+/***/
+struct vk_sample_locations_state {
+ /** VkSampleLocationsInfoEXT::sampleLocationsPerPixel */
+ VkSampleCountFlagBits per_pixel;
+
+ /** VkSampleLocationsInfoEXT::sampleLocationGridSize */
+ VkExtent2D grid_size;
+
+ /** VkSampleLocationsInfoEXT::sampleLocations */
+ VkSampleLocationEXT locations[MESA_VK_MAX_SAMPLE_LOCATIONS];
+};
+
+/***/
+struct vk_multisample_state {
+ /** VkPipelineMultisampleStateCreateInfo::rasterizationSamples */
+ VkSampleCountFlagBits rasterization_samples;
+
+ /** VkPipelineMultisampleStateCreateInfo::sampleShadingEnable */
+ bool sample_shading_enable;
+
+ /** VkPipelineMultisampleStateCreateInfo::minSampleShading */
+ float min_sample_shading;
+
+ /** VkPipelineMultisampleStateCreateInfo::pSampleMask */
+ uint16_t sample_mask;
+
+ /** VkPipelineMultisampleStateCreateInfo::alphaToCoverageEnable */
+ bool alpha_to_coverage_enable;
+
+ /** VkPipelineMultisampleStateCreateInfo::alphaToOneEnable */
+ bool alpha_to_one_enable;
+
+ /** VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsEnable
+ *
+    * This will be true if the sample locations enable is dynamic.
+ */
+ bool sample_locations_enable;
+
+ /** VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo
+ *
+ * May be NULL for dynamic sample locations.
+ */
+ const struct vk_sample_locations_state *sample_locations;
+};
+
+/** Represents the stencil test state for a face */
+struct vk_stencil_test_face_state {
+ /*
+    * MESA_VK_DYNAMIC_DS_STENCIL_OP
+ */
+ struct {
+ /** VkStencilOpState::failOp */
+ uint8_t fail;
+
+ /** VkStencilOpState::passOp */
+ uint8_t pass;
+
+ /** VkStencilOpState::depthFailOp */
+ uint8_t depth_fail;
+
+ /** VkStencilOpState::compareOp */
+ uint8_t compare;
+ } op;
+
+ /** VkStencilOpState::compareMask
+ *
+    * MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK
+ */
+ uint8_t compare_mask;
+
+ /** VkStencilOpState::writeMask
+ *
+    * MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK
+ */
+ uint8_t write_mask;
+
+ /** VkStencilOpState::reference
+ *
+    * MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE
+ */
+ uint8_t reference;
+};
+
+/***/
+struct vk_depth_stencil_state {
+ struct {
+ /** VkPipelineDepthStencilStateCreateInfo::depthTestEnable
+ *
+       * MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE
+ */
+ bool test_enable;
+
+ /** VkPipelineDepthStencilStateCreateInfo::depthWriteEnable
+ *
+       * MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE
+ */
+ bool write_enable;
+
+ /** VkPipelineDepthStencilStateCreateInfo::depthCompareOp
+ *
+       * MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP
+ */
+ VkCompareOp compare_op;
+
+ struct {
+ /** VkPipelineDepthStencilStateCreateInfo::depthBoundsTestEnable
+ *
+          * MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_ENABLE
+ */
+ bool enable;
+
+ /** VkPipelineDepthStencilStateCreateInfo::min/maxDepthBounds
+ *
+          * MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_BOUNDS
+ */
+ float min, max;
+ } bounds_test;
+ } depth;
+
+ struct {
+ /** VkPipelineDepthStencilStateCreateInfo::stencilTestEnable
+ *
+       * MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE
+ */
+ bool test_enable;
+
+      /** Whether or not stencil should be written
+ *
+ * This does not map directly to any particular Vulkan API state and is
+ * initialized to true. If independent stencil disable ever becomes a
+ * thing, it will use this state. vk_optimize_depth_stencil_state() may
+ * set this to false if it can prove that the stencil test will never
+ * alter the stencil value.
+ */
+ bool write_enable;
+
+ /** VkPipelineDepthStencilStateCreateInfo::front */
+ struct vk_stencil_test_face_state front;
+
+ /** VkPipelineDepthStencilStateCreateInfo::back */
+ struct vk_stencil_test_face_state back;
+ } stencil;
+};
+
+/** Optimize a depth/stencil state
+ *
+ * The way depth and stencil testing is specified, there are many cases where,
+ * regardless of depth/stencil writes being enabled, nothing actually gets
+ * written due to some other bit of state being set. In the presence of
+ * discards, it's fairly easy to get into cases where early depth/stencil
+ * testing is disabled on some hardware, leading to a fairly big performance
+ * hit. This function attempts to optimize the depth stencil state and
+ * disable writes and sometimes even testing whenever possible.
+ *
+ * :param ds: |inout| The depth stencil state to optimize
+ * :param ds_aspects: |in| Which image aspects are present in the
+ * render pass.
+ * :param consider_write_mask: |in| If true, the write mask will be taken
+ * into account when optimizing. If
+ * false, it will be ignored.
+ */
+void vk_optimize_depth_stencil_state(struct vk_depth_stencil_state *ds,
+ VkImageAspectFlags ds_aspects,
+ bool consider_write_mask);
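+
+/* Illustrative call (a sketch): a driver rendering to a combined
+ * depth/stencil attachment might optimize a local copy of the dynamic
+ * depth/stencil state before emitting it:
+ *
+ *    struct vk_depth_stencil_state ds = dyn->ds;
+ *    vk_optimize_depth_stencil_state(&ds,
+ *                                    VK_IMAGE_ASPECT_DEPTH_BIT |
+ *                                    VK_IMAGE_ASPECT_STENCIL_BIT,
+ *                                    true);
+ */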
+
+struct vk_color_blend_attachment_state {
+ /** VkPipelineColorBlendAttachmentState::blendEnable
+ *
+ * This will be true if blend enables are dynamic
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_ENABLES
+ */
+ bool blend_enable;
+
+ /** VkPipelineColorBlendAttachmentState::srcColorBlendFactor
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS
+ */
+ uint8_t src_color_blend_factor;
+
+ /** VkPipelineColorBlendAttachmentState::dstColorBlendFactor
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS
+ */
+ uint8_t dst_color_blend_factor;
+
+ /** VkPipelineColorBlendAttachmentState::srcAlphaBlendFactor
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS
+ */
+ uint8_t src_alpha_blend_factor;
+
+ /** VkPipelineColorBlendAttachmentState::dstAlphaBlendFactor
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS
+ */
+ uint8_t dst_alpha_blend_factor;
+
+ /** VkPipelineColorBlendAttachmentState::colorWriteMask
+ *
+ * MESA_VK_DYNAMIC_CB_WRITE_MASKS
+ */
+ uint8_t write_mask;
+
+ /** VkPipelineColorBlendAttachmentState::colorBlendOp
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS
+ */
+ VkBlendOp color_blend_op;
+
+ /** VkPipelineColorBlendAttachmentState::alphaBlendOp
+ *
+ * MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS
+ */
+ VkBlendOp alpha_blend_op;
+};
+
+/***/
+struct vk_color_blend_state {
+ /** VkPipelineColorBlendStateCreateInfo::logicOpEnable
+ *
+ * MESA_VK_DYNAMIC_CB_LOGIC_OP_ENABLE,
+ */
+ bool logic_op_enable;
+
+ /** VkPipelineColorBlendStateCreateInfo::logicOp
+ *
+    * MESA_VK_DYNAMIC_CB_LOGIC_OP,
+ */
+ uint8_t logic_op;
+
+ /** VkPipelineColorBlendStateCreateInfo::attachmentCount
+ *
+    * MESA_VK_DYNAMIC_CB_ATTACHMENT_COUNT,
+ */
+ uint8_t attachment_count;
+
+ /** VkPipelineColorWriteCreateInfoEXT::pColorWriteEnables
+ *
+ * Bitmask of color write enables, indexed by color attachment index.
+ *
+    * MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES,
+ */
+ uint8_t color_write_enables;
+
+ /** VkPipelineColorBlendStateCreateInfo::pAttachments */
+ struct vk_color_blend_attachment_state attachments[MESA_VK_MAX_COLOR_ATTACHMENTS];
+
+ /** VkPipelineColorBlendStateCreateInfo::blendConstants
+ *
+    * MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS,
+ */
+ float blend_constants[4];
+};
+
+enum vk_rp_attachment_flags {
+ MESA_VK_RP_ATTACHMENT_NONE = 0,
+
+ MESA_VK_RP_ATTACHMENT_COLOR_0_BIT = (1 << 0),
+ MESA_VK_RP_ATTACHMENT_COLOR_1_BIT = (1 << 1),
+ MESA_VK_RP_ATTACHMENT_COLOR_2_BIT = (1 << 2),
+ MESA_VK_RP_ATTACHMENT_COLOR_3_BIT = (1 << 3),
+ MESA_VK_RP_ATTACHMENT_COLOR_4_BIT = (1 << 4),
+ MESA_VK_RP_ATTACHMENT_COLOR_5_BIT = (1 << 5),
+ MESA_VK_RP_ATTACHMENT_COLOR_6_BIT = (1 << 6),
+ MESA_VK_RP_ATTACHMENT_COLOR_7_BIT = (1 << 7),
+ MESA_VK_RP_ATTACHMENT_ANY_COLOR_BITS = 0xff,
+
+ MESA_VK_RP_ATTACHMENT_DEPTH_BIT = (1 << 8),
+ MESA_VK_RP_ATTACHMENT_STENCIL_BIT = (1 << 9),
+
+ MESA_VK_RP_ATTACHMENT_INFO_INVALID = 0xffff,
+};
+MESA_DEFINE_CPP_ENUM_BITFIELD_OPERATORS(vk_rp_attachment_flags)
+static_assert(MESA_VK_MAX_COLOR_ATTACHMENTS == 8,
+ "This enum must match the global runtime limit");
+
+#define MESA_VK_RP_ATTACHMENT_COLOR_BIT(n) \
+ ((enum vk_rp_attachment_flags)(MESA_VK_RP_ATTACHMENT_COLOR_0_BIT << (n)))
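+
+/* Usage note: MESA_VK_RP_ATTACHMENT_COLOR_BIT(n) shifts the color-0 bit, so
+ * MESA_VK_RP_ATTACHMENT_COLOR_BIT(2) == MESA_VK_RP_ATTACHMENT_COLOR_2_BIT.
+ * For example, a pass writing color attachments 0 and 1 plus depth has
+ *
+ *    attachments = MESA_VK_RP_ATTACHMENT_COLOR_BIT(0) |
+ *                  MESA_VK_RP_ATTACHMENT_COLOR_BIT(1) |
+ *                  MESA_VK_RP_ATTACHMENT_DEPTH_BIT;
+ */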
+
+/***/
+struct vk_input_attachment_location_state {
+ /** VkRenderingInputAttachmentIndexInfoKHR::pColorAttachmentLocations
+ *
+ * MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP
+ */
+ uint8_t color_map[MESA_VK_MAX_COLOR_ATTACHMENTS];
+
+ /** VkRenderingInputAttachmentIndexInfoKHR::pDepthInputAttachmentIndex
+ *
+ * MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP
+ */
+ uint8_t depth_att;
+
+ /** VkRenderingInputAttachmentIndexInfoKHR::pStencilInputAttachmentIndex
+ *
+ * MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP
+ */
+ uint8_t stencil_att;
+};
+
+/***/
+struct vk_color_attachment_location_state {
+ /** VkRenderingAttachmentLocationInfoKHR::pColorAttachmentLocations
+ *
+ * MESA_VK_DYNAMIC_COLOR_ATTACHMENT_MAP
+ */
+ uint8_t color_map[MESA_VK_MAX_COLOR_ATTACHMENTS];
+};
+
+/***/
+struct vk_render_pass_state {
+ /** Set of image aspects bound as color/depth/stencil attachments
+ *
+ * Set to MESA_VK_RP_ATTACHMENT_INFO_INVALID to indicate that attachment
+ * info is invalid.
+ */
+ enum vk_rp_attachment_flags attachments;
+
+ /** VkPipelineRenderingCreateInfo::viewMask */
+ uint32_t view_mask;
+
+ /** VkPipelineRenderingCreateInfo::colorAttachmentCount */
+ uint8_t color_attachment_count;
+
+ /** VkPipelineRenderingCreateInfo::pColorAttachmentFormats */
+ VkFormat color_attachment_formats[MESA_VK_MAX_COLOR_ATTACHMENTS];
+
+ /** VkPipelineRenderingCreateInfo::depthAttachmentFormat */
+ VkFormat depth_attachment_format;
+
+ /** VkPipelineRenderingCreateInfo::stencilAttachmentFormat */
+ VkFormat stencil_attachment_format;
+
+ /** VkAttachmentSampleCountInfoAMD::pColorAttachmentSamples */
+ uint8_t color_attachment_samples[MESA_VK_MAX_COLOR_ATTACHMENTS];
+
+ /** VkAttachmentSampleCountInfoAMD::depthStencilAttachmentSamples */
+ uint8_t depth_stencil_attachment_samples;
+};
+
+static inline bool
+vk_render_pass_state_has_attachment_info(const struct vk_render_pass_state *rp)
+{
+ return rp->attachments != MESA_VK_RP_ATTACHMENT_INFO_INVALID;
+}
+
+static inline VkImageAspectFlags
+vk_pipeline_flags_feedback_loops(VkPipelineCreateFlags2KHR flags)
+{
+ VkImageAspectFlags feedback_loops = 0;
+ if (flags &
+ VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT)
+ feedback_loops |= VK_IMAGE_ASPECT_COLOR_BIT;
+ if (flags &
+ VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT)
+ feedback_loops |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ return feedback_loops;
+}
+
+/** Struct representing all dynamic graphics state
+ *
+ * Before invoking any core functions, the driver must properly initialize
+ * this struct:
+ *
+ * - Initialize using vk_default_dynamic_graphics_state, if desired
+ * - Set vi to a driver-allocated vk_vertex_input_state struct
+ * - Set ms.sample_locations to a driver-allocated
+ * vk_sample_locations_state struct
+ */
+struct vk_dynamic_graphics_state {
+ /** Vertex input state
+ *
+ * Must be provided by the driver if VK_EXT_vertex_input_dynamic_state is
+ * supported.
+ *
+    * MESA_VK_DYNAMIC_VI
+ */
+ struct vk_vertex_input_state *vi;
+
+ /* This is a copy of vi->bindings_valid, used when the vertex input state
+ * is precompiled in the pipeline (so that vi is NULL) but the strides are
+ * set dynamically.
+ *
+    * MESA_VK_DYNAMIC_VI_BINDINGS_VALID
+ */
+ uint32_t vi_bindings_valid;
+
+ /** Vertex binding strides
+ *
+    * MESA_VK_DYNAMIC_VI_BINDING_STRIDES
+ */
+ uint16_t vi_binding_strides[MESA_VK_MAX_VERTEX_BINDINGS];
+
+ /** Input assembly state */
+ struct vk_input_assembly_state ia;
+
+ /** Tessellation state */
+ struct vk_tessellation_state ts;
+
+ /** Viewport state */
+ struct vk_viewport_state vp;
+
+ /** Discard rectangles state */
+ struct {
+ /** Custom enable
+ *
+ * MESA_VK_DYNAMIC_DR_ENABLE
+ */
+ bool enable;
+
+ /** Mode
+ *
+ * MESA_VK_DYNAMIC_DR_MODE
+ */
+ VkDiscardRectangleModeEXT mode;
+
+ /** Rectangles
+ *
+ * MESA_VK_DYNAMIC_DR_RECTANGLES
+ */
+ VkRect2D rectangles[MESA_VK_MAX_DISCARD_RECTANGLES];
+
+ /** Number of rectangles
+ *
+       * MESA_VK_DYNAMIC_DR_RECTANGLES
+ */
+ uint32_t rectangle_count;
+ } dr;
+
+ /** Rasterization state */
+ struct vk_rasterization_state rs;
+
+ /* Fragment shading rate state */
+ struct vk_fragment_shading_rate_state fsr;
+
+ /** Multisample state */
+ struct {
+ /** Rasterization samples
+ *
+ * MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES
+ */
+ VkSampleCountFlagBits rasterization_samples;
+
+ /** Sample mask
+ *
+ * MESA_VK_DYNAMIC_MS_SAMPLE_MASK
+ */
+ uint16_t sample_mask;
+
+ /** Alpha to coverage enable
+ *
+ * MESA_VK_DYNAMIC_MS_ALPHA_TO_CONVERAGE_ENABLE
+ */
+ bool alpha_to_coverage_enable;
+
+ /** Alpha to one enable
+ *
+ * MESA_VK_DYNAMIC_MS_ALPHA_TO_ONE_ENABLE
+ */
+ bool alpha_to_one_enable;
+
+ /** Custom sample locations enable
+ *
+ * MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS_ENABLE
+ */
+ bool sample_locations_enable;
+
+ /** Sample locations
+ *
+ * Must be provided by the driver if VK_EXT_sample_locations is
+ * supported.
+ *
+ * MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS
+ */
+ struct vk_sample_locations_state *sample_locations;
+ } ms;
+
+ /** Depth stencil state */
+ struct vk_depth_stencil_state ds;
+
+ /** Color blend state */
+ struct vk_color_blend_state cb;
+
+ struct {
+ enum vk_rp_attachment_flags attachments;
+ } rp;
+
+ /** MESA_VK_DYNAMIC_ATTACHMENT_FEEDBACK_LOOP_ENABLE */
+ VkImageAspectFlags feedback_loops;
+
+ /** MESA_VK_DYNAMIC_INPUT_ATTACHMENT_MAP */
+ struct vk_input_attachment_location_state ial;
+
+ /** MESA_VK_DYNAMIC_COLOR_ATTACHMENT_MAP */
+ struct vk_color_attachment_location_state cal;
+
+ /** For pipelines, which bits of dynamic state are set */
+ BITSET_DECLARE(set, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+
+ /** For command buffers, which bits of dynamic state have changed */
+ BITSET_DECLARE(dirty, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+};
+
+/***/
+struct vk_graphics_pipeline_all_state {
+ struct vk_vertex_input_state vi;
+ struct vk_input_assembly_state ia;
+ struct vk_tessellation_state ts;
+ struct vk_viewport_state vp;
+ struct vk_discard_rectangles_state dr;
+ struct vk_rasterization_state rs;
+ struct vk_fragment_shading_rate_state fsr;
+ struct vk_multisample_state ms;
+ struct vk_sample_locations_state ms_sample_locations;
+ struct vk_depth_stencil_state ds;
+ struct vk_color_blend_state cb;
+ struct vk_input_attachment_location_state ial;
+ struct vk_color_attachment_location_state cal;
+ struct vk_render_pass_state rp;
+};
+
+/***/
+struct vk_graphics_pipeline_state {
+ /** Bitset of which states are dynamic */
+ BITSET_DECLARE(dynamic, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+
+ VkShaderStageFlags shader_stages;
+
+ /** Flags from VkGraphicsPipelineCreateInfo::flags that are considered part
+ * of a stage and need to be merged when linking libraries.
+ *
+ * For drivers which use vk_render_pass, this will also include flags
+ * generated based on subpass self-dependencies and fragment density map.
+ */
+ VkPipelineCreateFlags2KHR pipeline_flags;
+
+ /* True if there are feedback loops that do not involve input attachments
+ * managed by the driver. This is set to true by the runtime if there
+ * are loops indicated by a pipeline flag (which may involve any image
+ * rather than only input attachments under the control of the driver) or
+ * there was no driver-provided render pass info struct (because input
+ * attachments for emulated renderpasses cannot be managed by the driver).
+ */
+ bool feedback_loop_not_input_only;
+
+ /** Vertex input state */
+ const struct vk_vertex_input_state *vi;
+
+ /** Input assembly state */
+ const struct vk_input_assembly_state *ia;
+
+ /** Tessellation state */
+ const struct vk_tessellation_state *ts;
+
+ /** Viewport state */
+ const struct vk_viewport_state *vp;
+
+   /** Discard rectangles state */
+ const struct vk_discard_rectangles_state *dr;
+
+ /** Rasterization state */
+ const struct vk_rasterization_state *rs;
+
+ /** Fragment shading rate state */
+ const struct vk_fragment_shading_rate_state *fsr;
+
+   /** Multisample state */
+ const struct vk_multisample_state *ms;
+
+ /** Depth stencil state */
+ const struct vk_depth_stencil_state *ds;
+
+ /** Color blend state */
+ const struct vk_color_blend_state *cb;
+
+ /** Input attachment mapping state */
+ const struct vk_input_attachment_location_state *ial;
+
+ /** Color attachment mapping state */
+ const struct vk_color_attachment_location_state *cal;
+
+ /** Render pass state */
+ const struct vk_render_pass_state *rp;
+};
+
+/** Populate a vk_graphics_pipeline_state from VkGraphicsPipelineCreateInfo
+ *
+ * This function crawls the provided VkGraphicsPipelineCreateInfo and uses it
+ * to populate the vk_graphics_pipeline_state. Upon returning from this
+ * function, all pointers in `state` will either be `NULL` or point to a valid
+ * sub-state structure. Whenever an extension struct is missing, a reasonable
+ * default value is provided where possible. Some states may be left NULL
+ * if the state does not exist (such as when rasterizer discard is enabled) or
+ * if all of the corresponding states are dynamic.
+ *
+ * This function assumes that the vk_graphics_pipeline_state is already valid
+ * (i.e., all pointers are NULL or point to valid states). Any states already
+ * present are assumed to be identical to how we would populate them from
+ * VkGraphicsPipelineCreateInfo.
+ *
+ * This function can operate in one of two modes with respect to how the
+ * memory for states is allocated. If a `vk_graphics_pipeline_all_state`
+ * struct is provided, any newly populated states will point to the relevant
+ * field in `all`. If `all == NULL`, it attempts to dynamically allocate any
+ * newly required states using the provided allocator and scope. The pointer
+ * to this new blob of memory is returned via `alloc_ptr_out` and must
+ * eventually be freed by the driver.
+ *
+ * :param device: |in| The Vulkan device
+ * :param state: |out| The graphics pipeline state to populate
+ * :param info: |in| The pCreateInfo from vkCreateGraphicsPipelines
+ * :param driver_rp: |in| Renderpass state if the driver implements render
+ * passes itself. This should be NULL for drivers
+ * that use the common render pass infrastructure
+ * built on top of dynamic rendering.
+ * :param driver_rp_flags: |in| Pipeline create flags implied by the
+ * renderpass or subpass if the driver implements
+ * render passes itself. This is only used if
+ * driver_rp is non-NULL.
+ * :param all: |in| The vk_graphics_pipeline_all_state to use to
+ * back any newly needed states. If NULL, newly
+ * needed states will be dynamically allocated
+ * instead.
+ * :param alloc: |in| Allocation callbacks for dynamically allocating
+ * new state memory.
+ * :param scope: |in| Allocation scope for dynamically allocating new
+ * state memory.
+ * :param alloc_ptr_out: |out| Will be populated with a pointer to any newly
+ * allocated state. The driver is responsible for
+ * freeing this pointer.
+ */
+VkResult
+vk_graphics_pipeline_state_fill(const struct vk_device *device,
+ struct vk_graphics_pipeline_state *state,
+ const VkGraphicsPipelineCreateInfo *info,
+ const struct vk_render_pass_state *driver_rp,
+ VkPipelineCreateFlags2KHR driver_rp_flags,
+ struct vk_graphics_pipeline_all_state *all,
+ const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ void **alloc_ptr_out);
+
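+/* For illustration, a minimal sketch of the dynamically-allocating mode
+ * (all == NULL) described above, assuming a driver device that embeds a
+ * vk_device as `vk`; the exact allocation scope and free call are the
+ * driver's choice:
+ *
+ *    void *state_mem = NULL;
+ *    struct vk_graphics_pipeline_state state = { 0 };
+ *    VkResult result =
+ *       vk_graphics_pipeline_state_fill(&device->vk, &state, pCreateInfo,
+ *                                       NULL, 0, NULL, pAllocator,
+ *                                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
+ *                                       &state_mem);
+ *    if (result != VK_SUCCESS)
+ *       return result;
+ *
+ *    ... consume state ...
+ *
+ *    vk_free2(&device->vk.alloc, pAllocator, state_mem);
+ */
+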
+/** Populate a vk_graphics_pipeline_state from another one.
+ *
+ * This allocates space for graphics pipeline state and copies it from another
+ * pipeline state. It ignores state in `old_state` which is not set and does
+ * not allocate memory if the entire group is unused. The intended use-case is
+ * for drivers that may be able to precompile some state ahead of time, to
+ * avoid allocating memory for it in pipeline libraries. The workflow looks
+ * something like this:
+ *
+ * struct vk_graphics_pipeline_all_state all;
+ * struct vk_graphics_pipeline_state state;
+ * vk_graphics_pipeline_state_fill(dev, &state, ..., &all, NULL, 0, NULL);
+ *
+ * ...
+ *
+ * BITSET_DECLARE(set_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ * vk_graphics_pipeline_get_state(&state, &set_state);
+ *
+ * ...
+ *
+ * if (BITSET_TEST(set_state, MESA_VK_DYNAMIC_FOO)) {
+ * emit_foo(&state.foo, ...);
+ * BITSET_SET(state.dynamic, MESA_VK_DYNAMIC_FOO);
+ * }
+ *
+ * ...
+ *
+ * if (pipeline->is_library) {
+ * library = pipeline_to_library(pipeline);
+ * vk_graphics_pipeline_state_copy(dev, &library->state, &state, ...);
+ * }
+ *
+ * In this case we will avoid allocating memory for `library->state.foo`.
+ *
+ * :param device: |in| The Vulkan device
+ * :param state: |out| The graphics pipeline state to populate
+ * :param old_state: |in| The graphics pipeline state to copy from
+ * :param alloc: |in| Allocation callbacks for dynamically allocating
+ * new state memory.
+ * :param scope: |in| Allocation scope for dynamically allocating new
+ * state memory.
+ * :param alloc_ptr_out: |out| Will be populated with a pointer to any newly
+ * allocated state. The driver is responsible for
+ * freeing this pointer.
+ */
+VkResult
+vk_graphics_pipeline_state_copy(const struct vk_device *device,
+ struct vk_graphics_pipeline_state *state,
+ const struct vk_graphics_pipeline_state *old_state,
+ const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ void **alloc_ptr_out);
+
+/** Merge one vk_graphics_pipeline_state into another
+ *
+ * Both the destination and source states are assumed to be valid (i.e., all
+ * pointers are NULL or point to valid states). Any states which exist in
+ * both are expected to be identical and the state already in dst is used.
+ * The only exception is render pass state, which may be only partially
+ * defined; in that case the fully defined one (if any) is used.
+ *
+ * :param dst: |out| The destination state. When the function returns, this
+ * will be the union of the original dst and src.
+ * :param src: |in| The source state
+ */
+void
+vk_graphics_pipeline_state_merge(struct vk_graphics_pipeline_state *dst,
+ const struct vk_graphics_pipeline_state *src);
+
+/** Get the states which will be set for a given vk_graphics_pipeline_state
+ *
+ * Return which states should be set when the pipeline is bound.
+ */
+void
+vk_graphics_pipeline_get_state(const struct vk_graphics_pipeline_state *state,
+ BITSET_WORD *set_state_out);
+
+/** Initialize a vk_dynamic_graphics_state with defaults
+ *
+ * :param dyn: |out| Dynamic graphics state to initialize
+ */
+void
+vk_dynamic_graphics_state_init(struct vk_dynamic_graphics_state *dyn);
+
+/** Clear a vk_dynamic_graphics_state to defaults
+ *
+ * :param dyn: |out| Dynamic graphics state to clear
+ */
+void
+vk_dynamic_graphics_state_clear(struct vk_dynamic_graphics_state *dyn);
+
+/** Initialize a vk_dynamic_graphics_state for a pipeline
+ *
+ * :param dyn: |out| Dynamic graphics state to initialize
+ * :param p: |in| The pipeline state from which to initialize the
+ * dynamic state.
+ */
+void
+vk_dynamic_graphics_state_fill(struct vk_dynamic_graphics_state *dyn,
+ const struct vk_graphics_pipeline_state *p);
+
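+/* For illustration, a hedged sketch of filling a pipeline's baked-in dynamic
+ * state at creation time; `pipeline->dynamic` is a hypothetical driver
+ * field, not something this header defines:
+ *
+ *    struct vk_graphics_pipeline_all_state all;
+ *    struct vk_graphics_pipeline_state state = { 0 };
+ *    vk_graphics_pipeline_state_fill(&device->vk, &state, pCreateInfo,
+ *                                    NULL, 0, &all, NULL, 0, NULL);
+ *
+ *    vk_dynamic_graphics_state_fill(&pipeline->dynamic, &state);
+ */
+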
+/** Mark all states in the given vk_dynamic_graphics_state dirty
+ *
+ * :param d: |out| Dynamic graphics state struct
+ */
+static inline void
+vk_dynamic_graphics_state_dirty_all(struct vk_dynamic_graphics_state *d)
+{
+ BITSET_SET_RANGE(d->dirty, 0, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX - 1);
+}
+
+/** Mark all states in the given vk_dynamic_graphics_state not dirty
+ *
+ * :param d: |out| Dynamic graphics state struct
+ */
+static inline void
+vk_dynamic_graphics_state_clear_dirty(struct vk_dynamic_graphics_state *d)
+{
+ BITSET_ZERO(d->dirty);
+}
+
+/** Test if any states in the given vk_dynamic_graphics_state are dirty
+ *
+ * :param d: |in| Dynamic graphics state struct to test
+ * :returns: true if any state is dirty
+ */
+static inline bool
+vk_dynamic_graphics_state_any_dirty(const struct vk_dynamic_graphics_state *d)
+{
+ return BITSET_TEST_RANGE(d->dirty,
+ 0, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX - 1);
+}
+
+/** Copies all set state from src to dst
+ *
+ * Both src and dst are assumed to be properly initialized dynamic state
+ * structs. Anything not set in src, as indicated by src->set, is ignored and
+ * those bits of dst are left untouched.
+ *
+ * :param dst: |out| Copy destination
+ * :param src: |in| Copy source
+ */
+void
+vk_dynamic_graphics_state_copy(struct vk_dynamic_graphics_state *dst,
+ const struct vk_dynamic_graphics_state *src);
+
+/** Set all of the state in src on a command buffer
+ *
+ * Anything not set, as indicated by src->set, is ignored and those states in
+ * the command buffer are left untouched.
+ *
+ * :param cmd: |inout| Command buffer to update
+ * :param src: |in| State to set
+ */
+void
+vk_cmd_set_dynamic_graphics_state(struct vk_command_buffer *cmd,
+ const struct vk_dynamic_graphics_state *src);
+
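+/* For illustration, a sketch of applying that baked-in state when a graphics
+ * pipeline is bound; the drv_* names are hypothetical:
+ *
+ *    VK_FROM_HANDLE(drv_cmd_buffer, cmd, commandBuffer);
+ *    VK_FROM_HANDLE(drv_graphics_pipeline, pipeline, _pipeline);
+ *
+ *    vk_cmd_set_dynamic_graphics_state(&cmd->vk, &pipeline->dynamic);
+ *
+ * Only states present in src->set are applied; everything else in the
+ * command buffer is left untouched.
+ */
+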
+/** Set vertex binding strides on a command buffer
+ *
+ * This is the dynamic state part of vkCmdBindVertexBuffers2().
+ *
+ * :param cmd: |inout| Command buffer to update
+ * :param first_binding: |in| First binding to update
+ * :param binding_count: |in| Number of bindings to update
+ * :param strides: |in| binding_count many stride values to set
+ */
+void
+vk_cmd_set_vertex_binding_strides(struct vk_command_buffer *cmd,
+ uint32_t first_binding,
+ uint32_t binding_count,
+ const VkDeviceSize *strides);
+
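+/* A hedged sketch of how a driver's vkCmdBindVertexBuffers2 entrypoint might
+ * forward the dynamic stride state; the drv_* names and the buffer-binding
+ * step are hypothetical:
+ *
+ *    VKAPI_ATTR void VKAPI_CALL
+ *    drv_CmdBindVertexBuffers2(VkCommandBuffer commandBuffer,
+ *                              uint32_t firstBinding, uint32_t bindingCount,
+ *                              const VkBuffer *pBuffers,
+ *                              const VkDeviceSize *pOffsets,
+ *                              const VkDeviceSize *pSizes,
+ *                              const VkDeviceSize *pStrides)
+ *    {
+ *       VK_FROM_HANDLE(drv_cmd_buffer, cmd, commandBuffer);
+ *
+ *       ... bind the buffers and offsets ...
+ *
+ *       if (pStrides != NULL) {
+ *          vk_cmd_set_vertex_binding_strides(&cmd->vk, firstBinding,
+ *                                            bindingCount, pStrides);
+ *       }
+ *    }
+ */
+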
+/* Set color attachment count for blending on a command buffer.
+ *
+ * This is an implicit part of starting a subpass or a secondary command
+ * buffer in a subpass.
+ */
+void
+vk_cmd_set_cb_attachment_count(struct vk_command_buffer *cmd,
+ uint32_t attachment_count);
+
+/* Set render pass attachments on a command buffer.
+ *
+ * This is required for VK_EXT_shader_object in order to disable attachments
+ * based on bound shaders.
+ */
+void
+vk_cmd_set_rp_attachments(struct vk_command_buffer *cmd,
+ enum vk_rp_attachment_flags attachments);
+
+const char *
+vk_dynamic_graphic_state_to_str(enum mesa_vk_dynamic_graphics_state state);
+
+/** Check whether the color attachment location map is the identity
+ *
+ * :param cal: |in| Color attachment location state
+ */
+static inline bool
+vk_color_attachment_location_state_is_identity(
+ const struct vk_color_attachment_location_state *cal)
+{
+ for (unsigned i = 0; i < ARRAY_SIZE(cal->color_map); i++) {
+ if (cal->color_map[i] != i)
+ return false;
+ }
+ return true;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_GRAPHICS_STATE_H */
diff --git a/src/vulkan/runtime/vk_image.c b/src/vulkan/runtime/vk_image.c
new file mode 100644
index 00000000000..cada2dd6761
--- /dev/null
+++ b/src/vulkan/runtime/vk_image.c
@@ -0,0 +1,1040 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_image.h"
+
+#if DETECT_OS_LINUX || DETECT_OS_BSD
+#include <drm-uapi/drm_fourcc.h>
+#endif
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_format_info.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_render_pass.h"
+#include "vk_util.h"
+#include "vulkan/wsi/wsi_common.h"
+
+#if DETECT_OS_ANDROID
+#include "vk_android.h"
+#include <vulkan/vulkan_android.h>
+#endif
+
+void
+vk_image_init(struct vk_device *device,
+ struct vk_image *image,
+ const VkImageCreateInfo *pCreateInfo)
+{
+ vk_object_base_init(device, &image->base, VK_OBJECT_TYPE_IMAGE);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
+ assert(pCreateInfo->mipLevels > 0);
+ assert(pCreateInfo->arrayLayers > 0);
+ assert(pCreateInfo->samples > 0);
+ assert(pCreateInfo->extent.width > 0);
+ assert(pCreateInfo->extent.height > 0);
+ assert(pCreateInfo->extent.depth > 0);
+
+ if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
+ assert(pCreateInfo->imageType == VK_IMAGE_TYPE_2D);
+ if (pCreateInfo->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)
+ assert(pCreateInfo->imageType == VK_IMAGE_TYPE_3D);
+
+ image->create_flags = pCreateInfo->flags;
+ image->image_type = pCreateInfo->imageType;
+ vk_image_set_format(image, pCreateInfo->format);
+ image->extent = vk_image_sanitize_extent(image, pCreateInfo->extent);
+ image->mip_levels = pCreateInfo->mipLevels;
+ image->array_layers = pCreateInfo->arrayLayers;
+ image->samples = pCreateInfo->samples;
+ image->tiling = pCreateInfo->tiling;
+ image->usage = pCreateInfo->usage;
+ image->sharing_mode = pCreateInfo->sharingMode;
+
+ if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ const VkImageStencilUsageCreateInfo *stencil_usage_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ IMAGE_STENCIL_USAGE_CREATE_INFO);
+ image->stencil_usage =
+ stencil_usage_info ? stencil_usage_info->stencilUsage :
+ pCreateInfo->usage;
+ } else {
+ image->stencil_usage = 0;
+ }
+
+ const VkExternalMemoryImageCreateInfo *ext_mem_info =
+ vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+ if (ext_mem_info)
+ image->external_handle_types = ext_mem_info->handleTypes;
+ else
+ image->external_handle_types = 0;
+
+ const struct wsi_image_create_info *wsi_info =
+ vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
+ image->wsi_legacy_scanout = wsi_info && wsi_info->scanout;
+
+#if DETECT_OS_LINUX || DETECT_OS_BSD
+ image->drm_format_mod = ((1ULL << 56) - 1) /* DRM_FORMAT_MOD_INVALID */;
+#endif
+
+#if DETECT_OS_ANDROID
+ const VkExternalFormatANDROID *ext_format =
+ vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_FORMAT_ANDROID);
+ if (ext_format && ext_format->externalFormat != 0) {
+ assert(image->format == VK_FORMAT_UNDEFINED);
+ assert(image->external_handle_types &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID);
+ vk_image_set_format(image, (VkFormat)ext_format->externalFormat);
+ }
+
+ image->ahb_format = vk_image_format_to_ahb_format(image->format);
+#endif
+
+ const VkImageCompressionControlEXT *compr_info =
+ vk_find_struct_const(pCreateInfo->pNext, IMAGE_COMPRESSION_CONTROL_EXT);
+ if (compr_info)
+ image->compr_flags = compr_info->flags;
+}
+
+void *
+vk_image_create(struct vk_device *device,
+ const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_image *image =
+ vk_zalloc2(&device->alloc, alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (image == NULL)
+ return NULL;
+
+ vk_image_init(device, image, pCreateInfo);
+
+ return image;
+}
+
+void
+vk_image_finish(struct vk_image *image)
+{
+ vk_object_base_finish(&image->base);
+}
+
+void
+vk_image_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_image *image)
+{
+ vk_object_free(device, alloc, image);
+}
+
+#if DETECT_OS_LINUX || DETECT_OS_BSD
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetImageDrmFormatModifierPropertiesEXT(UNUSED VkDevice device,
+ VkImage _image,
+ VkImageDrmFormatModifierPropertiesEXT *pProperties)
+{
+ VK_FROM_HANDLE(vk_image, image, _image);
+
+ assert(pProperties->sType ==
+ VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT);
+
+ assert(image->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
+ pProperties->drmFormatModifier = image->drm_format_mod;
+
+ return VK_SUCCESS;
+}
+#endif
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetImageSubresourceLayout(VkDevice _device, VkImage _image,
+ const VkImageSubresource *pSubresource,
+ VkSubresourceLayout *pLayout)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ const VkImageSubresource2KHR subresource = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR,
+ .imageSubresource = *pSubresource,
+ };
+
+ VkSubresourceLayout2KHR layout = {
+ .sType = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR
+ };
+
+ device->dispatch_table.GetImageSubresourceLayout2KHR(_device, _image,
+ &subresource, &layout);
+
+ *pLayout = layout.subresourceLayout;
+}
+
+void
+vk_image_set_format(struct vk_image *image, VkFormat format)
+{
+ image->format = format;
+ image->aspects = vk_format_aspects(format);
+}
+
+VkImageUsageFlags
+vk_image_usage(const struct vk_image *image,
+ VkImageAspectFlags aspect_mask)
+{
+ /* From the Vulkan 1.2.131 spec:
+ *
+    *    "If the image has a depth-stencil format and was created with
+ * a VkImageStencilUsageCreateInfo structure included in the pNext
+ * chain of VkImageCreateInfo, the usage is calculated based on the
+ * subresource.aspectMask provided:
+ *
+ * - If aspectMask includes only VK_IMAGE_ASPECT_STENCIL_BIT, the
+ * implicit usage is equal to
+ * VkImageStencilUsageCreateInfo::stencilUsage.
+ *
+ * - If aspectMask includes only VK_IMAGE_ASPECT_DEPTH_BIT, the
+ * implicit usage is equal to VkImageCreateInfo::usage.
+ *
+ * - If both aspects are included in aspectMask, the implicit usage
+ * is equal to the intersection of VkImageCreateInfo::usage and
+    *      VkImageStencilUsageCreateInfo::stencilUsage."
+ */
+ if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ return image->stencil_usage;
+ } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ return image->usage & image->stencil_usage;
+ } else {
+ /* This also handles the color case */
+ return image->usage;
+ }
+}
+
+#define VK_IMAGE_ASPECT_ANY_COLOR_MASK_MESA ( \
+ VK_IMAGE_ASPECT_COLOR_BIT | \
+ VK_IMAGE_ASPECT_PLANE_0_BIT | \
+ VK_IMAGE_ASPECT_PLANE_1_BIT | \
+ VK_IMAGE_ASPECT_PLANE_2_BIT)
+
+/** Expands the given aspect mask relative to the image
+ *
+ * If the image has color plane aspects and VK_IMAGE_ASPECT_COLOR_BIT has
+ * been requested, this returns the aspects of the underlying image.
+ *
+ * For example,
+ *
+ * VK_IMAGE_ASPECT_COLOR_BIT
+ *
+ * will be converted to
+ *
+ * VK_IMAGE_ASPECT_PLANE_0_BIT |
+ * VK_IMAGE_ASPECT_PLANE_1_BIT |
+ * VK_IMAGE_ASPECT_PLANE_2_BIT
+ *
+ * for an image of format VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM.
+ */
+VkImageAspectFlags
+vk_image_expand_aspect_mask(const struct vk_image *image,
+ VkImageAspectFlags aspect_mask)
+{
+ if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
+ assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_MASK_MESA);
+ return image->aspects;
+ } else {
+ assert(aspect_mask && !(aspect_mask & ~image->aspects));
+ return aspect_mask;
+ }
+}
+
+VkExtent3D
+vk_image_extent_to_elements(const struct vk_image *image, VkExtent3D extent)
+{
+ const struct util_format_description *fmt =
+ vk_format_description(image->format);
+
+ extent = vk_image_sanitize_extent(image, extent);
+ extent.width = DIV_ROUND_UP(extent.width, fmt->block.width);
+ extent.height = DIV_ROUND_UP(extent.height, fmt->block.height);
+ extent.depth = DIV_ROUND_UP(extent.depth, fmt->block.depth);
+
+ return extent;
+}
+
+VkOffset3D
+vk_image_offset_to_elements(const struct vk_image *image, VkOffset3D offset)
+{
+ const struct util_format_description *fmt =
+ vk_format_description(image->format);
+
+ offset = vk_image_sanitize_offset(image, offset);
+
+ assert(offset.x % fmt->block.width == 0);
+ assert(offset.y % fmt->block.height == 0);
+ assert(offset.z % fmt->block.depth == 0);
+
+ offset.x /= fmt->block.width;
+ offset.y /= fmt->block.height;
+ offset.z /= fmt->block.depth;
+
+ return offset;
+}
+
+struct vk_image_buffer_layout
+vk_image_buffer_copy_layout(const struct vk_image *image,
+ const VkBufferImageCopy2* region)
+{
+ VkExtent3D extent = vk_image_sanitize_extent(image, region->imageExtent);
+
+ const uint32_t row_length = region->bufferRowLength ?
+ region->bufferRowLength : extent.width;
+ const uint32_t image_height = region->bufferImageHeight ?
+ region->bufferImageHeight : extent.height;
+
+ const VkImageAspectFlags aspect = region->imageSubresource.aspectMask;
+ VkFormat format = vk_format_get_aspect_format(image->format, aspect);
+ const struct util_format_description *fmt = vk_format_description(format);
+
+ assert(fmt->block.bits % 8 == 0);
+ const uint32_t element_size_B = fmt->block.bits / 8;
+
+ const uint32_t row_stride_B =
+ DIV_ROUND_UP(row_length, fmt->block.width) * element_size_B;
+ const uint64_t image_stride_B =
+ DIV_ROUND_UP(image_height, fmt->block.height) * (uint64_t)row_stride_B;
+
+ return (struct vk_image_buffer_layout) {
+ .row_length = row_length,
+ .image_height = image_height,
+ .element_size_B = element_size_B,
+ .row_stride_B = row_stride_B,
+ .image_stride_B = image_stride_B,
+ };
+}
+
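+/* Worked example (for illustration): copying a 128x128 BC1-compressed image
+ * with bufferRowLength = bufferImageHeight = 0 gives row_length =
+ * image_height = 128, element_size_B = 64 / 8 = 8 (one 4x4 block),
+ * row_stride_B = DIV_ROUND_UP(128, 4) * 8 = 256 and image_stride_B =
+ * DIV_ROUND_UP(128, 4) * 256 = 8192.
+ */
+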
+struct vk_image_buffer_layout
+vk_memory_to_image_copy_layout(const struct vk_image *image,
+ const VkMemoryToImageCopyEXT* region)
+{
+ const VkBufferImageCopy2 bic = {
+ .bufferOffset = 0,
+ .bufferRowLength = region->memoryRowLength,
+ .bufferImageHeight = region->memoryImageHeight,
+ .imageSubresource = region->imageSubresource,
+ .imageOffset = region->imageOffset,
+ .imageExtent = region->imageExtent,
+ };
+ return vk_image_buffer_copy_layout(image, &bic);
+}
+
+struct vk_image_buffer_layout
+vk_image_to_memory_copy_layout(const struct vk_image *image,
+ const VkImageToMemoryCopyEXT* region)
+{
+ const VkBufferImageCopy2 bic = {
+ .bufferOffset = 0,
+ .bufferRowLength = region->memoryRowLength,
+ .bufferImageHeight = region->memoryImageHeight,
+ .imageSubresource = region->imageSubresource,
+ .imageOffset = region->imageOffset,
+ .imageExtent = region->imageExtent,
+ };
+ return vk_image_buffer_copy_layout(image, &bic);
+}
+
+static VkComponentSwizzle
+remap_swizzle(VkComponentSwizzle swizzle, VkComponentSwizzle component)
+{
+ return swizzle == VK_COMPONENT_SWIZZLE_IDENTITY ? component : swizzle;
+}
+
+void
+vk_image_view_init(struct vk_device *device,
+ struct vk_image_view *image_view,
+ bool driver_internal,
+ const VkImageViewCreateInfo *pCreateInfo)
+{
+ vk_object_base_init(device, &image_view->base, VK_OBJECT_TYPE_IMAGE_VIEW);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
+ VK_FROM_HANDLE(vk_image, image, pCreateInfo->image);
+
+ image_view->create_flags = pCreateInfo->flags;
+ image_view->image = image;
+ image_view->view_type = pCreateInfo->viewType;
+
+ image_view->format = pCreateInfo->format;
+ if (image_view->format == VK_FORMAT_UNDEFINED)
+ image_view->format = image->format;
+
+ if (!driver_internal) {
+ switch (image_view->view_type) {
+ case VK_IMAGE_VIEW_TYPE_1D:
+ case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+ assert(image->image_type == VK_IMAGE_TYPE_1D);
+ break;
+ case VK_IMAGE_VIEW_TYPE_2D:
+ case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+ if (image->create_flags & (VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT |
+ VK_IMAGE_CREATE_2D_VIEW_COMPATIBLE_BIT_EXT))
+ assert(image->image_type == VK_IMAGE_TYPE_3D);
+ else
+ assert(image->image_type == VK_IMAGE_TYPE_2D);
+ break;
+ case VK_IMAGE_VIEW_TYPE_3D:
+ assert(image->image_type == VK_IMAGE_TYPE_3D);
+ break;
+ case VK_IMAGE_VIEW_TYPE_CUBE:
+ case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+ assert(image->image_type == VK_IMAGE_TYPE_2D);
+ assert(image->create_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT);
+ break;
+ default:
+ unreachable("Invalid image view type");
+ }
+ }
+
+ const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
+
+ if (driver_internal) {
+ image_view->aspects = range->aspectMask;
+ image_view->view_format = image_view->format;
+ } else {
+ image_view->aspects =
+ vk_image_expand_aspect_mask(image, range->aspectMask);
+
+ assert(!(image_view->aspects & ~image->aspects));
+
+ /* From the Vulkan 1.2.184 spec:
+ *
+ * "If the image has a multi-planar format and
+ * subresourceRange.aspectMask is VK_IMAGE_ASPECT_COLOR_BIT, and image
+ * has been created with a usage value not containing any of the
+ * VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR,
+ * VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR,
+ * VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR,
+ * VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR,
+ * VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR, and
+ * VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR flags, then the format must
+ * be identical to the image format, and the sampler to be used with the
+ * image view must enable sampler Y′CBCR conversion."
+ *
+ * Since no one implements video yet, we can ignore the bits about video
+ * create flags and assume YCbCr formats match.
+ */
+ if ((image->aspects & VK_IMAGE_ASPECT_PLANE_1_BIT) &&
+ (range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT))
+ assert(image_view->format == image->format);
+
+ /* From the Vulkan 1.2.184 spec:
+ *
+ * "Each depth/stencil format is only compatible with itself."
+ */
+ if (image_view->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT))
+ assert(image_view->format == image->format);
+
+ if (!(image->create_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT))
+ assert(image_view->format == image->format);
+
+ /* Restrict the format to only the planes chosen.
+ *
+ * For combined depth and stencil images, this means the depth-only or
+ * stencil-only format if only one aspect is chosen and the full
+ * combined format if both aspects are chosen.
+ *
+ * For single-plane color images, we just take the format as-is. For
+ * multi-plane views of multi-plane images, this means we want the full
+ * multi-plane format. For single-plane views of multi-plane images, we
+ * want a format compatible with the one plane. Fortunately, this is
+ * already what the client gives us. The Vulkan 1.2.184 spec says:
+ *
+ * "If image was created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
+ * and the image has a multi-planar format, and if
+ * subresourceRange.aspectMask is VK_IMAGE_ASPECT_PLANE_0_BIT,
+ * VK_IMAGE_ASPECT_PLANE_1_BIT, or VK_IMAGE_ASPECT_PLANE_2_BIT,
+ * format must be compatible with the corresponding plane of the
+ * image, and the sampler to be used with the image view must not
+ * enable sampler Y′CBCR conversion."
+ */
+ if (image_view->aspects == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ image_view->view_format = vk_format_stencil_only(image_view->format);
+ } else if (image_view->aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ image_view->view_format = vk_format_depth_only(image_view->format);
+ } else {
+ image_view->view_format = image_view->format;
+ }
+ }
+
+ image_view->swizzle = (VkComponentMapping) {
+ .r = remap_swizzle(pCreateInfo->components.r, VK_COMPONENT_SWIZZLE_R),
+ .g = remap_swizzle(pCreateInfo->components.g, VK_COMPONENT_SWIZZLE_G),
+ .b = remap_swizzle(pCreateInfo->components.b, VK_COMPONENT_SWIZZLE_B),
+ .a = remap_swizzle(pCreateInfo->components.a, VK_COMPONENT_SWIZZLE_A),
+ };
+
+ assert(range->layerCount > 0);
+ assert(range->baseMipLevel < image->mip_levels);
+
+ image_view->base_mip_level = range->baseMipLevel;
+ image_view->level_count = vk_image_subresource_level_count(image, range);
+ image_view->base_array_layer = range->baseArrayLayer;
+ image_view->layer_count = vk_image_subresource_layer_count(image, range);
+
+ const VkImageViewMinLodCreateInfoEXT *min_lod_info =
+ vk_find_struct_const(pCreateInfo, IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT);
+ image_view->min_lod = min_lod_info ? min_lod_info->minLod : 0.0f;
+
+ /* From the Vulkan 1.3.215 spec:
+ *
+ * VUID-VkImageViewMinLodCreateInfoEXT-minLod-06456
+ *
+ * "minLod must be less or equal to the index of the last mipmap level
+ * accessible to the view."
+ */
+ assert(image_view->min_lod <= image_view->base_mip_level +
+ image_view->level_count - 1);
+
+ image_view->extent =
+ vk_image_mip_level_extent(image, image_view->base_mip_level);
+
+   /* By default the storage slice range covers the whole image view, but it
+    * can be overridden with VkImageViewSlicedCreateInfoEXT.
+ */
+ image_view->storage.z_slice_offset = 0;
+ image_view->storage.z_slice_count = image_view->extent.depth;
+
+ const VkImageViewSlicedCreateInfoEXT *sliced_info =
+ vk_find_struct_const(pCreateInfo, IMAGE_VIEW_SLICED_CREATE_INFO_EXT);
+ assert(image_view->base_mip_level + image_view->level_count
+ <= image->mip_levels);
+ switch (image->image_type) {
+ default:
+ unreachable("bad VkImageType");
+ case VK_IMAGE_TYPE_1D:
+ case VK_IMAGE_TYPE_2D:
+ assert(image_view->base_array_layer + image_view->layer_count
+ <= image->array_layers);
+ break;
+ case VK_IMAGE_TYPE_3D:
+ if (sliced_info && image_view->view_type == VK_IMAGE_VIEW_TYPE_3D) {
+ unsigned total = image_view->extent.depth;
+ image_view->storage.z_slice_offset = sliced_info->sliceOffset;
+ assert(image_view->storage.z_slice_offset < total);
+ if (sliced_info->sliceCount == VK_REMAINING_3D_SLICES_EXT) {
+ image_view->storage.z_slice_count = total - image_view->storage.z_slice_offset;
+ } else {
+ image_view->storage.z_slice_count = sliced_info->sliceCount;
+ }
+ } else if (image_view->view_type != VK_IMAGE_VIEW_TYPE_3D) {
+ image_view->storage.z_slice_offset = image_view->base_array_layer;
+ image_view->storage.z_slice_count = image_view->layer_count;
+ }
+ assert(image_view->storage.z_slice_offset + image_view->storage.z_slice_count
+ <= image->extent.depth);
+ assert(image_view->base_array_layer + image_view->layer_count
+ <= image_view->extent.depth);
+ break;
+ }
+
+ /* If we are creating a color view from a depth/stencil image we compute
+ * usage from the underlying depth/stencil aspects.
+ */
+ const VkImageUsageFlags image_usage =
+ vk_image_usage(image, image_view->aspects);
+ const VkImageViewUsageCreateInfo *usage_info =
+ vk_find_struct_const(pCreateInfo, IMAGE_VIEW_USAGE_CREATE_INFO);
+ image_view->usage = usage_info ? usage_info->usage : image_usage;
+ assert(driver_internal || !(image_view->usage & ~image_usage));
+}
+
+void
+vk_image_view_finish(struct vk_image_view *image_view)
+{
+ vk_object_base_finish(&image_view->base);
+}
+
+void *
+vk_image_view_create(struct vk_device *device,
+ bool driver_internal,
+ const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_image_view *image_view =
+ vk_zalloc2(&device->alloc, alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (image_view == NULL)
+ return NULL;
+
+ vk_image_view_init(device, image_view, driver_internal, pCreateInfo);
+
+ return image_view;
+}
+
+void
+vk_image_view_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_image_view *image_view)
+{
+ vk_object_free(device, alloc, image_view);
+}
+
+bool
+vk_image_layout_is_read_only(VkImageLayout layout,
+ VkImageAspectFlagBits aspect)
+{
+ assert(util_bitcount(aspect) == 1);
+
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return true; /* These are only used for layout transitions */
+
+ case VK_IMAGE_LAYOUT_GENERAL:
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
+ case VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR:
+ return false;
+
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
+ case VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR:
+ case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL:
+ return true;
+
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
+ return aspect == VK_IMAGE_ASPECT_DEPTH_BIT;
+
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
+ return aspect == VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ case VK_IMAGE_LAYOUT_MAX_ENUM:
+ case VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR:
+ case VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR:
+ case VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR:
+ case VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR:
+ case VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR:
+ case VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR:
+ unreachable("Invalid image layout.");
+ }
+
+ unreachable("Invalid image layout.");
+}
+
+bool
+vk_image_layout_is_depth_only(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static VkResult
+vk_image_create_get_format_list_uncompressed(struct vk_device *device,
+ const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFormat **formats,
+ uint32_t *format_count)
+{
+ const struct vk_format_class_info *class =
+ vk_format_get_class_info(pCreateInfo->format);
+
+ *formats = NULL;
+ *format_count = 0;
+
+ if (class->format_count < 2)
+ return VK_SUCCESS;
+
+ *formats = vk_alloc2(&device->alloc, pAllocator,
+ sizeof(VkFormat) * class->format_count,
+ alignof(VkFormat), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ if (*formats == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ memcpy(*formats, class->formats, sizeof(VkFormat) * class->format_count);
+ *format_count = class->format_count;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_image_create_get_format_list_compressed(struct vk_device *device,
+ const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFormat **formats,
+ uint32_t *format_count)
+{
+ if ((pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) == 0) {
+ return vk_image_create_get_format_list_uncompressed(device,
+ pCreateInfo,
+ pAllocator,
+ formats,
+ format_count);
+ }
+
+ const struct vk_format_class_info *class =
+ vk_format_get_class_info(pCreateInfo->format);
+ const struct vk_format_class_info *uncompr_class = NULL;
+
+ switch (vk_format_get_blocksizebits(pCreateInfo->format)) {
+ case 64:
+ uncompr_class = vk_format_class_get_info(MESA_VK_FORMAT_CLASS_64_BIT);
+ break;
+ case 128:
+ uncompr_class = vk_format_class_get_info(MESA_VK_FORMAT_CLASS_128_BIT);
+ break;
+ }
+
+ if (!uncompr_class)
+ return vk_error(device, VK_ERROR_FORMAT_NOT_SUPPORTED);
+
+ uint32_t fmt_count = class->format_count + uncompr_class->format_count;
+
+ *formats = vk_alloc2(&device->alloc, pAllocator,
+ sizeof(VkFormat) * fmt_count,
+ alignof(VkFormat), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ if (*formats == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ memcpy(*formats, class->formats, sizeof(VkFormat) * class->format_count);
+ memcpy(*formats + class->format_count, uncompr_class->formats,
+ sizeof(VkFormat) * uncompr_class->format_count);
+ *format_count = class->format_count + uncompr_class->format_count;
+
+ return VK_SUCCESS;
+}
+
+/* Get a list of compatible formats when VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
+ * or VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT is set. This list is
+ * either retrieved from a VkImageFormatListCreateInfo passed in the image
+ * creation pNext chain, or forged from the default compatible list specified
+ * in the
+ * "formats-compatibility-classes" section of the spec.
+ *
+ * The value returned in *formats must be freed with
+ * vk_free2(&device->alloc, pAllocator), and should not live past the
+ * vkCreateImage() call (allocated in the COMMAND scope).
+ */
+VkResult
+vk_image_create_get_format_list(struct vk_device *device,
+ const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFormat **formats,
+ uint32_t *format_count)
+{
+ *formats = NULL;
+ *format_count = 0;
+
+ if (!(pCreateInfo->flags &
+ (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
+ VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT))) {
+ return VK_SUCCESS;
+ }
+
+ /* "Each depth/stencil format is only compatible with itself." */
+ if (vk_format_is_depth_or_stencil(pCreateInfo->format))
+ return VK_SUCCESS;
+
+ const VkImageFormatListCreateInfo *format_list = (const VkImageFormatListCreateInfo *)
+ vk_find_struct_const(pCreateInfo->pNext, IMAGE_FORMAT_LIST_CREATE_INFO);
+
+ if (format_list) {
+ if (!format_list->viewFormatCount)
+ return VK_SUCCESS;
+
+ *formats = vk_alloc2(&device->alloc, pAllocator,
+ sizeof(VkFormat) * format_list->viewFormatCount,
+ alignof(VkFormat), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ if (*formats == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ memcpy(*formats, format_list->pViewFormats, sizeof(VkFormat) * format_list->viewFormatCount);
+ *format_count = format_list->viewFormatCount;
+ return VK_SUCCESS;
+ }
+
+ if (vk_format_is_compressed(pCreateInfo->format))
+ return vk_image_create_get_format_list_compressed(device,
+ pCreateInfo,
+ pAllocator,
+ formats,
+ format_count);
+
+ return vk_image_create_get_format_list_uncompressed(device,
+ pCreateInfo,
+ pAllocator,
+ formats,
+ format_count);
+}
+
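+/* For illustration, a hedged sketch of a driver's vkCreateImage path
+ * consuming this list; drv_image_add_view_format and the device layout are
+ * hypothetical:
+ *
+ *    VkFormat *formats = NULL;
+ *    uint32_t format_count = 0;
+ *    VkResult result =
+ *       vk_image_create_get_format_list(&device->vk, pCreateInfo, pAllocator,
+ *                                       &formats, &format_count);
+ *    if (result != VK_SUCCESS)
+ *       return result;
+ *
+ *    for (uint32_t i = 0; i < format_count; i++)
+ *       drv_image_add_view_format(image, formats[i]);
+ *
+ *    vk_free2(&device->vk.alloc, pAllocator, formats);
+ */
+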
+/* From the Vulkan Specification 1.2.166 - VkAttachmentReference2:
+ *
+ * "If layout only specifies the layout of the depth aspect of the
+ * attachment, the layout of the stencil aspect is specified by the
+ * stencilLayout member of a VkAttachmentReferenceStencilLayout structure
+ * included in the pNext chain. Otherwise, layout describes the layout for
+ * all relevant image aspects."
+ */
+VkImageLayout
+vk_att_ref_stencil_layout(const VkAttachmentReference2 *att_ref,
+ const VkAttachmentDescription2 *attachments)
+{
+ /* From VUID-VkAttachmentReference2-attachment-04755:
+ * "If attachment is not VK_ATTACHMENT_UNUSED, and the format of the
+ * referenced attachment is a depth/stencil format which includes both
+ * depth and stencil aspects [...]
+ */
+ if (att_ref->attachment == VK_ATTACHMENT_UNUSED ||
+ !vk_format_has_stencil(attachments[att_ref->attachment].format))
+ return VK_IMAGE_LAYOUT_UNDEFINED;
+
+ const VkAttachmentReferenceStencilLayout *stencil_ref =
+ vk_find_struct_const(att_ref->pNext, ATTACHMENT_REFERENCE_STENCIL_LAYOUT);
+
+ if (stencil_ref)
+ return stencil_ref->stencilLayout;
+
+ /* From VUID-VkAttachmentReference2-attachment-04755:
+ * "If attachment is not VK_ATTACHMENT_UNUSED, and the format of the
+ * referenced attachment is a depth/stencil format which includes both
+ * depth and stencil aspects, and layout is
+ * VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or
+ * VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, the pNext chain must include
+ * a VkAttachmentReferenceStencilLayout structure."
+ */
+ assert(!vk_image_layout_is_depth_only(att_ref->layout));
+
+ return att_ref->layout;
+}
+
+/* From the Vulkan Specification 1.2.184:
+ *
+ * "If the pNext chain includes a VkAttachmentDescriptionStencilLayout
+ * structure, then the stencilInitialLayout and stencilFinalLayout members
+ * specify the initial and final layouts of the stencil aspect of a
+ * depth/stencil format, and initialLayout and finalLayout only apply to the
+ * depth aspect. For depth-only formats, the
+ * VkAttachmentDescriptionStencilLayout structure is ignored. For
+ * stencil-only formats, the initial and final layouts of the stencil aspect
+ * are taken from the VkAttachmentDescriptionStencilLayout structure if
+ * present, or initialLayout and finalLayout if not present."
+ *
+ * "If format is a depth/stencil format, and either initialLayout or
+ * finalLayout does not specify a layout for the stencil aspect, then the
+ * application must specify the initial and final layouts of the stencil
+ * aspect by including a VkAttachmentDescriptionStencilLayout structure in
+ * the pNext chain."
+ */
+VkImageLayout
+vk_att_desc_stencil_layout(const VkAttachmentDescription2 *att_desc, bool final)
+{
+ if (!vk_format_has_stencil(att_desc->format))
+ return VK_IMAGE_LAYOUT_UNDEFINED;
+
+ const VkAttachmentDescriptionStencilLayout *stencil_desc =
+ vk_find_struct_const(att_desc->pNext, ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT);
+
+ if (stencil_desc) {
+ return final ?
+ stencil_desc->stencilFinalLayout :
+ stencil_desc->stencilInitialLayout;
+ }
+
+ const VkImageLayout main_layout =
+ final ? att_desc->finalLayout : att_desc->initialLayout;
+
+ /* From VUID-VkAttachmentDescription2-format-03302/03303:
+ * "If format is a depth/stencil format which includes both depth and
+ * stencil aspects, and initial/finalLayout is
+ * VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or
+ * VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, the pNext chain must include
+ * a VkAttachmentDescriptionStencilLayout structure."
+ */
+ assert(!vk_image_layout_is_depth_only(main_layout));
+
+ return main_layout;
+}
+
+VkImageUsageFlags
+vk_image_layout_to_usage_flags(VkImageLayout layout,
+ VkImageAspectFlagBits aspect)
+{
+ assert(util_bitcount(aspect) == 1);
+
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return 0u;
+
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return ~0u;
+
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ assert(aspect & VK_IMAGE_ASPECT_ANY_COLOR_MASK_MESA);
+ return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ assert(aspect & (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT));
+ return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
+ assert(aspect & VK_IMAGE_ASPECT_DEPTH_BIT);
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
+
+ case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
+ assert(aspect & VK_IMAGE_ASPECT_STENCIL_BIT);
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
+
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ assert(aspect & (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT));
+ return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
+ assert(aspect & VK_IMAGE_ASPECT_DEPTH_BIT);
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
+
+ case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
+ assert(aspect & VK_IMAGE_ASPECT_STENCIL_BIT);
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
+
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
+ if (aspect == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
+ } else if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
+ } else {
+ assert(!"Must be a depth/stencil aspect");
+ return 0;
+ }
+
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
+ if (aspect == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
+ } else if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ return vk_image_layout_to_usage_flags(
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
+ } else {
+ assert(!"Must be a depth/stencil aspect");
+ return 0;
+ }
+
+ case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
+ assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
+ /* This needs to be handled specially by the caller */
+ return 0;
+
+ case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
+ assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
+ return vk_image_layout_to_usage_flags(VK_IMAGE_LAYOUT_GENERAL, aspect);
+
+ case VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR:
+ assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
+ return VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR;
+
+ case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
+ assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
+ return VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT;
+
+ case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL:
+ if (aspect == VK_IMAGE_ASPECT_DEPTH_BIT ||
+ aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ } else {
+ assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
+ return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL:
+ return VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
+ case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
+ case VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR:
+ if (aspect == VK_IMAGE_ASPECT_DEPTH_BIT ||
+ aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ } else {
+ assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
+ return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ }
+
+ case VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR:
+ return VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR;
+ case VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR:
+ return VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR;
+ case VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR:
+ return VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR;
+ case VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR:
+ return VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR;
+ case VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR:
+ return VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR;
+ case VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR:
+ return VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR;
+ case VK_IMAGE_LAYOUT_MAX_ENUM:
+ unreachable("Invalid image layout.");
+ }
+
+ unreachable("Invalid image layout.");
+}
diff --git a/src/vulkan/util/vk_image.h b/src/vulkan/runtime/vk_image.h
index beb26064775..d69009a4abb 100644
--- a/src/vulkan/util/vk_image.h
+++ b/src/vulkan/runtime/vk_image.h
@@ -25,6 +25,7 @@
#include "vk_object.h"
+#include "util/detect_os.h"
#include "util/u_math.h"
#ifdef __cplusplus
@@ -36,13 +37,20 @@ struct vk_image {
VkImageCreateFlags create_flags;
VkImageType image_type;
+
+ /* format is from VkImageCreateInfo::format or
+ * VkExternalFormatANDROID::externalFormat. This works because only one of
+ * them can be defined and the runtime uses VkFormat for external formats.
+ */
VkFormat format;
+
VkExtent3D extent;
uint32_t mip_levels;
uint32_t array_layers;
VkSampleCountFlagBits samples;
VkImageTiling tiling;
VkImageUsageFlags usage;
+ VkSharingMode sharing_mode;
/* Derived from format */
VkImageAspectFlags aspects;
@@ -53,10 +61,13 @@ struct vk_image {
/* VK_KHR_external_memory */
VkExternalMemoryHandleTypeFlags external_handle_types;
+ /* VK_EXT_image_compression_control */
+ VkImageCompressionFlagsEXT compr_flags;
+
/* wsi_image_create_info::scanout */
bool wsi_legacy_scanout;
-#ifndef _WIN32
+#if DETECT_OS_LINUX || DETECT_OS_BSD
/* VK_EXT_drm_format_modifier
*
* Initialized by vk_image_create/init() to DRM_FORMAT_MOD_INVALID. It's
@@ -69,9 +80,13 @@ struct vk_image {
uint64_t drm_format_mod;
#endif
-#ifdef ANDROID
- /* VK_ANDROID_external_memory_android_hardware_buffer */
- uint64_t android_external_format;
+#if DETECT_OS_ANDROID
+ /* AHARDWAREBUFFER_FORMAT for this image or 0
+ *
+ * A default is provided by the Vulkan runtime code based on the VkFormat
+ * but it may be overridden by the driver as needed.
+ */
+ uint32_t ahb_format;
#endif
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vk_image, base, VkImage,
@@ -90,6 +105,13 @@ void vk_image_destroy(struct vk_device *device,
const VkAllocationCallbacks *alloc,
struct vk_image *image);
+VkResult
+vk_image_create_get_format_list(struct vk_device *device,
+ const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFormat **formats,
+ uint32_t *format_count);
+
void vk_image_set_format(struct vk_image *image, VkFormat format);
VkImageUsageFlags vk_image_usage(const struct vk_image *image,
@@ -125,6 +147,84 @@ vk_image_subresource_level_count(const struct vk_image *image,
image->mip_levels - range->baseMipLevel : range->levelCount;
}
+static inline VkExtent3D
+vk_image_sanitize_extent(const struct vk_image *image,
+ const VkExtent3D imageExtent)
+{
+ switch (image->image_type) {
+ case VK_IMAGE_TYPE_1D:
+ return (VkExtent3D) { imageExtent.width, 1, 1 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
+ case VK_IMAGE_TYPE_3D:
+ return imageExtent;
+ default:
+ unreachable("invalid image type");
+ }
+}
+
+VkExtent3D
+vk_image_extent_to_elements(const struct vk_image *image, VkExtent3D extent);
+
+static inline VkOffset3D
+vk_image_sanitize_offset(const struct vk_image *image,
+ const VkOffset3D imageOffset)
+{
+ switch (image->image_type) {
+ case VK_IMAGE_TYPE_1D:
+ return (VkOffset3D) { imageOffset.x, 0, 0 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
+ case VK_IMAGE_TYPE_3D:
+ return imageOffset;
+ default:
+ unreachable("invalid image type");
+ }
+}
+
+VkOffset3D
+vk_image_offset_to_elements(const struct vk_image *image, VkOffset3D offset);
+
+struct vk_image_buffer_layout {
+ /**
+ * VkBufferImageCopy2::bufferRowLength or
+ * VkBufferImageCopy2::extent::width as needed.
+ */
+ uint32_t row_length;
+
+ /**
+ * VkBufferImageCopy2::bufferImageHeight or
+ * VkBufferImageCopy2::extent::height as needed.
+ */
+ uint32_t image_height;
+
+ /** Size of a single element (pixel or compressed block) in bytes */
+ uint32_t element_size_B;
+
+ /** Row stride in bytes */
+ uint32_t row_stride_B;
+
+ /** Image (or layer) stride in bytes
+ *
+ * For 1D or 2D array images, this is the stride in bytes between array
+ * slices. For 3D images, this is the stride in bytes between fixed-Z
+ * slices.
+ */
+ uint64_t image_stride_B;
+};
+
+struct vk_image_buffer_layout
+vk_image_buffer_copy_layout(const struct vk_image *image,
+ const VkBufferImageCopy2* region);
+
+struct vk_image_buffer_layout
+vk_memory_to_image_copy_layout(const struct vk_image *image,
+ const VkMemoryToImageCopyEXT* region);
+
+struct vk_image_buffer_layout
+vk_image_to_memory_copy_layout(const struct vk_image *image,
+ const VkImageToMemoryCopyEXT* region);
+
struct vk_image_view {
struct vk_object_base base;
@@ -132,6 +232,9 @@ struct vk_image_view {
struct vk_image *image;
VkImageViewType view_type;
+ /** VkImageViewCreateInfo::format or vk_image::format */
+ VkFormat format;
+
/** Image view format, relative to the selected aspects
*
* For a depth/stencil image:
@@ -145,7 +248,8 @@ struct vk_image_view {
* For color images, we have three cases:
*
* 1. It's a single-plane image in which case this is the unmodified
- * format provided to VkImageViewCreateInfo::format.
+ * format provided to VkImageViewCreateInfo::format or
+ * vk_image::format.
*
* 2. It's a YCbCr view of a multi-plane image in which case the
* client will have asked for VK_IMAGE_ASPECT_COLOR_BIT and the
@@ -158,7 +262,7 @@ struct vk_image_view {
* plane of the multi-planar format. In this case, the format will be
* the plane-compatible format requested by the client.
*/
- VkFormat format;
+ VkFormat view_format;
/* Component mapping, aka swizzle
*
@@ -199,19 +303,45 @@ struct vk_image_view {
uint32_t base_array_layer;
uint32_t layer_count;
+ /* VK_EXT_sliced_view_of_3d */
+ struct {
+ /* VkImageViewSlicedCreateInfoEXT::sliceOffset
+ *
+ * This field will be 0 for 1D and 2D images, 2D views of 3D images, or
+ * when no VkImageViewSlicedCreateInfoEXT is provided.
+ */
+ uint32_t z_slice_offset;
+
+ /* VkImageViewSlicedCreateInfoEXT::sliceCount
+ *
+ * This field will be 1 for 1D and 2D images or 2D views of 3D images.
+ * For 3D views, it will be VkImageViewSlicedCreateInfoEXT::sliceCount
+ * or image view depth (see vk_image_view::extent) when no
+ * VkImageViewSlicedCreateInfoEXT is provided.
+ */
+ uint32_t z_slice_count;
+ } storage;
+
+ /* VK_EXT_image_view_min_lod */
+ float min_lod;
+
/* Image extent at LOD 0 */
VkExtent3D extent;
/* VK_KHR_maintenance2 */
VkImageUsageFlags usage;
};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_image_view, base, VkImageView,
+ VK_OBJECT_TYPE_IMAGE_VIEW);
void vk_image_view_init(struct vk_device *device,
struct vk_image_view *image_view,
+ bool driver_internal,
const VkImageViewCreateInfo *pCreateInfo);
void vk_image_view_finish(struct vk_image_view *image_view);
void *vk_image_view_create(struct vk_device *device,
+ bool driver_internal,
const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc,
size_t size);
@@ -219,11 +349,32 @@ void vk_image_view_destroy(struct vk_device *device,
const VkAllocationCallbacks *alloc,
struct vk_image_view *image_view);
+static inline VkImageSubresourceRange
+vk_image_view_subresource_range(const struct vk_image_view *view)
+{
+ VkImageSubresourceRange range = {
+ .aspectMask = view->aspects,
+ .baseMipLevel = view->base_mip_level,
+ .levelCount = view->level_count,
+ .baseArrayLayer = view->base_array_layer,
+ .layerCount = view->layer_count,
+ };
+
+ return range;
+}
+
bool vk_image_layout_is_read_only(VkImageLayout layout,
VkImageAspectFlagBits aspect);
+bool vk_image_layout_is_depth_only(VkImageLayout layout);
+
VkImageUsageFlags vk_image_layout_to_usage_flags(VkImageLayout layout,
VkImageAspectFlagBits aspect);
+VkImageLayout vk_att_ref_stencil_layout(const VkAttachmentReference2 *att_ref,
+ const VkAttachmentDescription2 *attachments);
+VkImageLayout vk_att_desc_stencil_layout(const VkAttachmentDescription2 *att_desc,
+ bool final);
+
#ifdef __cplusplus
}
#endif
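(Editorial note, not part of the patch.) As a usage note for the helpers added above: vk_image_view_subresource_range() is convenient when a driver needs a barrier covering exactly the subresources an image view can see. The sketch below is illustrative only; vk_image_to_handle() is assumed to come from the handle-cast macros in this header, and the synchronization scopes are left for the caller to fill in.

/* Illustrative sketch: build a barrier over the subresources a view covers. */
#include "vk_image.h"

static VkImageMemoryBarrier2
example_barrier_for_view(const struct vk_image_view *view,
                         VkImageLayout old_layout, VkImageLayout new_layout)
{
   return (VkImageMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
      /* src/dst stage and access masks omitted here; a real caller
       * would fill them in for the transition being performed.
       */
      .oldLayout = old_layout,
      .newLayout = new_layout,
      .image = vk_image_to_handle(view->image),
      .subresourceRange = vk_image_view_subresource_range(view),
   };
}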
diff --git a/src/vulkan/runtime/vk_instance.c b/src/vulkan/runtime/vk_instance.c
new file mode 100644
index 00000000000..186452d16a4
--- /dev/null
+++ b/src/vulkan/runtime/vk_instance.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_instance.h"
+
+#include "util/libdrm.h"
+#include "util/perf/cpu_trace.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_dispatch_trampolines.h"
+#include "vk_log.h"
+#include "vk_util.h"
+#include "vk_debug_utils.h"
+#include "vk_physical_device.h"
+
+#if !VK_LITE_RUNTIME_INSTANCE
+#include "compiler/glsl_types.h"
+#endif
+
+#define VERSION_IS_1_0(version) \
+ (VK_API_VERSION_MAJOR(version) == 1 && VK_API_VERSION_MINOR(version) == 0)
+
+static const struct debug_control trace_options[] = {
+ {"rmv", VK_TRACE_MODE_RMV},
+ {NULL, 0},
+};
+
+VkResult
+vk_instance_init(struct vk_instance *instance,
+ const struct vk_instance_extension_table *supported_extensions,
+ const struct vk_instance_dispatch_table *dispatch_table,
+ const VkInstanceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc)
+{
+ memset(instance, 0, sizeof(*instance));
+ vk_object_base_instance_init(instance, &instance->base, VK_OBJECT_TYPE_INSTANCE);
+ instance->alloc = *alloc;
+
+ util_cpu_trace_init();
+
+ /* VK_EXT_debug_utils */
+ /* These messengers will only be used during vkCreateInstance or
+ * vkDestroyInstance calls. We do this first so that it's safe to use
+ * vk_errorf and friends.
+ */
+ list_inithead(&instance->debug_utils.instance_callbacks);
+ vk_foreach_struct_const(ext, pCreateInfo->pNext) {
+ if (ext->sType ==
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT) {
+ const VkDebugUtilsMessengerCreateInfoEXT *debugMessengerCreateInfo =
+ (const VkDebugUtilsMessengerCreateInfoEXT *)ext;
+ struct vk_debug_utils_messenger *messenger =
+ vk_alloc2(alloc, alloc, sizeof(struct vk_debug_utils_messenger), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (!messenger)
+ return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ vk_object_base_instance_init(instance, &messenger->base,
+ VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT);
+
+ messenger->alloc = *alloc;
+ messenger->severity = debugMessengerCreateInfo->messageSeverity;
+ messenger->type = debugMessengerCreateInfo->messageType;
+ messenger->callback = debugMessengerCreateInfo->pfnUserCallback;
+ messenger->data = debugMessengerCreateInfo->pUserData;
+
+ list_addtail(&messenger->link,
+ &instance->debug_utils.instance_callbacks);
+ }
+ }
+
+ uint32_t instance_version = VK_API_VERSION_1_0;
+ if (dispatch_table->EnumerateInstanceVersion)
+ dispatch_table->EnumerateInstanceVersion(&instance_version);
+
+ instance->app_info = (struct vk_app_info) { .api_version = 0 };
+ if (pCreateInfo->pApplicationInfo) {
+ const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;
+
+ instance->app_info.app_name =
+ vk_strdup(&instance->alloc, app->pApplicationName,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ instance->app_info.app_version = app->applicationVersion;
+
+ instance->app_info.engine_name =
+ vk_strdup(&instance->alloc, app->pEngineName,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ instance->app_info.engine_version = app->engineVersion;
+
+ instance->app_info.api_version = app->apiVersion;
+ }
+
+ /* From the Vulkan 1.2.199 spec:
+ *
+ * "Note:
+ *
+ * Providing a NULL VkInstanceCreateInfo::pApplicationInfo or providing
+ * an apiVersion of 0 is equivalent to providing an apiVersion of
+ * VK_MAKE_API_VERSION(0,1,0,0)."
+ */
+ if (instance->app_info.api_version == 0)
+ instance->app_info.api_version = VK_API_VERSION_1_0;
+
+ /* From the Vulkan 1.2.199 spec:
+ *
+ * VUID-VkApplicationInfo-apiVersion-04010
+ *
+ * "If apiVersion is not 0, then it must be greater than or equal to
+ * VK_API_VERSION_1_0"
+ */
+ assert(instance->app_info.api_version >= VK_API_VERSION_1_0);
+
+ /* From the Vulkan 1.2.199 spec:
+ *
+ * "Vulkan 1.0 implementations were required to return
+ * VK_ERROR_INCOMPATIBLE_DRIVER if apiVersion was larger than 1.0.
+ * Implementations that support Vulkan 1.1 or later must not return
+ * VK_ERROR_INCOMPATIBLE_DRIVER for any value of apiVersion."
+ */
+ if (VERSION_IS_1_0(instance_version) &&
+ !VERSION_IS_1_0(instance->app_info.api_version))
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+
+ instance->supported_extensions = supported_extensions;
+
+ for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+ int idx;
+ for (idx = 0; idx < VK_INSTANCE_EXTENSION_COUNT; idx++) {
+ if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
+ vk_instance_extensions[idx].extensionName) == 0)
+ break;
+ }
+
+ if (idx >= VK_INSTANCE_EXTENSION_COUNT)
+ return vk_errorf(instance, VK_ERROR_EXTENSION_NOT_PRESENT,
+ "%s not supported",
+ pCreateInfo->ppEnabledExtensionNames[i]);
+
+ if (!supported_extensions->extensions[idx])
+ return vk_errorf(instance, VK_ERROR_EXTENSION_NOT_PRESENT,
+ "%s not supported",
+ pCreateInfo->ppEnabledExtensionNames[i]);
+
+#ifdef ANDROID_STRICT
+ if (!vk_android_allowed_instance_extensions.extensions[idx])
+ return vk_errorf(instance, VK_ERROR_EXTENSION_NOT_PRESENT,
+ "%s not supported",
+ pCreateInfo->ppEnabledExtensionNames[i]);
+#endif
+
+ instance->enabled_extensions.extensions[idx] = true;
+ }
+
+ instance->dispatch_table = *dispatch_table;
+
+ /* Add common entrypoints without overwriting driver-provided ones. */
+ vk_instance_dispatch_table_from_entrypoints(
+ &instance->dispatch_table, &vk_common_instance_entrypoints, false);
+
+ if (mtx_init(&instance->debug_report.callbacks_mutex, mtx_plain) != 0)
+ return vk_error(instance, VK_ERROR_INITIALIZATION_FAILED);
+
+ list_inithead(&instance->debug_report.callbacks);
+
+ if (mtx_init(&instance->debug_utils.callbacks_mutex, mtx_plain) != 0) {
+ mtx_destroy(&instance->debug_report.callbacks_mutex);
+ return vk_error(instance, VK_ERROR_INITIALIZATION_FAILED);
+ }
+
+ list_inithead(&instance->debug_utils.callbacks);
+
+ list_inithead(&instance->physical_devices.list);
+
+ if (mtx_init(&instance->physical_devices.mutex, mtx_plain) != 0) {
+ mtx_destroy(&instance->debug_report.callbacks_mutex);
+ mtx_destroy(&instance->debug_utils.callbacks_mutex);
+ return vk_error(instance, VK_ERROR_INITIALIZATION_FAILED);
+ }
+
+ instance->trace_mode = parse_debug_string(getenv("MESA_VK_TRACE"), trace_options);
+ instance->trace_frame = (uint32_t)debug_get_num_option("MESA_VK_TRACE_FRAME", 0xFFFFFFFF);
+ instance->trace_trigger_file = secure_getenv("MESA_VK_TRACE_TRIGGER");
+
+#if !VK_LITE_RUNTIME_INSTANCE
+ glsl_type_singleton_init_or_ref();
+#endif
+
+ return VK_SUCCESS;
+}
+
+static void
+destroy_physical_devices(struct vk_instance *instance)
+{
+ list_for_each_entry_safe(struct vk_physical_device, pdevice,
+ &instance->physical_devices.list, link) {
+ list_del(&pdevice->link);
+ instance->physical_devices.destroy(pdevice);
+ }
+}
+
+void
+vk_instance_finish(struct vk_instance *instance)
+{
+ destroy_physical_devices(instance);
+
+#if !VK_LITE_RUNTIME_INSTANCE
+ glsl_type_singleton_decref();
+#endif
+
+ if (unlikely(!list_is_empty(&instance->debug_utils.callbacks))) {
+ list_for_each_entry_safe(struct vk_debug_utils_messenger, messenger,
+ &instance->debug_utils.callbacks, link) {
+ list_del(&messenger->link);
+ vk_object_base_finish(&messenger->base);
+ vk_free2(&instance->alloc, &messenger->alloc, messenger);
+ }
+ }
+ if (unlikely(!list_is_empty(&instance->debug_utils.instance_callbacks))) {
+ list_for_each_entry_safe(struct vk_debug_utils_messenger, messenger,
+ &instance->debug_utils.instance_callbacks,
+ link) {
+ list_del(&messenger->link);
+ vk_object_base_finish(&messenger->base);
+ vk_free2(&instance->alloc, &messenger->alloc, messenger);
+ }
+ }
+ mtx_destroy(&instance->debug_report.callbacks_mutex);
+ mtx_destroy(&instance->debug_utils.callbacks_mutex);
+ mtx_destroy(&instance->physical_devices.mutex);
+ vk_free(&instance->alloc, (char *)instance->app_info.app_name);
+ vk_free(&instance->alloc, (char *)instance->app_info.engine_name);
+ vk_object_base_finish(&instance->base);
+}
+
+VkResult
+vk_enumerate_instance_extension_properties(
+ const struct vk_instance_extension_table *supported_extensions,
+ uint32_t *pPropertyCount,
+ VkExtensionProperties *pProperties)
+{
+ VK_OUTARRAY_MAKE_TYPED(VkExtensionProperties, out, pProperties, pPropertyCount);
+
+ for (int i = 0; i < VK_INSTANCE_EXTENSION_COUNT; i++) {
+ if (!supported_extensions->extensions[i])
+ continue;
+
+#ifdef ANDROID_STRICT
+ if (!vk_android_allowed_instance_extensions.extensions[i])
+ continue;
+#endif
+
+ vk_outarray_append_typed(VkExtensionProperties, &out, prop) {
+ *prop = vk_instance_extensions[i];
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+PFN_vkVoidFunction
+vk_instance_get_proc_addr(const struct vk_instance *instance,
+ const struct vk_instance_entrypoint_table *entrypoints,
+ const char *name)
+{
+ PFN_vkVoidFunction func;
+
+ /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying
+ * exactly when we must return a valid function pointer, when we must return
+ * NULL, and when the behavior is left undefined. See that table for details.
+ */
+ if (name == NULL)
+ return NULL;
+
+#define LOOKUP_VK_ENTRYPOINT(entrypoint) \
+ if (strcmp(name, "vk" #entrypoint) == 0) \
+ return (PFN_vkVoidFunction)entrypoints->entrypoint
+
+ LOOKUP_VK_ENTRYPOINT(EnumerateInstanceExtensionProperties);
+ LOOKUP_VK_ENTRYPOINT(EnumerateInstanceLayerProperties);
+ LOOKUP_VK_ENTRYPOINT(EnumerateInstanceVersion);
+ LOOKUP_VK_ENTRYPOINT(CreateInstance);
+
+ /* GetInstanceProcAddr() can also be called with a NULL instance.
+ * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
+ */
+ LOOKUP_VK_ENTRYPOINT(GetInstanceProcAddr);
+
+#undef LOOKUP_VK_ENTRYPOINT
+
+ /* Beginning with ICD interface v7, the following functions can also be
+ * retrieved via vk_icdGetInstanceProcAddr.
+ */
+
+ if (strcmp(name, "vk_icdNegotiateLoaderICDInterfaceVersion") == 0)
+ return (PFN_vkVoidFunction)vk_icdNegotiateLoaderICDInterfaceVersion;
+ if (strcmp(name, "vk_icdGetPhysicalDeviceProcAddr") == 0)
+ return (PFN_vkVoidFunction)vk_icdGetPhysicalDeviceProcAddr;
+#ifdef _WIN32
+ if (strcmp(name, "vk_icdEnumerateAdapterPhysicalDevices") == 0)
+ return (PFN_vkVoidFunction)vk_icdEnumerateAdapterPhysicalDevices;
+#endif
+
+ if (instance == NULL)
+ return NULL;
+
+ func = vk_instance_dispatch_table_get_if_supported(&instance->dispatch_table,
+ name,
+ instance->app_info.api_version,
+ &instance->enabled_extensions);
+ if (func != NULL)
+ return func;
+
+ func = vk_physical_device_dispatch_table_get_if_supported(&vk_physical_device_trampolines,
+ name,
+ instance->app_info.api_version,
+ &instance->enabled_extensions);
+ if (func != NULL)
+ return func;
+
+ func = vk_device_dispatch_table_get_if_supported(&vk_device_trampolines,
+ name,
+ instance->app_info.api_version,
+ &instance->enabled_extensions,
+ NULL);
+ if (func != NULL)
+ return func;
+
+ return NULL;
+}
+
+PFN_vkVoidFunction
+vk_instance_get_proc_addr_unchecked(const struct vk_instance *instance,
+ const char *name)
+{
+ PFN_vkVoidFunction func;
+
+ if (instance == NULL || name == NULL)
+ return NULL;
+
+ func = vk_instance_dispatch_table_get(&instance->dispatch_table, name);
+ if (func != NULL)
+ return func;
+
+ func = vk_physical_device_dispatch_table_get(
+ &vk_physical_device_trampolines, name);
+ if (func != NULL)
+ return func;
+
+ func = vk_device_dispatch_table_get(&vk_device_trampolines, name);
+ if (func != NULL)
+ return func;
+
+ return NULL;
+}
+
+PFN_vkVoidFunction
+vk_instance_get_physical_device_proc_addr(const struct vk_instance *instance,
+ const char *name)
+{
+ if (instance == NULL || name == NULL)
+ return NULL;
+
+ return vk_physical_device_dispatch_table_get_if_supported(&vk_physical_device_trampolines,
+ name,
+ instance->app_info.api_version,
+ &instance->enabled_extensions);
+}
+
+void
+vk_instance_add_driver_trace_modes(struct vk_instance *instance,
+ const struct debug_control *modes)
+{
+ instance->trace_mode |= parse_debug_string(getenv("MESA_VK_TRACE"), modes);
+}
+
+static VkResult
+enumerate_drm_physical_devices_locked(struct vk_instance *instance)
+{
+ /* libdrm returns a maximum of 256 devices (see MAX_DRM_NODES in libdrm) */
+ drmDevicePtr devices[256];
+ int max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
+
+ if (max_devices < 1)
+ return VK_SUCCESS;
+
+ VkResult result;
+ for (uint32_t i = 0; i < (uint32_t)max_devices; i++) {
+ struct vk_physical_device *pdevice;
+ result = instance->physical_devices.try_create_for_drm(instance, devices[i], &pdevice);
+
+ /* Incompatible DRM device, skip. */
+ if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
+ result = VK_SUCCESS;
+ continue;
+ }
+
+ /* Error creating the physical device, report the error. */
+ if (result != VK_SUCCESS)
+ break;
+
+ list_addtail(&pdevice->link, &instance->physical_devices.list);
+ }
+
+ drmFreeDevices(devices, max_devices);
+ return result;
+}
+
+static VkResult
+enumerate_physical_devices_locked(struct vk_instance *instance)
+{
+ if (instance->physical_devices.enumerate) {
+ VkResult result = instance->physical_devices.enumerate(instance);
+ if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ return result;
+ }
+
+ VkResult result = VK_SUCCESS;
+
+ if (instance->physical_devices.try_create_for_drm) {
+ result = enumerate_drm_physical_devices_locked(instance);
+ if (result != VK_SUCCESS) {
+ destroy_physical_devices(instance);
+ return result;
+ }
+ }
+
+ return result;
+}
+
+static VkResult
+enumerate_physical_devices(struct vk_instance *instance)
+{
+ VkResult result = VK_SUCCESS;
+
+ mtx_lock(&instance->physical_devices.mutex);
+ if (!instance->physical_devices.enumerated) {
+ result = enumerate_physical_devices_locked(instance);
+ if (result == VK_SUCCESS)
+ instance->physical_devices.enumerated = true;
+ }
+ mtx_unlock(&instance->physical_devices.mutex);
+
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_EnumeratePhysicalDevices(VkInstance _instance, uint32_t *pPhysicalDeviceCount,
+ VkPhysicalDevice *pPhysicalDevices)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ VK_OUTARRAY_MAKE_TYPED(VkPhysicalDevice, out, pPhysicalDevices, pPhysicalDeviceCount);
+
+ VkResult result = enumerate_physical_devices(instance);
+ if (result != VK_SUCCESS)
+ return result;
+
+ list_for_each_entry(struct vk_physical_device, pdevice,
+ &instance->physical_devices.list, link) {
+ vk_outarray_append_typed(VkPhysicalDevice, &out, element) {
+ *element = vk_physical_device_to_handle(pdevice);
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+#ifdef _WIN32
+/* Note: This entrypoint is not exported from ICD DLLs, and is only exposed via
+ * vk_icdGetInstanceProcAddr for loaders with interface v7. This is to avoid
+ * a design flaw in the original loader implementation, which prevented enumeration
+ * of physical devices that didn't have a LUID. This flaw was fixed prior to the
+ * implementation of v7, so v7 loaders are unaffected, and it's safe to support this.
+ */
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_icdEnumerateAdapterPhysicalDevices(VkInstance _instance, LUID adapterLUID,
+ uint32_t *pPhysicalDeviceCount,
+ VkPhysicalDevice *pPhysicalDevices)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ VK_OUTARRAY_MAKE_TYPED(VkPhysicalDevice, out, pPhysicalDevices, pPhysicalDeviceCount);
+
+ VkResult result = enumerate_physical_devices(instance);
+ if (result != VK_SUCCESS)
+ return result;
+
+ list_for_each_entry(struct vk_physical_device, pdevice,
+ &instance->physical_devices.list, link) {
+ if (pdevice->properties.deviceLUIDValid &&
+ memcmp(pdevice->properties.deviceLUID, &adapterLUID, sizeof(adapterLUID)) == 0) {
+ vk_outarray_append_typed(VkPhysicalDevice, &out, element) {
+ *element = vk_physical_device_to_handle(pdevice);
+ }
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+#endif
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_EnumeratePhysicalDeviceGroups(VkInstance _instance, uint32_t *pGroupCount,
+ VkPhysicalDeviceGroupProperties *pGroupProperties)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ VK_OUTARRAY_MAKE_TYPED(VkPhysicalDeviceGroupProperties, out, pGroupProperties,
+ pGroupCount);
+
+ VkResult result = enumerate_physical_devices(instance);
+ if (result != VK_SUCCESS)
+ return result;
+
+ list_for_each_entry(struct vk_physical_device, pdevice,
+ &instance->physical_devices.list, link) {
+ vk_outarray_append_typed(VkPhysicalDeviceGroupProperties, &out, p) {
+ p->physicalDeviceCount = 1;
+ memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
+ p->physicalDevices[0] = vk_physical_device_to_handle(pdevice);
+ p->subsetAllocation = false;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+/* For Windows, PUBLIC is default-defined to __declspec(dllexport) to automatically export the
+ * public entrypoints from a DLL. However, this declspec needs to match between declaration and
+ * definition, and this attribute is not present on the prototypes specified in vk_icd.h. Instead,
+ * we'll use a .def file to manually export these entrypoints on Windows.
+ */
+#ifdef _WIN32
+#undef PUBLIC
+#define PUBLIC
+#endif
+
+/* With version 4+ of the loader interface the ICD should expose
+ * vk_icdGetPhysicalDeviceProcAddr()
+ */
+PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
+vk_icdGetPhysicalDeviceProcAddr(VkInstance _instance,
+ const char *pName)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ return vk_instance_get_physical_device_proc_addr(instance, pName);
+}
+
+static uint32_t vk_icd_version = 7;
+
+uint32_t
+vk_get_negotiated_icd_version(void)
+{
+ return vk_icd_version;
+}
+
+PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
+vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
+{
+ /* For the full details on loader interface versioning, see
+ * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
+ * What follows is a condensed summary, to help you navigate the large and
+ * confusing official doc.
+ *
+ * - Loader interface v0 is incompatible with later versions. We don't
+ * support it.
+ *
+ * - In loader interface v1:
+ * - The first ICD entrypoint called by the loader is
+ * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
+ * entrypoint.
+ * - The ICD must statically expose no other Vulkan symbol unless it is
+ * linked with -Bsymbolic.
+ * - Each dispatchable Vulkan handle created by the ICD must be
+ * a pointer to a struct whose first member is VK_LOADER_DATA. The
+ * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
+ * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
+ * vkDestroySurfaceKHR(). The ICD must be capable of working with
+ * such loader-managed surfaces.
+ *
+ * - Loader interface v2 differs from v1 in:
+ * - The first ICD entrypoint called by the loader is
+ * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
+ * statically expose this entrypoint.
+ *
+ * - Loader interface v3 differs from v2 in:
+ * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
+ * vkDestroySurfaceKHR(), and any other API that uses VkSurfaceKHR,
+ * because the loader no longer does so.
+ *
+ * - Loader interface v4 differs from v3 in:
+ * - The ICD must implement vk_icdGetPhysicalDeviceProcAddr().
+ *
+ * - Loader interface v5 differs from v4 in:
+ * - The ICD must support Vulkan API version 1.1 and must not return
+ * VK_ERROR_INCOMPATIBLE_DRIVER from vkCreateInstance() unless a
+ * Vulkan Loader with interface v4 or smaller is being used and the
+ * application provides an API version that is greater than 1.0.
+ *
+ * - Loader interface v6 differs from v5 in:
+ * - Windows ICDs may export vk_icdEnumerateAdapterPhysicalDevices,
+ * to tie a physical device to a WDDM adapter LUID. This allows the
+ * loader to sort physical devices according to the same policy as other
+ * graphics APIs.
+ * - Note: A design flaw in the loader implementation of v6 means we do
+ * not actually support returning this function to v6 loaders. See the
+ * comments around the implementation above. It's still fine to report
+ * version number 6 without this method being implemented, however.
+ *
+ * - Loader interface v7 differs from v6 in:
+ * - If implemented, the ICD must return the following functions via
+ * vk_icdGetInstanceProcAddr:
+ * - vk_icdNegotiateLoaderICDInterfaceVersion
+ * - vk_icdGetPhysicalDeviceProcAddr
+ * - vk_icdEnumerateAdapterPhysicalDevices
+ * Exporting these functions from the ICD is optional. If
+ * vk_icdNegotiateLoaderICDInterfaceVersion is not exported from the
+ * module, or if VK_LUNARG_direct_driver_loading is being used, then
+ * vk_icdGetInstanceProcAddr will be the first method called, to query
+ * for vk_icdNegotiateLoaderICDInterfaceVersion.
+ */
+ vk_icd_version = MIN2(vk_icd_version, *pSupportedVersion);
+ *pSupportedVersion = vk_icd_version;
+ return VK_SUCCESS;
+}
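(Editorial note, not part of the patch.) To make the negotiation above concrete: a loader that only supports interface v5 passes 5, the runtime clamps its own maximum of 7 down to MIN2(7, 5) = 5, and vk_get_negotiated_icd_version() reports 5 from then on. A hypothetical caller-side sketch:

#include <assert.h>

static void
example_negotiate_as_v5_loader(void)
{
   uint32_t version = 5;   /* the loader's maximum interface version */
   VkResult res = vk_icdNegotiateLoaderICDInterfaceVersion(&version);
   assert(res == VK_SUCCESS);
   assert(version == 5);                          /* MIN2(7, 5) */
   assert(vk_get_negotiated_icd_version() == 5);  /* remembered by the runtime */
}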
diff --git a/src/vulkan/runtime/vk_instance.h b/src/vulkan/runtime/vk_instance.h
new file mode 100644
index 00000000000..f0e6a3fdb99
--- /dev/null
+++ b/src/vulkan/runtime/vk_instance.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_INSTANCE_H
+#define VK_INSTANCE_H
+
+#include "vk_dispatch_table.h"
+#include "vk_extensions.h"
+#include "vk_object.h"
+
+#include "c11/threads.h"
+#include "util/list.h"
+#include "util/u_debug.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_app_info {
+ /** VkApplicationInfo::pApplicationName */
+ const char* app_name;
+
+ /** VkApplicationInfo::applicationVersion */
+ uint32_t app_version;
+
+ /** VkApplicationInfo::pEngineName */
+ const char* engine_name;
+
+ /** VkApplicationInfo::engineVersion */
+ uint32_t engine_version;
+
+ /** VkApplicationInfo::apiVersion or `VK_API_VERSION_1_0`
+ *
+ * If the application does not provide a `pApplicationInfo` or the
+ * `apiVersion` field is 0, this is set to `VK_API_VERSION_1_0`.
+ */
+ uint32_t api_version;
+};
+
+struct _drmDevice;
+struct vk_physical_device;
+
+enum vk_trace_mode {
+ /** Radeon Memory Visualizer */
+ VK_TRACE_MODE_RMV = 1 << 0,
+
+ /** Number of common trace modes. */
+ VK_TRACE_MODE_COUNT = 1,
+};
+
+/** Base struct for all `VkInstance` implementations
+ *
+ * This contains data structures necessary for detecting enabled extensions,
+ * handling entrypoint dispatch, and implementing `vkGetInstanceProcAddr()`.
+ * It also contains data copied from the `VkInstanceCreateInfo` such as the
+ * application information.
+ */
+struct vk_instance {
+ struct vk_object_base base;
+
+ /** Allocator used when creating this instance
+ *
+ * This is used as a fall-back for when a NULL pAllocator is passed into a
+ * device-level create function such as vkCreateImage().
+ */
+ VkAllocationCallbacks alloc;
+
+ /** VkInstanceCreateInfo::pApplicationInfo */
+ struct vk_app_info app_info;
+
+ /** Table of all supported instance extensions
+ *
+ * This is the static const struct passed by the driver as the
+ * `supported_extensions` parameter to `vk_instance_init()`.
+ */
+ const struct vk_instance_extension_table *supported_extensions;
+
+ /** Table of all enabled instance extensions
+ *
+ * This is generated automatically as part of `vk_instance_init()` from
+ * VkInstanceCreateInfo::ppEnabledExtensionNames.
+ */
+ struct vk_instance_extension_table enabled_extensions;
+
+ /** Instance-level dispatch table */
+ struct vk_instance_dispatch_table dispatch_table;
+
+ /* VK_EXT_debug_report debug callbacks */
+ struct {
+ mtx_t callbacks_mutex;
+ struct list_head callbacks;
+ } debug_report;
+
+ /* VK_EXT_debug_utils */
+ struct {
+ /* These callbacks are only used while creating or destroying an
+ * instance
+ */
+ struct list_head instance_callbacks;
+ mtx_t callbacks_mutex;
+ /* Persistent callbacks */
+ struct list_head callbacks;
+ } debug_utils;
+
+ /** List of all physical devices and callbacks
+ *
+ * This is used for automatic physical device creation,
+ * deletion and enumeration.
+ */
+ struct {
+ struct list_head list;
+ bool enumerated;
+
+ /** Enumerate physical devices for this instance
+ *
+ * The driver can implement this callback for custom physical device
+ * enumeration. The returned value must be a valid return code of
+ * vkEnumeratePhysicalDevices.
+ *
+ * Note that the loader calls vkEnumeratePhysicalDevices of all
+ * installed ICDs and fails device enumeration when any of the calls
+ * fails. The driver should return VK_SUCCESS when it does not find any
+ * compatible device.
+ *
+ * If this callback is not set, try_create_for_drm will be used for
+ * enumeration.
+ */
+ VkResult (*enumerate)(struct vk_instance *instance);
+
+ /** Try to create a physical device for a drm device
+ *
+ * The returned value must be a valid return code of
+ * vkEnumeratePhysicalDevices, or VK_ERROR_INCOMPATIBLE_DRIVER. When
+ * VK_ERROR_INCOMPATIBLE_DRIVER is returned, the error and the drm
+ * device are silently ignored.
+ */
+ VkResult (*try_create_for_drm)(struct vk_instance *instance,
+ struct _drmDevice *device,
+ struct vk_physical_device **out);
+
+ /** Handle the destruction of a physical device
+ *
+ * This callback has to be implemented when using common physical device
+ * management. The device pointer and any resource allocated for the
+ * device should be freed here.
+ */
+ void (*destroy)(struct vk_physical_device *pdevice);
+
+ mtx_t mutex;
+ } physical_devices;
+
+ /** Enabled tracing modes */
+ uint64_t trace_mode;
+
+ uint32_t trace_frame;
+ char *trace_trigger_file;
+};
+
+VK_DEFINE_HANDLE_CASTS(vk_instance, base, VkInstance,
+ VK_OBJECT_TYPE_INSTANCE);
+
+/** Initialize a vk_instance
+ *
+ * Along with initializing the data structures in `vk_instance`, this function
+ * validates the Vulkan version number provided by the client and checks that
+ * every extension specified by
+ * ``VkInstanceCreateInfo::ppEnabledExtensionNames`` is actually supported by
+ * the implementation and returns `VK_ERROR_EXTENSION_NOT_PRESENT` if an
+ * unsupported extension is requested.
+ *
+ * :param instance: |out| The instance to initialize
+ * :param supported_extensions: |in| Table of all instance extensions supported
+ * by this instance
+ * :param dispatch_table: |in| Instance-level dispatch table
+ * :param pCreateInfo: |in| VkInstanceCreateInfo pointer passed to
+ * `vkCreateInstance()`
+ * :param alloc: |in| Allocation callbacks used to create this
+ * instance; must not be `NULL`
+ */
+VkResult MUST_CHECK
+vk_instance_init(struct vk_instance *instance,
+ const struct vk_instance_extension_table *supported_extensions,
+ const struct vk_instance_dispatch_table *dispatch_table,
+ const VkInstanceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc);
+
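(Editorial note, not part of the patch.) As a usage illustration, a driver's vkCreateInstance() typically allocates its instance, calls vk_instance_init(), and then fills in the physical_devices callbacks documented above. The sketch below is hypothetical: the drv_* identifiers are placeholders for whatever the driver provides, and vk_alloc()/vk_free()/vk_default_allocator() are assumed to come from vk_alloc.h.

/* Hypothetical driver-side sketch; drv_* identifiers are placeholders. */
#include "vk_alloc.h"
#include "vk_instance.h"

VKAPI_ATTR VkResult VKAPI_CALL
drv_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkInstance *pInstance)
{
   if (pAllocator == NULL)
      pAllocator = vk_default_allocator();

   struct vk_instance *instance =
      vk_alloc(pAllocator, sizeof(*instance), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (instance == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   struct vk_instance_dispatch_table dispatch_table;
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &drv_instance_entrypoints, true);

   VkResult result = vk_instance_init(instance, &drv_instance_extensions,
                                      &dispatch_table, pCreateInfo, pAllocator);
   if (result != VK_SUCCESS) {
      vk_free(pAllocator, instance);
      return result;
   }

   /* Let the common code enumerate DRM devices on the driver's behalf. */
   instance->physical_devices.try_create_for_drm = drv_try_create_physical_device;
   instance->physical_devices.destroy = drv_destroy_physical_device;

   *pInstance = vk_instance_to_handle(instance);
   return VK_SUCCESS;
}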
+/** Tears down a vk_instance
+ *
+ * :param instance: |out| The instance to tear down
+ */
+void
+vk_instance_finish(struct vk_instance *instance);
+
+/** Implementation of vkEnumerateInstanceExtensionProperties() */
+VkResult
+vk_enumerate_instance_extension_properties(
+ const struct vk_instance_extension_table *supported_extensions,
+ uint32_t *pPropertyCount,
+ VkExtensionProperties *pProperties);
+
+/** Implementation of vkGetInstanceProcAddr() */
+PFN_vkVoidFunction
+vk_instance_get_proc_addr(const struct vk_instance *instance,
+ const struct vk_instance_entrypoint_table *entrypoints,
+ const char *name);
+
+/** Unchecked version of vk_instance_get_proc_addr
+ *
+ * This is identical to `vk_instance_get_proc_addr()` except that it doesn't
+ * check whether extensions are enabled before returning function pointers.
+ * This is useful in window-system code where we may use extensions without
+ * the client explicitly enabling them.
+ */
+PFN_vkVoidFunction
+vk_instance_get_proc_addr_unchecked(const struct vk_instance *instance,
+ const char *name);
+
+/** Implementation of vk_icdGetPhysicalDeviceProcAddr() */
+PFN_vkVoidFunction
+vk_instance_get_physical_device_proc_addr(const struct vk_instance *instance,
+ const char *name);
+
+void
+vk_instance_add_driver_trace_modes(struct vk_instance *instance,
+ const struct debug_control *modes);
+
+uint32_t
+vk_get_negotiated_icd_version(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_INSTANCE_H */
diff --git a/src/vulkan/runtime/vk_limits.h b/src/vulkan/runtime/vk_limits.h
new file mode 100644
index 00000000000..50bfde0c0eb
--- /dev/null
+++ b/src/vulkan/runtime/vk_limits.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_LIMITS_H
+#define VK_LIMITS_H
+
+/* Maximum number of shader stages in a single graphics pipeline */
+#define MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES 5
+
+#define MESA_VK_MAX_DESCRIPTOR_SETS 32
+
+/* From the Vulkan 1.3.274 spec:
+ *
+ * VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292
+ *
+ * "Any two elements of pPushConstantRanges must not include the same
+ * stage in stageFlags"
+ *
+ * and
+ *
+ * VUID-VkPushConstantRange-stageFlags-requiredbitmask
+ *
+ * "stageFlags must not be 0"
+ *
+ * This means that the number of push constant ranges is effectively bounded
+ * by the number of possible shader stages. Not the number of stages that can
+ * be compiled together (a pipeline layout can be used in multiple pipelnes
+ * wth different sets of shaders) but the total number of stage bits supported
+ * by the implementation. Currently, those are
+ *
+ * - VK_SHADER_STAGE_VERTEX_BIT
+ * - VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
+ * - VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
+ * - VK_SHADER_STAGE_GEOMETRY_BIT
+ * - VK_SHADER_STAGE_FRAGMENT_BIT
+ * - VK_SHADER_STAGE_COMPUTE_BIT
+ * - VK_SHADER_STAGE_RAYGEN_BIT_KHR
+ * - VK_SHADER_STAGE_ANY_HIT_BIT_KHR
+ * - VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
+ * - VK_SHADER_STAGE_MISS_BIT_KHR
+ * - VK_SHADER_STAGE_INTERSECTION_BIT_KHR
+ * - VK_SHADER_STAGE_CALLABLE_BIT_KHR
+ * - VK_SHADER_STAGE_TASK_BIT_EXT
+ * - VK_SHADER_STAGE_MESH_BIT_EXT
+ */
+#define MESA_VK_MAX_PUSH_CONSTANT_RANGES 14
+
+#define MESA_VK_MAX_VERTEX_BINDINGS 32
+#define MESA_VK_MAX_VERTEX_ATTRIBUTES 32
+
+/* As of June 29, 2022, according to vulkan.gpuinfo.org, 99% of all reports
+ * listed a max vertex stride that fits in 16 bits.
+ */
+#define MESA_VK_MAX_VERTEX_BINDING_STRIDE UINT16_MAX
+
+#define MESA_VK_MAX_VIEWPORTS 16
+#define MESA_VK_MAX_SCISSORS 16
+#define MESA_VK_MAX_DISCARD_RECTANGLES 4
+
+/* As of June 29, 2022, according to vulkan.gpuinfo.org, no reports list more
+ * than 16 samples for framebufferColorSampleCounts except one layer running
+ * on top of WARP on Windows.
+ */
+#define MESA_VK_MAX_SAMPLES 16
+
+/* As of June 29, 2022, according to vulkan.gpuinfo.org, the only GPU vendor
+ * claiming support for maxSampleLocationGridSize greater than 1x1 is AMD,
+ * which supports 2x2 but only up to 8 samples.
+ */
+#define MESA_VK_MAX_SAMPLE_LOCATIONS 32
+
+#define MESA_VK_MAX_COLOR_ATTACHMENTS 8
+
+/* Since VkSubpassDescription2::viewMask is a 32-bit integer, there are a
+ * maximum of 32 possible views.
+ */
+#define MESA_VK_MAX_MULTIVIEW_VIEW_COUNT 32
+
+#endif /* VK_LIMITS_H */
diff --git a/src/vulkan/runtime/vk_log.c b/src/vulkan/runtime/vk_log.c
new file mode 100644
index 00000000000..afe42aedcd7
--- /dev/null
+++ b/src/vulkan/runtime/vk_log.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_log.h"
+#include "vk_debug_utils.h"
+#include "vk_debug_report.h"
+
+#include "vk_command_buffer.h"
+#include "vk_enum_to_str.h"
+#include "vk_queue.h"
+#include "vk_device.h"
+#include "vk_physical_device.h"
+
+#include "util/ralloc.h"
+#include "util/log.h"
+
+static struct vk_device *
+vk_object_to_device(struct vk_object_base *obj)
+{
+ assert(obj->device);
+ return obj->device;
+}
+
+static struct vk_physical_device *
+vk_object_to_physical_device(struct vk_object_base *obj)
+{
+ switch (obj->type) {
+ case VK_OBJECT_TYPE_INSTANCE:
+ unreachable("Unsupported object type");
+ case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
+ return container_of(obj, struct vk_physical_device, base);
+ case VK_OBJECT_TYPE_SURFACE_KHR:
+ case VK_OBJECT_TYPE_DISPLAY_KHR:
+ case VK_OBJECT_TYPE_DISPLAY_MODE_KHR:
+ case VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT:
+ case VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT:
+ unreachable("Unsupported object type");
+ default:
+ return vk_object_to_device(obj)->physical;
+ }
+}
+
+static struct vk_instance *
+vk_object_to_instance(struct vk_object_base *obj)
+{
+ if (obj == NULL)
+ return NULL;
+
+ if (obj->type == VK_OBJECT_TYPE_INSTANCE) {
+ return container_of(obj, struct vk_instance, base);
+ } else {
+ return vk_object_to_physical_device(obj)->instance;
+ }
+}
+
+void
+__vk_log_impl(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT types,
+ int object_count,
+ const void **objects_or_instance,
+ const char *file,
+ int line,
+ const char *format,
+ ...)
+{
+ struct vk_instance *instance = NULL;
+ struct vk_object_base **objects = NULL;
+ if (object_count == 0) {
+ instance = (struct vk_instance *) objects_or_instance;
+ } else {
+ objects = (struct vk_object_base **) objects_or_instance;
+ for (unsigned i = 0; i < object_count; i++) {
+ if (unlikely(objects[i] == NULL)) {
+ mesa_logw("vk_log*() called with NULL object\n");
+ continue;
+ }
+
+ if (unlikely(!objects[i]->client_visible)) {
+ mesa_logw("vk_log*() called with client-invisible object %p "
+ "of type %s", objects[i],
+ vk_ObjectType_to_str(objects[i]->type));
+ }
+
+ if (!instance) {
+ instance = vk_object_to_instance(objects[i]);
+ assert(instance->base.client_visible);
+ } else {
+ assert(vk_object_to_instance(objects[i]) == instance);
+ }
+ break;
+ }
+ }
+
+#if !MESA_DEBUG
+ if (unlikely(!instance) ||
+ (likely(list_is_empty(&instance->debug_utils.callbacks)) &&
+ likely(list_is_empty(&instance->debug_report.callbacks))))
+ return;
+#endif
+
+ va_list va;
+ char *message = NULL;
+
+ va_start(va, format);
+ message = ralloc_vasprintf(NULL, format, va);
+ va_end(va);
+
+ char *message_idname = ralloc_asprintf(NULL, "%s:%d", file, line);
+
+#if MESA_DEBUG
+ switch (severity) {
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+ mesa_logd("%s: %s", message_idname, message);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ mesa_logi("%s: %s", message_idname, message);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ if (types & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT)
+ mesa_logw("%s: PERF: %s", message_idname, message);
+ else
+ mesa_logw("%s: %s", message_idname, message);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ mesa_loge("%s: %s", message_idname, message);
+ break;
+ default:
+ unreachable("Invalid debug message severity");
+ break;
+ }
+
+ if (!instance) {
+ ralloc_free(message);
+ ralloc_free(message_idname);
+ return;
+ }
+#endif
+
+ if (!instance->base.client_visible) {
+ vk_debug_message_instance(instance, severity, types,
+ message_idname, 0, message);
+ ralloc_free(message);
+ ralloc_free(message_idname);
+ return;
+ }
+
+ /* If VK_EXT_debug_utils messengers have been set up, form the
+ * message */
+ if (!list_is_empty(&instance->debug_utils.callbacks)) {
+ VkDebugUtilsMessengerCallbackDataEXT cb_data = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT,
+ .pMessageIdName = message_idname,
+ .messageIdNumber = 0,
+ .pMessage = message,
+ };
+
+ VkDebugUtilsObjectNameInfoEXT *object_name_infos =
+ ralloc_array(NULL, VkDebugUtilsObjectNameInfoEXT, object_count);
+
+ ASSERTED int cmdbuf_n = 0, queue_n = 0, obj_n = 0;
+ for (int i = 0; i < object_count; i++) {
+ struct vk_object_base *base = objects[i];
+ if (base == NULL || !base->client_visible)
+ continue;
+
+ switch (base->type) {
+ case VK_OBJECT_TYPE_COMMAND_BUFFER: {
+ /* We allow at most one command buffer to be submitted at a time */
+ assert(++cmdbuf_n <= 1);
+ struct vk_command_buffer *cmd_buffer =
+ (struct vk_command_buffer *)base;
+ if (cmd_buffer->labels.size > 0) {
+ cb_data.cmdBufLabelCount = util_dynarray_num_elements(
+ &cmd_buffer->labels, VkDebugUtilsLabelEXT);
+ cb_data.pCmdBufLabels = cmd_buffer->labels.data;
+ }
+ break;
+ }
+
+ case VK_OBJECT_TYPE_QUEUE: {
+ /* We allow at most one queue to be submitted at a time */
+ assert(++queue_n <= 1);
+ struct vk_queue *queue = (struct vk_queue *)base;
+ if (queue->labels.size > 0) {
+ cb_data.queueLabelCount =
+ util_dynarray_num_elements(&queue->labels, VkDebugUtilsLabelEXT);
+ cb_data.pQueueLabels = queue->labels.data;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ object_name_infos[obj_n++] = (VkDebugUtilsObjectNameInfoEXT){
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
+ .pNext = NULL,
+ .objectType = base->type,
+ .objectHandle = (uint64_t)(uintptr_t)base,
+ .pObjectName = base->object_name,
+ };
+ }
+ cb_data.objectCount = obj_n;
+ cb_data.pObjects = object_name_infos;
+
+ vk_debug_message(instance, severity, types, &cb_data);
+
+ ralloc_free(object_name_infos);
+ }
+
+ /* If VK_EXT_debug_report callbacks also have been set up, forward
+ * the message there as well */
+ if (!list_is_empty(&instance->debug_report.callbacks)) {
+ VkDebugReportFlagsEXT flags = 0;
+
+ switch (severity) {
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+ flags |= VK_DEBUG_REPORT_DEBUG_BIT_EXT;
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ flags |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT;
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ if (types & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT)
+ flags |= VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
+ else
+ flags |= VK_DEBUG_REPORT_WARNING_BIT_EXT;
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ flags |= VK_DEBUG_REPORT_ERROR_BIT_EXT;
+ break;
+ default:
+ unreachable("Invalid debug message severity");
+ break;
+ }
+
+ /* VK_EXT_debug_report-provided callback accepts only one object
+ * related to the message. Since they are given to us in
+ * decreasing order of importance, we're forwarding the first
+ * one.
+ */
+ vk_debug_report(instance, flags, object_count ? objects[0] : NULL, 0,
+ 0, message_idname, message);
+ }
+
+ ralloc_free(message);
+ ralloc_free(message_idname);
+}
+
+static struct vk_object_base *
+vk_object_for_error(struct vk_object_base *obj, VkResult error)
+{
+ if (obj == NULL)
+ return NULL;
+
+ switch (error) {
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ case VK_ERROR_LAYER_NOT_PRESENT:
+ case VK_ERROR_EXTENSION_NOT_PRESENT:
+ case VK_ERROR_UNKNOWN:
+ return &vk_object_to_instance(obj)->base;
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ return &vk_object_to_physical_device(obj)->base;
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+ case VK_ERROR_MEMORY_MAP_FAILED:
+ case VK_ERROR_TOO_MANY_OBJECTS:
+ return &vk_object_to_device(obj)->base;
+ default:
+ return obj;
+ }
+}
+
+VkResult
+__vk_errorv(const void *_obj, VkResult error,
+ const char *file, int line,
+ const char *format, va_list va)
+{
+ struct vk_object_base *object = (struct vk_object_base *)_obj;
+ struct vk_instance *instance = vk_object_to_instance(object);
+ object = vk_object_for_error(object, error);
+
+ /* If object->client_visible isn't set then the object hasn't been fully
+ * constructed and we shouldn't hand it back to the client. This typically
+ * happens if an error is thrown during object construction. This is safe
+ * to do as long as vk_object_base_init() has already been called.
+ */
+ if (object && !object->client_visible)
+ object = NULL;
+
+ const char *error_str = vk_Result_to_str(error);
+
+ if (format) {
+ char *message = ralloc_vasprintf(NULL, format, va);
+
+ if (object) {
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT,
+ VK_LOG_OBJS(object), file, line,
+ "%s (%s)", message, error_str);
+ } else {
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT,
+ VK_LOG_NO_OBJS(instance), file, line,
+ "%s (%s)", message, error_str);
+ }
+
+ ralloc_free(message);
+ } else {
+ if (object) {
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT,
+ VK_LOG_OBJS(object), file, line,
+ "%s", error_str);
+ } else {
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT,
+ VK_LOG_NO_OBJS(instance), file, line,
+ "%s", error_str);
+ }
+ }
+
+ return error;
+}
+
+VkResult
+__vk_errorf(const void *_obj, VkResult error,
+ const char *file, int line,
+ const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+ VkResult result = __vk_errorv(_obj, error, file, line, format, va);
+ va_end(va);
+
+ return result;
+}
diff --git a/src/vulkan/runtime/vk_log.h b/src/vulkan/runtime/vk_log.h
new file mode 100644
index 00000000000..5d0c230f2a8
--- /dev/null
+++ b/src/vulkan/runtime/vk_log.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_instance.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* __VK_ARG_N(...) returns the number of arguments provided to it */
+#define __VK_ARG_SEQ(_1,_2,_3,_4,_5,_6,_7,_8,N,...) N
+#define __VK_ARG_N(...) __VK_ARG_SEQ(__VA_ARGS__,8,7,6,5,4,3,2,1,0)
+
+#define VK_LOG_OBJS(...) \
+ __VK_ARG_N(__VA_ARGS__), (const void*[]){__VA_ARGS__}
+
+#define VK_LOG_NO_OBJS(instance) 0, (const void**)instance
+
+#define vk_logd(objects_macro, format, ...) \
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT, \
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT, \
+ objects_macro, __FILE__, __LINE__, format, ## __VA_ARGS__)
+
+#define vk_logi(objects_macro, format, ...) \
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT, \
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT, \
+ objects_macro, __FILE__, __LINE__, format, ## __VA_ARGS__)
+
+#define vk_logw(objects_macro, format, ...) \
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT, \
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT, \
+ objects_macro, __FILE__, __LINE__, format, ## __VA_ARGS__)
+
+#define vk_loge(objects_macro, format, ...) \
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, \
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT, \
+ objects_macro, __FILE__, __LINE__, format, ## __VA_ARGS__)
+
+#define vk_perf(objects_macro, format, ...) \
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT, \
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, \
+ objects_macro, __FILE__, __LINE__, format, ## __VA_ARGS__)
+
+#define __vk_log(severity, type, object_count, \
+ objects_or_instance, file, line, format, ...) \
+ __vk_log_impl(severity, type, object_count, objects_or_instance, \
+ file, line, format, ## __VA_ARGS__)
+
+void PRINTFLIKE(7, 8)
+__vk_log_impl(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT types,
+ int object_count,
+ const void **objects_or_instance,
+ const char *file,
+ int line,
+ const char *format,
+ ...);
+
+#define vk_error(obj, error) \
+ __vk_errorf(obj, error, __FILE__, __LINE__, NULL)
+
+#define vk_errorf(obj, error, ...) \
+ __vk_errorf(obj, error, __FILE__, __LINE__, __VA_ARGS__)
+
+VkResult
+__vk_errorv(const void *_obj, VkResult error,
+ const char *file, int line,
+ const char *format, va_list va);
+
+VkResult PRINTFLIKE(5, 6)
+__vk_errorf(const void *_obj, VkResult error,
+ const char *file, int line,
+ const char *format, ...);
+
+#ifdef __cplusplus
+}
+#endif
\ No newline at end of file
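(Editorial note, not part of the patch.) As a usage note, the logging and error helpers above are meant to be used directly at the point where a condition is detected: vk_logw()/vk_loge() route a message to any installed VK_EXT_debug_utils or VK_EXT_debug_report callbacks, while vk_errorf() additionally returns the VkResult so it can sit in a return statement. A hypothetical sketch; the device code and message text are illustrative only.

/* Illustrative only; not part of vk_log.h. */
#include <inttypes.h>
#include "vk_device.h"
#include "vk_log.h"

static VkResult
example_check_size(struct vk_device *device, VkDeviceSize size, VkDeviceSize max)
{
   if (size <= max)
      return VK_SUCCESS;

   /* Non-fatal diagnostics go through vk_logw()/vk_logi(). */
   vk_logw(VK_LOG_OBJS(&device->base),
           "requested size %" PRIu64 " exceeds limit %" PRIu64,
           (uint64_t)size, (uint64_t)max);

   /* vk_errorf() logs the message (with the VkResult appended) and returns
    * the error code in one step.
    */
   return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                    "refusing allocation of %" PRIu64 " bytes", (uint64_t)size);
}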
diff --git a/src/vulkan/runtime/vk_meta.c b/src/vulkan/runtime/vk_meta.c
new file mode 100644
index 00000000000..cb7aee3ca3a
--- /dev/null
+++ b/src/vulkan/runtime/vk_meta.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_meta_private.h"
+
+#include "vk_command_buffer.h"
+#include "vk_device.h"
+#include "vk_pipeline.h"
+#include "vk_util.h"
+
+#include "util/hash_table.h"
+
+#include <string.h>
+
+struct cache_key {
+ VkObjectType obj_type;
+ uint32_t key_size;
+ const void *key_data;
+};
+
+static struct cache_key *
+cache_key_create(VkObjectType obj_type, const void *key_data, size_t key_size)
+{
+ assert(key_size <= UINT32_MAX);
+
+ struct cache_key *key = malloc(sizeof(*key) + key_size);
+ *key = (struct cache_key) {
+ .obj_type = obj_type,
+ .key_size = key_size,
+ .key_data = key + 1,
+ };
+ memcpy(key + 1, key_data, key_size);
+
+ return key;
+}
+
+static uint32_t
+cache_key_hash(const void *_key)
+{
+ const struct cache_key *key = _key;
+
+ assert(sizeof(key->obj_type) == 4);
+ uint32_t hash = _mesa_hash_u32(&key->obj_type);
+ return _mesa_hash_data_with_seed(key->key_data, key->key_size, hash);
+}
+
+static bool
+cache_key_equal(const void *_a, const void *_b)
+{
+ const struct cache_key *a = _a, *b = _b;
+ if (a->obj_type != b->obj_type || a->key_size != b->key_size)
+ return false;
+
+ return memcmp(a->key_data, b->key_data, a->key_size) == 0;
+}
+
+static void
+destroy_object(struct vk_device *device, struct vk_object_base *obj)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ switch (obj->type) {
+ case VK_OBJECT_TYPE_BUFFER:
+ disp->DestroyBuffer(_device, (VkBuffer)(uintptr_t)obj, NULL);
+ break;
+ case VK_OBJECT_TYPE_IMAGE_VIEW:
+ disp->DestroyImageView(_device, (VkImageView)(uintptr_t)obj, NULL);
+ break;
+ case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
+ disp->DestroyDescriptorSetLayout(_device, (VkDescriptorSetLayout)(uintptr_t)obj, NULL);
+ break;
+ case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
+ disp->DestroyPipelineLayout(_device, (VkPipelineLayout)(uintptr_t)obj, NULL);
+ break;
+ case VK_OBJECT_TYPE_PIPELINE:
+ disp->DestroyPipeline(_device, (VkPipeline)(uintptr_t)obj, NULL);
+ break;
+ case VK_OBJECT_TYPE_SAMPLER:
+ disp->DestroySampler(_device, (VkSampler)(uintptr_t)obj, NULL);
+ break;
+ default:
+ unreachable("Unsupported object type");
+ }
+}
+
+VkResult
+vk_meta_device_init(struct vk_device *device,
+ struct vk_meta_device *meta)
+{
+ memset(meta, 0, sizeof(*meta));
+
+ meta->cache = _mesa_hash_table_create(NULL, cache_key_hash,
+ cache_key_equal);
+ simple_mtx_init(&meta->cache_mtx, mtx_plain);
+
+ meta->cmd_draw_rects = vk_meta_draw_rects;
+ meta->cmd_draw_volume = vk_meta_draw_volume;
+
+ return VK_SUCCESS;
+}
+
+void
+vk_meta_device_finish(struct vk_device *device,
+ struct vk_meta_device *meta)
+{
+ hash_table_foreach(meta->cache, entry) {
+ free((void *)entry->key);
+ destroy_object(device, entry->data);
+ }
+ _mesa_hash_table_destroy(meta->cache, NULL);
+ simple_mtx_destroy(&meta->cache_mtx);
+}
+
+uint64_t
+vk_meta_lookup_object(struct vk_meta_device *meta,
+ VkObjectType obj_type,
+ const void *key_data, size_t key_size)
+{
+ assert(key_size >= sizeof(enum vk_meta_object_key_type));
+ assert(*(enum vk_meta_object_key_type *)key_data !=
+ VK_META_OBJECT_KEY_TYPE_INVALID);
+
+ struct cache_key key = {
+ .obj_type = obj_type,
+ .key_size = key_size,
+ .key_data = key_data,
+ };
+
+ uint32_t hash = cache_key_hash(&key);
+
+ simple_mtx_lock(&meta->cache_mtx);
+ struct hash_entry *entry =
+ _mesa_hash_table_search_pre_hashed(meta->cache, hash, &key);
+ simple_mtx_unlock(&meta->cache_mtx);
+
+ if (entry == NULL)
+ return 0;
+
+ struct vk_object_base *obj = entry->data;
+ assert(obj->type == obj_type);
+
+ return (uint64_t)(uintptr_t)obj;
+}
+
+uint64_t
+vk_meta_cache_object(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const void *key_data, size_t key_size,
+ VkObjectType obj_type,
+ uint64_t handle)
+{
+ assert(key_size >= sizeof(enum vk_meta_object_key_type));
+ assert(*(enum vk_meta_object_key_type *)key_data !=
+ VK_META_OBJECT_KEY_TYPE_INVALID);
+
+ struct cache_key *key = cache_key_create(obj_type, key_data, key_size);
+ struct vk_object_base *obj =
+ vk_object_base_from_u64_handle(handle, obj_type);
+
+ uint32_t hash = cache_key_hash(key);
+
+ simple_mtx_lock(&meta->cache_mtx);
+ struct hash_entry *entry =
+ _mesa_hash_table_search_pre_hashed(meta->cache, hash, key);
+ if (entry == NULL)
+ _mesa_hash_table_insert_pre_hashed(meta->cache, hash, key, obj);
+ simple_mtx_unlock(&meta->cache_mtx);
+
+ if (entry != NULL) {
+ /* We raced and found that object already in the cache */
+ free(key);
+ destroy_object(device, obj);
+ return (uint64_t)(uintptr_t)entry->data;
+ } else {
+ /* Return the newly inserted object */
+ return (uint64_t)(uintptr_t)obj;
+ }
+}
+
+VkResult
+vk_meta_create_sampler(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkSamplerCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkSampler *sampler_out)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkSampler sampler;
+ VkResult result = disp->CreateSampler(_device, info, NULL, &sampler);
+ if (result != VK_SUCCESS)
+ return result;
+
+ *sampler_out = (VkSampler)
+ vk_meta_cache_object(device, meta, key_data, key_size,
+ VK_OBJECT_TYPE_SAMPLER,
+ (uint64_t)sampler);
+ return VK_SUCCESS;
+}
+
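(Editorial note, not part of the patch.) The lookup and cache entry points above are designed to be used as a pair: keys begin with a vk_meta_object_key_type tag (both functions assert this), a miss triggers creation, and vk_meta_cache_object() resolves creation races by destroying the losing object. A hypothetical sketch of that pattern for a sampler; the key layout and the VK_META_OBJECT_KEY_* value are placeholders, not part of this file.

/* Hypothetical caller sketch; the key struct and enum value are placeholders. */
struct example_sampler_key {
   enum vk_meta_object_key_type key_type;  /* must be first and not INVALID */
   VkFilter filter;
};

static VkSampler
example_get_cached_sampler(struct vk_device *device,
                           struct vk_meta_device *meta,
                           VkFilter filter)
{
   const struct example_sampler_key key = {
      .key_type = VK_META_OBJECT_KEY_EXAMPLE_SAMPLER,  /* assumed enum value */
      .filter = filter,
   };

   /* Fast path: the object may already be in the cache. */
   VkSampler sampler = (VkSampler)
      vk_meta_lookup_object(meta, VK_OBJECT_TYPE_SAMPLER, &key, sizeof(key));
   if (sampler != VK_NULL_HANDLE)
      return sampler;

   const VkSamplerCreateInfo info = {
      .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
      .magFilter = filter,
      .minFilter = filter,
   };

   /* Slow path: create and insert; on a race, the winning handle comes back. */
   if (vk_meta_create_sampler(device, meta, &info, &key, sizeof(key),
                              &sampler) != VK_SUCCESS)
      return VK_NULL_HANDLE;

   return sampler;
}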
+VkResult
+vk_meta_create_descriptor_set_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkDescriptorSetLayoutCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkDescriptorSetLayout *layout_out)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkDescriptorSetLayout layout;
+ VkResult result = disp->CreateDescriptorSetLayout(_device, info,
+ NULL, &layout);
+ if (result != VK_SUCCESS)
+ return result;
+
+ *layout_out = (VkDescriptorSetLayout)
+ vk_meta_cache_object(device, meta, key_data, key_size,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
+ (uint64_t)layout);
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_meta_get_descriptor_set_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkDescriptorSetLayoutCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkDescriptorSetLayout *layout_out)
+{
+ VkDescriptorSetLayout cached =
+ vk_meta_lookup_descriptor_set_layout(meta, key_data, key_size);
+ if (cached != VK_NULL_HANDLE) {
+ *layout_out = cached;
+ return VK_SUCCESS;
+ }
+
+ return vk_meta_create_descriptor_set_layout(device, meta, info,
+ key_data, key_size,
+ layout_out);
+}
+
+VkResult
+vk_meta_create_pipeline_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkPipelineLayoutCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkPipelineLayout *layout_out)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkPipelineLayout layout;
+ VkResult result = disp->CreatePipelineLayout(_device, info, NULL, &layout);
+ if (result != VK_SUCCESS)
+ return result;
+
+ *layout_out = (VkPipelineLayout)
+ vk_meta_cache_object(device, meta, key_data, key_size,
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT,
+ (uint64_t)layout);
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_meta_get_pipeline_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkDescriptorSetLayoutCreateInfo *desc_info,
+ const VkPushConstantRange *push_range,
+ const void *key_data, size_t key_size,
+ VkPipelineLayout *layout_out)
+{
+ VkPipelineLayout cached =
+ vk_meta_lookup_pipeline_layout(meta, key_data, key_size);
+ if (cached != VK_NULL_HANDLE) {
+ *layout_out = cached;
+ return VK_SUCCESS;
+ }
+
+ VkDescriptorSetLayout set_layout = VK_NULL_HANDLE;
+ if (desc_info != NULL) {
+ VkResult result =
+ vk_meta_get_descriptor_set_layout(device, meta, desc_info,
+ key_data, key_size, &set_layout);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ const VkPipelineLayoutCreateInfo layout_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = set_layout != VK_NULL_HANDLE ? 1 : 0,
+ .pSetLayouts = &set_layout,
+ .pushConstantRangeCount = push_range != NULL ? 1 : 0,
+ .pPushConstantRanges = push_range,
+ };
+
+ return vk_meta_create_pipeline_layout(device, meta, &layout_info,
+ key_data, key_size, layout_out);
+}
+
+static VkResult
+create_rect_list_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkGraphicsPipelineCreateInfo *info,
+ VkPipeline *pipeline_out)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkGraphicsPipelineCreateInfo info_local = *info;
+
+ /* We always configure for layered rendering for now */
+ bool use_gs = meta->use_gs_for_layer;
+
+ STACK_ARRAY(VkPipelineShaderStageCreateInfo, stages,
+ info->stageCount + 1 + use_gs);
+ uint32_t stage_count = 0;
+
+ VkPipelineShaderStageNirCreateInfoMESA vs_nir_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA,
+ .nir = vk_meta_draw_rects_vs_nir(meta, use_gs),
+ };
+ stages[stage_count++] = (VkPipelineShaderStageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .pNext = &vs_nir_info,
+ .stage = VK_SHADER_STAGE_VERTEX_BIT,
+ .pName = "main",
+ };
+
+ VkPipelineShaderStageNirCreateInfoMESA gs_nir_info;
+ if (use_gs) {
+ gs_nir_info = (VkPipelineShaderStageNirCreateInfoMESA) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA,
+ .nir = vk_meta_draw_rects_gs_nir(meta),
+ };
+ stages[stage_count++] = (VkPipelineShaderStageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .pNext = &gs_nir_info,
+ .stage = VK_SHADER_STAGE_GEOMETRY_BIT,
+ .pName = "main",
+ };
+ }
+
+ for (uint32_t i = 0; i < info->stageCount; i++) {
+ assert(info->pStages[i].stage != VK_SHADER_STAGE_VERTEX_BIT);
+ if (use_gs)
+ assert(info->pStages[i].stage != VK_SHADER_STAGE_GEOMETRY_BIT);
+ stages[stage_count++] = info->pStages[i];
+ }
+
+ info_local.stageCount = stage_count;
+ info_local.pStages = stages;
+ info_local.pVertexInputState = &vk_meta_draw_rects_vi_state;
+ info_local.pViewportState = &vk_meta_draw_rects_vs_state;
+
+ uint32_t dyn_count = info->pDynamicState != NULL ?
+ info->pDynamicState->dynamicStateCount : 0;
+
+ STACK_ARRAY(VkDynamicState, dyn_state, dyn_count + 2);
+ for (uint32_t i = 0; i < dyn_count; i++)
+ dyn_state[i] = info->pDynamicState->pDynamicStates[i];
+
+ dyn_state[dyn_count + 0] = VK_DYNAMIC_STATE_VIEWPORT;
+ dyn_state[dyn_count + 1] = VK_DYNAMIC_STATE_SCISSOR;
+
+ const VkPipelineDynamicStateCreateInfo dyn_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = dyn_count + 2,
+ .pDynamicStates = dyn_state,
+ };
+
+ info_local.pDynamicState = &dyn_info;
+
+ VkResult result = disp->CreateGraphicsPipelines(_device, VK_NULL_HANDLE,
+ 1, &info_local, NULL,
+ pipeline_out);
+
+ STACK_ARRAY_FINISH(dyn_state);
+ STACK_ARRAY_FINISH(stages);
+
+ return result;
+}
+
+static const VkPipelineRasterizationStateCreateInfo default_rs_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .depthClampEnable = false,
+ .depthBiasEnable = false,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .cullMode = VK_CULL_MODE_NONE,
+ .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+};
+
+static const VkPipelineDepthStencilStateCreateInfo default_ds_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .depthTestEnable = false,
+ .depthBoundsTestEnable = false,
+ .stencilTestEnable = false,
+};
+
+VkResult
+vk_meta_create_graphics_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkGraphicsPipelineCreateInfo *info,
+ const struct vk_meta_rendering_info *render,
+ const void *key_data, size_t key_size,
+ VkPipeline *pipeline_out)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+ VkResult result;
+
+ VkGraphicsPipelineCreateInfo info_local = *info;
+
+ /* Add in the rendering info */
+ VkPipelineRenderingCreateInfo r_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
+ .viewMask = render->view_mask,
+ .colorAttachmentCount = render->color_attachment_count,
+ .pColorAttachmentFormats = render->color_attachment_formats,
+ .depthAttachmentFormat = render->depth_attachment_format,
+ .stencilAttachmentFormat = render->stencil_attachment_format,
+ };
+ __vk_append_struct(&info_local, &r_info);
+
+ /* Assume rectangle pipelines */
+ if (info_local.pInputAssemblyState == NULL)
+ info_local.pInputAssemblyState = &vk_meta_draw_rects_ia_state;
+
+ if (info_local.pRasterizationState == NULL)
+ info_local.pRasterizationState = &default_rs_info;
+
+ VkPipelineMultisampleStateCreateInfo ms_info;
+ if (info_local.pMultisampleState == NULL) {
+ ms_info = (VkPipelineMultisampleStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .rasterizationSamples = render->samples,
+ };
+ info_local.pMultisampleState = &ms_info;
+ }
+
+ if (info_local.pDepthStencilState == NULL)
+ info_local.pDepthStencilState = &default_ds_info;
+
+ VkPipelineColorBlendStateCreateInfo cb_info;
+ VkPipelineColorBlendAttachmentState cb_att[MESA_VK_MAX_COLOR_ATTACHMENTS];
+ if (info_local.pColorBlendState == NULL) {
+ for (uint32_t i = 0; i < render->color_attachment_count; i++) {
+ cb_att[i] = (VkPipelineColorBlendAttachmentState) {
+ .blendEnable = false,
+ .colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT |
+ VK_COLOR_COMPONENT_A_BIT,
+ };
+ }
+ cb_info = (VkPipelineColorBlendStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .attachmentCount = render->color_attachment_count,
+ .pAttachments = cb_att,
+ };
+ info_local.pColorBlendState = &cb_info;
+ }
+
+ VkPipeline pipeline;
+ if (info_local.pInputAssemblyState->topology ==
+ VK_PRIMITIVE_TOPOLOGY_META_RECT_LIST_MESA) {
+ result = create_rect_list_pipeline(device, meta,
+ &info_local,
+ &pipeline);
+ } else {
+ result = disp->CreateGraphicsPipelines(_device, VK_NULL_HANDLE,
+ 1, &info_local,
+ NULL, &pipeline);
+ }
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ *pipeline_out = (VkPipeline)vk_meta_cache_object(device, meta,
+ key_data, key_size,
+ VK_OBJECT_TYPE_PIPELINE,
+ (uint64_t)pipeline);
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_meta_create_compute_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkComputePipelineCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkPipeline *pipeline_out)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkPipeline pipeline;
+ VkResult result = disp->CreateComputePipelines(_device, VK_NULL_HANDLE,
+ 1, info, NULL, &pipeline);
+ if (result != VK_SUCCESS)
+ return result;
+
+ *pipeline_out = (VkPipeline)vk_meta_cache_object(device, meta,
+ key_data, key_size,
+ VK_OBJECT_TYPE_PIPELINE,
+ (uint64_t)pipeline);
+ return VK_SUCCESS;
+}
+
+void
+vk_meta_object_list_init(struct vk_meta_object_list *mol)
+{
+ util_dynarray_init(&mol->arr, NULL);
+}
+
+void
+vk_meta_object_list_reset(struct vk_device *device,
+ struct vk_meta_object_list *mol)
+{
+ util_dynarray_foreach(&mol->arr, struct vk_object_base *, obj)
+ destroy_object(device, *obj);
+
+ util_dynarray_clear(&mol->arr);
+}
+
+void
+vk_meta_object_list_finish(struct vk_device *device,
+ struct vk_meta_object_list *mol)
+{
+ vk_meta_object_list_reset(device, mol);
+ util_dynarray_fini(&mol->arr);
+}
+
+VkResult
+vk_meta_create_buffer(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkBufferCreateInfo *info,
+ VkBuffer *buffer_out)
+{
+ struct vk_device *device = cmd->base.device;
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkResult result = disp->CreateBuffer(_device, info, NULL, buffer_out);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ vk_meta_object_list_add_handle(&cmd->meta_objects,
+ VK_OBJECT_TYPE_BUFFER,
+ (uint64_t)*buffer_out);
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_meta_create_image_view(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkImageViewCreateInfo *info,
+ VkImageView *image_view_out)
+{
+ struct vk_device *device = cmd->base.device;
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ VkResult result = disp->CreateImageView(_device, info, NULL, image_view_out);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ vk_meta_object_list_add_handle(&cmd->meta_objects,
+ VK_OBJECT_TYPE_IMAGE_VIEW,
+ (uint64_t)*image_view_out);
+ return VK_SUCCESS;
+}
diff --git a/src/vulkan/runtime/vk_meta.h b/src/vulkan/runtime/vk_meta.h
new file mode 100644
index 00000000000..dd113b0ea13
--- /dev/null
+++ b/src/vulkan/runtime/vk_meta.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_META_H
+#define VK_META_H
+
+#include "vk_limits.h"
+#include "vk_object.h"
+
+#include "util/simple_mtx.h"
+#include "util/u_dynarray.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct hash_table;
+struct vk_command_buffer;
+struct vk_device;
+struct vk_image;
+
+struct vk_meta_rect {
+ uint32_t x0, y0, x1, y1;
+ float z;
+ uint32_t layer;
+};
+
+#define VK_PRIMITIVE_TOPOLOGY_META_RECT_LIST_MESA (VkPrimitiveTopology)11
+
+struct vk_meta_device {
+ struct hash_table *cache;
+ simple_mtx_t cache_mtx;
+
+ uint32_t max_bind_map_buffer_size_B;
+ bool use_layered_rendering;
+ bool use_gs_for_layer;
+ bool use_stencil_export;
+
+ VkResult (*cmd_bind_map_buffer)(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ VkBuffer buffer,
+ void **map_out);
+
+ void (*cmd_draw_rects)(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ uint32_t rect_count,
+ const struct vk_meta_rect *rects);
+
+ void (*cmd_draw_volume)(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const struct vk_meta_rect *rect,
+ uint32_t layer_count);
+};
+
+VkResult vk_meta_device_init(struct vk_device *device,
+ struct vk_meta_device *meta);
+void vk_meta_device_finish(struct vk_device *device,
+ struct vk_meta_device *meta);
+
+/** Keys should start with one of these to ensure uniqueness */
+enum vk_meta_object_key_type {
+ VK_META_OBJECT_KEY_TYPE_INVALID = 0,
+ VK_META_OBJECT_KEY_CLEAR_PIPELINE,
+ VK_META_OBJECT_KEY_BLIT_PIPELINE,
+ VK_META_OBJECT_KEY_BLIT_SAMPLER,
+};
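
As a rough illustration of the convention (the struct and values below are hypothetical; vk_meta_blit_key in vk_meta_blit_resolve.c is the real example in this series), a cache key bundles the leading type tag with everything the cached object depends on, and callers zero it first, presumably so padding bytes hash and compare deterministically:

struct example_blit_pipeline_key {
   enum vk_meta_object_key_type key_type;
   VkFormat dst_format;
   VkImageAspectFlags aspects;
};

struct example_blit_pipeline_key key;
memset(&key, 0, sizeof(key));            /* zero padding before hashing */
key.key_type = VK_META_OBJECT_KEY_BLIT_PIPELINE;
key.dst_format = VK_FORMAT_R8G8B8A8_UNORM;
key.aspects = VK_IMAGE_ASPECT_COLOR_BIT;
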
+
+uint64_t vk_meta_lookup_object(struct vk_meta_device *meta,
+ VkObjectType obj_type,
+ const void *key_data, size_t key_size);
+
+uint64_t vk_meta_cache_object(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const void *key_data, size_t key_size,
+ VkObjectType obj_type,
+ uint64_t handle);
+
+static inline VkDescriptorSetLayout
+vk_meta_lookup_descriptor_set_layout(struct vk_meta_device *meta,
+ const void *key_data, size_t key_size)
+{
+ return (VkDescriptorSetLayout)
+ vk_meta_lookup_object(meta, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
+ key_data, key_size);
+}
+
+static inline VkPipelineLayout
+vk_meta_lookup_pipeline_layout(struct vk_meta_device *meta,
+ const void *key_data, size_t key_size)
+{
+ return (VkPipelineLayout)
+ vk_meta_lookup_object(meta, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
+ key_data, key_size);
+}
+
+static inline VkPipeline
+vk_meta_lookup_pipeline(struct vk_meta_device *meta,
+ const void *key_data, size_t key_size)
+{
+ return (VkPipeline)vk_meta_lookup_object(meta, VK_OBJECT_TYPE_PIPELINE,
+ key_data, key_size);
+}
+
+static inline VkSampler
+vk_meta_lookup_sampler(struct vk_meta_device *meta,
+ const void *key_data, size_t key_size)
+{
+ return (VkSampler)vk_meta_lookup_object(meta, VK_OBJECT_TYPE_SAMPLER,
+ key_data, key_size);
+}
+
+struct vk_meta_rendering_info {
+ uint32_t view_mask;
+ uint32_t samples;
+ uint32_t color_attachment_count;
+ VkFormat color_attachment_formats[MESA_VK_MAX_COLOR_ATTACHMENTS];
+ VkFormat depth_attachment_format;
+ VkFormat stencil_attachment_format;
+};
+
+VkResult
+vk_meta_create_descriptor_set_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkDescriptorSetLayoutCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkDescriptorSetLayout *layout_out);
+
+VkResult
+vk_meta_create_pipeline_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkPipelineLayoutCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkPipelineLayout *layout_out);
+
+VkResult
+vk_meta_get_pipeline_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkDescriptorSetLayoutCreateInfo *desc_info,
+ const VkPushConstantRange *push_range,
+ const void *key_data, size_t key_size,
+ VkPipelineLayout *layout_out);
+
+VkResult
+vk_meta_create_graphics_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkGraphicsPipelineCreateInfo *info,
+ const struct vk_meta_rendering_info *render,
+ const void *key_data, size_t key_size,
+ VkPipeline *pipeline_out);
+
+VkResult
+vk_meta_create_compute_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkComputePipelineCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkPipeline *pipeline_out);
+
+VkResult
+vk_meta_create_sampler(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const VkSamplerCreateInfo *info,
+ const void *key_data, size_t key_size,
+ VkSampler *sampler_out);
+
+struct vk_meta_object_list {
+ struct util_dynarray arr;
+};
+
+void vk_meta_object_list_init(struct vk_meta_object_list *mol);
+void vk_meta_object_list_reset(struct vk_device *device,
+ struct vk_meta_object_list *mol);
+void vk_meta_object_list_finish(struct vk_device *device,
+ struct vk_meta_object_list *mol);
+
+static inline void
+vk_meta_object_list_add_obj(struct vk_meta_object_list *mol,
+ struct vk_object_base *obj)
+{
+ util_dynarray_append(&mol->arr, struct vk_object_base *, obj);
+}
+
+static inline void
+vk_meta_object_list_add_handle(struct vk_meta_object_list *mol,
+ VkObjectType obj_type,
+ uint64_t handle)
+{
+ vk_meta_object_list_add_obj(mol,
+ vk_object_base_from_u64_handle(handle, obj_type));
+}
+
+VkResult vk_meta_create_buffer(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkBufferCreateInfo *info,
+ VkBuffer *buffer_out);
+VkResult vk_meta_create_image_view(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkImageViewCreateInfo *info,
+ VkImageView *image_view_out);
+
+void vk_meta_draw_rects(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ uint32_t rect_count,
+ const struct vk_meta_rect *rects);
+
+void vk_meta_draw_volume(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const struct vk_meta_rect *rect,
+ uint32_t layer_count);
+
+void vk_meta_clear_attachments(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const struct vk_meta_rendering_info *render,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t rect_count,
+ const VkClearRect *rects);
+
+void vk_meta_clear_rendering(struct vk_meta_device *meta,
+ struct vk_command_buffer *cmd,
+ const VkRenderingInfo *pRenderingInfo);
+
+void vk_meta_clear_color_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *image,
+ VkImageLayout image_layout,
+ VkFormat format,
+ const VkClearColorValue *color,
+ uint32_t range_count,
+ const VkImageSubresourceRange *ranges);
+
+void vk_meta_clear_depth_stencil_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *image,
+ VkImageLayout image_layout,
+ const VkClearDepthStencilValue *depth_stencil,
+ uint32_t range_count,
+ const VkImageSubresourceRange *ranges);
+
+void vk_meta_blit_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *src_image,
+ VkFormat src_format,
+ VkImageLayout src_image_layout,
+ struct vk_image *dst_image,
+ VkFormat dst_format,
+ VkImageLayout dst_image_layout,
+ uint32_t region_count,
+ const VkImageBlit2 *regions,
+ VkFilter filter);
+
+void vk_meta_blit_image2(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkBlitImageInfo2 *blit);
+
+void vk_meta_resolve_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *src_image,
+ VkFormat src_format,
+ VkImageLayout src_image_layout,
+ struct vk_image *dst_image,
+ VkFormat dst_format,
+ VkImageLayout dst_image_layout,
+ uint32_t region_count,
+ const VkImageResolve2 *regions,
+ VkResolveModeFlagBits resolve_mode,
+ VkResolveModeFlagBits stencil_resolve_mode);
+
+void vk_meta_resolve_image2(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkResolveImageInfo2 *resolve);
+
+void vk_meta_resolve_rendering(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkRenderingInfo *pRenderingInfo);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_META_H */
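
A minimal sketch of how a driver might wire this interface up (the struct names, feature-flag values, and reset point are assumptions for illustration, not requirements of this change):

struct example_device {
   struct vk_device vk;
   struct vk_meta_device meta;
};

static VkResult
example_device_init_meta(struct example_device *dev)
{
   /* Installs the default cmd_draw_rects/cmd_draw_volume callbacks. */
   VkResult result = vk_meta_device_init(&dev->vk, &dev->meta);
   if (result != VK_SUCCESS)
      return result;

   dev->meta.use_gs_for_layer = true;     /* e.g. no gl_Layer write from the VS */
   dev->meta.use_stencil_export = false;  /* forces the per-bit stencil path */
   return VK_SUCCESS;
}

On teardown the driver would call vk_meta_device_finish(); per command buffer, the vk_meta_object_list that vk_meta_create_buffer() and vk_meta_create_image_view() append to (cmd->meta_objects) would be reset with vk_meta_object_list_reset() when the command buffer is reset and released with vk_meta_object_list_finish() when it is destroyed.
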
diff --git a/src/vulkan/runtime/vk_meta_blit_resolve.c b/src/vulkan/runtime/vk_meta_blit_resolve.c
new file mode 100644
index 00000000000..955f1544df0
--- /dev/null
+++ b/src/vulkan/runtime/vk_meta_blit_resolve.c
@@ -0,0 +1,1013 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_meta_private.h"
+
+#include "vk_command_buffer.h"
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_pipeline.h"
+
+#include "nir_builder.h"
+
+struct vk_meta_blit_key {
+ enum vk_meta_object_key_type key_type;
+ enum glsl_sampler_dim dim;
+ VkSampleCountFlagBits src_samples;
+ VkResolveModeFlagBits resolve_mode;
+ VkResolveModeFlagBits stencil_resolve_mode;
+ bool stencil_as_discard;
+ VkFormat dst_format;
+ VkImageAspectFlags aspects;
+};
+
+static enum glsl_sampler_dim
+vk_image_sampler_dim(const struct vk_image *image)
+{
+ switch (image->image_type) {
+ case VK_IMAGE_TYPE_1D: return GLSL_SAMPLER_DIM_1D;
+ case VK_IMAGE_TYPE_2D:
+ if (image->samples > 1)
+ return GLSL_SAMPLER_DIM_MS;
+ else
+ return GLSL_SAMPLER_DIM_2D;
+ case VK_IMAGE_TYPE_3D: return GLSL_SAMPLER_DIM_3D;
+ default: unreachable("Invalid image type");
+ }
+}
+
+enum blit_desc_binding {
+ BLIT_DESC_BINDING_SAMPLER,
+ BLIT_DESC_BINDING_COLOR,
+ BLIT_DESC_BINDING_DEPTH,
+ BLIT_DESC_BINDING_STENCIL,
+};
+
+static enum blit_desc_binding
+aspect_to_tex_binding(VkImageAspectFlagBits aspect)
+{
+ switch (aspect) {
+ case VK_IMAGE_ASPECT_COLOR_BIT: return BLIT_DESC_BINDING_COLOR;
+ case VK_IMAGE_ASPECT_DEPTH_BIT: return BLIT_DESC_BINDING_DEPTH;
+ case VK_IMAGE_ASPECT_STENCIL_BIT: return BLIT_DESC_BINDING_STENCIL;
+ default: unreachable("Unsupported aspect");
+ }
+}
+
+struct vk_meta_blit_push_data {
+ float x_off, y_off, x_scale, y_scale;
+ float z_off, z_scale;
+ int32_t arr_delta;
+ uint32_t stencil_bit;
+};
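
For orientation (not patch code), build_blit_shader() below reads this struct back as two std140 vec4 push-constant rows:

/* xy_xform (offset  0) = { x_off, y_off, x_scale, y_scale }
 * z_xform  (offset 16) = { z_off, z_scale, arr_delta, stencil_bit }
 *
 * The shader computes src.xy = frag_coord.xy * xy_scale + xy_off, with the
 * analogous z transform for 3D blits.  arr_delta and stencil_bit are integer
 * values riding in the float vec4; NIR values are untyped bit patterns, so
 * the integer ALU ops in the shader recover them unchanged.
 */
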
+
+static inline void
+compute_off_scale(uint32_t src_level_size,
+ uint32_t src0, uint32_t src1,
+ uint32_t dst0, uint32_t dst1,
+ uint32_t *dst0_out, uint32_t *dst1_out,
+ float *off_out, float *scale_out)
+{
+ assert(src0 <= src_level_size && src1 <= src_level_size);
+
+ if (dst0 < dst1) {
+ *dst0_out = dst0;
+ *dst1_out = dst1;
+ } else {
+ *dst0_out = dst1;
+ *dst1_out = dst0;
+
+ /* Flip the source region */
+ uint32_t tmp = src0;
+ src0 = src1;
+ src1 = tmp;
+ }
+
+ double src_region_size = (double)src1 - (double)src0;
+ assert(src_region_size != 0);
+
+ double dst_region_size = (double)*dst1_out - (double)*dst0_out;
+ assert(dst_region_size > 0);
+
+ double src_offset = src0 / (double)src_level_size;
+ double dst_scale = src_region_size / (src_level_size * dst_region_size);
+ double dst_offset = (double)*dst0_out * dst_scale;
+
+ *off_out = src_offset - dst_offset;
+ *scale_out = dst_scale;
+}
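
A quick numeric check of the mapping (illustrative values only): the fragment shader evaluates src_norm = frag_coord * scale + off, which is the usual linear blit mapping of destination pixel centers into normalized source coordinates.

/* Example: a 64-texel source level, blitting src x in [16, 48) onto
 * dst x in [0, 64), i.e. a 2x upscale:
 *
 *    scale = (48 - 16) / (64.0 * (64 - 0)) = 1/128
 *    off   = 16 / 64.0 - 0 * scale         = 0.25
 *
 * The destination fragment at x = 0 has frag coord 0.5 and samples
 * 0.5 * (1/128) + 0.25 = 0.2539..., just inside source texel 16, exactly
 * where the linear mapping of pixel centers puts it.
 */
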
+
+static inline nir_def *
+load_struct_var(nir_builder *b, nir_variable *var, uint32_t field)
+{
+ nir_deref_instr *deref =
+ nir_build_deref_struct(b, nir_build_deref_var(b, var), field);
+ return nir_load_deref(b, deref);
+}
+
+static nir_def *
+build_tex_resolve(nir_builder *b, nir_deref_instr *t,
+ nir_def *coord,
+ VkSampleCountFlagBits samples,
+ VkResolveModeFlagBits resolve_mode)
+{
+ nir_def *accum = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, 0));
+ if (resolve_mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)
+ return accum;
+
+ const enum glsl_base_type base_type =
+ glsl_get_sampler_result_type(t->type);
+
+ for (unsigned i = 1; i < samples; i++) {
+ nir_def *val = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, i));
+ switch (resolve_mode) {
+ case VK_RESOLVE_MODE_AVERAGE_BIT:
+ assert(base_type == GLSL_TYPE_FLOAT);
+ accum = nir_fadd(b, accum, val);
+ break;
+
+ case VK_RESOLVE_MODE_MIN_BIT:
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ accum = nir_umin(b, accum, val);
+ break;
+ case GLSL_TYPE_INT:
+ accum = nir_imin(b, accum, val);
+ break;
+ case GLSL_TYPE_FLOAT:
+ accum = nir_fmin(b, accum, val);
+ break;
+ default:
+ unreachable("Invalid sample result type");
+ }
+ break;
+
+ case VK_RESOLVE_MODE_MAX_BIT:
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ accum = nir_umax(b, accum, val);
+ break;
+ case GLSL_TYPE_INT:
+ accum = nir_imax(b, accum, val);
+ break;
+ case GLSL_TYPE_FLOAT:
+ accum = nir_fmax(b, accum, val);
+ break;
+ default:
+ unreachable("Invalid sample result type");
+ }
+ break;
+
+ default:
+ unreachable("Unsupported resolve mode");
+ }
+ }
+
+ if (resolve_mode == VK_RESOLVE_MODE_AVERAGE_BIT)
+ accum = nir_fmul_imm(b, accum, 1.0 / samples);
+
+ return accum;
+}
+
+static nir_shader *
+build_blit_shader(const struct vk_meta_blit_key *key)
+{
+ nir_builder build;
+ if (key->resolve_mode || key->stencil_resolve_mode) {
+ build = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL,
+ "vk-meta-resolve");
+ } else {
+ build = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
+ NULL, "vk-meta-blit");
+ }
+ nir_builder *b = &build;
+
+ struct glsl_struct_field push_fields[] = {
+ { .type = glsl_vec4_type(), .name = "xy_xform", .offset = 0 },
+ { .type = glsl_vec4_type(), .name = "z_xform", .offset = 16 },
+ };
+ const struct glsl_type *push_iface_type =
+ glsl_interface_type(push_fields, ARRAY_SIZE(push_fields),
+ GLSL_INTERFACE_PACKING_STD140,
+ false /* row_major */, "push");
+ nir_variable *push = nir_variable_create(b->shader, nir_var_mem_push_const,
+ push_iface_type, "push");
+
+ nir_def *xy_xform = load_struct_var(b, push, 0);
+ nir_def *xy_off = nir_channels(b, xy_xform, 3 << 0);
+ nir_def *xy_scale = nir_channels(b, xy_xform, 3 << 2);
+
+ nir_def *out_coord_xy = nir_load_frag_coord(b);
+ out_coord_xy = nir_trim_vector(b, out_coord_xy, 2);
+ nir_def *src_coord_xy = nir_ffma(b, out_coord_xy, xy_scale, xy_off);
+
+ nir_def *z_xform = load_struct_var(b, push, 1);
+ nir_def *out_layer = nir_load_layer_id(b);
+ nir_def *src_coord;
+ if (key->dim == GLSL_SAMPLER_DIM_3D) {
+ nir_def *z_off = nir_channel(b, z_xform, 0);
+ nir_def *z_scale = nir_channel(b, z_xform, 1);
+ nir_def *out_coord_z = nir_fadd_imm(b, nir_u2f32(b, out_layer), 0.5);
+ nir_def *src_coord_z = nir_ffma(b, out_coord_z, z_scale, z_off);
+ src_coord = nir_vec3(b, nir_channel(b, src_coord_xy, 0),
+ nir_channel(b, src_coord_xy, 1),
+ src_coord_z);
+ } else {
+ nir_def *arr_delta = nir_channel(b, z_xform, 2);
+ nir_def *in_layer = nir_iadd(b, out_layer, arr_delta);
+ if (key->dim == GLSL_SAMPLER_DIM_1D) {
+ src_coord = nir_vec2(b, nir_channel(b, src_coord_xy, 0),
+ nir_u2f32(b, in_layer));
+ } else {
+ assert(key->dim == GLSL_SAMPLER_DIM_2D ||
+ key->dim == GLSL_SAMPLER_DIM_MS);
+ src_coord = nir_vec3(b, nir_channel(b, src_coord_xy, 0),
+ nir_channel(b, src_coord_xy, 1),
+ nir_u2f32(b, in_layer));
+ }
+ }
+
+ nir_variable *sampler = nir_variable_create(b->shader, nir_var_uniform,
+ glsl_bare_sampler_type(), NULL);
+ sampler->data.descriptor_set = 0;
+ sampler->data.binding = BLIT_DESC_BINDING_SAMPLER;
+ nir_deref_instr *s = nir_build_deref_var(b, sampler);
+
+ u_foreach_bit(a, key->aspects) {
+ VkImageAspectFlagBits aspect = (1 << a);
+
+ enum glsl_base_type base_type;
+ unsigned out_location, out_comps;
+ const char *tex_name, *out_name;
+ VkResolveModeFlagBits resolve_mode;
+ switch (aspect) {
+ case VK_IMAGE_ASPECT_COLOR_BIT:
+ tex_name = "color_tex";
+ if (vk_format_is_sint(key->dst_format))
+ base_type = GLSL_TYPE_INT;
+ else if (vk_format_is_uint(key->dst_format))
+ base_type = GLSL_TYPE_UINT;
+ else
+ base_type = GLSL_TYPE_FLOAT;
+ resolve_mode = key->resolve_mode;
+ out_name = "gl_FragData[0]";
+ out_location = FRAG_RESULT_DATA0;
+ out_comps = 4;
+ break;
+ case VK_IMAGE_ASPECT_DEPTH_BIT:
+ tex_name = "depth_tex";
+ base_type = GLSL_TYPE_FLOAT;
+ resolve_mode = key->resolve_mode;
+ out_name = "gl_FragDepth";
+ out_location = FRAG_RESULT_DEPTH;
+ out_comps = 1;
+ break;
+ case VK_IMAGE_ASPECT_STENCIL_BIT:
+ tex_name = "stencil_tex";
+ base_type = GLSL_TYPE_UINT;
+ resolve_mode = key->stencil_resolve_mode;
+ out_name = "gl_FragStencilRef";
+ out_location = FRAG_RESULT_STENCIL;
+ out_comps = 1;
+ break;
+ default:
+ unreachable("Unsupported aspect");
+ }
+
+ const bool is_array = key->dim != GLSL_SAMPLER_DIM_3D;
+ const struct glsl_type *texture_type =
+ glsl_sampler_type(key->dim, false, is_array, base_type);
+ nir_variable *texture = nir_variable_create(b->shader, nir_var_uniform,
+ texture_type, tex_name);
+ texture->data.descriptor_set = 0;
+ texture->data.binding = aspect_to_tex_binding(aspect);
+ nir_deref_instr *t = nir_build_deref_var(b, texture);
+
+ nir_def *val;
+ if (resolve_mode == VK_RESOLVE_MODE_NONE) {
+ val = nir_txl_deref(b, t, s, src_coord, nir_imm_float(b, 0));
+ } else {
+ val = build_tex_resolve(b, t, nir_f2u32(b, src_coord),
+ key->src_samples, resolve_mode);
+ }
+ val = nir_trim_vector(b, val, out_comps);
+
+ if (key->stencil_as_discard) {
+ assert(key->aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
+ nir_def *stencil_bit = nir_channel(b, z_xform, 3);
+ nir_discard_if(b, nir_ieq(b, nir_iand(b, val, stencil_bit),
+ nir_imm_int(b, 0)));
+ } else {
+ const struct glsl_type *out_type =
+ glsl_vector_type(base_type, out_comps);
+ nir_variable *out = nir_variable_create(b->shader, nir_var_shader_out,
+ out_type, out_name);
+ out->data.location = out_location;
+
+ nir_store_var(b, out, val, BITFIELD_MASK(out_comps));
+ }
+ }
+
+ return b->shader;
+}
+
+static VkResult
+get_blit_pipeline_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ VkPipelineLayout *layout_out)
+{
+ const char key[] = "vk-meta-blit-pipeline-layout";
+
+ const VkDescriptorSetLayoutBinding bindings[] = {{
+ .binding = BLIT_DESC_BINDING_SAMPLER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ }, {
+ .binding = BLIT_DESC_BINDING_COLOR,
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ }, {
+ .binding = BLIT_DESC_BINDING_DEPTH,
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ }, {
+ .binding = BLIT_DESC_BINDING_STENCIL,
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ }};
+
+ const VkDescriptorSetLayoutCreateInfo desc_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
+ .bindingCount = ARRAY_SIZE(bindings),
+ .pBindings = bindings,
+ };
+
+ const VkPushConstantRange push_range = {
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .offset = 0,
+ .size = sizeof(struct vk_meta_blit_push_data),
+ };
+
+ return vk_meta_get_pipeline_layout(device, meta, &desc_info, &push_range,
+ key, sizeof(key), layout_out);
+}
+
+static VkResult
+get_blit_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const struct vk_meta_blit_key *key,
+ VkPipelineLayout layout,
+ VkPipeline *pipeline_out)
+{
+ VkPipeline from_cache = vk_meta_lookup_pipeline(meta, key, sizeof(*key));
+ if (from_cache != VK_NULL_HANDLE) {
+ *pipeline_out = from_cache;
+ return VK_SUCCESS;
+ }
+
+ const VkPipelineShaderStageNirCreateInfoMESA fs_nir_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA,
+ .nir = build_blit_shader(key),
+ };
+ const VkPipelineShaderStageCreateInfo fs_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .pNext = &fs_nir_info,
+ .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .pName = "main",
+ };
+
+ VkPipelineDepthStencilStateCreateInfo ds_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ };
+ VkDynamicState dyn_tmp;
+ VkPipelineDynamicStateCreateInfo dyn_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ };
+ struct vk_meta_rendering_info render = {
+ .samples = 1,
+ };
+ if (key->aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
+ render.color_attachment_count = 1;
+ render.color_attachment_formats[0] = key->dst_format;
+ }
+ if (key->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ ds_info.depthTestEnable = VK_TRUE;
+ ds_info.depthWriteEnable = VK_TRUE;
+ ds_info.depthCompareOp = VK_COMPARE_OP_ALWAYS;
+ render.depth_attachment_format = key->dst_format;
+ }
+ if (key->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ ds_info.stencilTestEnable = VK_TRUE;
+ ds_info.front.compareOp = VK_COMPARE_OP_ALWAYS;
+ ds_info.front.passOp = VK_STENCIL_OP_REPLACE;
+ ds_info.front.compareMask = ~0u;
+ ds_info.front.writeMask = ~0u;
+ ds_info.front.reference = ~0;
+ ds_info.back = ds_info.front;
+ if (key->stencil_as_discard) {
+ dyn_tmp = VK_DYNAMIC_STATE_STENCIL_WRITE_MASK;
+ dyn_info.dynamicStateCount = 1;
+ dyn_info.pDynamicStates = &dyn_tmp;
+ }
+ render.stencil_attachment_format = key->dst_format;
+ }
+
+ const VkGraphicsPipelineCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = 1,
+ .pStages = &fs_info,
+ .pDepthStencilState = &ds_info,
+ .pDynamicState = &dyn_info,
+ .layout = layout,
+ };
+
+ VkResult result = vk_meta_create_graphics_pipeline(device, meta, &info,
+ &render,
+ key, sizeof(*key),
+ pipeline_out);
+ ralloc_free(fs_nir_info.nir);
+
+ return result;
+}
+
+static VkResult
+get_blit_sampler(struct vk_device *device,
+ struct vk_meta_device *meta,
+ VkFilter filter,
+ VkSampler *sampler_out)
+{
+ struct {
+ enum vk_meta_object_key_type key_type;
+ VkFilter filter;
+ } key;
+
+ memset(&key, 0, sizeof(key));
+ key.key_type = VK_META_OBJECT_KEY_BLIT_SAMPLER;
+ key.filter = filter;
+
+ VkSampler from_cache = vk_meta_lookup_sampler(meta, &key, sizeof(key));
+ if (from_cache != VK_NULL_HANDLE) {
+ *sampler_out = from_cache;
+ return VK_SUCCESS;
+ }
+
+ const VkSamplerCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+ .magFilter = filter,
+ .minFilter = filter,
+ .mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+ .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ .unnormalizedCoordinates = VK_FALSE,
+ };
+
+ return vk_meta_create_sampler(device, meta, &info,
+ &key, sizeof(key), sampler_out);
+}
+
+static void
+do_blit(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *src_image,
+ VkFormat src_format,
+ VkImageLayout src_image_layout,
+ VkImageSubresourceLayers src_subres,
+ struct vk_image *dst_image,
+ VkFormat dst_format,
+ VkImageLayout dst_image_layout,
+ VkImageSubresourceLayers dst_subres,
+ VkSampler sampler,
+ struct vk_meta_blit_key *key,
+ struct vk_meta_blit_push_data *push,
+ const struct vk_meta_rect *dst_rect,
+ uint32_t dst_layer_count)
+{
+ struct vk_device *device = cmd->base.device;
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkResult result;
+
+ VkPipelineLayout pipeline_layout;
+ result = get_blit_pipeline_layout(device, meta, &pipeline_layout);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_command_buffer_set_error(cmd, result);
+ return;
+ }
+
+ uint32_t desc_count = 0;
+ VkDescriptorImageInfo image_infos[3];
+ VkWriteDescriptorSet desc_writes[3];
+
+ if (sampler != VK_NULL_HANDLE) {
+ image_infos[desc_count] = (VkDescriptorImageInfo) {
+ .sampler = sampler,
+ };
+ desc_writes[desc_count] = (VkWriteDescriptorSet) {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstBinding = BLIT_DESC_BINDING_SAMPLER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER,
+ .descriptorCount = 1,
+ .pImageInfo = &image_infos[desc_count],
+ };
+ desc_count++;
+ }
+
+ u_foreach_bit(a, src_subres.aspectMask) {
+ VkImageAspectFlagBits aspect = (1 << a);
+
+ VkImageView src_view;
+ const VkImageViewUsageCreateInfo src_view_usage = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+ .usage = VK_IMAGE_USAGE_SAMPLED_BIT,
+ };
+ const VkImageViewCreateInfo src_view_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .pNext = &src_view_usage,
+ .image = vk_image_to_handle(src_image),
+ .viewType = vk_image_sampled_view_type(src_image),
+ .format = src_format,
+ .subresourceRange = {
+ .aspectMask = aspect,
+ .baseMipLevel = src_subres.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = src_subres.baseArrayLayer,
+ .layerCount = src_subres.layerCount,
+ },
+ };
+ result = vk_meta_create_image_view(cmd, meta, &src_view_info,
+ &src_view);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_command_buffer_set_error(cmd, result);
+ return;
+ }
+
+ assert(desc_count < ARRAY_SIZE(image_infos));
+ assert(desc_count < ARRAY_SIZE(desc_writes));
+ image_infos[desc_count] = (VkDescriptorImageInfo) {
+ .imageView = src_view,
+ };
+ desc_writes[desc_count] = (VkWriteDescriptorSet) {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstBinding = aspect_to_tex_binding(aspect),
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ .descriptorCount = 1,
+ .pImageInfo = &image_infos[desc_count],
+ };
+ desc_count++;
+ }
+
+ disp->CmdPushDescriptorSetKHR(vk_command_buffer_to_handle(cmd),
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline_layout, 0,
+ desc_count, desc_writes);
+
+ assert(dst_subres.aspectMask == src_subres.aspectMask);
+ VkImageAspectFlags aspects_left = dst_subres.aspectMask;
+
+ while (aspects_left) {
+ key->aspects = aspects_left;
+
+      /* If we need to write stencil via iterative discard (no stencil
+       * export: clear the attachment, then per bit restrict the write mask,
+       * REPLACE with reference ~0, and discard where the source bit is 0),
+       * it has to be written by itself because otherwise the discards would
+       * also throw away color or depth data.
+       */
+ if ((key->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ key->aspects != VK_IMAGE_ASPECT_STENCIL_BIT &&
+ !meta->use_stencil_export)
+ key->aspects &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ key->stencil_as_discard = key->aspects == VK_IMAGE_ASPECT_STENCIL_BIT &&
+ !meta->use_stencil_export;
+
+ VkImageView dst_view;
+ const VkImageViewUsageCreateInfo dst_view_usage = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+ .usage = (key->aspects & VK_IMAGE_ASPECT_COLOR_BIT) ?
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT :
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
+ };
+ const VkImageViewCreateInfo dst_view_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .pNext = &dst_view_usage,
+ .image = vk_image_to_handle(dst_image),
+ .viewType = vk_image_sampled_view_type(dst_image),
+ .format = dst_format,
+ .subresourceRange = {
+ .aspectMask = dst_subres.aspectMask,
+ .baseMipLevel = dst_subres.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = dst_subres.baseArrayLayer,
+ .layerCount = dst_subres.layerCount,
+ },
+ };
+ result = vk_meta_create_image_view(cmd, meta, &dst_view_info,
+ &dst_view);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_command_buffer_set_error(cmd, result);
+ return;
+ }
+
+ const VkRenderingAttachmentInfo vk_att = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = dst_view,
+ .imageLayout = dst_image_layout,
+ .loadOp = key->stencil_as_discard ? VK_ATTACHMENT_LOAD_OP_CLEAR :
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ };
+ VkRenderingInfo vk_render = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
+ .renderArea = {
+ .offset = {
+ dst_rect->x0,
+ dst_rect->y0
+ },
+ .extent = {
+ dst_rect->x1 - dst_rect->x0,
+ dst_rect->y1 - dst_rect->y0
+ },
+ },
+ .layerCount = dst_rect->layer + dst_layer_count,
+ };
+
+ if (key->aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
+ vk_render.colorAttachmentCount = 1;
+ vk_render.pColorAttachments = &vk_att;
+ }
+ if (key->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
+ vk_render.pDepthAttachment = &vk_att;
+ if (key->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
+ vk_render.pStencilAttachment = &vk_att;
+
+ disp->CmdBeginRendering(vk_command_buffer_to_handle(cmd), &vk_render);
+
+ VkPipeline pipeline;
+ result = get_blit_pipeline(device, meta, key,
+ pipeline_layout, &pipeline);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_command_buffer_set_error(cmd, result);
+ return;
+ }
+
+ disp->CmdBindPipeline(vk_command_buffer_to_handle(cmd),
+ VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+
+ if (key->stencil_as_discard) {
+ for (uint32_t i = 0; i < 8; i++) {
+ push->stencil_bit = BITFIELD_BIT(i);
+ disp->CmdPushConstants(vk_command_buffer_to_handle(cmd),
+ pipeline_layout,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ 0, sizeof(*push), push);
+
+ disp->CmdSetStencilWriteMask(vk_command_buffer_to_handle(cmd),
+ VK_STENCIL_FACE_FRONT_AND_BACK,
+ push->stencil_bit);
+
+ meta->cmd_draw_volume(cmd, meta, dst_rect, dst_layer_count);
+ }
+ } else {
+ disp->CmdPushConstants(vk_command_buffer_to_handle(cmd),
+ pipeline_layout,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ 0, sizeof(*push), push);
+
+ meta->cmd_draw_volume(cmd, meta, dst_rect, dst_layer_count);
+ }
+
+ disp->CmdEndRendering(vk_command_buffer_to_handle(cmd));
+
+ aspects_left &= ~key->aspects;
+ }
+}
+
+void
+vk_meta_blit_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *src_image,
+ VkFormat src_format,
+ VkImageLayout src_image_layout,
+ struct vk_image *dst_image,
+ VkFormat dst_format,
+ VkImageLayout dst_image_layout,
+ uint32_t region_count,
+ const VkImageBlit2 *regions,
+ VkFilter filter)
+{
+ struct vk_device *device = cmd->base.device;
+ VkResult result;
+
+ VkSampler sampler;
+ result = get_blit_sampler(device, meta, filter, &sampler);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_command_buffer_set_error(cmd, result);
+ return;
+ }
+
+ struct vk_meta_blit_key key;
+ memset(&key, 0, sizeof(key));
+ key.key_type = VK_META_OBJECT_KEY_BLIT_PIPELINE;
+ key.src_samples = src_image->samples;
+ key.dim = vk_image_sampler_dim(src_image);
+ key.dst_format = dst_format;
+
+ for (uint32_t r = 0; r < region_count; r++) {
+ struct vk_meta_blit_push_data push = {0};
+ struct vk_meta_rect dst_rect = {0};
+
+ uint32_t src_level = regions[r].srcSubresource.mipLevel;
+ VkExtent3D src_extent = vk_image_mip_level_extent(src_image, src_level);
+
+ compute_off_scale(src_extent.width,
+ regions[r].srcOffsets[0].x,
+ regions[r].srcOffsets[1].x,
+ regions[r].dstOffsets[0].x,
+ regions[r].dstOffsets[1].x,
+ &dst_rect.x0, &dst_rect.x1,
+ &push.x_off, &push.x_scale);
+ compute_off_scale(src_extent.height,
+ regions[r].srcOffsets[0].y,
+ regions[r].srcOffsets[1].y,
+ regions[r].dstOffsets[0].y,
+ regions[r].dstOffsets[1].y,
+ &dst_rect.y0, &dst_rect.y1,
+ &push.y_off, &push.y_scale);
+
+ VkImageSubresourceLayers src_subres = regions[r].srcSubresource;
+ src_subres.layerCount =
+ vk_image_subresource_layer_count(src_image, &src_subres);
+
+ VkImageSubresourceLayers dst_subres = regions[r].dstSubresource;
+ dst_subres.layerCount =
+ vk_image_subresource_layer_count(dst_image, &dst_subres);
+
+ uint32_t dst_layer_count;
+ if (src_image->image_type == VK_IMAGE_TYPE_3D) {
+ uint32_t layer0, layer1;
+ compute_off_scale(src_extent.depth,
+ regions[r].srcOffsets[0].z,
+ regions[r].srcOffsets[1].z,
+ regions[r].dstOffsets[0].z,
+ regions[r].dstOffsets[1].z,
+ &layer0, &layer1,
+ &push.z_off, &push.z_scale);
+ dst_rect.layer = layer0;
+ dst_layer_count = layer1 - layer0;
+ } else {
+ assert(src_subres.layerCount == dst_subres.layerCount);
+ dst_layer_count = dst_subres.layerCount;
+ push.arr_delta = dst_subres.baseArrayLayer -
+ src_subres.baseArrayLayer;
+ }
+
+ do_blit(cmd, meta,
+ src_image, src_format, src_image_layout, src_subres,
+ dst_image, dst_format, dst_image_layout, dst_subres,
+ sampler, &key, &push, &dst_rect, dst_layer_count);
+ }
+}
+
+void
+vk_meta_blit_image2(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkBlitImageInfo2 *blit)
+{
+ VK_FROM_HANDLE(vk_image, src_image, blit->srcImage);
+ VK_FROM_HANDLE(vk_image, dst_image, blit->dstImage);
+
+ vk_meta_blit_image(cmd, meta,
+ src_image, src_image->format, blit->srcImageLayout,
+ dst_image, dst_image->format, blit->dstImageLayout,
+ blit->regionCount, blit->pRegions, blit->filter);
+}
+
+void
+vk_meta_resolve_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *src_image,
+ VkFormat src_format,
+ VkImageLayout src_image_layout,
+ struct vk_image *dst_image,
+ VkFormat dst_format,
+ VkImageLayout dst_image_layout,
+ uint32_t region_count,
+ const VkImageResolve2 *regions,
+ VkResolveModeFlagBits resolve_mode,
+ VkResolveModeFlagBits stencil_resolve_mode)
+{
+ struct vk_meta_blit_key key;
+ memset(&key, 0, sizeof(key));
+ key.key_type = VK_META_OBJECT_KEY_BLIT_PIPELINE;
+ key.dim = vk_image_sampler_dim(src_image);
+ key.src_samples = src_image->samples;
+ key.resolve_mode = resolve_mode;
+ key.stencil_resolve_mode = stencil_resolve_mode;
+ key.dst_format = dst_format;
+
+ for (uint32_t r = 0; r < region_count; r++) {
+ struct vk_meta_blit_push_data push = {
+ .x_off = regions[r].srcOffset.x - regions[r].dstOffset.x,
+ .y_off = regions[r].srcOffset.y - regions[r].dstOffset.y,
+ .x_scale = 1,
+ .y_scale = 1,
+ };
+ struct vk_meta_rect dst_rect = {
+ .x0 = regions[r].dstOffset.x,
+ .y0 = regions[r].dstOffset.y,
+ .x1 = regions[r].dstOffset.x + regions[r].extent.width,
+ .y1 = regions[r].dstOffset.y + regions[r].extent.height,
+ };
+
+ VkImageSubresourceLayers src_subres = regions[r].srcSubresource;
+ src_subres.layerCount =
+ vk_image_subresource_layer_count(src_image, &src_subres);
+
+ VkImageSubresourceLayers dst_subres = regions[r].dstSubresource;
+ dst_subres.layerCount =
+ vk_image_subresource_layer_count(dst_image, &dst_subres);
+
+ do_blit(cmd, meta,
+ src_image, src_format, src_image_layout, src_subres,
+ dst_image, dst_format, dst_image_layout, dst_subres,
+ VK_NULL_HANDLE, &key, &push, &dst_rect,
+ dst_subres.layerCount);
+ }
+}
+
+void
+vk_meta_resolve_image2(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkResolveImageInfo2 *resolve)
+{
+ VK_FROM_HANDLE(vk_image, src_image, resolve->srcImage);
+ VK_FROM_HANDLE(vk_image, dst_image, resolve->dstImage);
+
+ VkResolveModeFlagBits resolve_mode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT;
+ if (vk_format_is_color(src_image->format) &&
+ !vk_format_is_int(src_image->format))
+ resolve_mode = VK_RESOLVE_MODE_AVERAGE_BIT;
+
+ vk_meta_resolve_image(cmd, meta,
+ src_image, src_image->format, resolve->srcImageLayout,
+ dst_image, dst_image->format, resolve->dstImageLayout,
+ resolve->regionCount, resolve->pRegions,
+ resolve_mode, VK_RESOLVE_MODE_SAMPLE_ZERO_BIT);
+}
+
+static void
+vk_meta_resolve_attachment(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image_view *src_view,
+ VkImageLayout src_image_layout,
+ struct vk_image_view *dst_view,
+ VkImageLayout dst_image_layout,
+ VkImageAspectFlags resolve_aspects,
+ VkResolveModeFlagBits resolve_mode,
+ VkResolveModeFlagBits stencil_resolve_mode,
+ VkRect2D area, uint32_t layer_count,
+ uint32_t view_mask)
+{
+ VkImageResolve2 region = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
+ .srcSubresource = {
+ .aspectMask = resolve_aspects,
+ .mipLevel = src_view->base_mip_level,
+ },
+ .srcOffset = { area.offset.x, area.offset.y, 0},
+ .dstSubresource = {
+ .aspectMask = resolve_aspects,
+ .mipLevel = dst_view->base_mip_level,
+ },
+ .dstOffset = { area.offset.x, area.offset.y, 0},
+ .extent = { area.extent.width, area.extent.height, 1},
+ };
+
+ if (view_mask) {
+ u_foreach_bit(v, view_mask) {
+ region.srcSubresource.baseArrayLayer = src_view->base_array_layer + v;
+ region.srcSubresource.layerCount = 1;
+ region.dstSubresource.baseArrayLayer = dst_view->base_array_layer + v;
+ region.dstSubresource.layerCount = 1;
+
+ vk_meta_resolve_image(cmd, meta,
+ src_view->image, src_view->format,
+ src_image_layout,
+ dst_view->image, dst_view->format,
+ dst_image_layout,
+ 1, &region, resolve_mode, stencil_resolve_mode);
+ }
+ } else {
+ region.srcSubresource.baseArrayLayer = src_view->base_array_layer;
+ region.srcSubresource.layerCount = layer_count;
+ region.dstSubresource.baseArrayLayer = dst_view->base_array_layer;
+ region.dstSubresource.layerCount = layer_count;
+
+ vk_meta_resolve_image(cmd, meta,
+ src_view->image, src_view->format,
+ src_image_layout,
+ dst_view->image, dst_view->format,
+ dst_image_layout,
+ 1, &region, resolve_mode, stencil_resolve_mode);
+ }
+}
+
+void
+vk_meta_resolve_rendering(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const VkRenderingInfo *pRenderingInfo)
+{
+ for (uint32_t c = 0; c < pRenderingInfo->colorAttachmentCount; c++) {
+ const VkRenderingAttachmentInfo *att =
+ &pRenderingInfo->pColorAttachments[c];
+ if (att->resolveMode == VK_RESOLVE_MODE_NONE)
+ continue;
+
+ VK_FROM_HANDLE(vk_image_view, view, att->imageView);
+ VK_FROM_HANDLE(vk_image_view, res_view, att->resolveImageView);
+
+ vk_meta_resolve_attachment(cmd, meta, view, att->imageLayout,
+ res_view, att->resolveImageLayout,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ att->resolveMode, VK_RESOLVE_MODE_NONE,
+ pRenderingInfo->renderArea,
+ pRenderingInfo->layerCount,
+ pRenderingInfo->viewMask);
+ }
+
+ const VkRenderingAttachmentInfo *d_att = pRenderingInfo->pDepthAttachment;
+ if (d_att && d_att->resolveMode == VK_RESOLVE_MODE_NONE)
+ d_att = NULL;
+
+ const VkRenderingAttachmentInfo *s_att = pRenderingInfo->pStencilAttachment;
+ if (s_att && s_att->resolveMode == VK_RESOLVE_MODE_NONE)
+ s_att = NULL;
+
+ if (s_att != NULL || d_att != NULL) {
+ if (s_att != NULL && d_att != NULL &&
+ s_att->imageView == d_att->imageView &&
+ s_att->resolveImageView == d_att->resolveImageView) {
+ VK_FROM_HANDLE(vk_image_view, view, d_att->imageView);
+ VK_FROM_HANDLE(vk_image_view, res_view, d_att->resolveImageView);
+
+ vk_meta_resolve_attachment(cmd, meta, view, d_att->imageLayout,
+ res_view, d_att->resolveImageLayout,
+ VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT,
+ d_att->resolveMode, s_att->resolveMode,
+ pRenderingInfo->renderArea,
+ pRenderingInfo->layerCount,
+ pRenderingInfo->viewMask);
+ } else {
+ if (d_att != NULL) {
+ VK_FROM_HANDLE(vk_image_view, view, d_att->imageView);
+ VK_FROM_HANDLE(vk_image_view, res_view, d_att->resolveImageView);
+
+ vk_meta_resolve_attachment(cmd, meta, view, d_att->imageLayout,
+ res_view, d_att->resolveImageLayout,
+ VK_IMAGE_ASPECT_DEPTH_BIT,
+ d_att->resolveMode, VK_RESOLVE_MODE_NONE,
+ pRenderingInfo->renderArea,
+ pRenderingInfo->layerCount,
+ pRenderingInfo->viewMask);
+ }
+
+ if (s_att != NULL) {
+ VK_FROM_HANDLE(vk_image_view, view, s_att->imageView);
+ VK_FROM_HANDLE(vk_image_view, res_view, s_att->resolveImageView);
+
+ vk_meta_resolve_attachment(cmd, meta, view, s_att->imageLayout,
+ res_view, s_att->resolveImageLayout,
+ VK_IMAGE_ASPECT_STENCIL_BIT,
+ VK_RESOLVE_MODE_NONE, s_att->resolveMode,
+ pRenderingInfo->renderArea,
+ pRenderingInfo->layerCount,
+ pRenderingInfo->viewMask);
+ }
+ }
+ }
+}
diff --git a/src/vulkan/runtime/vk_meta_clear.c b/src/vulkan/runtime/vk_meta_clear.c
new file mode 100644
index 00000000000..638db130403
--- /dev/null
+++ b/src/vulkan/runtime/vk_meta_clear.c
@@ -0,0 +1,609 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_meta_private.h"
+
+#include "vk_command_buffer.h"
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_image.h"
+#include "vk_pipeline.h"
+#include "vk_util.h"
+
+#include "nir_builder.h"
+
+struct vk_meta_clear_key {
+ enum vk_meta_object_key_type key_type;
+ struct vk_meta_rendering_info render;
+ uint8_t color_attachments_cleared;
+ bool clear_depth;
+ bool clear_stencil;
+};
+
+struct vk_meta_clear_push_data {
+ VkClearColorValue color_values[MESA_VK_MAX_COLOR_ATTACHMENTS];
+};
+
+static nir_shader *
+build_clear_shader(const struct vk_meta_clear_key *key)
+{
+ nir_builder build = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
+ NULL, "vk-meta-clear");
+ nir_builder *b = &build;
+
+ struct glsl_struct_field push_field = {
+ .type = glsl_array_type(glsl_vec4_type(),
+ MESA_VK_MAX_COLOR_ATTACHMENTS,
+ 16 /* explicit_stride */),
+ .name = "color_values",
+ };
+ const struct glsl_type *push_iface_type =
+ glsl_interface_type(&push_field, 1, GLSL_INTERFACE_PACKING_STD140,
+ false /* row_major */, "push");
+
+ nir_variable *push = nir_variable_create(b->shader, nir_var_mem_push_const,
+ push_iface_type, "push");
+ nir_deref_instr *push_arr =
+ nir_build_deref_struct(b, nir_build_deref_var(b, push), 0);
+
+ u_foreach_bit(a, key->color_attachments_cleared) {
+ nir_def *color_value =
+ nir_load_deref(b, nir_build_deref_array_imm(b, push_arr, a));
+
+ const struct glsl_type *out_type;
+ if (vk_format_is_sint(key->render.color_attachment_formats[a]))
+ out_type = glsl_ivec4_type();
+ else if (vk_format_is_uint(key->render.color_attachment_formats[a]))
+ out_type = glsl_uvec4_type();
+ else
+ out_type = glsl_vec4_type();
+
+ char out_name[8];
+ snprintf(out_name, sizeof(out_name), "color%u", a);
+
+ nir_variable *out = nir_variable_create(b->shader, nir_var_shader_out,
+ out_type, out_name);
+ out->data.location = FRAG_RESULT_DATA0 + a;
+
+ nir_store_var(b, out, color_value, 0xf);
+ }
+
+ return b->shader;
+}
+
+static VkResult
+get_clear_pipeline_layout(struct vk_device *device,
+ struct vk_meta_device *meta,
+ VkPipelineLayout *layout_out)
+{
+ const char key[] = "vk-meta-clear-pipeline-layout";
+
+ VkPipelineLayout from_cache =
+ vk_meta_lookup_pipeline_layout(meta, key, sizeof(key));
+ if (from_cache != VK_NULL_HANDLE) {
+ *layout_out = from_cache;
+ return VK_SUCCESS;
+ }
+
+ const VkPushConstantRange push_range = {
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .offset = 0,
+ .size = sizeof(struct vk_meta_clear_push_data),
+ };
+
+ const VkPipelineLayoutCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .pushConstantRangeCount = 1,
+ .pPushConstantRanges = &push_range,
+ };
+
+ return vk_meta_create_pipeline_layout(device, meta, &info,
+ key, sizeof(key), layout_out);
+}
+
+static VkResult
+get_clear_pipeline(struct vk_device *device,
+ struct vk_meta_device *meta,
+ const struct vk_meta_clear_key *key,
+ VkPipelineLayout layout,
+ VkPipeline *pipeline_out)
+{
+ VkPipeline from_cache = vk_meta_lookup_pipeline(meta, key, sizeof(*key));
+ if (from_cache != VK_NULL_HANDLE) {
+ *pipeline_out = from_cache;
+ return VK_SUCCESS;
+ }
+
+ const VkPipelineShaderStageNirCreateInfoMESA fs_nir_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA,
+ .nir = build_clear_shader(key),
+ };
+ const VkPipelineShaderStageCreateInfo fs_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .pNext = &fs_nir_info,
+ .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .pName = "main",
+ };
+
+ VkPipelineDepthStencilStateCreateInfo ds_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ };
+ const VkDynamicState dyn_stencil_ref = VK_DYNAMIC_STATE_STENCIL_REFERENCE;
+ VkPipelineDynamicStateCreateInfo dyn_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ };
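+ /* The depth clear value is not passed as a push constant; it reaches the
+ * framebuffer as the fragment depth via the rect's z coordinate, so an
+ * always-passing depth test with writes enabled is all that is needed here.
+ */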
+ if (key->clear_depth) {
+ ds_info.depthTestEnable = VK_TRUE;
+ ds_info.depthWriteEnable = VK_TRUE;
+ ds_info.depthCompareOp = VK_COMPARE_OP_ALWAYS;
+ }
+ if (key->clear_stencil) {
+ ds_info.stencilTestEnable = VK_TRUE;
+ ds_info.front.compareOp = VK_COMPARE_OP_ALWAYS;
+ ds_info.front.passOp = VK_STENCIL_OP_REPLACE;
+ ds_info.front.compareMask = ~0u;
+ ds_info.front.writeMask = ~0u;
+ ds_info.back = ds_info.front;
+ dyn_info.dynamicStateCount = 1;
+ dyn_info.pDynamicStates = &dyn_stencil_ref;
+ }
+
+ const VkGraphicsPipelineCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = 1,
+ .pStages = &fs_info,
+ .pDepthStencilState = &ds_info,
+ .pDynamicState = &dyn_info,
+ .layout = layout,
+ };
+
+ VkResult result = vk_meta_create_graphics_pipeline(device, meta, &info,
+ &key->render,
+ key, sizeof(*key),
+ pipeline_out);
+ ralloc_free(fs_nir_info.nir);
+
+ return result;
+}
+
+static int
+vk_meta_rect_cmp_layer(const void *_a, const void *_b)
+{
+ const struct vk_meta_rect *a = _a, *b = _b;
+ assert(a->layer <= INT_MAX && b->layer <= INT_MAX);
+ return a->layer - b->layer;
+}
+
+void
+vk_meta_clear_attachments(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const struct vk_meta_rendering_info *render,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t clear_rect_count,
+ const VkClearRect *clear_rects)
+{
+ struct vk_device *device = cmd->base.device;
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkResult result;
+
+ struct vk_meta_clear_key key;
+ memset(&key, 0, sizeof(key));
+ key.key_type = VK_META_OBJECT_KEY_CLEAR_PIPELINE;
+ vk_meta_rendering_info_copy(&key.render, render);
+
+ struct vk_meta_clear_push_data push = {0};
+ float depth_value = 1.0f;
+ uint32_t stencil_value = 0;
+
+ for (uint32_t i = 0; i < attachment_count; i++) {
+ if (attachments[i].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
+ const uint32_t a = attachments[i].colorAttachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ assert(a < MESA_VK_MAX_COLOR_ATTACHMENTS);
+ if (render->color_attachment_formats[a] == VK_FORMAT_UNDEFINED)
+ continue;
+
+ key.color_attachments_cleared |= BITFIELD_BIT(a);
+ push.color_values[a] = attachments[i].clearValue.color;
+ }
+ if (attachments[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ key.clear_depth = true;
+ depth_value = attachments[i].clearValue.depthStencil.depth;
+ }
+ if (attachments[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ key.clear_stencil = true;
+ stencil_value = attachments[i].clearValue.depthStencil.stencil;
+ }
+ }
+
+ VkPipelineLayout layout;
+ result = get_clear_pipeline_layout(device, meta, &layout);
+ if (unlikely(result != VK_SUCCESS)) {
+ /* TODO: Report error */
+ return;
+ }
+
+ VkPipeline pipeline;
+ result = get_clear_pipeline(device, meta, &key, layout, &pipeline);
+ if (unlikely(result != VK_SUCCESS)) {
+ /* TODO: Report error */
+ return;
+ }
+
+ disp->CmdBindPipeline(vk_command_buffer_to_handle(cmd),
+ VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+
+ if (key.clear_stencil) {
+ disp->CmdSetStencilReference(vk_command_buffer_to_handle(cmd),
+ VK_STENCIL_FACE_FRONT_AND_BACK,
+ stencil_value);
+ }
+
+ disp->CmdPushConstants(vk_command_buffer_to_handle(cmd),
+ layout, VK_SHADER_STAGE_FRAGMENT_BIT,
+ 0, sizeof(push), &push);
+
+ if (render->view_mask == 0) {
+ if (clear_rect_count == 1 && clear_rects[0].layerCount > 1) {
+ struct vk_meta_rect rect = {
+ .x0 = clear_rects[0].rect.offset.x,
+ .x1 = clear_rects[0].rect.offset.x +
+ clear_rects[0].rect.extent.width,
+ .y0 = clear_rects[0].rect.offset.y,
+ .y1 = clear_rects[0].rect.offset.y +
+ clear_rects[0].rect.extent.height,
+ .z = depth_value,
+ .layer = clear_rects[0].baseArrayLayer,
+ };
+
+ meta->cmd_draw_volume(cmd, meta, &rect, clear_rects[0].layerCount);
+ } else {
+ uint32_t max_rect_count = 0;
+ for (uint32_t r = 0; r < clear_rect_count; r++)
+ max_rect_count += clear_rects[r].layerCount;
+
+ STACK_ARRAY(struct vk_meta_rect, rects, max_rect_count);
+
+ uint32_t rect_count = 0;
+ for (uint32_t r = 0; r < clear_rect_count; r++) {
+ struct vk_meta_rect rect = {
+ .x0 = clear_rects[r].rect.offset.x,
+ .x1 = clear_rects[r].rect.offset.x +
+ clear_rects[r].rect.extent.width,
+ .y0 = clear_rects[r].rect.offset.y,
+ .y1 = clear_rects[r].rect.offset.y +
+ clear_rects[r].rect.extent.height,
+ .z = depth_value,
+ };
+ for (uint32_t a = 0; a < clear_rects[r].layerCount; a++) {
+ rect.layer = clear_rects[r].baseArrayLayer + a;
+ rects[rect_count++] = rect;
+ }
+ }
+ assert(rect_count <= max_rect_count);
+
+ /* If we have more than one clear rect, sort by layer in the hopes
+ * the hardware more or less does all the clears for one layer before
+ * moving on to the next, thus reducing cache thrashing.
+ */
+ qsort(rects, rect_count, sizeof(*rects), vk_meta_rect_cmp_layer);
+
+ meta->cmd_draw_rects(cmd, meta, rect_count, rects);
+
+ STACK_ARRAY_FINISH(rects);
+ }
+ } else {
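+ /* With a non-zero view mask, layered rendering addresses views instead of
+ * array layers, so emit one rect per active view and use the view index as
+ * the layer.
+ */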
+ const uint32_t rect_count = clear_rect_count *
+ util_bitcount(render->view_mask);
+ STACK_ARRAY(struct vk_meta_rect, rects, rect_count);
+
+ uint32_t rect_idx = 0;
+ u_foreach_bit(v, render->view_mask) {
+ for (uint32_t r = 0; r < clear_rect_count; r++) {
+ assert(clear_rects[r].baseArrayLayer == 0);
+ assert(clear_rects[r].layerCount == 1);
+ rects[rect_idx++] = (struct vk_meta_rect) {
+ .x0 = clear_rects[r].rect.offset.x,
+ .x1 = clear_rects[r].rect.offset.x +
+ clear_rects[r].rect.extent.width,
+ .y0 = clear_rects[r].rect.offset.y,
+ .y1 = clear_rects[r].rect.offset.y +
+ clear_rects[r].rect.extent.height,
+ .z = depth_value,
+ .layer = v,
+ };
+ }
+ }
+ assert(rect_idx == rect_count);
+
+ meta->cmd_draw_rects(cmd, meta, rect_count, rects);
+
+ STACK_ARRAY_FINISH(rects);
+ }
+}
+
+void
+vk_meta_clear_rendering(struct vk_meta_device *meta,
+ struct vk_command_buffer *cmd,
+ const VkRenderingInfo *pRenderingInfo)
+{
+ assert(!(pRenderingInfo->flags & VK_RENDERING_RESUMING_BIT));
+
+ struct vk_meta_rendering_info render = {
+ .view_mask = pRenderingInfo->viewMask,
+ .color_attachment_count = pRenderingInfo->colorAttachmentCount,
+ };
+
+ uint32_t clear_count = 0;
+ VkClearAttachment clear_att[MESA_VK_MAX_COLOR_ATTACHMENTS + 1];
+ for (uint32_t i = 0; i < pRenderingInfo->colorAttachmentCount; i++) {
+ const VkRenderingAttachmentInfo *att_info =
+ &pRenderingInfo->pColorAttachments[i];
+ if (att_info->imageView == VK_NULL_HANDLE ||
+ att_info->loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
+ continue;
+
+ VK_FROM_HANDLE(vk_image_view, iview, att_info->imageView);
+ render.color_attachment_formats[i] = iview->format;
+ assert(render.samples == 0 || render.samples == iview->image->samples);
+ render.samples = MAX2(render.samples, iview->image->samples);
+
+ clear_att[clear_count++] = (VkClearAttachment) {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .colorAttachment = i,
+ .clearValue = att_info->clearValue,
+ };
+ }
+
+ /* One more for depth/stencil, if needed */
+ clear_att[clear_count] = (VkClearAttachment) { .aspectMask = 0, };
+
+ const VkRenderingAttachmentInfo *d_att_info =
+ pRenderingInfo->pDepthAttachment;
+ if (d_att_info != NULL && d_att_info->imageView != VK_NULL_HANDLE &&
+ d_att_info->loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ VK_FROM_HANDLE(vk_image_view, iview, d_att_info->imageView);
+ render.depth_attachment_format = iview->format;
+ render.samples = MAX2(render.samples, iview->image->samples);
+
+ clear_att[clear_count].aspectMask |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ clear_att[clear_count].clearValue.depthStencil.depth =
+ d_att_info->clearValue.depthStencil.depth;
+ }
+
+ const VkRenderingAttachmentInfo *s_att_info =
+ pRenderingInfo->pStencilAttachment;
+ if (s_att_info != NULL && s_att_info->imageView != VK_NULL_HANDLE &&
+ s_att_info->loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ VK_FROM_HANDLE(vk_image_view, iview, s_att_info->imageView);
+ render.stencil_attachment_format = iview->format;
+ render.samples = MAX2(render.samples, iview->image->samples);
+
+ clear_att[clear_count].aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ clear_att[clear_count].clearValue.depthStencil.stencil =
+ s_att_info->clearValue.depthStencil.stencil;
+ }
+ if (clear_att[clear_count].aspectMask != 0)
+ clear_count++;
+
+ if (clear_count > 0) {
+ const VkClearRect clear_rect = {
+ .rect = pRenderingInfo->renderArea,
+ .baseArrayLayer = 0,
+ .layerCount = pRenderingInfo->viewMask ?
+ 1 : pRenderingInfo->layerCount,
+ };
+ vk_meta_clear_attachments(cmd, meta, &render,
+ clear_count, clear_att,
+ 1, &clear_rect);
+ }
+}
+
+static void
+clear_image_level_layers(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *image,
+ VkImageLayout image_layout,
+ VkFormat format,
+ const VkClearValue *clear_value,
+ VkImageAspectFlags aspects,
+ uint32_t level,
+ uint32_t base_array_layer,
+ uint32_t layer_count)
+{
+ struct vk_device *device = cmd->base.device;
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkCommandBuffer _cmd = vk_command_buffer_to_handle(cmd);
+ VkResult result;
+
+ const VkImageViewCreateInfo view_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = vk_image_to_handle(image),
+ .viewType = vk_image_render_view_type(image, layer_count),
+ .format = format,
+ .subresourceRange = {
+ .aspectMask = aspects,
+ .baseMipLevel = level,
+ .levelCount = 1,
+ .baseArrayLayer = base_array_layer,
+ .layerCount = layer_count,
+ }
+ };
+
+ VkImageView image_view;
+ result = vk_meta_create_image_view(cmd, meta, &view_info, &image_view);
+ if (unlikely(result != VK_SUCCESS)) {
+ /* TODO: Report error */
+ return;
+ }
+
+ const VkExtent3D level_extent = vk_image_mip_level_extent(image, level);
+
+ VkRenderingAttachmentInfo vk_att = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = image_view,
+ .imageLayout = image_layout,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ };
+ VkRenderingInfo vk_render = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
+ .renderArea = {
+ .offset = { 0, 0 },
+ .extent = { level_extent.width, level_extent.height },
+ },
+ .layerCount = layer_count,
+ };
+ struct vk_meta_rendering_info meta_render = {
+ .samples = image->samples,
+ };
+
+ if (image->aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
+ vk_render.colorAttachmentCount = 1;
+ vk_render.pColorAttachments = &vk_att;
+ meta_render.color_attachment_count = 1;
+ meta_render.color_attachment_formats[0] = format;
+ }
+
+ if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ vk_render.pDepthAttachment = &vk_att;
+ meta_render.depth_attachment_format = format;
+ }
+
+ if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ vk_render.pStencilAttachment = &vk_att;
+ meta_render.stencil_attachment_format = format;
+ }
+
+ const VkClearAttachment clear_att = {
+ .aspectMask = aspects,
+ .colorAttachment = 0,
+ .clearValue = *clear_value,
+ };
+
+ const VkClearRect clear_rect = {
+ .rect = {
+ .offset = { 0, 0 },
+ .extent = { level_extent.width, level_extent.height },
+ },
+ .baseArrayLayer = 0,
+ .layerCount = layer_count,
+ };
+
+ disp->CmdBeginRendering(_cmd, &vk_render);
+
+ vk_meta_clear_attachments(cmd, meta, &meta_render,
+ 1, &clear_att, 1, &clear_rect);
+
+ disp->CmdEndRendering(_cmd);
+}
+
+static void
+clear_image_level(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *image,
+ VkImageLayout image_layout,
+ VkFormat format,
+ const VkClearValue *clear_value,
+ uint32_t level,
+ const VkImageSubresourceRange *range)
+{
+ const VkExtent3D level_extent = vk_image_mip_level_extent(image, level);
+
+ uint32_t base_array_layer, layer_count;
+ if (image->image_type == VK_IMAGE_TYPE_3D) {
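+ /* 3D images are rendered through 2D array views (see
+ * vk_image_render_view_type()), so clear one layer per depth slice of this
+ * miplevel.
+ */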
+ base_array_layer = 0;
+ layer_count = level_extent.depth;
+ } else {
+ base_array_layer = range->baseArrayLayer;
+ layer_count = vk_image_subresource_layer_count(image, range);
+ }
+
+ if (layer_count > 1 && !meta->use_layered_rendering) {
+ for (uint32_t a = 0; a < layer_count; a++) {
+ clear_image_level_layers(cmd, meta, image, image_layout,
+ format, clear_value,
+ range->aspectMask, level,
+ base_array_layer + a, 1);
+ }
+ } else {
+ clear_image_level_layers(cmd, meta, image, image_layout,
+ format, clear_value,
+ range->aspectMask, level,
+ base_array_layer, layer_count);
+ }
+}
+
+void
+vk_meta_clear_color_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *image,
+ VkImageLayout image_layout,
+ VkFormat format,
+ const VkClearColorValue *color,
+ uint32_t range_count,
+ const VkImageSubresourceRange *ranges)
+{
+ const VkClearValue clear_value = {
+ .color = *color,
+ };
+ for (uint32_t r = 0; r < range_count; r++) {
+ const uint32_t level_count =
+ vk_image_subresource_level_count(image, &ranges[r]);
+
+ for (uint32_t l = 0; l < level_count; l++) {
+ clear_image_level(cmd, meta, image, image_layout,
+ format, &clear_value,
+ ranges[r].baseMipLevel + l,
+ &ranges[r]);
+ }
+ }
+}
+
+void
+vk_meta_clear_depth_stencil_image(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ struct vk_image *image,
+ VkImageLayout image_layout,
+ const VkClearDepthStencilValue *depth_stencil,
+ uint32_t range_count,
+ const VkImageSubresourceRange *ranges)
+{
+ const VkClearValue clear_value = {
+ .depthStencil = *depth_stencil,
+ };
+ for (uint32_t r = 0; r < range_count; r++) {
+ const uint32_t level_count =
+ vk_image_subresource_level_count(image, &ranges[r]);
+
+ for (uint32_t l = 0; l < level_count; l++) {
+ clear_image_level(cmd, meta, image, image_layout,
+ image->format, &clear_value,
+ ranges[r].baseMipLevel + l,
+ &ranges[r]);
+ }
+ }
+}
diff --git a/src/vulkan/runtime/vk_meta_draw_rects.c b/src/vulkan/runtime/vk_meta_draw_rects.c
new file mode 100644
index 00000000000..fd76e582b97
--- /dev/null
+++ b/src/vulkan/runtime/vk_meta_draw_rects.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_meta_private.h"
+
+#include "vk_command_buffer.h"
+#include "vk_command_pool.h"
+#include "vk_device.h"
+
+#include "nir_builder.h"
+
+const VkPipelineVertexInputStateCreateInfo vk_meta_draw_rects_vi_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = 1,
+ .pVertexBindingDescriptions = &(const VkVertexInputBindingDescription) {
+ .binding = 0,
+ .stride = 4 * sizeof(uint32_t),
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
+ },
+ .vertexAttributeDescriptionCount = 1,
+ .pVertexAttributeDescriptions = &(const VkVertexInputAttributeDescription) {
+ .location = 0,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32B32A32_UINT,
+ .offset = 0,
+ },
+};
+
+const VkPipelineInputAssemblyStateCreateInfo vk_meta_draw_rects_ia_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_META_RECT_LIST_MESA,
+ .primitiveRestartEnable = VK_FALSE,
+};
+
+const VkPipelineViewportStateCreateInfo vk_meta_draw_rects_vs_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .scissorCount = 1,
+};
+
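+/* Roughly equivalent GLSL for the vertex shader built below (an illustrative
+ * sketch only; the shader is built directly in NIR):
+ *
+ *    layout(location = 0) in uvec4 vtx_in;   // x, y, z bits + base layer
+ *    void main() {
+ *       gl_Position = vec4(uintBitsToFloat(vtx_in.xyz), 1.0);
+ *       gl_Layer = int(vtx_in.w) + gl_InstanceIndex;
+ *    }
+ *
+ * When use_gs is set, the same values are written to generic varyings and
+ * the geometry shader below copies them to gl_Position/gl_Layer instead.
+ */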
+nir_shader *
+vk_meta_draw_rects_vs_nir(struct vk_meta_device *device, bool use_gs)
+{
+ nir_builder build = nir_builder_init_simple_shader(MESA_SHADER_VERTEX, NULL,
+ "vk-meta-draw-rects-vs");
+ nir_builder *b = &build;
+
+ nir_variable *in = nir_variable_create(b->shader, nir_var_shader_in,
+ glsl_uvec4_type(), "vtx_in");
+ in->data.location = VERT_ATTRIB_GENERIC0;
+
+ nir_variable *pos =
+ nir_variable_create(b->shader, nir_var_shader_out, glsl_vec4_type(),
+ use_gs ? "pos_out" : "gl_Position");
+ pos->data.location = use_gs ? VARYING_SLOT_VAR0 : VARYING_SLOT_POS;
+
+ nir_variable *layer =
+ nir_variable_create(b->shader, nir_var_shader_out, glsl_int_type(),
+ use_gs ? "layer_out" : "gl_Layer");
+ layer->data.location = use_gs ? VARYING_SLOT_VAR1 : VARYING_SLOT_LAYER;
+
+ nir_def *vtx = nir_load_var(b, in);
+ nir_store_var(b, pos, nir_vec4(b, nir_channel(b, vtx, 0),
+ nir_channel(b, vtx, 1),
+ nir_channel(b, vtx, 2),
+ nir_imm_float(b, 1)),
+ 0xf);
+
+ nir_store_var(b, layer, nir_iadd(b, nir_load_instance_id(b),
+ nir_channel(b, vtx, 3)),
+ 0x1);
+
+ return b->shader;
+}
+
+nir_shader *
+vk_meta_draw_rects_gs_nir(struct vk_meta_device *device)
+{
+ nir_builder build =
+ nir_builder_init_simple_shader(MESA_SHADER_GEOMETRY, NULL,
+ "vk-meta-draw-rects-gs");
+ nir_builder *b = &build;
+
+ nir_variable *pos_in =
+ nir_variable_create(b->shader, nir_var_shader_in,
+ glsl_array_type(glsl_vec4_type(), 3, 0), "pos_in");
+ pos_in->data.location = VARYING_SLOT_VAR0;
+
+ nir_variable *layer_in =
+ nir_variable_create(b->shader, nir_var_shader_in,
+ glsl_array_type(glsl_int_type(), 3, 0), "layer_in");
+ layer_in->data.location = VARYING_SLOT_VAR1;
+
+ nir_variable *pos_out =
+ nir_variable_create(b->shader, nir_var_shader_out,
+ glsl_vec4_type(), "gl_Position");
+ pos_out->data.location = VARYING_SLOT_POS;
+
+ nir_variable *layer_out =
+ nir_variable_create(b->shader, nir_var_shader_out,
+ glsl_int_type(), "gl_Layer");
+ layer_out->data.location = VARYING_SLOT_LAYER;
+
+ for (unsigned i = 0; i < 3; i++) {
+ nir_deref_instr *pos_in_deref =
+ nir_build_deref_array_imm(b, nir_build_deref_var(b, pos_in), i);
+ nir_deref_instr *layer_in_deref =
+ nir_build_deref_array_imm(b, nir_build_deref_var(b, layer_in), i);
+
+ nir_store_var(b, pos_out, nir_load_deref(b, pos_in_deref), 0xf);
+ nir_store_var(b, layer_out, nir_load_deref(b, layer_in_deref), 1);
+ nir_emit_vertex(b);
+ }
+
+ nir_end_primitive(b);
+
+ struct shader_info *info = &build.shader->info;
+ info->gs.input_primitive = MESA_PRIM_TRIANGLES;
+ info->gs.output_primitive = MESA_PRIM_TRIANGLE_STRIP;
+ info->gs.vertices_in = 3;
+ info->gs.vertices_out = 3;
+ info->gs.invocations = 1;
+ info->gs.active_stream_mask = 1;
+
+ return b->shader;
+}
+
+struct vertex {
+ float x, y, z;
+ uint32_t layer;
+};
+
+static void
+setup_viewport_scissor(struct vk_command_buffer *cmd,
+ uint32_t rect_count,
+ const struct vk_meta_rect *rects,
+ float *x_scale, float *y_scale)
+{
+ const struct vk_device_dispatch_table *disp =
+ &cmd->base.device->dispatch_table;
+ VkCommandBuffer _cmd = vk_command_buffer_to_handle(cmd);
+
+ assert(rects[0].x0 < rects[0].x1 && rects[0].y0 < rects[0].y1);
+ uint32_t xbits = rects[0].x1 - 1, ybits = rects[0].y1 - 1;
+ float zmin = rects[0].z, zmax = rects[0].z;
+ for (uint32_t r = 1; r < rect_count; r++) {
+ assert(rects[r].x0 < rects[r].x1 && rects[r].y0 < rects[r].y1);
+ xbits |= rects[r].x1 - 1;
+ ybits |= rects[r].y1 - 1;
+ zmin = fminf(zmin, rects[r].z);
+ zmax = fmaxf(zmax, rects[r].z);
+ }
+
+ /* Annoyingly, we don't actually know the render area. We assume that all
+ * our rects are inside the render area. We further assume the maximum
+ * image and/or viewport size is a power of two. This means we can round
+ * up to a power of two without going outside any maximums. Using a power
+ * of two will ensure we don't lose precision when scaling coordinates.
+ */
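+ /* For example, if the largest x1 across all rects is 1000, the top set bit
+ * of xbits is bit 9, xmax_log2 = 10, and the viewport is 1024 pixels wide.
+ * create_vertex_buffer() then maps a pixel coordinate x to NDC as
+ * x * x_scale - 1 = x * (2 / 1024) - 1, so 0 -> -1 and 1024 -> +1.
+ */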
+ int xmax_log2 = 1 + util_logbase2(xbits);
+ int ymax_log2 = 1 + util_logbase2(ybits);
+
+ assert(xmax_log2 >= 0 && xmax_log2 <= 31);
+ assert(ymax_log2 >= 0 && ymax_log2 <= 31);
+
+ /* We don't care about precise bounds on Z, only that it's inside [0, 1] if
+ * the implementation only supports [0, 1].
+ */
+ if (zmin >= 0.0f && zmax <= 1.0f) {
+ zmin = 0.0f;
+ zmax = 1.0f;
+ }
+
+ VkViewport viewport = {
+ .x = 0,
+ .y = 0,
+ .width = ldexpf(1.0, xmax_log2),
+ .height = ldexpf(1.0, ymax_log2),
+ .minDepth = zmin,
+ .maxDepth = zmax,
+ };
+ disp->CmdSetViewport(_cmd, 0, 1, &viewport);
+
+ VkRect2D scissor = {
+ .offset = { 0, 0 },
+ .extent = { 1u << xmax_log2, 1u << ymax_log2 },
+ };
+ disp->CmdSetScissor(_cmd, 0, 1, &scissor);
+
+ /* Scaling factors */
+ *x_scale = ldexpf(2.0, -xmax_log2);
+ *y_scale = ldexpf(2.0, -ymax_log2);
+}
+
+static const uint32_t rect_vb_size_B = 6 * 4 * sizeof(float);
+
+static VkResult
+create_vertex_buffer(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ float x_scale, float y_scale,
+ uint32_t rect_count,
+ const struct vk_meta_rect *rects,
+ VkBuffer *buffer_out)
+{
+ VkResult result;
+
+ const VkBufferCreateInfo vtx_buffer_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = rect_count * rect_vb_size_B,
+ .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
+ .queueFamilyIndexCount = 1,
+ .pQueueFamilyIndices = &cmd->pool->queue_family_index,
+ };
+
+ result = vk_meta_create_buffer(cmd, meta, &vtx_buffer_info, buffer_out);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ void *map;
+ result = meta->cmd_bind_map_buffer(cmd, meta, *buffer_out, &map);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ for (uint32_t r = 0; r < rect_count; r++) {
+ float x0 = rects[r].x0 * x_scale - 1.0f;
+ float y0 = rects[r].y0 * y_scale - 1.0f;
+ float x1 = rects[r].x1 * x_scale - 1.0f;
+ float y1 = rects[r].y1 * y_scale - 1.0f;
+ float z = rects[r].z;
+ uint32_t w = rects[r].layer;
+
+ struct vertex rect_vb_data[6] = {
+ { x0, y1, z, w },
+ { x0, y0, z, w },
+ { x1, y1, z, w },
+
+ { x1, y0, z, w },
+ { x1, y1, z, w },
+ { x0, y0, z, w },
+ };
+ assert(sizeof(rect_vb_data) == rect_vb_size_B);
+ memcpy((char *)map + r * rect_vb_size_B, rect_vb_data, rect_vb_size_B);
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+vk_meta_draw_volume(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ const struct vk_meta_rect *rect,
+ uint32_t layer_count)
+{
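+ /* Draw a single rect instanced layer_count times; the vertex shader adds
+ * gl_InstanceIndex to the rect's base layer so each instance lands on its
+ * own array layer.
+ */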
+ const struct vk_device_dispatch_table *disp =
+ &cmd->base.device->dispatch_table;
+ VkCommandBuffer _cmd = vk_command_buffer_to_handle(cmd);
+
+ float x_scale, y_scale;
+ setup_viewport_scissor(cmd, 1, rect, &x_scale, &y_scale);
+
+ VkBuffer vtx_buffer;
+ VkResult result = create_vertex_buffer(cmd, meta, x_scale, y_scale,
+ 1, rect, &vtx_buffer);
+ if (unlikely(result != VK_SUCCESS)) {
+ /* TODO: Report error */
+ return;
+ }
+
+ const VkDeviceSize zero = 0;
+ disp->CmdBindVertexBuffers(_cmd, 0, 1, &vtx_buffer, &zero);
+
+ disp->CmdDraw(_cmd, 6, layer_count, 0, 0);
+}
+
+void
+vk_meta_draw_rects(struct vk_command_buffer *cmd,
+ struct vk_meta_device *meta,
+ uint32_t rect_count,
+ const struct vk_meta_rect *rects)
+{
+ const struct vk_device_dispatch_table *disp =
+ &cmd->base.device->dispatch_table;
+ VkCommandBuffer _cmd = vk_command_buffer_to_handle(cmd);
+
+ /* Two triangles with VK_FORMAT_R32G32B32A32_UINT vertices; rect_vb_size_B
+ * above gives the size of one rect's worth of vertex data.
+ */
+ const uint32_t rects_per_draw =
+ meta->max_bind_map_buffer_size_B / rect_vb_size_B;
+
+ if (rect_count == 0)
+ return;
+
+ float x_scale, y_scale;
+ setup_viewport_scissor(cmd, rect_count, rects, &x_scale, &y_scale);
+
+ uint32_t next_rect = 0;
+ while (next_rect < rect_count) {
+ const uint32_t count = MIN2(rects_per_draw, rect_count - next_rect);
+
+ VkBuffer vtx_buffer;
+ VkResult result = create_vertex_buffer(cmd, meta, x_scale, y_scale,
+ count, &rects[next_rect],
+ &vtx_buffer);
+ if (unlikely(result != VK_SUCCESS)) {
+ /* TODO: Report error */
+ return;
+ }
+
+ const VkDeviceSize zero = 0;
+ disp->CmdBindVertexBuffers(_cmd, 0, 1, &vtx_buffer, &zero);
+
+ disp->CmdDraw(_cmd, 6 * count, 1, 0, 0);
+
+ next_rect += count;
+ }
+ assert(next_rect == rect_count);
+}
diff --git a/src/vulkan/runtime/vk_meta_private.h b/src/vulkan/runtime/vk_meta_private.h
new file mode 100644
index 00000000000..a8b2a97d91b
--- /dev/null
+++ b/src/vulkan/runtime/vk_meta_private.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_META_PRIVATE_H
+#define VK_META_PRIVATE_H
+
+#include "vk_image.h"
+#include "vk_meta.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const VkPipelineVertexInputStateCreateInfo vk_meta_draw_rects_vi_state;
+extern const VkPipelineInputAssemblyStateCreateInfo vk_meta_draw_rects_ia_state;
+extern const VkPipelineViewportStateCreateInfo vk_meta_draw_rects_vs_state;
+
+struct nir_shader *
+vk_meta_draw_rects_vs_nir(struct vk_meta_device *device, bool use_gs);
+
+struct nir_shader *
+vk_meta_draw_rects_gs_nir(struct vk_meta_device *device);
+
+static inline void
+vk_meta_rendering_info_copy(struct vk_meta_rendering_info *dst,
+ const struct vk_meta_rendering_info *src)
+{
+ dst->view_mask = src->view_mask;
+ dst->samples = src->samples;
+ dst->color_attachment_count = src->color_attachment_count;
+ for (uint32_t a = 0; a < src->color_attachment_count; a++)
+ dst->color_attachment_formats[a] = src->color_attachment_formats[a];
+ dst->depth_attachment_format = src->depth_attachment_format;
+ dst->stencil_attachment_format = src->stencil_attachment_format;
+}
+
+static inline VkImageViewType
+vk_image_sampled_view_type(const struct vk_image *image)
+{
+ switch (image->image_type) {
+ case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
+ default: unreachable("Invalid image type");
+ }
+}
+
+static inline VkImageViewType
+vk_image_render_view_type(const struct vk_image *image, uint32_t layer_count)
+{
+ switch (image->image_type) {
+ case VK_IMAGE_TYPE_1D:
+ return layer_count == 1 ? VK_IMAGE_VIEW_TYPE_1D :
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ case VK_IMAGE_TYPE_2D:
+ case VK_IMAGE_TYPE_3D:
+ return layer_count == 1 ? VK_IMAGE_VIEW_TYPE_2D :
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ default:
+ unreachable("Invalid image type");
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_META_PRIVATE_H */
diff --git a/src/vulkan/runtime/vk_nir.c b/src/vulkan/runtime/vk_nir.c
new file mode 100644
index 00000000000..c36d38b9634
--- /dev/null
+++ b/src/vulkan/runtime/vk_nir.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_nir.h"
+
+#include "compiler/nir/nir_xfb_info.h"
+#include "compiler/spirv/nir_spirv.h"
+#include "vk_log.h"
+#include "vk_util.h"
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
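+/* Returns the raw version word from the SPIR-V header. It encodes the
+ * version as 0x00MMmm00, e.g. 0x00010500 for SPIR-V 1.5.
+ */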
+uint32_t
+vk_spirv_version(const uint32_t *spirv_data, size_t spirv_size_B)
+{
+ assert(spirv_size_B >= 8);
+ assert(spirv_data[0] == SPIR_V_MAGIC_NUMBER);
+ return spirv_data[1];
+}
+
+static void
+spirv_nir_debug(void *private_data,
+ enum nir_spirv_debug_level level,
+ size_t spirv_offset,
+ const char *message)
+{
+ const struct vk_object_base *log_obj = private_data;
+
+ switch (level) {
+ case NIR_SPIRV_DEBUG_LEVEL_INFO:
+ //vk_logi(VK_LOG_OBJS(log_obj), "SPIR-V offset %lu: %s",
+ // (unsigned long) spirv_offset, message);
+ break;
+ case NIR_SPIRV_DEBUG_LEVEL_WARNING:
+ vk_logw(VK_LOG_OBJS(log_obj), "SPIR-V offset %lu: %s",
+ (unsigned long) spirv_offset, message);
+ break;
+ case NIR_SPIRV_DEBUG_LEVEL_ERROR:
+ vk_loge(VK_LOG_OBJS(log_obj), "SPIR-V offset %lu: %s",
+ (unsigned long) spirv_offset, message);
+ break;
+ default:
+ break;
+ }
+}
+
+bool
+nir_vk_is_not_xfb_output(nir_variable *var, void *data)
+{
+ if (var->data.mode != nir_var_shader_out)
+ return true;
+
+ /* From the Vulkan 1.3.259 spec:
+ *
+ * VUID-StandaloneSpirv-Offset-04716
+ *
+ * "Only variables or block members in the output interface decorated
+ * with Offset can be captured for transform feedback, and those
+ * variables or block members must also be decorated with XfbBuffer
+ * and XfbStride, or inherit XfbBuffer and XfbStride decorations from
+ * a block containing them"
+ *
+ * glslang generates gl_PerVertex builtins even when they are not declared;
+ * enabling XFB should not prevent them from being DCE'd.
+ *
+ * The logic should match nir_gather_xfb_info_with_varyings
+ */
+
+ if (!var->data.explicit_xfb_buffer)
+ return true;
+
+ bool is_array_block = var->interface_type != NULL &&
+ glsl_type_is_array(var->type) &&
+ glsl_without_array(var->type) == var->interface_type;
+
+ if (!is_array_block) {
+ return !var->data.explicit_offset;
+ } else {
+ /* For array of blocks we have to check each element */
+ unsigned aoa_size = glsl_get_aoa_size(var->type);
+ const struct glsl_type *itype = var->interface_type;
+ unsigned nfields = glsl_get_length(itype);
+ for (unsigned b = 0; b < aoa_size; b++) {
+ for (unsigned f = 0; f < nfields; f++) {
+ if (glsl_get_struct_field_offset(itype, f) >= 0)
+ return false;
+ }
+ }
+
+ return true;
+ }
+}
+
+nir_shader *
+vk_spirv_to_nir(struct vk_device *device,
+ const uint32_t *spirv_data, size_t spirv_size_B,
+ gl_shader_stage stage, const char *entrypoint_name,
+ enum gl_subgroup_size subgroup_size,
+ const VkSpecializationInfo *spec_info,
+ const struct spirv_to_nir_options *spirv_options,
+ const struct nir_shader_compiler_options *nir_options,
+ bool internal,
+ void *mem_ctx)
+{
+ assert(spirv_size_B >= 4 && spirv_size_B % 4 == 0);
+ assert(spirv_data[0] == SPIR_V_MAGIC_NUMBER);
+
+ struct spirv_to_nir_options spirv_options_local = *spirv_options;
+ spirv_options_local.debug.func = spirv_nir_debug;
+ spirv_options_local.debug.private_data = (void *)device;
+ spirv_options_local.subgroup_size = subgroup_size;
+
+ uint32_t num_spec_entries = 0;
+ struct nir_spirv_specialization *spec_entries =
+ vk_spec_info_to_nir_spirv(spec_info, &num_spec_entries);
+
+ nir_shader *nir = spirv_to_nir(spirv_data, spirv_size_B / 4,
+ spec_entries, num_spec_entries,
+ stage, entrypoint_name,
+ &spirv_options_local, nir_options);
+ free(spec_entries);
+
+ if (nir == NULL)
+ return NULL;
+
+ assert(nir->info.stage == stage);
+ nir_validate_shader(nir, "after spirv_to_nir");
+ nir_validate_ssa_dominance(nir, "after spirv_to_nir");
+ if (mem_ctx != NULL)
+ ralloc_steal(mem_ctx, nir);
+
+ nir->info.internal = internal;
+
+ /* We have to lower away local constant initializers right before we
+ * inline functions. That way they get properly initialized at the top
+ * of the function and not at the top of its caller.
+ */
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_returns);
+ NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_copy_prop);
+ NIR_PASS_V(nir, nir_opt_deref);
+
+ /* Pick off the single entrypoint that we want */
+ nir_remove_non_entrypoints(nir);
+
+ /* Now that we've deleted all but the main function, we can go ahead and
+ * lower the rest of the constant initializers. We do this here so that
+ * nir_remove_dead_variables and split_per_member_structs below see the
+ * corresponding stores.
+ */
+ NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
+
+ /* Split member structs. We do this before lower_io_to_temporaries so that
+ * it doesn't lower system values to temporaries by accident.
+ */
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_split_per_member_structs);
+
+ nir_remove_dead_variables_options dead_vars_opts = {
+ .can_remove_var = nir_vk_is_not_xfb_output,
+ };
+ NIR_PASS_V(nir, nir_remove_dead_variables,
+ nir_var_shader_in | nir_var_shader_out | nir_var_system_value |
+ nir_var_shader_call_data | nir_var_ray_hit_attrib,
+ &dead_vars_opts);
+
+ /* This needs to happen after remove_dead_vars because GLSLang likes to
+ * insert dead clip/cull vars and we don't want to clip/cull based on
+ * uninitialized garbage.
+ */
+ NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
+
+ if (nir->info.stage == MESA_SHADER_VERTEX ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL ||
+ nir->info.stage == MESA_SHADER_GEOMETRY)
+ NIR_PASS_V(nir, nir_shader_gather_xfb_info);
+
+ NIR_PASS_V(nir, nir_propagate_invariant, false);
+
+ return nir;
+}
diff --git a/src/vulkan/runtime/vk_nir.h b/src/vulkan/runtime/vk_nir.h
new file mode 100644
index 00000000000..48b1ba8915e
--- /dev/null
+++ b/src/vulkan/runtime/vk_nir.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_NIR_H
+#define VK_NIR_H
+
+#include "nir.h"
+#include "vulkan/vulkan_core.h"
+
+struct spirv_to_nir_options;
+struct vk_device;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+uint32_t vk_spirv_version(const uint32_t *spirv_data, size_t spirv_size_B);
+
+bool
+nir_vk_is_not_xfb_output(nir_variable *var, void *data);
+
+nir_shader *
+vk_spirv_to_nir(struct vk_device *device,
+ const uint32_t *spirv_data, size_t spirv_size_B,
+ gl_shader_stage stage, const char *entrypoint_name,
+ enum gl_subgroup_size subgroup_size,
+ const VkSpecializationInfo *spec_info,
+ const struct spirv_to_nir_options *spirv_options,
+ const struct nir_shader_compiler_options *nir_options,
+ bool internal,
+ void *mem_ctx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_NIR_H */
diff --git a/src/vulkan/runtime/vk_nir_convert_ycbcr.c b/src/vulkan/runtime/vk_nir_convert_ycbcr.c
new file mode 100644
index 00000000000..8a660954284
--- /dev/null
+++ b/src/vulkan/runtime/vk_nir_convert_ycbcr.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_nir_convert_ycbcr.h"
+
+#include "vk_format.h"
+#include "vk_ycbcr_conversion.h"
+
+#include <math.h>
+
+static nir_def *
+y_range(nir_builder *b,
+ nir_def *y_channel,
+ int bpc,
+ VkSamplerYcbcrRange range)
+{
+ switch (range) {
+ case VK_SAMPLER_YCBCR_RANGE_ITU_FULL:
+ return y_channel;
+ case VK_SAMPLER_YCBCR_RANGE_ITU_NARROW:
+ return nir_fmul_imm(b,
+ nir_fadd_imm(b,
+ nir_fmul_imm(b, y_channel,
+ pow(2, bpc) - 1),
+ -16.0f * pow(2, bpc - 8)),
+ 1.0f / (219.0f * pow(2, bpc - 8)));
+
+ default:
+ unreachable("missing Ycbcr range");
+ return NULL;
+ }
+}
+
+static nir_def *
+chroma_range(nir_builder *b,
+ nir_def *chroma_channel,
+ int bpc,
+ VkSamplerYcbcrRange range)
+{
+ switch (range) {
+ case VK_SAMPLER_YCBCR_RANGE_ITU_FULL:
+ return nir_fadd(b, chroma_channel,
+ nir_imm_float(b, -pow(2, bpc - 1) / (pow(2, bpc) - 1.0f)));
+ case VK_SAMPLER_YCBCR_RANGE_ITU_NARROW:
+ return nir_fmul_imm(b,
+ nir_fadd_imm(b,
+ nir_fmul_imm(b, chroma_channel,
+ pow(2, bpc) - 1),
+ -128.0f * pow(2, bpc - 8)),
+ 1.0f / (224.0f * pow(2, bpc - 8)));
+ default:
+ unreachable("missing Ycbcr range");
+ return NULL;
+ }
+}
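+/* For 8-bit narrow (ITU "limited") range content the expressions above
+ * reduce to the familiar scaling equations:
+ *
+ *    Y' = (Y * 255 -  16) / 219
+ *    C' = (C * 255 - 128) / 224
+ *
+ * mapping code values 16..235 (luma) and 16..240 (chroma) onto [0, 1] and
+ * [-0.5, 0.5] respectively, while full range only recenters chroma.
+ */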
+
+typedef struct nir_const_value_3_4 {
+ nir_const_value v[3][4];
+} nir_const_value_3_4;
+
+static const nir_const_value_3_4 *
+ycbcr_model_to_rgb_matrix(VkSamplerYcbcrModelConversion model)
+{
+ switch (model) {
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601: {
+ static const nir_const_value_3_4 bt601 = { {
+ { { .f32 = 1.402f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
+ { { .f32 = -0.714136286201022f }, { .f32 = 1.0f }, { .f32 = -0.344136286201022f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.772f }, { .f32 = 0.0f } },
+ } };
+
+ return &bt601;
+ }
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709: {
+ static const nir_const_value_3_4 bt709 = { {
+ { { .f32 = 1.5748031496063f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
+ { { .f32 = -0.468125209181067f }, { .f32 = 1.0f }, { .f32 = -0.187327487470334f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.85563184264242f }, { .f32 = 0.0f } },
+ } };
+
+ return &bt709;
+ }
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020: {
+ static const nir_const_value_3_4 bt2020 = { {
+ { { .f32 = 1.4746f }, { .f32 = 1.0f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
+ { { .f32 = -0.571353126843658f }, { .f32 = 1.0f }, { .f32 = -0.164553126843658f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = 1.0f }, { .f32 = 1.8814f }, { .f32 = 0.0f } },
+ } };
+
+ return &bt2020;
+ }
+ default:
+ unreachable("missing Ycbcr model");
+ return NULL;
+ }
+}
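+/* Each row above is dotted with the (Cr, Y, Cb, A) vector assembled below,
+ * i.e. it is the standard conversion derived from each model's Kr/Kb luma
+ * coefficients:
+ *
+ *    R = Y + 2 (1 - Kr) Cr
+ *    G = Y - (2 Kr (1 - Kr) / Kg) Cr - (2 Kb (1 - Kb) / Kg) Cb, Kg = 1 - Kr - Kb
+ *    B = Y + 2 (1 - Kb) Cb
+ *
+ * e.g. BT.601 uses Kr = 0.299, Kb = 0.114, giving the 1.402/1.772 factors.
+ */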
+
+nir_def *
+nir_convert_ycbcr_to_rgb(nir_builder *b,
+ VkSamplerYcbcrModelConversion model,
+ VkSamplerYcbcrRange range,
+ nir_def *raw_channels,
+ uint32_t *bpcs)
+{
+ nir_def *expanded_channels =
+ nir_vec4(b,
+ chroma_range(b, nir_channel(b, raw_channels, 0), bpcs[0], range),
+ y_range(b, nir_channel(b, raw_channels, 1), bpcs[1], range),
+ chroma_range(b, nir_channel(b, raw_channels, 2), bpcs[2], range),
+ nir_channel(b, raw_channels, 3));
+
+ if (model == VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY)
+ return expanded_channels;
+
+ const nir_const_value_3_4 *conversion_matrix =
+ ycbcr_model_to_rgb_matrix(model);
+
+ nir_def *converted_channels[] = {
+ nir_fdot(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[0])),
+ nir_fdot(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[1])),
+ nir_fdot(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[2]))
+ };
+
+ return nir_vec4(b,
+ converted_channels[0], converted_channels[1],
+ converted_channels[2], nir_channel(b, raw_channels, 3));
+}
+
+struct ycbcr_state {
+ nir_builder *builder;
+ nir_def *image_size;
+ nir_tex_instr *origin_tex;
+ nir_deref_instr *tex_deref;
+ const struct vk_ycbcr_conversion_state *conversion;
+ const struct vk_format_ycbcr_info *format_ycbcr_info;
+};
+
+/* TODO: we should probably replace this with a push constant/uniform. */
+static nir_def *
+get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
+{
+ if (state->image_size)
+ return state->image_size;
+
+ nir_builder *b = state->builder;
+ const struct glsl_type *type = texture->type;
+ nir_tex_instr *tex = nir_tex_instr_create(b->shader, 1);
+
+ tex->op = nir_texop_txs;
+ tex->sampler_dim = glsl_get_sampler_dim(type);
+ tex->is_array = glsl_sampler_type_is_array(type);
+ tex->is_shadow = glsl_sampler_type_is_shadow(type);
+ tex->dest_type = nir_type_int32;
+
+ tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
+ &texture->def);
+
+ nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32);
+ nir_builder_instr_insert(b, &tex->instr);
+
+ state->image_size = nir_i2f32(b, &tex->def);
+
+ return state->image_size;
+}
+
+static nir_def *
+implicit_downsampled_coord(nir_builder *b,
+ nir_def *value,
+ nir_def *max_value,
+ int div_scale)
+{
+ return nir_fadd(b,
+ value,
+ nir_frcp(b,
+ nir_fmul(b,
+ nir_imm_float(b, div_scale),
+ max_value)));
+}
+
+static nir_def *
+implicit_downsampled_coords(struct ycbcr_state *state,
+ nir_def *old_coords,
+ const struct vk_format_ycbcr_plane *format_plane)
+{
+ nir_builder *b = state->builder;
+ const struct vk_ycbcr_conversion_state *conversion = state->conversion;
+ nir_def *image_size = get_texture_size(state, state->tex_deref);
+ nir_def *comp[4] = { NULL, };
+ int c;
+
+ for (c = 0; c < ARRAY_SIZE(conversion->chroma_offsets); c++) {
+ if (format_plane->denominator_scales[c] > 1 &&
+ conversion->chroma_offsets[c] == VK_CHROMA_LOCATION_COSITED_EVEN) {
+ comp[c] = implicit_downsampled_coord(b,
+ nir_channel(b, old_coords, c),
+ nir_channel(b, image_size, c),
+ format_plane->denominator_scales[c]);
+ } else {
+ comp[c] = nir_channel(b, old_coords, c);
+ }
+ }
+
+ /* Leave other coordinates untouched */
+ for (; c < old_coords->num_components; c++)
+ comp[c] = nir_channel(b, old_coords, c);
+
+ return nir_vec(b, comp, old_coords->num_components);
+}
+
+static nir_def *
+create_plane_tex_instr_implicit(struct ycbcr_state *state,
+ uint32_t plane)
+{
+ nir_builder *b = state->builder;
+ const struct vk_ycbcr_conversion_state *conversion = state->conversion;
+ const struct vk_format_ycbcr_plane *format_plane =
+ &state->format_ycbcr_info->planes[plane];
+ nir_tex_instr *old_tex = state->origin_tex;
+ nir_tex_instr *tex = nir_tex_instr_create(b->shader, old_tex->num_srcs + 1);
+
+ for (uint32_t i = 0; i < old_tex->num_srcs; i++) {
+ tex->src[i].src_type = old_tex->src[i].src_type;
+
+ switch (old_tex->src[i].src_type) {
+ case nir_tex_src_coord:
+ if (format_plane->has_chroma && conversion->chroma_reconstruction) {
+ tex->src[i].src =
+ nir_src_for_ssa(implicit_downsampled_coords(state,
+ old_tex->src[i].src.ssa,
+ format_plane));
+ break;
+ }
+ FALLTHROUGH;
+ default:
+ tex->src[i].src = nir_src_for_ssa(old_tex->src[i].src.ssa);
+ break;
+ }
+ }
+ tex->src[tex->num_srcs - 1] = nir_tex_src_for_ssa(nir_tex_src_plane,
+ nir_imm_int(b, plane));
+ tex->sampler_dim = old_tex->sampler_dim;
+ tex->dest_type = old_tex->dest_type;
+
+ tex->op = old_tex->op;
+ tex->coord_components = old_tex->coord_components;
+ tex->is_new_style_shadow = old_tex->is_new_style_shadow;
+ tex->component = old_tex->component;
+
+ tex->texture_index = old_tex->texture_index;
+ tex->sampler_index = old_tex->sampler_index;
+ tex->is_array = old_tex->is_array;
+
+ nir_def_init(&tex->instr, &tex->def, old_tex->def.num_components,
+ old_tex->def.bit_size);
+ nir_builder_instr_insert(b, &tex->instr);
+
+ return &tex->def;
+}
+
+static unsigned
+swizzle_to_component(VkComponentSwizzle swizzle)
+{
+ switch (swizzle) {
+ case VK_COMPONENT_SWIZZLE_R:
+ return 0;
+ case VK_COMPONENT_SWIZZLE_G:
+ return 1;
+ case VK_COMPONENT_SWIZZLE_B:
+ return 2;
+ case VK_COMPONENT_SWIZZLE_A:
+ return 3;
+ default:
+ unreachable("invalid channel");
+ return 0;
+ }
+}
+
+struct lower_ycbcr_tex_state {
+ nir_vk_ycbcr_conversion_lookup_cb cb;
+ const void *cb_data;
+};
+
+static bool
+lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state)
+{
+ const struct lower_ycbcr_tex_state *state = _state;
+
+ if (instr->type != nir_instr_type_tex)
+ return false;
+
+ nir_tex_instr *tex = nir_instr_as_tex(instr);
+
+ /* For the following instructions, we don't apply any change and let the
+ * instruction apply to the first plane.
+ */
+ if (tex->op == nir_texop_txs ||
+ tex->op == nir_texop_query_levels ||
+ tex->op == nir_texop_lod)
+ return false;
+
+ int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
+ assert(deref_src_idx >= 0);
+ nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
+
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+ uint32_t set = var->data.descriptor_set;
+ uint32_t binding = var->data.binding;
+
+ assert(tex->texture_index == 0);
+ unsigned array_index = 0;
+ if (deref->deref_type != nir_deref_type_var) {
+ assert(deref->deref_type == nir_deref_type_array);
+ if (!nir_src_is_const(deref->arr.index))
+ return false;
+ array_index = nir_src_as_uint(deref->arr.index);
+ }
+
+ const struct vk_ycbcr_conversion_state *conversion =
+ state->cb(state->cb_data, set, binding, array_index);
+ if (conversion == NULL)
+ return false;
+
+ const struct vk_format_ycbcr_info *format_ycbcr_info =
+ vk_format_get_ycbcr_info(conversion->format);
+
+ /* This can happen if the driver hasn't done a good job of filtering on
+ * sampler creation and lets through a VkYcbcrConversion object which isn't
+ * actually YCbCr. We're supposed to ignore those.
+ */
+ if (format_ycbcr_info == NULL)
+ return false;
+
+ b->cursor = nir_before_instr(&tex->instr);
+
+ VkFormat y_format = VK_FORMAT_UNDEFINED;
+ for (uint32_t p = 0; p < format_ycbcr_info->n_planes; p++) {
+ if (!format_ycbcr_info->planes[p].has_chroma)
+ y_format = format_ycbcr_info->planes[p].format;
+ }
+ assert(y_format != VK_FORMAT_UNDEFINED);
+ const struct util_format_description *y_format_desc =
+ util_format_description(vk_format_to_pipe_format(y_format));
+ uint8_t y_bpc = y_format_desc->channel[0].size;
+
+ /* |ycbcr_comp| holds components in the order: Cr-Y-Cb */
+ nir_def *zero = nir_imm_float(b, 0.0f);
+ nir_def *one = nir_imm_float(b, 1.0f);
+ /* Use extra 2 channels for following swizzle */
+ nir_def *ycbcr_comp[5] = { zero, zero, zero, one, zero };
+
+ uint8_t ycbcr_bpcs[5];
+ memset(ycbcr_bpcs, y_bpc, sizeof(ycbcr_bpcs));
+
+ /* Go through all the planes and gather the samples into a |ycbcr_comp|
+ * while applying a swizzle required by the spec:
+ *
+ * R, G, B should respectively map to Cr, Y, Cb
+ */
+ for (uint32_t p = 0; p < format_ycbcr_info->n_planes; p++) {
+ const struct vk_format_ycbcr_plane *format_plane =
+ &format_ycbcr_info->planes[p];
+
+ struct ycbcr_state tex_state = {
+ .builder = b,
+ .origin_tex = tex,
+ .tex_deref = deref,
+ .conversion = conversion,
+ .format_ycbcr_info = format_ycbcr_info,
+ };
+ nir_def *plane_sample = create_plane_tex_instr_implicit(&tex_state, p);
+
+ for (uint32_t pc = 0; pc < 4; pc++) {
+ VkComponentSwizzle ycbcr_swizzle = format_plane->ycbcr_swizzle[pc];
+ if (ycbcr_swizzle == VK_COMPONENT_SWIZZLE_ZERO)
+ continue;
+
+ unsigned ycbcr_component = swizzle_to_component(ycbcr_swizzle);
+ ycbcr_comp[ycbcr_component] = nir_channel(b, plane_sample, pc);
+
+ /* Also compute the number of bits for each component. */
+ const struct util_format_description *plane_format_desc =
+ util_format_description(vk_format_to_pipe_format(format_plane->format));
+ ycbcr_bpcs[ycbcr_component] = plane_format_desc->channel[pc].size;
+ }
+ }
+
+ /* Now remap components to the order specified by the conversion. */
+ nir_def *swizzled_comp[4] = { NULL, };
+ uint32_t swizzled_bpcs[4] = { 0, };
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(conversion->mapping); i++) {
+ /* Maps to components in |ycbcr_comp| */
+ static const uint32_t swizzle_mapping[] = {
+ [VK_COMPONENT_SWIZZLE_ZERO] = 4,
+ [VK_COMPONENT_SWIZZLE_ONE] = 3,
+ [VK_COMPONENT_SWIZZLE_R] = 0,
+ [VK_COMPONENT_SWIZZLE_G] = 1,
+ [VK_COMPONENT_SWIZZLE_B] = 2,
+ [VK_COMPONENT_SWIZZLE_A] = 3,
+ };
+ const VkComponentSwizzle m = conversion->mapping[i];
+
+ if (m == VK_COMPONENT_SWIZZLE_IDENTITY) {
+ swizzled_comp[i] = ycbcr_comp[i];
+ swizzled_bpcs[i] = ycbcr_bpcs[i];
+ } else {
+ swizzled_comp[i] = ycbcr_comp[swizzle_mapping[m]];
+ swizzled_bpcs[i] = ycbcr_bpcs[swizzle_mapping[m]];
+ }
+ }
+
+ nir_def *result = nir_vec(b, swizzled_comp, 4);
+ if (conversion->ycbcr_model != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) {
+ result = nir_convert_ycbcr_to_rgb(b, conversion->ycbcr_model,
+ conversion->ycbcr_range,
+ result,
+ swizzled_bpcs);
+ }
+
+ nir_def_rewrite_uses(&tex->def, result);
+ nir_instr_remove(&tex->instr);
+
+ return true;
+}
+
+bool nir_vk_lower_ycbcr_tex(nir_shader *nir,
+ nir_vk_ycbcr_conversion_lookup_cb cb,
+ const void *cb_data)
+{
+ struct lower_ycbcr_tex_state state = {
+ .cb = cb,
+ .cb_data = cb_data,
+ };
+
+ return nir_shader_instructions_pass(nir, lower_ycbcr_tex_instr,
+ nir_metadata_block_index |
+ nir_metadata_dominance,
+ &state);
+}
diff --git a/src/vulkan/runtime/vk_nir_convert_ycbcr.h b/src/vulkan/runtime/vk_nir_convert_ycbcr.h
new file mode 100644
index 00000000000..b17a8cb83c7
--- /dev/null
+++ b/src/vulkan/runtime/vk_nir_convert_ycbcr.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright © 2020 Jonathan Marek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_NIR_CONVERT_YCBCR_H
+#define VK_NIR_CONVERT_YCBCR_H
+
+#include "nir.h"
+#include "nir_builder.h"
+#include "vulkan/vulkan_core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+nir_def *
+nir_convert_ycbcr_to_rgb(nir_builder *b,
+ VkSamplerYcbcrModelConversion model,
+ VkSamplerYcbcrRange range,
+ nir_def *raw_channels,
+ uint32_t *bpcs);
+
+struct vk_ycbcr_conversion;
+
+typedef const struct vk_ycbcr_conversion_state *
+ (*nir_vk_ycbcr_conversion_lookup_cb)(const void *data, uint32_t set,
+ uint32_t binding, uint32_t array_index);
+
+bool nir_vk_lower_ycbcr_tex(nir_shader *nir,
+ nir_vk_ycbcr_conversion_lookup_cb cb,
+ const void *cb_data);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* VK_NIR_CONVERT_YCBCR_H */
diff --git a/src/vulkan/util/vk_object.c b/src/vulkan/runtime/vk_object.c
index c6921ca5e62..7015342924e 100644
--- a/src/vulkan/util/vk_object.c
+++ b/src/vulkan/runtime/vk_object.c
@@ -25,38 +25,61 @@
#include "vk_alloc.h"
#include "vk_common_entrypoints.h"
+#include "vk_instance.h"
#include "vk_device.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
+#include "vk_enum_to_str.h"
-static void
-vk_object_base_reinit(struct vk_object_base *base)
+void
+vk_object_base_init(struct vk_device *device,
+ struct vk_object_base *base,
+ VkObjectType obj_type)
{
base->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ base->type = obj_type;
+ base->client_visible = false;
+ base->device = device;
+ base->instance = NULL;
+ base->object_name = NULL;
util_sparse_array_init(&base->private_data, sizeof(uint64_t), 8);
}
-void
-vk_object_base_init(struct vk_device *device,
- struct vk_object_base *base,
- UNUSED VkObjectType obj_type)
+void vk_object_base_instance_init(struct vk_instance *instance,
+ struct vk_object_base *base,
+ VkObjectType obj_type)
{
- vk_object_base_reinit(base);
+ base->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
base->type = obj_type;
- base->device = device;
+ base->client_visible = false;
+ base->device = NULL;
+ base->instance = instance;
+ base->object_name = NULL;
+ util_sparse_array_init(&base->private_data, sizeof(uint64_t), 8);
}
void
vk_object_base_finish(struct vk_object_base *base)
{
util_sparse_array_finish(&base->private_data);
+
+ if (base->object_name == NULL)
+ return;
+
+ assert(base->device != NULL || base->instance != NULL);
+ if (base->device)
+ vk_free(&base->device->alloc, base->object_name);
+ else
+ vk_free(&base->instance->alloc, base->object_name);
}
void
-vk_object_base_reset(struct vk_object_base *base)
+vk_object_base_recycle(struct vk_object_base *base)
{
+ struct vk_device *device = base->device;
+ VkObjectType obj_type = base->type;
vk_object_base_finish(base);
- vk_object_base_reinit(base);
+ vk_object_base_init(device, base, obj_type);
}
void *
@@ -134,9 +157,9 @@ vk_object_free(struct vk_device *device,
VkResult
vk_private_data_slot_create(struct vk_device *device,
- const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+ const VkPrivateDataSlotCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- VkPrivateDataSlotEXT* pPrivateDataSlot)
+ VkPrivateDataSlot* pPrivateDataSlot)
{
struct vk_private_data_slot *slot =
vk_alloc2(&device->alloc, pAllocator, sizeof(*slot), 8,
@@ -145,7 +168,7 @@ vk_private_data_slot_create(struct vk_device *device,
return VK_ERROR_OUT_OF_HOST_MEMORY;
vk_object_base_init(device, &slot->base,
- VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT);
+ VK_OBJECT_TYPE_PRIVATE_DATA_SLOT);
slot->index = p_atomic_inc_return(&device->private_data_next_index);
*pPrivateDataSlot = vk_private_data_slot_to_handle(slot);
@@ -155,7 +178,7 @@ vk_private_data_slot_create(struct vk_device *device,
void
vk_private_data_slot_destroy(struct vk_device *device,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(vk_private_data_slot, slot, privateDataSlot);
@@ -166,7 +189,6 @@ vk_private_data_slot_destroy(struct vk_device *device,
vk_free2(&device->alloc, pAllocator, slot);
}
-#ifdef ANDROID
static VkResult
get_swapchain_private_data_locked(struct vk_device *device,
uint64_t objectHandle,
@@ -174,8 +196,8 @@ get_swapchain_private_data_locked(struct vk_device *device,
uint64_t **private_data)
{
if (unlikely(device->swapchain_private == NULL)) {
- /* Even though VkSwapchain is a non-dispatchable object, we know a
- * priori that Android swapchains are actually pointers so we can use
+ /* Even though VkSwapchain/Surface are non-dispatchable objects, we know
+ * a priori that these are actually pointers so we can use
* the pointer hash table for them.
*/
device->swapchain_private = _mesa_pointer_hash_table_create(NULL);
@@ -203,34 +225,36 @@ get_swapchain_private_data_locked(struct vk_device *device,
return VK_SUCCESS;
}
-#endif /* ANDROID */
static VkResult
vk_object_base_private_data(struct vk_device *device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t **private_data)
{
VK_FROM_HANDLE(vk_private_data_slot, slot, privateDataSlot);
-#ifdef ANDROID
/* There is an annoying spec corner here on Android. Because WSI is
* implemented in the Vulkan loader which doesn't know about the
* VK_EXT_private_data extension, we have to handle VkSwapchainKHR in the
* driver as a special case. On future versions of Android where the
* loader does understand VK_EXT_private_data, we'll never see a
- * vkGet/SetPrivateDataEXT call on a swapchain because the loader will
+ * vkGet/SetPrivateData call on a swapchain because the loader will
* handle it.
*/
- if (objectType == VK_OBJECT_TYPE_SWAPCHAIN_KHR) {
+#if DETECT_OS_ANDROID
+ if (objectType == VK_OBJECT_TYPE_SWAPCHAIN_KHR ||
+ objectType == VK_OBJECT_TYPE_SURFACE_KHR) {
+#else
+ if (objectType == VK_OBJECT_TYPE_SURFACE_KHR) {
+#endif
mtx_lock(&device->swapchain_private_mtx);
VkResult result = get_swapchain_private_data_locked(device, objectHandle,
slot, private_data);
mtx_unlock(&device->swapchain_private_mtx);
return result;
}
-#endif /* ANDROID */
struct vk_object_base *obj =
vk_object_base_from_u64_handle(objectHandle, objectType);
@@ -243,7 +267,7 @@ VkResult
vk_object_base_set_private_data(struct vk_device *device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t data)
{
uint64_t *private_data;
@@ -262,7 +286,7 @@ void
vk_object_base_get_private_data(struct vk_device *device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t *pData)
{
uint64_t *private_data;
@@ -278,10 +302,10 @@ vk_object_base_get_private_data(struct vk_device *device,
}
VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_CreatePrivateDataSlotEXT(VkDevice _device,
- const VkPrivateDataSlotCreateInfoEXT *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkPrivateDataSlotEXT *pPrivateDataSlot)
+vk_common_CreatePrivateDataSlot(VkDevice _device,
+ const VkPrivateDataSlotCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPrivateDataSlot *pPrivateDataSlot)
{
VK_FROM_HANDLE(vk_device, device, _device);
return vk_private_data_slot_create(device, pCreateInfo, pAllocator,
@@ -289,20 +313,20 @@ vk_common_CreatePrivateDataSlotEXT(VkDevice _device,
}
VKAPI_ATTR void VKAPI_CALL
-vk_common_DestroyPrivateDataSlotEXT(VkDevice _device,
- VkPrivateDataSlotEXT privateDataSlot,
- const VkAllocationCallbacks *pAllocator)
+vk_common_DestroyPrivateDataSlot(VkDevice _device,
+ VkPrivateDataSlot privateDataSlot,
+ const VkAllocationCallbacks *pAllocator)
{
VK_FROM_HANDLE(vk_device, device, _device);
vk_private_data_slot_destroy(device, privateDataSlot, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_SetPrivateDataEXT(VkDevice _device,
- VkObjectType objectType,
- uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
- uint64_t data)
+vk_common_SetPrivateData(VkDevice _device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ VkPrivateDataSlot privateDataSlot,
+ uint64_t data)
{
VK_FROM_HANDLE(vk_device, device, _device);
return vk_object_base_set_private_data(device,
@@ -311,14 +335,29 @@ vk_common_SetPrivateDataEXT(VkDevice _device,
}
VKAPI_ATTR void VKAPI_CALL
-vk_common_GetPrivateDataEXT(VkDevice _device,
- VkObjectType objectType,
- uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
- uint64_t *pData)
+vk_common_GetPrivateData(VkDevice _device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ VkPrivateDataSlot privateDataSlot,
+ uint64_t *pData)
{
VK_FROM_HANDLE(vk_device, device, _device);
vk_object_base_get_private_data(device,
objectType, objectHandle,
privateDataSlot, pData);
}
+
+const char *
+vk_object_base_name(struct vk_object_base *obj)
+{
+ if (obj->object_name)
+ return obj->object_name;
+
+ obj->object_name = vk_asprintf(&obj->device->alloc,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
+ "%s(0x%"PRIx64")",
+ vk_ObjectType_to_ObjectName(obj->type),
+ (uint64_t)(uintptr_t)obj);
+
+ return obj->object_name;
+}
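As an illustrative sketch only (not part of this change; drv_warn_on_object is a hypothetical helper), vk_object_base_name() can be used to label objects in driver diagnostics:

#include <stdio.h>

/* Prints a warning prefixed with a lazily-built label such as
 * "VkImage(0x7f3a12345678)".  vk_object_base_name() caches the string on the
 * object, so repeated calls are cheap.
 */
static void
drv_warn_on_object(struct vk_object_base *obj, const char *msg)
{
   fprintf(stderr, "%s: %s\n", vk_object_base_name(obj), msg);
}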
diff --git a/src/vulkan/util/vk_object.h b/src/vulkan/runtime/vk_object.h
index c9c751ae261..c94c7050215 100644
--- a/src/vulkan/util/vk_object.h
+++ b/src/vulkan/runtime/vk_object.h
@@ -23,10 +23,11 @@
#ifndef VK_OBJECT_H
#define VK_OBJECT_H
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
#include <vulkan/vk_icd.h>
#include "c11/threads.h"
+#include "util/detect_os.h"
#include "util/macros.h"
#include "util/sparse_array.h"
@@ -38,21 +39,79 @@ struct hash_table;
struct vk_device;
+/** Base struct for all Vulkan objects */
struct vk_object_base {
VK_LOADER_DATA _loader_data;
+
+ /** Type of this object
+ *
+ * This is used for runtime type checking when casting to and from Vulkan
+ * handle types since compile-time type checking doesn't always work.
+ */
VkObjectType type;
+ /* True if this object is fully constructed and visible to the client */
+ bool client_visible;
+
+ /** Pointer to the device in which this object exists, if any
+ *
+ * This is NULL for instances and physical devices but should point to a
+ * valid vk_device for almost everything else. (There are a few WSI
+ * objects that don't inherit from a device.)
+ */
struct vk_device *device;
+ /** Pointer to the instance in which this object exists
+ *
+ * This is NULL for device-level objects as its main purpose is to make
+ * the instance allocator reachable for freeing data owned by
+ * instance-level objects.
+ */
+ struct vk_instance *instance;
+
/* For VK_EXT_private_data */
struct util_sparse_array private_data;
+
+ /* VK_EXT_debug_utils */
+ char *object_name;
};
-void vk_object_base_init(UNUSED struct vk_device *device,
+/** Initialize a vk_object_base
+ *
+ * :param device: |in| The vk_device this object was created from or NULL
+ * :param base: |out| The vk_object_base to initialize
+ * :param obj_type: |in| The VkObjectType of the object being initialized
+ */
+void vk_object_base_init(struct vk_device *device,
struct vk_object_base *base,
- UNUSED VkObjectType obj_type);
-void vk_object_base_finish(UNUSED struct vk_object_base *base);
-void vk_object_base_reset(struct vk_object_base *base);
+ VkObjectType obj_type);
+
+/** Initialize a vk_object_base for an instance-level object
+ *
+ * :param instance: |in| The vk_instance this object was created from
+ * :param base: |out| The vk_object_base to initialize
+ * :param obj_type: |in| The VkObjectType of the object being initialized
+ */
+void vk_object_base_instance_init(struct vk_instance *instance,
+ struct vk_object_base *base,
+ VkObjectType obj_type);
+
+/** Tear down a vk_object_base
+ *
+ * :param base: |out| The vk_object_base being torn down
+ */
+void vk_object_base_finish(struct vk_object_base *base);
+
+/** Recycles a vk_object_base
+ *
+ * This should be called when an object is recycled and handed back to the
+ * client as if it were a new object. When it's called is not important as
+ * long as it's called between when the client thinks the object was destroyed
+ * and when the client sees it again as a supposedly new object.
+ *
+ * :param base: |inout| The vk_object_base being recycled
+ */
+void vk_object_base_recycle(struct vk_object_base *base);
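As a hypothetical sketch of the intended pattern (drv_thing and its free-list helpers are illustrative, not part of the runtime), a driver that keeps destroyed objects on a free list would recycle the base before handing one back out:

static struct drv_thing *
drv_thing_pop_recycled(struct drv_device *dev)
{
   struct drv_thing *thing = drv_device_pop_free_thing(dev); /* hypothetical */
   if (thing == NULL)
      return NULL;

   /* Drops private data and any VK_EXT_debug_utils name from the previous
    * incarnation and re-initializes the base in place.
    */
   vk_object_base_recycle(&thing->base);
   return thing;
}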
static inline void
vk_object_base_assert_valid(ASSERTED struct vk_object_base *base,
@@ -69,6 +128,26 @@ vk_object_base_from_u64_handle(uint64_t handle, VkObjectType obj_type)
return base;
}
+/** Define handle cast macros for the given dispatchable handle type
+ *
+ * For a given `driver_struct`, this defines `driver_struct_to_handle()` and
+ * `driver_struct_from_handle()` helpers which provide type-safe (as much as
+ * possible with Vulkan handle types) casts to and from the `driver_struct`
+ * type. As an added layer of protection, these casts use the provided
+ * `VkObjectType` to assert that the object is of the correct type when
+ * running with a debug build.
+ *
+ * :param __driver_type: The name of the driver struct; it is assumed this is
+ * the name of a struct type and ``struct`` will be
+ * prepended automatically
+ *
+ * :param __base: The name of the vk_object_base member
+ *
+ * :param __VkType: The Vulkan object type such as VkImage
+ *
+ * :param __VK_TYPE: The VkObjectType corresponding to __VkType, such as
+ * VK_OBJECT_TYPE_IMAGE
+ */
#define VK_DEFINE_HANDLE_CASTS(__driver_type, __base, __VkType, __VK_TYPE) \
static inline struct __driver_type * \
__driver_type ## _from_handle(__VkType _handle) \
@@ -83,11 +162,33 @@ vk_object_base_from_u64_handle(uint64_t handle, VkObjectType obj_type)
__driver_type ## _to_handle(struct __driver_type *_obj) \
{ \
vk_object_base_assert_valid(&_obj->__base, __VK_TYPE); \
+ if (_obj != NULL) \
+ _obj->__base.client_visible = true; \
return (__VkType) _obj; \
}
+/** Define handle cast macros for the given non-dispatchable handle type
+ *
+ * For a given `driver_struct`, this defines `driver_struct_to_handle()` and
+ * `driver_struct_from_handle()` helpers which provide type-safe (as much as
+ * possible with Vulkan handle types) casts to and from the `driver_struct`
+ * type. As an added layer of protection, these casts use the provided
+ * `VkObjectType` to assert that the object is of the correct type when
+ * running with a debug build.
+ *
+ * :param __driver_type: The name of the driver struct; it is assumed this is
+ * the name of a struct type and ``struct`` will be
+ * prepended automatically
+ *
+ * :param __base: The name of the vk_object_base member
+ *
+ * :param __VkType: The Vulkan object type such as VkImage
+ *
+ * :param __VK_TYPE: The VkObjectType corresponding to __VkType, such as
+ * VK_OBJECT_TYPE_IMAGE
+ */
#define VK_DEFINE_NONDISP_HANDLE_CASTS(__driver_type, __base, __VkType, __VK_TYPE) \
- static inline struct __driver_type * \
+ UNUSED static inline struct __driver_type * \
__driver_type ## _from_handle(__VkType _handle) \
{ \
struct vk_object_base *base = \
@@ -97,13 +198,26 @@ vk_object_base_from_u64_handle(uint64_t handle, VkObjectType obj_type)
return (struct __driver_type *)base; \
} \
\
- static inline __VkType \
+ UNUSED static inline __VkType \
__driver_type ## _to_handle(struct __driver_type *_obj) \
{ \
vk_object_base_assert_valid(&_obj->__base, __VK_TYPE); \
+ if (_obj != NULL) \
+ _obj->__base.client_visible = true; \
return (__VkType)(uintptr_t) _obj; \
}
+/** Declares a __driver_type pointer which represents __handle
+ *
+ * :param __driver_type: The name of the driver struct; it is assumed this is
+ * the name of a struct type and ``struct`` will be
+ * prepended automatically
+ *
+ * :param __name: The name of the declared pointer
+ *
+ * :param __handle: The Vulkan object handle with which to initialize
+ * `__name`
+ */
#define VK_FROM_HANDLE(__driver_type, __name, __handle) \
struct __driver_type *__name = __driver_type ## _from_handle(__handle)
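For illustration only (the drv_image names are hypothetical), a driver typically instantiates these macros once per object type and then relies on VK_FROM_HANDLE() in its entrypoints:

struct drv_image {
   struct vk_object_base base;
   /* driver-specific fields ... */
};

/* Generates drv_image_from_handle() and drv_image_to_handle() with
 * debug-build type checking against VK_OBJECT_TYPE_IMAGE.
 */
VK_DEFINE_NONDISP_HANDLE_CASTS(drv_image, base, VkImage, VK_OBJECT_TYPE_IMAGE);

VKAPI_ATTR void VKAPI_CALL
drv_DestroyImage(VkDevice _device, VkImage _image,
                 const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(drv_image, image, _image);

   if (image == NULL)
      return;
   /* ... release driver resources, then free via vk_object_free() ... */
}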
@@ -145,31 +259,34 @@ struct vk_private_data_slot {
uint32_t index;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vk_private_data_slot, base,
- VkPrivateDataSlotEXT,
- VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT);
+ VkPrivateDataSlot,
+ VK_OBJECT_TYPE_PRIVATE_DATA_SLOT);
VkResult
vk_private_data_slot_create(struct vk_device *device,
- const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+ const VkPrivateDataSlotCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- VkPrivateDataSlotEXT* pPrivateDataSlot);
+ VkPrivateDataSlot* pPrivateDataSlot);
void
vk_private_data_slot_destroy(struct vk_device *device,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
const VkAllocationCallbacks *pAllocator);
VkResult
vk_object_base_set_private_data(struct vk_device *device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t data);
void
vk_object_base_get_private_data(struct vk_device *device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t *pData);
+const char *
+vk_object_base_name(struct vk_object_base *obj);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/vulkan/util/vk_physical_device.c b/src/vulkan/runtime/vk_physical_device.c
index e6504c88ec0..c524ee313da 100644
--- a/src/vulkan/util/vk_physical_device.c
+++ b/src/vulkan/runtime/vk_physical_device.c
@@ -30,21 +30,32 @@ VkResult
vk_physical_device_init(struct vk_physical_device *pdevice,
struct vk_instance *instance,
const struct vk_device_extension_table *supported_extensions,
+ const struct vk_features *supported_features,
+ const struct vk_properties *properties,
const struct vk_physical_device_dispatch_table *dispatch_table)
{
memset(pdevice, 0, sizeof(*pdevice));
- vk_object_base_init(NULL, &pdevice->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
+ vk_object_base_instance_init(instance, &pdevice->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
pdevice->instance = instance;
if (supported_extensions != NULL)
pdevice->supported_extensions = *supported_extensions;
+ if (supported_features != NULL)
+ pdevice->supported_features = *supported_features;
+
+ if (properties != NULL)
+ pdevice->properties = *properties;
+
pdevice->dispatch_table = *dispatch_table;
/* Add common entrypoints without overwriting driver-provided ones. */
vk_physical_device_dispatch_table_from_entrypoints(
&pdevice->dispatch_table, &vk_common_physical_device_entrypoints, false);
+ /* TODO */
+ pdevice->disk_cache = NULL;
+
return VK_SUCCESS;
}
@@ -81,7 +92,7 @@ vk_common_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
if (!pdevice->supported_extensions.extensions[i])
continue;
-#ifdef ANDROID
+#ifdef ANDROID_STRICT
if (!vk_android_allowed_device_extensions.extensions[i])
continue;
#endif
@@ -127,6 +138,37 @@ vk_common_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
}
VKAPI_ATTR void VKAPI_CALL
+vk_common_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+
+ if (!pQueueFamilyProperties) {
+ pdevice->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice,
+ pQueueFamilyPropertyCount,
+ NULL);
+ return;
+ }
+
+ STACK_ARRAY(VkQueueFamilyProperties2, props2, *pQueueFamilyPropertyCount);
+
+ for (unsigned i = 0; i < *pQueueFamilyPropertyCount; ++i) {
+ props2[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2;
+ props2[i].pNext = NULL;
+ }
+
+ pdevice->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice,
+ pQueueFamilyPropertyCount,
+ props2);
+
+ for (unsigned i = 0; i < *pQueueFamilyPropertyCount; ++i)
+ pQueueFamilyProperties[i] = props2[i].queueFamilyProperties;
+
+ STACK_ARRAY_FINISH(props2);
+}
+
+VKAPI_ATTR void VKAPI_CALL
vk_common_GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
@@ -139,21 +181,7 @@ vk_common_GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice,
pdevice->dispatch_table.GetPhysicalDeviceMemoryProperties2(physicalDevice,
&props2);
- /* dEQP-VK.api.info.get_physical_device_properties2.memory_properties memsets
- * the struct to 0xcd and expects that the unused array elements are
- * untouched.
- */
- pMemoryProperties->memoryHeapCount = props2.memoryProperties.memoryHeapCount;
- for (int i = 0; i < pMemoryProperties->memoryHeapCount; i++) {
- pMemoryProperties->memoryHeaps[i].flags = props2.memoryProperties.memoryHeaps[i].flags;
- pMemoryProperties->memoryHeaps[i].size = props2.memoryProperties.memoryHeaps[i].size;
- }
-
- pMemoryProperties->memoryTypeCount = props2.memoryProperties.memoryTypeCount;
- for (int i = 0; i < pMemoryProperties->memoryTypeCount; i++) {
- pMemoryProperties->memoryTypes[i].heapIndex = props2.memoryProperties.memoryTypes[i].heapIndex;
- pMemoryProperties->memoryTypes[i].propertyFlags = props2.memoryProperties.memoryTypes[i].propertyFlags;
- }
+ *pMemoryProperties = props2.memoryProperties;
}
VKAPI_ATTR void VKAPI_CALL
@@ -210,7 +238,7 @@ VKAPI_ATTR void VKAPI_CALL
vk_common_GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
- uint32_t samples,
+ VkSampleCountFlagBits samples,
VkImageUsageFlags usage,
VkImageTiling tiling,
uint32_t *pNumProperties,
@@ -252,3 +280,14 @@ vk_common_GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physical
STACK_ARRAY_FINISH(props2);
}
+
+/* VK_EXT_tooling_info */
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetPhysicalDeviceToolProperties(VkPhysicalDevice physicalDevice,
+ uint32_t *pToolCount,
+ VkPhysicalDeviceToolProperties *pToolProperties)
+{
+ VK_OUTARRAY_MAKE_TYPED(VkPhysicalDeviceToolProperties, out, pToolProperties, pToolCount);
+
+ return vk_outarray_status(&out);
+}
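The common implementation above deliberately reports zero tools. As a hypothetical sketch (tool name and purposes made up), a driver or layer exposing a tool would append entries inside the same function using the vk_outarray helpers:

   /* Fragment that would go before the return above. */
   vk_outarray_append_typed(VkPhysicalDeviceToolProperties, &out, t) {
      memset(t, 0, sizeof(*t));
      t->purposes = VK_TOOL_PURPOSE_PROFILING_BIT;
      snprintf(t->name, sizeof(t->name), "Example overlay");
      snprintf(t->version, sizeof(t->version), "1.0");
   }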
diff --git a/src/vulkan/runtime/vk_physical_device.h b/src/vulkan/runtime/vk_physical_device.h
new file mode 100644
index 00000000000..e7da1ec34da
--- /dev/null
+++ b/src/vulkan/runtime/vk_physical_device.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_PHYSICAL_DEVICE_H
+#define VK_PHYSICAL_DEVICE_H
+
+#include "vk_dispatch_table.h"
+#include "vk_extensions.h"
+#include "vk_object.h"
+#include "vk_physical_device_features.h"
+#include "vk_physical_device_properties.h"
+
+#include "util/list.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct disk_cache;
+struct wsi_device;
+struct vk_sync_type;
+struct vk_pipeline_cache_object_ops;
+
+/** Base struct for all VkPhysicalDevice implementations
+ */
+struct vk_physical_device {
+ struct vk_object_base base;
+
+ /* See vk_instance::pdevices::list */
+ struct list_head link;
+
+ /** Instance which is the parent of this physical device */
+ struct vk_instance *instance;
+
+ /** Table of all supported device extensions
+ *
+ * This table is initialized from the `supported_extensions` parameter
+ * passed to `vk_physical_device_init()` if not `NULL`. If a `NULL`
+ * extension table is passed, all extensions are initialized to false and
+ * it's the responsibility of the driver to populate the table. This may
+ * be useful if the driver's physical device initialization order is such
+ * that extension support cannot be determined until significant physical
+ * device setup work has already been done.
+ */
+ struct vk_device_extension_table supported_extensions;
+
+ /** Table of all supported features
+ *
+ * This table is initialized from the `supported_features` parameter
+ * passed to `vk_physical_device_init()` if not `NULL`. If a `NULL`
+ * features table is passed, all features are initialized to false and
+ * it's the responsibility of the driver to populate the table. This may
+ * be useful if the driver's physical device initialization order is such
+ * that feature support cannot be determined until significant physical
+ * device setup work has already been done.
+ */
+ struct vk_features supported_features;
+
+ /** Table of all physical device properties which is initialized similarly
+ * to supported_features
+ */
+ struct vk_properties properties;
+
+ /** Physical-device-level dispatch table */
+ struct vk_physical_device_dispatch_table dispatch_table;
+
+ /** Disk cache, or NULL */
+ struct disk_cache *disk_cache;
+
+ /** WSI device, or NULL */
+ struct wsi_device *wsi_device;
+
+ /** A null-terminated array of supported sync types, in priority order
+ *
+ * The common implementations of VkFence and VkSemaphore use this list to
+ * determine what vk_sync_type to use for each scenario. The list is
+ * walked and the first vk_sync_type matching the required criteria is taken.
+ * For instance, VkFence requires that it not be a timeline and support
+ * reset and CPU wait. If an external handle type is requested, that is
+ * considered just one more criterion.
+ */
+ const struct vk_sync_type *const *supported_sync_types;
+
+ /** A null-terminated array of supported pipeline cache object types
+ *
+ * The common implementation of VkPipelineCache uses this to remember the
+ * type of objects stored in the cache and deserialize them immediately
+ * when importing the cache. If an object type isn't in this list, then it
+ * will be loaded as a raw data object and then deserialized when we first
+ * look it up. Deserializing immediately avoids a copy but may be more
+ * expensive for objects that aren't hit.
+ */
+ const struct vk_pipeline_cache_object_ops *const *pipeline_cache_import_ops;
+};
+
+VK_DEFINE_HANDLE_CASTS(vk_physical_device, base, VkPhysicalDevice,
+ VK_OBJECT_TYPE_PHYSICAL_DEVICE);
+
+/** Initialize a vk_physical_device
+ *
+ * :param physical_device: |out| The physical device to initialize
+ * :param instance: |in| The instance which is the parent of this
+ * physical device
+ * :param supported_extensions: |in| Table of all device extensions supported
+ * by this physical device
+ * :param supported_features: |in| Table of all features supported by this
+ * physical device
+ * :param properties: |in| Table of all physical device properties
+ * :param dispatch_table: |in| Physical-device-level dispatch table
+ */
+VkResult MUST_CHECK
+vk_physical_device_init(struct vk_physical_device *physical_device,
+ struct vk_instance *instance,
+ const struct vk_device_extension_table *supported_extensions,
+ const struct vk_features *supported_features,
+ const struct vk_properties *properties,
+ const struct vk_physical_device_dispatch_table *dispatch_table);
+
+/** Tears down a vk_physical_device
+ *
+ * :param physical_device: |out| The physical device to tear down
+ */
+void
+vk_physical_device_finish(struct vk_physical_device *physical_device);
+
+VkResult
+vk_physical_device_check_device_features(struct vk_physical_device *physical_device,
+ const VkDeviceCreateInfo *pCreateInfo);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_PHYSICAL_DEVICE_H */
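As a usage sketch (the drv_* and pdev names are hypothetical), a driver embedding struct vk_physical_device fills its dispatch table and passes its extension, feature, and property tables to the initializer:

   struct vk_physical_device_dispatch_table dispatch_table;
   vk_physical_device_dispatch_table_from_entrypoints(
      &dispatch_table, &drv_physical_device_entrypoints, true);

   VkResult result = vk_physical_device_init(&pdev->vk, &instance->vk,
                                             &supported_extensions,
                                             &supported_features,
                                             &properties,
                                             &dispatch_table);
   if (result != VK_SUCCESS)
      return result;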
diff --git a/src/vulkan/runtime/vk_pipeline.c b/src/vulkan/runtime/vk_pipeline.c
new file mode 100644
index 00000000000..a22f682ecb9
--- /dev/null
+++ b/src/vulkan/runtime/vk_pipeline.c
@@ -0,0 +1,2186 @@
+/*
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_pipeline.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_command_buffer.h"
+#include "vk_descriptor_set_layout.h"
+#include "vk_device.h"
+#include "vk_graphics_state.h"
+#include "vk_log.h"
+#include "vk_nir.h"
+#include "vk_physical_device.h"
+#include "vk_pipeline_layout.h"
+#include "vk_shader.h"
+#include "vk_shader_module.h"
+#include "vk_util.h"
+
+#include "nir_serialize.h"
+
+#include "util/mesa-sha1.h"
+
+bool
+vk_pipeline_shader_stage_is_null(const VkPipelineShaderStageCreateInfo *info)
+{
+ if (info->module != VK_NULL_HANDLE)
+ return false;
+
+ vk_foreach_struct_const(ext, info->pNext) {
+ if (ext->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO ||
+ ext->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT)
+ return false;
+ }
+
+ return true;
+}
+
+static nir_shader *
+get_builtin_nir(const VkPipelineShaderStageCreateInfo *info)
+{
+ VK_FROM_HANDLE(vk_shader_module, module, info->module);
+
+ nir_shader *nir = NULL;
+ if (module != NULL) {
+ nir = module->nir;
+ } else {
+ const VkPipelineShaderStageNirCreateInfoMESA *nir_info =
+ vk_find_struct_const(info->pNext, PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA);
+ if (nir_info != NULL)
+ nir = nir_info->nir;
+ }
+
+ if (nir == NULL)
+ return NULL;
+
+ assert(nir->info.stage == vk_to_mesa_shader_stage(info->stage));
+ ASSERTED nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);
+ assert(strcmp(entrypoint->function->name, info->pName) == 0);
+ assert(info->pSpecializationInfo == NULL);
+
+ return nir;
+}
+
+static uint32_t
+get_required_subgroup_size(const void *info_pNext)
+{
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *rss_info =
+ vk_find_struct_const(info_pNext,
+ PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO);
+ return rss_info != NULL ? rss_info->requiredSubgroupSize : 0;
+}
+
+enum gl_subgroup_size
+vk_get_subgroup_size(uint32_t spirv_version,
+ gl_shader_stage stage,
+ const void *info_pNext,
+ bool allow_varying,
+ bool require_full)
+{
+ uint32_t req_subgroup_size = get_required_subgroup_size(info_pNext);
+ if (req_subgroup_size > 0) {
+ assert(util_is_power_of_two_nonzero(req_subgroup_size));
+ assert(req_subgroup_size >= 8 && req_subgroup_size <= 128);
+ return req_subgroup_size;
+ } else if (allow_varying || spirv_version >= 0x10600) {
+ /* Starting with SPIR-V 1.6, a varying subgroup size is the default */
+ return SUBGROUP_SIZE_VARYING;
+ } else if (require_full) {
+ assert(stage == MESA_SHADER_COMPUTE);
+ return SUBGROUP_SIZE_FULL_SUBGROUPS;
+ } else {
+ return SUBGROUP_SIZE_API_CONSTANT;
+ }
+}
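For reference, the required-subgroup-size path above is driven by the client chaining the following structure into a shader stage (the value 32 is illustrative):

   VkPipelineShaderStageRequiredSubgroupSizeCreateInfo rss_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO,
      .requiredSubgroupSize = 32, /* must be a power of two within device limits */
   };

   VkPipelineShaderStageCreateInfo stage_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .pNext = &rss_info,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      /* .module, .pName, ... */
   };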
+
+VkResult
+vk_pipeline_shader_stage_to_nir(struct vk_device *device,
+ const VkPipelineShaderStageCreateInfo *info,
+ const struct spirv_to_nir_options *spirv_options,
+ const struct nir_shader_compiler_options *nir_options,
+ void *mem_ctx, nir_shader **nir_out)
+{
+ VK_FROM_HANDLE(vk_shader_module, module, info->module);
+ const gl_shader_stage stage = vk_to_mesa_shader_stage(info->stage);
+
+ assert(info->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO);
+
+ nir_shader *builtin_nir = get_builtin_nir(info);
+ if (builtin_nir != NULL) {
+ nir_validate_shader(builtin_nir, "internal shader");
+
+ nir_shader *clone = nir_shader_clone(mem_ctx, builtin_nir);
+ if (clone == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ assert(clone->options == NULL || clone->options == nir_options);
+ clone->options = nir_options;
+
+ *nir_out = clone;
+ return VK_SUCCESS;
+ }
+
+ const uint32_t *spirv_data;
+ uint32_t spirv_size;
+ if (module != NULL) {
+ spirv_data = (uint32_t *)module->data;
+ spirv_size = module->size;
+ } else {
+ const VkShaderModuleCreateInfo *minfo =
+ vk_find_struct_const(info->pNext, SHADER_MODULE_CREATE_INFO);
+ if (unlikely(minfo == NULL)) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "No shader module provided");
+ }
+ spirv_data = minfo->pCode;
+ spirv_size = minfo->codeSize;
+ }
+
+ enum gl_subgroup_size subgroup_size = vk_get_subgroup_size(
+ vk_spirv_version(spirv_data, spirv_size),
+ stage, info->pNext,
+ info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT,
+ info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT);
+
+ nir_shader *nir = vk_spirv_to_nir(device, spirv_data, spirv_size, stage,
+ info->pName, subgroup_size,
+ info->pSpecializationInfo,
+ spirv_options, nir_options,
+ false /* internal */,
+ mem_ctx);
+ if (nir == NULL)
+ return vk_errorf(device, VK_ERROR_UNKNOWN, "spirv_to_nir failed");
+
+ *nir_out = nir;
+
+ return VK_SUCCESS;
+}
+
+void
+vk_pipeline_hash_shader_stage(const VkPipelineShaderStageCreateInfo *info,
+ const struct vk_pipeline_robustness_state *rstate,
+ unsigned char *stage_sha1)
+{
+ VK_FROM_HANDLE(vk_shader_module, module, info->module);
+
+ const nir_shader *builtin_nir = get_builtin_nir(info);
+ if (builtin_nir != NULL) {
+ /* Internal NIR module: serialize and hash the NIR shader.
+ * We don't need to hash other info fields since they should match the
+ * NIR data.
+ */
+ struct blob blob;
+
+ blob_init(&blob);
+ nir_serialize(&blob, builtin_nir, false);
+ assert(!blob.out_of_memory);
+ _mesa_sha1_compute(blob.data, blob.size, stage_sha1);
+ blob_finish(&blob);
+ return;
+ }
+
+ const VkShaderModuleCreateInfo *minfo =
+ vk_find_struct_const(info->pNext, SHADER_MODULE_CREATE_INFO);
+ const VkPipelineShaderStageModuleIdentifierCreateInfoEXT *iinfo =
+ vk_find_struct_const(info->pNext, PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT);
+
+ struct mesa_sha1 ctx;
+
+ _mesa_sha1_init(&ctx);
+
+ _mesa_sha1_update(&ctx, &info->flags, sizeof(info->flags));
+
+ assert(util_bitcount(info->stage) == 1);
+ _mesa_sha1_update(&ctx, &info->stage, sizeof(info->stage));
+
+ if (module) {
+ _mesa_sha1_update(&ctx, module->hash, sizeof(module->hash));
+ } else if (minfo) {
+ blake3_hash spirv_hash;
+
+ _mesa_blake3_compute(minfo->pCode, minfo->codeSize, spirv_hash);
+ _mesa_sha1_update(&ctx, spirv_hash, sizeof(spirv_hash));
+ } else {
+ /* It is legal to pass in arbitrary identifiers as long as they don't exceed
+ * the limit. Shaders with bogus identifiers are more or less guaranteed to fail. */
+ assert(iinfo);
+ assert(iinfo->identifierSize <= VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT);
+ _mesa_sha1_update(&ctx, iinfo->pIdentifier, iinfo->identifierSize);
+ }
+
+ if (rstate) {
+ _mesa_sha1_update(&ctx, &rstate->storage_buffers, sizeof(rstate->storage_buffers));
+ _mesa_sha1_update(&ctx, &rstate->uniform_buffers, sizeof(rstate->uniform_buffers));
+ _mesa_sha1_update(&ctx, &rstate->vertex_inputs, sizeof(rstate->vertex_inputs));
+ _mesa_sha1_update(&ctx, &rstate->images, sizeof(rstate->images));
+ }
+
+ _mesa_sha1_update(&ctx, info->pName, strlen(info->pName));
+
+ if (info->pSpecializationInfo) {
+ _mesa_sha1_update(&ctx, info->pSpecializationInfo->pMapEntries,
+ info->pSpecializationInfo->mapEntryCount *
+ sizeof(*info->pSpecializationInfo->pMapEntries));
+ _mesa_sha1_update(&ctx, info->pSpecializationInfo->pData,
+ info->pSpecializationInfo->dataSize);
+ }
+
+ uint32_t req_subgroup_size = get_required_subgroup_size(info);
+ _mesa_sha1_update(&ctx, &req_subgroup_size, sizeof(req_subgroup_size));
+
+ _mesa_sha1_final(&ctx, stage_sha1);
+}
+
+static VkPipelineRobustnessBufferBehaviorEXT
+vk_device_default_robust_buffer_behavior(const struct vk_device *device)
+{
+ if (device->enabled_features.robustBufferAccess2) {
+ return VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT;
+ } else if (device->enabled_features.robustBufferAccess) {
+ return VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
+ } else {
+ return VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
+ }
+}
+
+static VkPipelineRobustnessImageBehaviorEXT
+vk_device_default_robust_image_behavior(const struct vk_device *device)
+{
+ if (device->enabled_features.robustImageAccess2) {
+ return VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT;
+ } else if (device->enabled_features.robustImageAccess) {
+ return VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT;
+ } else {
+ return VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;
+ }
+}
+
+void
+vk_pipeline_robustness_state_fill(const struct vk_device *device,
+ struct vk_pipeline_robustness_state *rs,
+ const void *pipeline_pNext,
+ const void *shader_stage_pNext)
+{
+ rs->uniform_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
+ rs->storage_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
+ rs->vertex_inputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
+ rs->images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT;
+
+ const VkPipelineRobustnessCreateInfoEXT *shader_info =
+ vk_find_struct_const(shader_stage_pNext,
+ PIPELINE_ROBUSTNESS_CREATE_INFO_EXT);
+ if (shader_info) {
+ rs->storage_buffers = shader_info->storageBuffers;
+ rs->uniform_buffers = shader_info->uniformBuffers;
+ rs->vertex_inputs = shader_info->vertexInputs;
+ rs->images = shader_info->images;
+ } else {
+ const VkPipelineRobustnessCreateInfoEXT *pipeline_info =
+ vk_find_struct_const(pipeline_pNext,
+ PIPELINE_ROBUSTNESS_CREATE_INFO_EXT);
+ if (pipeline_info) {
+ rs->storage_buffers = pipeline_info->storageBuffers;
+ rs->uniform_buffers = pipeline_info->uniformBuffers;
+ rs->vertex_inputs = pipeline_info->vertexInputs;
+ rs->images = pipeline_info->images;
+ }
+ }
+
+ if (rs->storage_buffers ==
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT)
+ rs->storage_buffers = vk_device_default_robust_buffer_behavior(device);
+
+ if (rs->uniform_buffers ==
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT)
+ rs->uniform_buffers = vk_device_default_robust_buffer_behavior(device);
+
+ if (rs->vertex_inputs ==
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT)
+ rs->vertex_inputs = vk_device_default_robust_buffer_behavior(device);
+
+ if (rs->images == VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT)
+ rs->images = vk_device_default_robust_image_behavior(device);
+}
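To illustrate the precedence resolved above (field values made up), a client can override robustness for a single stage while leaving everything else at the device default:

   VkPipelineRobustnessCreateInfoEXT stage_robustness = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT,
      .storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT,
      .uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT,
      .vertexInputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT,
      .images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT,
   };
   /* Chained into VkPipelineShaderStageCreateInfo::pNext.  Any field left at
    * *_DEVICE_DEFAULT_EXT falls back first to a pipeline-level
    * VkPipelineRobustnessCreateInfoEXT and then to the device's enabled
    * robustness features, which is exactly the resolution performed by
    * vk_pipeline_robustness_state_fill().
    */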
+
+void *
+vk_pipeline_zalloc(struct vk_device *device,
+ const struct vk_pipeline_ops *ops,
+ VkPipelineBindPoint bind_point,
+ VkPipelineCreateFlags2KHR flags,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_pipeline *pipeline;
+
+ pipeline = vk_object_zalloc(device, alloc, size, VK_OBJECT_TYPE_PIPELINE);
+ if (pipeline == NULL)
+ return NULL;
+
+ pipeline->ops = ops;
+ pipeline->bind_point = bind_point;
+ pipeline->flags = flags;
+
+ return pipeline;
+}
+
+void
+vk_pipeline_free(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_pipeline *pipeline)
+{
+ vk_object_free(device, alloc, &pipeline->base);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyPipeline(VkDevice _device,
+ VkPipeline _pipeline,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline, pipeline, _pipeline);
+
+ if (pipeline == NULL)
+ return;
+
+ pipeline->ops->destroy(device, pipeline, pAllocator);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetPipelineExecutablePropertiesKHR(
+ VkDevice _device,
+ const VkPipelineInfoKHR *pPipelineInfo,
+ uint32_t *pExecutableCount,
+ VkPipelineExecutablePropertiesKHR *pProperties)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline, pipeline, pPipelineInfo->pipeline);
+
+ return pipeline->ops->get_executable_properties(device, pipeline,
+ pExecutableCount,
+ pProperties);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetPipelineExecutableStatisticsKHR(
+ VkDevice _device,
+ const VkPipelineExecutableInfoKHR *pExecutableInfo,
+ uint32_t *pStatisticCount,
+ VkPipelineExecutableStatisticKHR *pStatistics)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline, pipeline, pExecutableInfo->pipeline);
+
+ return pipeline->ops->get_executable_statistics(device, pipeline,
+ pExecutableInfo->executableIndex,
+ pStatisticCount, pStatistics);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetPipelineExecutableInternalRepresentationsKHR(
+ VkDevice _device,
+ const VkPipelineExecutableInfoKHR *pExecutableInfo,
+ uint32_t *pInternalRepresentationCount,
+ VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline, pipeline, pExecutableInfo->pipeline);
+
+ return pipeline->ops->get_internal_representations(device, pipeline,
+ pExecutableInfo->executableIndex,
+ pInternalRepresentationCount,
+ pInternalRepresentations);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBindPipeline(VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline _pipeline)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(vk_pipeline, pipeline, _pipeline);
+
+ assert(pipeline->bind_point == pipelineBindPoint);
+
+ pipeline->ops->cmd_bind(cmd_buffer, pipeline);
+}
+
+static const struct vk_pipeline_cache_object_ops pipeline_shader_cache_ops;
+
+static struct vk_shader *
+vk_shader_from_cache_obj(struct vk_pipeline_cache_object *object)
+{
+ assert(object->ops == &pipeline_shader_cache_ops);
+ return container_of(object, struct vk_shader, pipeline.cache_obj);
+}
+
+static bool
+vk_pipeline_shader_serialize(struct vk_pipeline_cache_object *object,
+ struct blob *blob)
+{
+ struct vk_shader *shader = vk_shader_from_cache_obj(object);
+ struct vk_device *device = shader->base.device;
+
+ return shader->ops->serialize(device, shader, blob);
+}
+
+static void
+vk_shader_init_cache_obj(struct vk_device *device, struct vk_shader *shader,
+ const void *key_data, size_t key_size)
+{
+ assert(key_size == sizeof(shader->pipeline.cache_key));
+ memcpy(&shader->pipeline.cache_key, key_data,
+ sizeof(shader->pipeline.cache_key));
+
+ vk_pipeline_cache_object_init(device, &shader->pipeline.cache_obj,
+ &pipeline_shader_cache_ops,
+ &shader->pipeline.cache_key,
+ sizeof(shader->pipeline.cache_key));
+}
+
+static struct vk_pipeline_cache_object *
+vk_pipeline_shader_deserialize(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ struct blob_reader *blob)
+{
+ struct vk_device *device = cache->base.device;
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+
+ /* TODO: Do we really want to always use the latest version? */
+ const uint32_t version = device->physical->properties.shaderBinaryVersion;
+
+ struct vk_shader *shader;
+ VkResult result = ops->deserialize(device, blob, version,
+ &device->alloc, &shader);
+ if (result != VK_SUCCESS) {
+ assert(result == VK_ERROR_OUT_OF_HOST_MEMORY);
+ return NULL;
+ }
+
+ vk_shader_init_cache_obj(device, shader, key_data, key_size);
+
+ return &shader->pipeline.cache_obj;
+}
+
+static void
+vk_pipeline_shader_destroy(struct vk_device *device,
+ struct vk_pipeline_cache_object *object)
+{
+ struct vk_shader *shader = vk_shader_from_cache_obj(object);
+ assert(shader->base.device == device);
+
+ vk_shader_destroy(device, shader, &device->alloc);
+}
+
+static const struct vk_pipeline_cache_object_ops pipeline_shader_cache_ops = {
+ .serialize = vk_pipeline_shader_serialize,
+ .deserialize = vk_pipeline_shader_deserialize,
+ .destroy = vk_pipeline_shader_destroy,
+};
+
+static struct vk_shader *
+vk_shader_ref(struct vk_shader *shader)
+{
+ vk_pipeline_cache_object_ref(&shader->pipeline.cache_obj);
+ return shader;
+}
+
+static void
+vk_shader_unref(struct vk_device *device, struct vk_shader *shader)
+{
+ vk_pipeline_cache_object_unref(device, &shader->pipeline.cache_obj);
+}
+
+PRAGMA_DIAGNOSTIC_PUSH
+PRAGMA_DIAGNOSTIC_ERROR(-Wpadded)
+struct vk_pipeline_tess_info {
+ unsigned tcs_vertices_out : 8;
+ unsigned primitive_mode : 2; /* tess_primitive_mode */
+ unsigned spacing : 2; /* gl_tess_spacing */
+ unsigned ccw : 1;
+ unsigned point_mode : 1;
+ unsigned _pad : 18;
+};
+PRAGMA_DIAGNOSTIC_POP
+static_assert(sizeof(struct vk_pipeline_tess_info) == 4,
+ "This struct has no holes");
+
+static void
+vk_pipeline_gather_nir_tess_info(const nir_shader *nir,
+ struct vk_pipeline_tess_info *info)
+{
+ info->tcs_vertices_out = nir->info.tess.tcs_vertices_out;
+ info->primitive_mode = nir->info.tess._primitive_mode;
+ info->spacing = nir->info.tess.spacing;
+ info->ccw = nir->info.tess.ccw;
+ info->point_mode = nir->info.tess.point_mode;
+}
+
+static void
+vk_pipeline_replace_nir_tess_info(nir_shader *nir,
+ const struct vk_pipeline_tess_info *info)
+{
+ nir->info.tess.tcs_vertices_out = info->tcs_vertices_out;
+ nir->info.tess._primitive_mode = info->primitive_mode;
+ nir->info.tess.spacing = info->spacing;
+ nir->info.tess.ccw = info->ccw;
+ nir->info.tess.point_mode = info->point_mode;
+}
+
+static void
+vk_pipeline_tess_info_merge(struct vk_pipeline_tess_info *dst,
+ const struct vk_pipeline_tess_info *src)
+{
+ /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
+ *
+ * "PointMode. Controls generation of points rather than triangles
+ * or lines. This functionality defaults to disabled, and is
+ * enabled if either shader stage includes the execution mode."
+ *
+ * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
+ * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
+ * and OutputVertices, it says:
+ *
+ * "One mode must be set in at least one of the tessellation
+ * shader stages."
+ *
+ * So, the fields can be set in either the TCS or TES, but they must
+ * agree if set in both.
+ */
+ assert(dst->tcs_vertices_out == 0 ||
+ src->tcs_vertices_out == 0 ||
+ dst->tcs_vertices_out == src->tcs_vertices_out);
+ dst->tcs_vertices_out |= src->tcs_vertices_out;
+
+ static_assert(TESS_SPACING_UNSPECIFIED == 0, "");
+ assert(dst->spacing == TESS_SPACING_UNSPECIFIED ||
+ src->spacing == TESS_SPACING_UNSPECIFIED ||
+ dst->spacing == src->spacing);
+ dst->spacing |= src->spacing;
+
+ static_assert(TESS_PRIMITIVE_UNSPECIFIED == 0, "");
+ assert(dst->primitive_mode == TESS_PRIMITIVE_UNSPECIFIED ||
+ src->primitive_mode == TESS_PRIMITIVE_UNSPECIFIED ||
+ dst->primitive_mode == src->primitive_mode);
+ dst->primitive_mode |= src->primitive_mode;
+ dst->ccw |= src->ccw;
+ dst->point_mode |= src->point_mode;
+}
+
+struct vk_pipeline_precomp_shader {
+ struct vk_pipeline_cache_object cache_obj;
+
+ /* Key for this cache_obj in the pipeline cache.
+ *
+ * This is always the output of vk_pipeline_hash_shader_stage() so it must
+ * be a SHA1 hash.
+ */
+ uint8_t cache_key[SHA1_DIGEST_LENGTH];
+
+ gl_shader_stage stage;
+
+ struct vk_pipeline_robustness_state rs;
+
+ /* Tessellation info if the shader is a tessellation shader */
+ struct vk_pipeline_tess_info tess;
+
+ /* Hash of the vk_pipeline_precomp_shader
+ *
+ * This is the hash of the final compiled NIR together with tess info and
+ * robustness state. It's used as a key for final binary lookups. By
+ * having this as a separate key, we can de-duplicate cases where you have
+ * different SPIR-V or specialization constants but end up compiling the
+ * same NIR shader in the end anyway.
+ */
+ blake3_hash blake3;
+
+ struct blob nir_blob;
+};
+
+static struct vk_pipeline_precomp_shader *
+vk_pipeline_precomp_shader_ref(struct vk_pipeline_precomp_shader *shader)
+{
+ vk_pipeline_cache_object_ref(&shader->cache_obj);
+ return shader;
+}
+
+static void
+vk_pipeline_precomp_shader_unref(struct vk_device *device,
+ struct vk_pipeline_precomp_shader *shader)
+{
+ vk_pipeline_cache_object_unref(device, &shader->cache_obj);
+}
+
+static const struct vk_pipeline_cache_object_ops pipeline_precomp_shader_cache_ops;
+
+static struct vk_pipeline_precomp_shader *
+vk_pipeline_precomp_shader_from_cache_obj(struct vk_pipeline_cache_object *obj)
+{
+ assert(obj->ops == &pipeline_precomp_shader_cache_ops);
+ return container_of(obj, struct vk_pipeline_precomp_shader, cache_obj);
+}
+
+static struct vk_pipeline_precomp_shader *
+vk_pipeline_precomp_shader_create(struct vk_device *device,
+ const void *key_data, size_t key_size,
+ const struct vk_pipeline_robustness_state *rs,
+ nir_shader *nir)
+{
+ struct blob blob;
+ blob_init(&blob);
+
+ nir_serialize(&blob, nir, false);
+
+ if (blob.out_of_memory)
+ goto fail_blob;
+
+ struct vk_pipeline_precomp_shader *shader =
+ vk_zalloc(&device->alloc, sizeof(*shader), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (shader == NULL)
+ goto fail_blob;
+
+ assert(sizeof(shader->cache_key) == key_size);
+ memcpy(shader->cache_key, key_data, sizeof(shader->cache_key));
+
+ vk_pipeline_cache_object_init(device, &shader->cache_obj,
+ &pipeline_precomp_shader_cache_ops,
+ shader->cache_key,
+ sizeof(shader->cache_key));
+
+ shader->stage = nir->info.stage;
+ shader->rs = *rs;
+
+ vk_pipeline_gather_nir_tess_info(nir, &shader->tess);
+
+ struct mesa_blake3 blake3_ctx;
+ _mesa_blake3_init(&blake3_ctx);
+ _mesa_blake3_update(&blake3_ctx, rs, sizeof(*rs));
+ _mesa_blake3_update(&blake3_ctx, blob.data, blob.size);
+ _mesa_blake3_final(&blake3_ctx, shader->blake3);
+
+ shader->nir_blob = blob;
+
+ return shader;
+
+fail_blob:
+ blob_finish(&blob);
+
+ return NULL;
+}
+
+static bool
+vk_pipeline_precomp_shader_serialize(struct vk_pipeline_cache_object *obj,
+ struct blob *blob)
+{
+ struct vk_pipeline_precomp_shader *shader =
+ vk_pipeline_precomp_shader_from_cache_obj(obj);
+
+ blob_write_uint32(blob, shader->stage);
+ blob_write_bytes(blob, &shader->rs, sizeof(shader->rs));
+ blob_write_bytes(blob, &shader->tess, sizeof(shader->tess));
+ blob_write_bytes(blob, shader->blake3, sizeof(shader->blake3));
+ blob_write_uint64(blob, shader->nir_blob.size);
+ blob_write_bytes(blob, shader->nir_blob.data, shader->nir_blob.size);
+
+ return !blob->out_of_memory;
+}
+
+static struct vk_pipeline_cache_object *
+vk_pipeline_precomp_shader_deserialize(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ struct blob_reader *blob)
+{
+ struct vk_device *device = cache->base.device;
+
+ struct vk_pipeline_precomp_shader *shader =
+ vk_zalloc(&device->alloc, sizeof(*shader), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (shader == NULL)
+ return NULL;
+
+ assert(sizeof(shader->cache_key) == key_size);
+ memcpy(shader->cache_key, key_data, sizeof(shader->cache_key));
+
+ vk_pipeline_cache_object_init(device, &shader->cache_obj,
+ &pipeline_precomp_shader_cache_ops,
+ shader->cache_key,
+ sizeof(shader->cache_key));
+
+ shader->stage = blob_read_uint32(blob);
+ blob_copy_bytes(blob, &shader->rs, sizeof(shader->rs));
+ blob_copy_bytes(blob, &shader->tess, sizeof(shader->tess));
+ blob_copy_bytes(blob, shader->blake3, sizeof(shader->blake3));
+
+ uint64_t nir_size = blob_read_uint64(blob);
+ if (blob->overrun || nir_size > SIZE_MAX)
+ goto fail_shader;
+
+ const void *nir_data = blob_read_bytes(blob, nir_size);
+ if (blob->overrun)
+ goto fail_shader;
+
+ blob_init(&shader->nir_blob);
+ blob_write_bytes(&shader->nir_blob, nir_data, nir_size);
+ if (shader->nir_blob.out_of_memory)
+ goto fail_nir_blob;
+
+ return &shader->cache_obj;
+
+fail_nir_blob:
+ blob_finish(&shader->nir_blob);
+fail_shader:
+ vk_pipeline_cache_object_finish(&shader->cache_obj);
+ vk_free(&device->alloc, shader);
+
+ return NULL;
+}
+
+static void
+vk_pipeline_precomp_shader_destroy(struct vk_device *device,
+ struct vk_pipeline_cache_object *obj)
+{
+ struct vk_pipeline_precomp_shader *shader =
+ vk_pipeline_precomp_shader_from_cache_obj(obj);
+
+ blob_finish(&shader->nir_blob);
+ vk_pipeline_cache_object_finish(&shader->cache_obj);
+ vk_free(&device->alloc, shader);
+}
+
+static nir_shader *
+vk_pipeline_precomp_shader_get_nir(const struct vk_pipeline_precomp_shader *shader,
+ const struct nir_shader_compiler_options *nir_options)
+{
+ struct blob_reader blob;
+ blob_reader_init(&blob, shader->nir_blob.data, shader->nir_blob.size);
+
+ nir_shader *nir = nir_deserialize(NULL, nir_options, &blob);
+ if (blob.overrun) {
+ ralloc_free(nir);
+ return NULL;
+ }
+
+ return nir;
+}
+
+static const struct vk_pipeline_cache_object_ops pipeline_precomp_shader_cache_ops = {
+ .serialize = vk_pipeline_precomp_shader_serialize,
+ .deserialize = vk_pipeline_precomp_shader_deserialize,
+ .destroy = vk_pipeline_precomp_shader_destroy,
+};
+
+static VkResult
+vk_pipeline_precompile_shader(struct vk_device *device,
+ struct vk_pipeline_cache *cache,
+ VkPipelineCreateFlags2KHR pipeline_flags,
+ const void *pipeline_info_pNext,
+ const VkPipelineShaderStageCreateInfo *info,
+ struct vk_pipeline_precomp_shader **ps_out)
+{
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+ VkResult result;
+
+ struct vk_pipeline_robustness_state rs;
+ vk_pipeline_robustness_state_fill(device, &rs,
+ pipeline_info_pNext,
+ info->pNext);
+
+ uint8_t stage_sha1[SHA1_DIGEST_LENGTH];
+ vk_pipeline_hash_shader_stage(info, &rs, stage_sha1);
+
+ if (cache != NULL) {
+ struct vk_pipeline_cache_object *cache_obj =
+ vk_pipeline_cache_lookup_object(cache, stage_sha1, sizeof(stage_sha1),
+ &pipeline_precomp_shader_cache_ops,
+ NULL /* cache_hit */);
+ if (cache_obj != NULL) {
+ *ps_out = vk_pipeline_precomp_shader_from_cache_obj(cache_obj);
+ return VK_SUCCESS;
+ }
+ }
+
+ if (pipeline_flags &
+ VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
+ return VK_PIPELINE_COMPILE_REQUIRED;
+
+ const gl_shader_stage stage = vk_to_mesa_shader_stage(info->stage);
+ const struct nir_shader_compiler_options *nir_options =
+ ops->get_nir_options(device->physical, stage, &rs);
+ const struct spirv_to_nir_options spirv_options =
+ ops->get_spirv_options(device->physical, stage, &rs);
+
+ nir_shader *nir;
+ result = vk_pipeline_shader_stage_to_nir(device, info, &spirv_options,
+ nir_options, NULL, &nir);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (ops->preprocess_nir != NULL)
+ ops->preprocess_nir(device->physical, nir);
+
+ struct vk_pipeline_precomp_shader *shader =
+ vk_pipeline_precomp_shader_create(device, stage_sha1,
+ sizeof(stage_sha1),
+ &rs, nir);
+ ralloc_free(nir);
+ if (shader == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (cache != NULL) {
+ struct vk_pipeline_cache_object *cache_obj = &shader->cache_obj;
+ cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
+ shader = vk_pipeline_precomp_shader_from_cache_obj(cache_obj);
+ }
+
+ *ps_out = shader;
+
+ return VK_SUCCESS;
+}
+
+struct vk_pipeline_stage {
+ gl_shader_stage stage;
+
+ struct vk_pipeline_precomp_shader *precomp;
+ struct vk_shader *shader;
+};
+
+static int
+cmp_vk_pipeline_stages(const void *_a, const void *_b)
+{
+ const struct vk_pipeline_stage *a = _a, *b = _b;
+ return vk_shader_cmp_graphics_stages(a->stage, b->stage);
+}
+
+static bool
+vk_pipeline_stage_is_null(const struct vk_pipeline_stage *stage)
+{
+ return stage->precomp == NULL && stage->shader == NULL;
+}
+
+static void
+vk_pipeline_stage_finish(struct vk_device *device,
+ struct vk_pipeline_stage *stage)
+{
+ if (stage->precomp != NULL)
+ vk_pipeline_precomp_shader_unref(device, stage->precomp);
+
+ if (stage->shader)
+ vk_shader_unref(device, stage->shader);
+}
+
+static struct vk_pipeline_stage
+vk_pipeline_stage_clone(const struct vk_pipeline_stage *in)
+{
+ struct vk_pipeline_stage out = {
+ .stage = in->stage,
+ };
+
+ if (in->precomp)
+ out.precomp = vk_pipeline_precomp_shader_ref(in->precomp);
+
+ if (in->shader)
+ out.shader = vk_shader_ref(in->shader);
+
+ return out;
+}
+
+struct vk_graphics_pipeline {
+ struct vk_pipeline base;
+
+ union {
+ struct {
+ struct vk_graphics_pipeline_all_state all_state;
+ struct vk_graphics_pipeline_state state;
+ } lib;
+
+ struct {
+ struct vk_vertex_input_state _dynamic_vi;
+ struct vk_sample_locations_state _dynamic_sl;
+ struct vk_dynamic_graphics_state dynamic;
+ } linked;
+ };
+
+ uint32_t set_layout_count;
+ struct vk_descriptor_set_layout *set_layouts[MESA_VK_MAX_DESCRIPTOR_SETS];
+
+ uint32_t stage_count;
+ struct vk_pipeline_stage stages[MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES];
+};
+
+static void
+vk_graphics_pipeline_destroy(struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ const VkAllocationCallbacks *pAllocator)
+{
+ struct vk_graphics_pipeline *gfx_pipeline =
+ container_of(pipeline, struct vk_graphics_pipeline, base);
+
+ for (uint32_t i = 0; i < gfx_pipeline->stage_count; i++)
+ vk_pipeline_stage_finish(device, &gfx_pipeline->stages[i]);
+
+ for (uint32_t i = 0; i < gfx_pipeline->set_layout_count; i++) {
+ if (gfx_pipeline->set_layouts[i] != NULL)
+ vk_descriptor_set_layout_unref(device, gfx_pipeline->set_layouts[i]);
+ }
+
+ vk_pipeline_free(device, pAllocator, pipeline);
+}
+
+static bool
+vk_device_supports_stage(struct vk_device *device,
+ gl_shader_stage stage)
+{
+ const struct vk_features *features = &device->physical->supported_features;
+
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_COMPUTE:
+ return true;
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ return features->tessellationShader;
+ case MESA_SHADER_GEOMETRY:
+ return features->geometryShader;
+ case MESA_SHADER_TASK:
+ return features->taskShader;
+ case MESA_SHADER_MESH:
+ return features->meshShader;
+ default:
+ return false;
+ }
+}
+
+static const gl_shader_stage all_gfx_stages[] = {
+ MESA_SHADER_VERTEX,
+ MESA_SHADER_TESS_CTRL,
+ MESA_SHADER_TESS_EVAL,
+ MESA_SHADER_GEOMETRY,
+ MESA_SHADER_TASK,
+ MESA_SHADER_MESH,
+ MESA_SHADER_FRAGMENT,
+};
+
+static void
+vk_graphics_pipeline_cmd_bind(struct vk_command_buffer *cmd_buffer,
+ struct vk_pipeline *pipeline)
+{
+ struct vk_device *device = cmd_buffer->base.device;
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+
+ struct vk_graphics_pipeline *gfx_pipeline = NULL;
+ struct vk_shader *stage_shader[PIPE_SHADER_MESH_TYPES] = { NULL, };
+ if (pipeline != NULL) {
+ assert(pipeline->bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ assert(!(pipeline->flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR));
+ gfx_pipeline = container_of(pipeline, struct vk_graphics_pipeline, base);
+
+ for (uint32_t i = 0; i < gfx_pipeline->stage_count; i++) {
+ struct vk_shader *shader = gfx_pipeline->stages[i].shader;
+ stage_shader[shader->stage] = shader;
+ }
+ }
+
+ uint32_t stage_count = 0;
+ gl_shader_stage stages[ARRAY_SIZE(all_gfx_stages)];
+ struct vk_shader *shaders[ARRAY_SIZE(all_gfx_stages)];
+
+ VkShaderStageFlags vk_stages = 0;
+ for (uint32_t i = 0; i < ARRAY_SIZE(all_gfx_stages); i++) {
+ gl_shader_stage stage = all_gfx_stages[i];
+ if (!vk_device_supports_stage(device, stage)) {
+ assert(stage_shader[stage] == NULL);
+ continue;
+ }
+
+ vk_stages |= mesa_to_vk_shader_stage(stage);
+
+ stages[stage_count] = stage;
+ shaders[stage_count] = stage_shader[stage];
+ stage_count++;
+ }
+ ops->cmd_bind_shaders(cmd_buffer, stage_count, stages, shaders);
+
+ if (gfx_pipeline != NULL) {
+ cmd_buffer->pipeline_shader_stages |= vk_stages;
+ ops->cmd_set_dynamic_graphics_state(cmd_buffer,
+ &gfx_pipeline->linked.dynamic);
+ } else {
+ cmd_buffer->pipeline_shader_stages &= ~vk_stages;
+ }
+}
+
+static VkShaderCreateFlagsEXT
+vk_pipeline_to_shader_flags(VkPipelineCreateFlags2KHR pipeline_flags,
+ gl_shader_stage stage)
+{
+ VkShaderCreateFlagsEXT shader_flags = 0;
+
+ if (pipeline_flags & VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)
+ shader_flags |= VK_SHADER_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_MESA;
+
+ if (stage == MESA_SHADER_FRAGMENT) {
+ if (pipeline_flags & VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)
+ shader_flags |= VK_SHADER_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_EXT;
+
+ if (pipeline_flags & VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT)
+ shader_flags |= VK_SHADER_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT;
+ }
+
+ if (stage == MESA_SHADER_COMPUTE) {
+ if (pipeline_flags & VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR)
+ shader_flags |= VK_SHADER_CREATE_DISPATCH_BASE_BIT_EXT;
+ }
+
+ return shader_flags;
+}
+
+static VkResult
+vk_graphics_pipeline_compile_shaders(struct vk_device *device,
+ struct vk_pipeline_cache *cache,
+ struct vk_graphics_pipeline *pipeline,
+ struct vk_pipeline_layout *pipeline_layout,
+ const struct vk_graphics_pipeline_state *state,
+ uint32_t stage_count,
+ struct vk_pipeline_stage *stages,
+ VkPipelineCreationFeedback *stage_feedbacks)
+{
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+ VkResult result;
+
+ if (stage_count == 0)
+ return VK_SUCCESS;
+
+ /* If we're linking, throw away any previously compiled shaders as they
+ * likely haven't been properly linked. We keep the precompiled shaders
+    * and still look them up in the cache so it may still be fast.
+ */
+ if (pipeline->base.flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) {
+ for (uint32_t i = 0; i < stage_count; i++) {
+ if (stages[i].shader != NULL) {
+ vk_shader_unref(device, stages[i].shader);
+ stages[i].shader = NULL;
+ }
+ }
+ }
+
+ bool have_all_shaders = true;
+ VkShaderStageFlags all_stages = 0;
+ struct vk_pipeline_precomp_shader *tcs_precomp = NULL, *tes_precomp = NULL;
+ for (uint32_t i = 0; i < stage_count; i++) {
+ all_stages |= mesa_to_vk_shader_stage(stages[i].stage);
+
+ if (stages[i].shader == NULL)
+ have_all_shaders = false;
+
+ if (stages[i].stage == MESA_SHADER_TESS_CTRL)
+ tcs_precomp = stages[i].precomp;
+
+ if (stages[i].stage == MESA_SHADER_TESS_EVAL)
+ tes_precomp = stages[i].precomp;
+ }
+
+ /* If we already have a shader for each stage, there's nothing to do. */
+ if (have_all_shaders)
+ return VK_SUCCESS;
+
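+   /* SPIR-V allows the tessellation execution modes (primitive mode, spacing,
+    * vertex order, point mode) to be split between the control and evaluation
+    * shaders, so fold the information from both precompiled shaders into one
+    * struct before it gets hashed and handed to the backend compiler.
+    */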
+ struct vk_pipeline_tess_info tess_info = { ._pad = 0 };
+ if (tcs_precomp != NULL && tes_precomp != NULL) {
+ tess_info = tcs_precomp->tess;
+ vk_pipeline_tess_info_merge(&tess_info, &tes_precomp->tess);
+ }
+
+ struct mesa_blake3 blake3_ctx;
+ _mesa_blake3_init(&blake3_ctx);
+ for (uint32_t i = 0; i < pipeline->set_layout_count; i++) {
+ if (pipeline->set_layouts[i] != NULL) {
+ _mesa_blake3_update(&blake3_ctx, pipeline->set_layouts[i]->blake3,
+ sizeof(pipeline->set_layouts[i]->blake3));
+ }
+ }
+ if (pipeline_layout != NULL) {
+ _mesa_blake3_update(&blake3_ctx, &pipeline_layout->push_ranges,
+ sizeof(pipeline_layout->push_ranges[0]) *
+ pipeline_layout->push_range_count);
+ }
+ blake3_hash layout_blake3;
+ _mesa_blake3_final(&blake3_ctx, layout_blake3);
+
+ /* Partition the shaders */
+ uint32_t part_count;
+ uint32_t partition[MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES + 1] = { 0 };
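+   /* partition[] holds half-open ranges into the sorted stages array: the
+    * stages in [partition[p], partition[p + 1]) are compiled and linked as a
+    * group.  With link-time optimization everything lands in one partition;
+    * with ops->link_geom_stages the pre-rasterization stages form one group
+    * and the fragment shader (if any) another; otherwise every stage is
+    * compiled on its own.
+    */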
+ if (pipeline->base.flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) {
+ partition[1] = stage_count;
+ part_count = 1;
+ } else if (ops->link_geom_stages) {
+ if (stages[0].stage == MESA_SHADER_FRAGMENT) {
+ assert(stage_count == 1);
+ partition[1] = stage_count;
+ part_count = 1;
+ } else if (stages[stage_count - 1].stage == MESA_SHADER_FRAGMENT) {
+ /* In this case we have both */
+ assert(stage_count > 1);
+ partition[1] = stage_count - 1;
+ partition[2] = stage_count;
+ part_count = 2;
+ } else {
+ /* In this case we only have geometry */
+ partition[1] = stage_count;
+ part_count = 1;
+ }
+ } else {
+      /* Otherwise, we don't want to link anything */
+ part_count = stage_count;
+ for (uint32_t i = 0; i < stage_count; i++)
+ partition[i + 1] = i + 1;
+ }
+
+ for (uint32_t p = 0; p < part_count; p++) {
+ const int64_t part_start = os_time_get_nano();
+
+ /* Don't try to re-compile any fast-link shaders */
+ if (!(pipeline->base.flags &
+ VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT)) {
+ assert(partition[p + 1] == partition[p] + 1);
+ if (stages[partition[p]].shader != NULL)
+ continue;
+ }
+
+ struct vk_shader_pipeline_cache_key shader_key = { 0 };
+
+ _mesa_blake3_init(&blake3_ctx);
+
+ VkShaderStageFlags part_stages = 0;
+ for (uint32_t i = partition[p]; i < partition[p + 1]; i++) {
+ const struct vk_pipeline_stage *stage = &stages[i];
+
+ part_stages |= mesa_to_vk_shader_stage(stage->stage);
+ _mesa_blake3_update(&blake3_ctx, stage->precomp->blake3,
+ sizeof(stage->precomp->blake3));
+
+ VkShaderCreateFlagsEXT shader_flags =
+ vk_pipeline_to_shader_flags(pipeline->base.flags, stage->stage);
+ _mesa_blake3_update(&blake3_ctx, &shader_flags, sizeof(shader_flags));
+ }
+
+ blake3_hash state_blake3;
+ ops->hash_graphics_state(device->physical, state,
+ part_stages, state_blake3);
+
+ _mesa_blake3_update(&blake3_ctx, state_blake3, sizeof(state_blake3));
+ _mesa_blake3_update(&blake3_ctx, layout_blake3, sizeof(layout_blake3));
+
+ if (part_stages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
+ _mesa_blake3_update(&blake3_ctx, &tess_info, sizeof(tess_info));
+
+ /* The set of geometry stages used together is used to generate the
+ * nextStage mask as well as VK_SHADER_CREATE_NO_TASK_SHADER_BIT_EXT.
+ */
+ const VkShaderStageFlags geom_stages =
+ all_stages & ~VK_SHADER_STAGE_FRAGMENT_BIT;
+ _mesa_blake3_update(&blake3_ctx, &geom_stages, sizeof(geom_stages));
+
+ _mesa_blake3_final(&blake3_ctx, shader_key.blake3);
+
+ if (cache != NULL) {
+ /* From the Vulkan 1.3.278 spec:
+ *
+ * "VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT
+ * indicates that a readily usable pipeline or pipeline stage was
+ * found in the pipelineCache specified by the application in the
+ * pipeline creation command.
+ *
+ * [...]
+ *
+ * Note
+ *
+ * Implementations are encouraged to provide a meaningful signal
+ * to applications using this bit. The intention is to communicate
+ * to the application that the pipeline or pipeline stage was
+ * created “as fast as it gets” using the pipeline cache provided
+ * by the application. If an implementation uses an internal
+ * cache, it is discouraged from setting this bit as the feedback
+ * would be unactionable."
+ *
+ * The cache_hit value returned by vk_pipeline_cache_lookup_object()
+ * is only set to true when the shader is found in the provided
+ * pipeline cache. It is left false if we fail to find it in the
+ * memory cache but find it in the disk cache even though that's
+       * still a cache hit from the perspective of this pipeline compile.
+ */
+ bool all_shaders_found = true;
+ bool all_cache_hits = true;
+ for (uint32_t i = partition[p]; i < partition[p + 1]; i++) {
+ struct vk_pipeline_stage *stage = &stages[i];
+
+ shader_key.stage = stage->stage;
+
+ if (stage->shader) {
+ /* If we have a shader from some library pipeline and the key
+ * matches, just use that.
+ */
+ if (memcmp(&stage->shader->pipeline.cache_key,
+ &shader_key, sizeof(shader_key)) == 0)
+ continue;
+
+ /* Otherwise, throw it away */
+ vk_shader_unref(device, stage->shader);
+ stage->shader = NULL;
+ }
+
+ bool cache_hit = false;
+ struct vk_pipeline_cache_object *cache_obj =
+ vk_pipeline_cache_lookup_object(cache, &shader_key,
+ sizeof(shader_key),
+ &pipeline_shader_cache_ops,
+ &cache_hit);
+ if (cache_obj != NULL) {
+ assert(stage->shader == NULL);
+ stage->shader = vk_shader_from_cache_obj(cache_obj);
+ } else {
+ all_shaders_found = false;
+ }
+
+ if (cache_obj == NULL && !cache_hit)
+ all_cache_hits = false;
+ }
+
+ if (all_cache_hits && cache != device->mem_cache) {
+ /* The pipeline cache only really helps if we hit for everything
+ * in the partition. Otherwise, we have to go re-compile it all
+ * anyway.
+ */
+ for (uint32_t i = partition[p]; i < partition[p + 1]; i++) {
+ struct vk_pipeline_stage *stage = &stages[i];
+
+ stage_feedbacks[stage->stage].flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
+ }
+ }
+
+ if (all_shaders_found) {
+ /* Update duration to take cache lookups into account */
+ const int64_t part_end = os_time_get_nano();
+ for (uint32_t i = partition[p]; i < partition[p + 1]; i++) {
+ struct vk_pipeline_stage *stage = &stages[i];
+ stage_feedbacks[stage->stage].duration += part_end - part_start;
+ }
+ continue;
+ }
+ }
+
+ if (pipeline->base.flags &
+ VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
+ return VK_PIPELINE_COMPILE_REQUIRED;
+
+ struct vk_shader_compile_info infos[MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES];
+ for (uint32_t i = partition[p]; i < partition[p + 1]; i++) {
+ struct vk_pipeline_stage *stage = &stages[i];
+
+ VkShaderCreateFlagsEXT shader_flags =
+ vk_pipeline_to_shader_flags(pipeline->base.flags, stage->stage);
+
+ if (partition[p + 1] - partition[p] > 1)
+ shader_flags |= VK_SHADER_CREATE_LINK_STAGE_BIT_EXT;
+
+ if ((part_stages & VK_SHADER_STAGE_MESH_BIT_EXT) &&
+ !(geom_stages & VK_SHADER_STAGE_TASK_BIT_EXT))
+            shader_flags |= VK_SHADER_CREATE_NO_TASK_SHADER_BIT_EXT;
+
+ VkShaderStageFlags next_stage;
+ if (stage->stage == MESA_SHADER_FRAGMENT) {
+ next_stage = 0;
+ } else if (i + 1 < stage_count) {
+ /* We hash geom_stages above so this is safe */
+ next_stage = mesa_to_vk_shader_stage(stages[i + 1].stage);
+ } else {
+ /* We're the last geometry stage */
+ next_stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+
+ const struct nir_shader_compiler_options *nir_options =
+ ops->get_nir_options(device->physical, stage->stage,
+ &stage->precomp->rs);
+
+ nir_shader *nir =
+ vk_pipeline_precomp_shader_get_nir(stage->precomp, nir_options);
+ if (nir == NULL) {
+ for (uint32_t j = partition[p]; j < i; j++)
+               ralloc_free(infos[j].nir);
+
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ if (stage->stage == MESA_SHADER_TESS_CTRL ||
+ stage->stage == MESA_SHADER_TESS_EVAL)
+ vk_pipeline_replace_nir_tess_info(nir, &tess_info);
+
+ const VkPushConstantRange *push_range = NULL;
+ if (pipeline_layout != NULL) {
+ for (uint32_t r = 0; r < pipeline_layout->push_range_count; r++) {
+ if (pipeline_layout->push_ranges[r].stageFlags &
+ mesa_to_vk_shader_stage(stage->stage)) {
+ assert(push_range == NULL);
+ push_range = &pipeline_layout->push_ranges[r];
+ }
+ }
+ }
+
+ infos[i] = (struct vk_shader_compile_info) {
+ .stage = stage->stage,
+ .flags = shader_flags,
+ .next_stage_mask = next_stage,
+ .nir = nir,
+ .robustness = &stage->precomp->rs,
+ .set_layout_count = pipeline->set_layout_count,
+ .set_layouts = pipeline->set_layouts,
+ .push_constant_range_count = push_range != NULL,
+ .push_constant_ranges = push_range != NULL ? push_range : NULL,
+ };
+ }
+
+ /* vk_shader_ops::compile() consumes the NIR regardless of whether or
+ * not it succeeds and only generates shaders on success. Once this
+ * returns, we own the shaders but not the NIR in infos.
+ */
+ struct vk_shader *shaders[MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES];
+ result = ops->compile(device, partition[p + 1] - partition[p],
+ &infos[partition[p]],
+ state,
+ &device->alloc,
+ &shaders[partition[p]]);
+ if (result != VK_SUCCESS)
+ return result;
+
+ const int64_t part_end = os_time_get_nano();
+ for (uint32_t i = partition[p]; i < partition[p + 1]; i++) {
+ struct vk_pipeline_stage *stage = &stages[i];
+
+ shader_key.stage = stage->stage;
+ vk_shader_init_cache_obj(device, shaders[i], &shader_key,
+ sizeof(shader_key));
+
+ if (stage->shader == NULL) {
+ struct vk_pipeline_cache_object *cache_obj =
+ &shaders[i]->pipeline.cache_obj;
+ if (cache != NULL)
+ cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
+
+ stage->shader = vk_shader_from_cache_obj(cache_obj);
+ } else {
+            /* This can happen if only some of the shaders were found in the
+             * pipeline cache.  In this case, we just throw away the shader as
+             * vk_pipeline_cache_add_object() would throw it away for us
+             * anyway.
+             */
+ assert(memcmp(&stage->shader->pipeline.cache_key,
+ &shaders[i]->pipeline.cache_key,
+ sizeof(shaders[i]->pipeline.cache_key)) == 0);
+
+ vk_shader_unref(device, shaders[i]);
+ }
+
+ stage_feedbacks[stage->stage].duration += part_end - part_start;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_graphics_pipeline_get_executable_properties(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t *executable_count,
+ VkPipelineExecutablePropertiesKHR *properties)
+{
+ struct vk_graphics_pipeline *gfx_pipeline =
+ container_of(pipeline, struct vk_graphics_pipeline, base);
+ VkResult result;
+
+ if (properties == NULL) {
+ *executable_count = 0;
+ for (uint32_t i = 0; i < gfx_pipeline->stage_count; i++) {
+ struct vk_shader *shader = gfx_pipeline->stages[i].shader;
+
+ uint32_t shader_exec_count = 0;
+ result = shader->ops->get_executable_properties(device, shader,
+ &shader_exec_count,
+ NULL);
+ assert(result == VK_SUCCESS);
+ *executable_count += shader_exec_count;
+ }
+ } else {
+ uint32_t arr_len = *executable_count;
+ *executable_count = 0;
+ for (uint32_t i = 0; i < gfx_pipeline->stage_count; i++) {
+ struct vk_shader *shader = gfx_pipeline->stages[i].shader;
+
+ uint32_t shader_exec_count = arr_len - *executable_count;
+ result = shader->ops->get_executable_properties(device, shader,
+ &shader_exec_count,
+ &properties[*executable_count]);
+ if (result != VK_SUCCESS)
+ return result;
+
+ *executable_count += shader_exec_count;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static inline struct vk_shader *
+vk_graphics_pipeline_executable_shader(struct vk_device *device,
+ struct vk_graphics_pipeline *gfx_pipeline,
+ uint32_t *executable_index)
+{
+ for (uint32_t i = 0; i < gfx_pipeline->stage_count; i++) {
+ struct vk_shader *shader = gfx_pipeline->stages[i].shader;
+
+ uint32_t shader_exec_count = 0;
+ shader->ops->get_executable_properties(device, shader,
+ &shader_exec_count, NULL);
+
+ if (*executable_index < shader_exec_count)
+ return shader;
+ else
+ *executable_index -= shader_exec_count;
+ }
+
+ return NULL;
+}
+
+static VkResult
+vk_graphics_pipeline_get_executable_statistics(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t executable_index,
+ uint32_t *statistic_count,
+ VkPipelineExecutableStatisticKHR *statistics)
+{
+ struct vk_graphics_pipeline *gfx_pipeline =
+ container_of(pipeline, struct vk_graphics_pipeline, base);
+
+ struct vk_shader *shader =
+ vk_graphics_pipeline_executable_shader(device, gfx_pipeline,
+ &executable_index);
+ if (shader == NULL) {
+ *statistic_count = 0;
+ return VK_SUCCESS;
+ }
+
+ return shader->ops->get_executable_statistics(device, shader,
+ executable_index,
+ statistic_count,
+ statistics);
+}
+
+static VkResult
+vk_graphics_pipeline_get_internal_representations(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t executable_index,
+ uint32_t *internal_representation_count,
+ VkPipelineExecutableInternalRepresentationKHR* internal_representations)
+{
+ struct vk_graphics_pipeline *gfx_pipeline =
+ container_of(pipeline, struct vk_graphics_pipeline, base);
+
+ struct vk_shader *shader =
+ vk_graphics_pipeline_executable_shader(device, gfx_pipeline,
+ &executable_index);
+ if (shader == NULL) {
+ *internal_representation_count = 0;
+ return VK_SUCCESS;
+ }
+
+ return shader->ops->get_executable_internal_representations(
+ device, shader, executable_index,
+ internal_representation_count, internal_representations);
+}
+
+static const struct vk_pipeline_ops vk_graphics_pipeline_ops = {
+ .destroy = vk_graphics_pipeline_destroy,
+ .get_executable_statistics = vk_graphics_pipeline_get_executable_statistics,
+ .get_executable_properties = vk_graphics_pipeline_get_executable_properties,
+ .get_internal_representations = vk_graphics_pipeline_get_internal_representations,
+ .cmd_bind = vk_graphics_pipeline_cmd_bind,
+};
+
+static VkResult
+vk_create_graphics_pipeline(struct vk_device *device,
+ struct vk_pipeline_cache *cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipeline)
+{
+ VK_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ const int64_t pipeline_start = os_time_get_nano();
+ VkResult result;
+
+ const VkPipelineCreateFlags2KHR pipeline_flags =
+ vk_graphics_pipeline_create_flags(pCreateInfo);
+
+ const VkPipelineCreationFeedbackCreateInfo *feedback_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
+
+ const VkPipelineLibraryCreateInfoKHR *libs_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ PIPELINE_LIBRARY_CREATE_INFO_KHR);
+
+ struct vk_graphics_pipeline *pipeline =
+ vk_pipeline_zalloc(device, &vk_graphics_pipeline_ops,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline_flags, pAllocator, sizeof(*pipeline));
+ if (pipeline == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ struct vk_pipeline_stage stages[PIPE_SHADER_MESH_TYPES];
+ memset(stages, 0, sizeof(stages));
+
+ VkPipelineCreationFeedback stage_feedbacks[PIPE_SHADER_MESH_TYPES];
+ memset(stage_feedbacks, 0, sizeof(stage_feedbacks));
+
+ struct vk_graphics_pipeline_state state_tmp, *state;
+ struct vk_graphics_pipeline_all_state all_state_tmp, *all_state;
+ if (pipeline->base.flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR) {
+ /* For pipeline libraries, the state is stored in the pipeline */
+ state = &pipeline->lib.state;
+ all_state = &pipeline->lib.all_state;
+ } else {
+ /* For linked pipelines, we throw the state away at the end of pipeline
+ * creation and only keep the dynamic state.
+ */
+ memset(&state_tmp, 0, sizeof(state_tmp));
+ state = &state_tmp;
+ all_state = &all_state_tmp;
+ }
+
+ /* If we have libraries, import them first. */
+ if (libs_info) {
+ for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
+ VK_FROM_HANDLE(vk_pipeline, lib_pipeline, libs_info->pLibraries[i]);
+ assert(lib_pipeline->bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ assert(lib_pipeline->flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR);
+ struct vk_graphics_pipeline *lib_gfx_pipeline =
+ container_of(lib_pipeline, struct vk_graphics_pipeline, base);
+
+ vk_graphics_pipeline_state_merge(state, &lib_gfx_pipeline->lib.state);
+
+ pipeline->set_layout_count = MAX2(pipeline->set_layout_count,
+ lib_gfx_pipeline->set_layout_count);
+         for (uint32_t s = 0; s < lib_gfx_pipeline->set_layout_count; s++) {
+            if (lib_gfx_pipeline->set_layouts[s] == NULL)
+               continue;
+
+            if (pipeline->set_layouts[s] == NULL) {
+               pipeline->set_layouts[s] =
+                  vk_descriptor_set_layout_ref(lib_gfx_pipeline->set_layouts[s]);
+            }
+         }
+
+         for (uint32_t s = 0; s < lib_gfx_pipeline->stage_count; s++) {
+            const struct vk_pipeline_stage *lib_stage =
+               &lib_gfx_pipeline->stages[s];
+
+ /* We shouldn't have duplicated stages in the imported pipeline
+ * but it's cheap enough to protect against it so we may as well.
+ */
+ assert(lib_stage->stage < ARRAY_SIZE(stages));
+ assert(vk_pipeline_stage_is_null(&stages[lib_stage->stage]));
+ if (!vk_pipeline_stage_is_null(&stages[lib_stage->stage]))
+ continue;
+
+ stages[lib_stage->stage] = vk_pipeline_stage_clone(lib_stage);
+ }
+ }
+ }
+
+ result = vk_graphics_pipeline_state_fill(device, state,
+ pCreateInfo,
+ NULL /* driver_rp */,
+ 0 /* driver_rp_flags */,
+ all_state,
+ NULL, 0, NULL);
+ if (result != VK_SUCCESS)
+ goto fail_stages;
+
+ if (!(pipeline->base.flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)) {
+ pipeline->linked.dynamic.vi = &pipeline->linked._dynamic_vi;
+ pipeline->linked.dynamic.ms.sample_locations =
+ &pipeline->linked._dynamic_sl;
+ vk_dynamic_graphics_state_fill(&pipeline->linked.dynamic, &state_tmp);
+ }
+
+ if (pipeline_layout != NULL) {
+ pipeline->set_layout_count = MAX2(pipeline->set_layout_count,
+ pipeline_layout->set_count);
+ for (uint32_t i = 0; i < pipeline_layout->set_count; i++) {
+ if (pipeline_layout->set_layouts[i] == NULL)
+ continue;
+
+ if (pipeline->set_layouts[i] == NULL) {
+ pipeline->set_layouts[i] =
+ vk_descriptor_set_layout_ref(pipeline_layout->set_layouts[i]);
+ }
+ }
+ }
+
+ for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+ const VkPipelineShaderStageCreateInfo *stage_info =
+ &pCreateInfo->pStages[i];
+
+ const int64_t stage_start = os_time_get_nano();
+
+ assert(util_bitcount(stage_info->stage) == 1);
+ if (!(state->shader_stages & stage_info->stage))
+ continue;
+
+ gl_shader_stage stage = vk_to_mesa_shader_stage(stage_info->stage);
+ assert(vk_device_supports_stage(device, stage));
+
+ stage_feedbacks[stage].flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
+
+ if (!vk_pipeline_stage_is_null(&stages[stage]))
+ continue;
+
+ struct vk_pipeline_precomp_shader *precomp;
+ result = vk_pipeline_precompile_shader(device, cache, pipeline_flags,
+ pCreateInfo->pNext,
+ stage_info,
+ &precomp);
+ if (result != VK_SUCCESS)
+ goto fail_stages;
+
+ stages[stage] = (struct vk_pipeline_stage) {
+ .stage = stage,
+ .precomp = precomp,
+ };
+
+ const int64_t stage_end = os_time_get_nano();
+ stage_feedbacks[stage].duration += stage_end - stage_start;
+ }
+
+ /* Compact the array of stages */
+ uint32_t stage_count = 0;
+ for (uint32_t s = 0; s < ARRAY_SIZE(stages); s++) {
+ assert(s >= stage_count);
+ if (!vk_pipeline_stage_is_null(&stages[s]))
+ stages[stage_count++] = stages[s];
+ }
+ for (uint32_t s = stage_count; s < ARRAY_SIZE(stages); s++)
+ memset(&stages[s], 0, sizeof(stages[s]));
+
+ /* Sort so we always give the driver shaders in order.
+ *
+ * This makes everything easier for everyone. This also helps stabilize
+ * shader keys so that we get a cache hit even if the client gives us
+ * the stages in a different order.
+ */
+ qsort(stages, stage_count, sizeof(*stages), cmp_vk_pipeline_stages);
+
+ result = vk_graphics_pipeline_compile_shaders(device, cache, pipeline,
+ pipeline_layout, state,
+ stage_count, stages,
+ stage_feedbacks);
+ if (result != VK_SUCCESS)
+ goto fail_stages;
+
+ /* Throw away precompiled shaders unless the client explicitly asks us to
+ * keep them.
+ */
+ if (!(pipeline_flags &
+ VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT)) {
+ for (uint32_t i = 0; i < stage_count; i++) {
+ if (stages[i].precomp != NULL) {
+ vk_pipeline_precomp_shader_unref(device, stages[i].precomp);
+ stages[i].precomp = NULL;
+ }
+ }
+ }
+
+ pipeline->stage_count = stage_count;
+ for (uint32_t i = 0; i < stage_count; i++)
+ pipeline->stages[i] = stages[i];
+
+ const int64_t pipeline_end = os_time_get_nano();
+ if (feedback_info != NULL) {
+ VkPipelineCreationFeedback pipeline_feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
+ .duration = pipeline_end - pipeline_start,
+ };
+
+ /* From the Vulkan 1.3.275 spec:
+ *
+ * "An implementation should set the
+ * VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT
+ * bit if it was able to avoid the large majority of pipeline or
+ * pipeline stage creation work by using the pipelineCache parameter"
+ *
+ * We really shouldn't set this bit unless all the shaders hit the
+ * cache.
+ */
+ uint32_t cache_hit_count = 0;
+ for (uint32_t i = 0; i < stage_count; i++) {
+ const gl_shader_stage stage = stages[i].stage;
+ if (stage_feedbacks[stage].flags &
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT)
+ cache_hit_count++;
+ }
+ if (cache_hit_count > 0 && cache_hit_count == stage_count) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
+ }
+
+ *feedback_info->pPipelineCreationFeedback = pipeline_feedback;
+
+ /* VUID-VkGraphicsPipelineCreateInfo-pipelineStageCreationFeedbackCount-06594 */
+ assert(feedback_info->pipelineStageCreationFeedbackCount == 0 ||
+ feedback_info->pipelineStageCreationFeedbackCount ==
+ pCreateInfo->stageCount);
+ for (uint32_t i = 0;
+ i < feedback_info->pipelineStageCreationFeedbackCount; i++) {
+ const gl_shader_stage stage =
+ vk_to_mesa_shader_stage(pCreateInfo->pStages[i].stage);
+
+ feedback_info->pPipelineStageCreationFeedbacks[i] =
+ stage_feedbacks[stage];
+ }
+ }
+
+ *pPipeline = vk_pipeline_to_handle(&pipeline->base);
+
+ return VK_SUCCESS;
+
+fail_stages:
+ for (uint32_t i = 0; i < ARRAY_SIZE(stages); i++)
+ vk_pipeline_stage_finish(device, &stages[i]);
+
+ vk_graphics_pipeline_destroy(device, &pipeline->base, pAllocator);
+
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateGraphicsPipelines(VkDevice _device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
+ VkResult first_error_or_success = VK_SUCCESS;
+
+ /* Use implicit pipeline cache if there's no cache set */
+ if (!cache && device->mem_cache)
+ cache = device->mem_cache;
+
+ /* From the Vulkan 1.3.274 spec:
+ *
+ * "When attempting to create many pipelines in a single command, it is
+ * possible that creation may fail for a subset of them. In this case,
+ * the corresponding elements of pPipelines will be set to
+    *    VK_NULL_HANDLE."
+ */
+ memset(pPipelines, 0, createInfoCount * sizeof(*pPipelines));
+
+ unsigned i = 0;
+ for (; i < createInfoCount; i++) {
+ VkResult result = vk_create_graphics_pipeline(device, cache,
+ &pCreateInfos[i],
+ pAllocator,
+ &pPipelines[i]);
+ if (result == VK_SUCCESS)
+ continue;
+
+ if (first_error_or_success == VK_SUCCESS)
+ first_error_or_success = result;
+
+ /* Bail out on the first error != VK_PIPELINE_COMPILE_REQUIRED as it
+       * is not obvious which error should be reported upon 2 different failures.
+ */
+ if (result != VK_PIPELINE_COMPILE_REQUIRED)
+ return result;
+
+ const VkPipelineCreateFlags2KHR flags =
+ vk_graphics_pipeline_create_flags(&pCreateInfos[i]);
+ if (flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
+ return result;
+ }
+
+ return first_error_or_success;
+}
+
+struct vk_compute_pipeline {
+ struct vk_pipeline base;
+ struct vk_shader *shader;
+};
+
+static void
+vk_compute_pipeline_destroy(struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ const VkAllocationCallbacks *pAllocator)
+{
+ struct vk_compute_pipeline *comp_pipeline =
+ container_of(pipeline, struct vk_compute_pipeline, base);
+
+ vk_shader_unref(device, comp_pipeline->shader);
+ vk_pipeline_free(device, pAllocator, pipeline);
+}
+
+static void
+vk_compute_pipeline_cmd_bind(struct vk_command_buffer *cmd_buffer,
+ struct vk_pipeline *pipeline)
+{
+ struct vk_device *device = cmd_buffer->base.device;
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+
+ struct vk_shader *shader = NULL;
+ if (pipeline != NULL) {
+ assert(pipeline->bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
+ struct vk_compute_pipeline *comp_pipeline =
+ container_of(pipeline, struct vk_compute_pipeline, base);
+
+ shader = comp_pipeline->shader;
+
+ cmd_buffer->pipeline_shader_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
+ } else {
+ cmd_buffer->pipeline_shader_stages &= ~VK_SHADER_STAGE_COMPUTE_BIT;
+ }
+
+ gl_shader_stage stage = MESA_SHADER_COMPUTE;
+ ops->cmd_bind_shaders(cmd_buffer, 1, &stage, &shader);
+}
+
+static VkResult
+vk_pipeline_compile_compute_stage(struct vk_device *device,
+ struct vk_pipeline_cache *cache,
+ struct vk_compute_pipeline *pipeline,
+ struct vk_pipeline_layout *pipeline_layout,
+ struct vk_pipeline_stage *stage,
+ bool *cache_hit)
+{
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+ VkResult result;
+
+ const VkPushConstantRange *push_range = NULL;
+ if (pipeline_layout != NULL) {
+ for (uint32_t r = 0; r < pipeline_layout->push_range_count; r++) {
+ if (pipeline_layout->push_ranges[r].stageFlags &
+ VK_SHADER_STAGE_COMPUTE_BIT) {
+ assert(push_range == NULL);
+ push_range = &pipeline_layout->push_ranges[r];
+ }
+ }
+ }
+
+ VkShaderCreateFlagsEXT shader_flags =
+ vk_pipeline_to_shader_flags(pipeline->base.flags, MESA_SHADER_COMPUTE);
+
+ struct mesa_blake3 blake3_ctx;
+ _mesa_blake3_init(&blake3_ctx);
+
+ _mesa_blake3_update(&blake3_ctx, stage->precomp->blake3,
+ sizeof(stage->precomp->blake3));
+
+ _mesa_blake3_update(&blake3_ctx, &shader_flags, sizeof(shader_flags));
+
+ for (uint32_t i = 0; i < pipeline_layout->set_count; i++) {
+ if (pipeline_layout->set_layouts[i] != NULL) {
+ _mesa_blake3_update(&blake3_ctx,
+ pipeline_layout->set_layouts[i]->blake3,
+ sizeof(pipeline_layout->set_layouts[i]->blake3));
+ }
+ }
+ if (push_range != NULL)
+ _mesa_blake3_update(&blake3_ctx, push_range, sizeof(*push_range));
+
+ struct vk_shader_pipeline_cache_key shader_key = {
+ .stage = MESA_SHADER_COMPUTE,
+ };
+ _mesa_blake3_final(&blake3_ctx, shader_key.blake3);
+
+ if (cache != NULL) {
+ struct vk_pipeline_cache_object *cache_obj =
+ vk_pipeline_cache_lookup_object(cache, &shader_key,
+ sizeof(shader_key),
+ &pipeline_shader_cache_ops,
+ cache_hit);
+ if (cache_obj != NULL) {
+ stage->shader = vk_shader_from_cache_obj(cache_obj);
+ return VK_SUCCESS;
+ }
+ }
+
+ if (pipeline->base.flags &
+ VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
+ return VK_PIPELINE_COMPILE_REQUIRED;
+
+ const struct nir_shader_compiler_options *nir_options =
+ ops->get_nir_options(device->physical, stage->stage,
+ &stage->precomp->rs);
+
+ nir_shader *nir = vk_pipeline_precomp_shader_get_nir(stage->precomp,
+ nir_options);
+ if (nir == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ /* vk_device_shader_ops::compile() consumes the NIR regardless of whether
+    * or not it succeeds and only generates a shader on success.  Once
+    * compile() returns, we own the shader but not the NIR in compile_info.
+ */
+ struct vk_shader_compile_info compile_info = {
+ .stage = stage->stage,
+ .flags = shader_flags,
+ .next_stage_mask = 0,
+ .nir = nir,
+ .robustness = &stage->precomp->rs,
+ .set_layout_count = pipeline_layout->set_count,
+ .set_layouts = pipeline_layout->set_layouts,
+ .push_constant_range_count = push_range != NULL,
+ .push_constant_ranges = push_range != NULL ? push_range : NULL,
+ };
+
+ struct vk_shader *shader;
+ result = ops->compile(device, 1, &compile_info, NULL,
+ &device->alloc, &shader);
+ if (result != VK_SUCCESS)
+ return result;
+
+ vk_shader_init_cache_obj(device, shader, &shader_key, sizeof(shader_key));
+
+ struct vk_pipeline_cache_object *cache_obj = &shader->pipeline.cache_obj;
+ if (cache != NULL)
+ cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
+
+ stage->shader = vk_shader_from_cache_obj(cache_obj);
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_compute_pipeline_get_executable_properties(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t *executable_count,
+ VkPipelineExecutablePropertiesKHR *properties)
+{
+ struct vk_compute_pipeline *comp_pipeline =
+ container_of(pipeline, struct vk_compute_pipeline, base);
+ struct vk_shader *shader = comp_pipeline->shader;
+
+ return shader->ops->get_executable_properties(device, shader,
+ executable_count,
+ properties);
+}
+
+static VkResult
+vk_compute_pipeline_get_executable_statistics(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t executable_index,
+ uint32_t *statistic_count,
+ VkPipelineExecutableStatisticKHR *statistics)
+{
+ struct vk_compute_pipeline *comp_pipeline =
+ container_of(pipeline, struct vk_compute_pipeline, base);
+ struct vk_shader *shader = comp_pipeline->shader;
+
+ return shader->ops->get_executable_statistics(device, shader,
+ executable_index,
+ statistic_count,
+ statistics);
+}
+
+static VkResult
+vk_compute_pipeline_get_internal_representations(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t executable_index,
+ uint32_t *internal_representation_count,
+ VkPipelineExecutableInternalRepresentationKHR* internal_representations)
+{
+ struct vk_compute_pipeline *comp_pipeline =
+ container_of(pipeline, struct vk_compute_pipeline, base);
+ struct vk_shader *shader = comp_pipeline->shader;
+
+ return shader->ops->get_executable_internal_representations(
+ device, shader, executable_index,
+ internal_representation_count, internal_representations);
+}
+
+static const struct vk_pipeline_ops vk_compute_pipeline_ops = {
+ .destroy = vk_compute_pipeline_destroy,
+ .get_executable_statistics = vk_compute_pipeline_get_executable_statistics,
+ .get_executable_properties = vk_compute_pipeline_get_executable_properties,
+ .get_internal_representations = vk_compute_pipeline_get_internal_representations,
+ .cmd_bind = vk_compute_pipeline_cmd_bind,
+};
+
+static VkResult
+vk_create_compute_pipeline(struct vk_device *device,
+ struct vk_pipeline_cache *cache,
+ const VkComputePipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipeline)
+{
+ VK_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ int64_t pipeline_start = os_time_get_nano();
+ VkResult result;
+
+ const VkPipelineCreateFlags2KHR pipeline_flags =
+ vk_compute_pipeline_create_flags(pCreateInfo);
+
+ const VkPipelineCreationFeedbackCreateInfo *feedback_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
+
+ struct vk_compute_pipeline *pipeline =
+ vk_pipeline_zalloc(device, &vk_compute_pipeline_ops,
+ VK_PIPELINE_BIND_POINT_COMPUTE,
+ pipeline_flags, pAllocator, sizeof(*pipeline));
+ if (pipeline == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ struct vk_pipeline_stage stage = {
+ .stage = MESA_SHADER_COMPUTE,
+ };
+ result = vk_pipeline_precompile_shader(device, cache, pipeline_flags,
+ pCreateInfo->pNext,
+ &pCreateInfo->stage,
+ &stage.precomp);
+ if (result != VK_SUCCESS)
+ goto fail_pipeline;
+
+   bool cache_hit = false;
+ result = vk_pipeline_compile_compute_stage(device, cache, pipeline,
+ pipeline_layout, &stage,
+ &cache_hit);
+ if (result != VK_SUCCESS)
+ goto fail_stage;
+
+ if (stage.precomp != NULL)
+ vk_pipeline_precomp_shader_unref(device, stage.precomp);
+ pipeline->shader = stage.shader;
+
+ const int64_t pipeline_end = os_time_get_nano();
+ if (feedback_info != NULL) {
+ VkPipelineCreationFeedback pipeline_feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
+ .duration = pipeline_end - pipeline_start,
+ };
+ if (cache_hit && cache != device->mem_cache) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
+ }
+
+ *feedback_info->pPipelineCreationFeedback = pipeline_feedback;
+ if (feedback_info->pipelineStageCreationFeedbackCount > 0) {
+ feedback_info->pPipelineStageCreationFeedbacks[0] =
+ pipeline_feedback;
+ }
+ }
+
+ *pPipeline = vk_pipeline_to_handle(&pipeline->base);
+
+ return VK_SUCCESS;
+
+fail_stage:
+ vk_pipeline_stage_finish(device, &stage);
+fail_pipeline:
+ vk_pipeline_free(device, pAllocator, &pipeline->base);
+
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateComputePipelines(VkDevice _device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
+ VkResult first_error_or_success = VK_SUCCESS;
+
+ /* Use implicit pipeline cache if there's no cache set */
+ if (!cache && device->mem_cache)
+ cache = device->mem_cache;
+
+ /* From the Vulkan 1.3.274 spec:
+ *
+ * "When attempting to create many pipelines in a single command, it is
+ * possible that creation may fail for a subset of them. In this case,
+ * the corresponding elements of pPipelines will be set to
+    *    VK_NULL_HANDLE."
+ */
+ memset(pPipelines, 0, createInfoCount * sizeof(*pPipelines));
+
+ unsigned i = 0;
+ for (; i < createInfoCount; i++) {
+ VkResult result = vk_create_compute_pipeline(device, cache,
+ &pCreateInfos[i],
+ pAllocator,
+ &pPipelines[i]);
+ if (result == VK_SUCCESS)
+ continue;
+
+ if (first_error_or_success == VK_SUCCESS)
+ first_error_or_success = result;
+
+ /* Bail out on the first error != VK_PIPELINE_COMPILE_REQUIRED as it
+       * is not obvious which error should be reported upon 2 different failures.
+ */
+ if (result != VK_PIPELINE_COMPILE_REQUIRED)
+ return result;
+
+ const VkPipelineCreateFlags2KHR flags =
+ vk_compute_pipeline_create_flags(&pCreateInfos[i]);
+ if (flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
+ return result;
+ }
+
+ return first_error_or_success;
+}
+
+void
+vk_cmd_unbind_pipelines_for_stages(struct vk_command_buffer *cmd_buffer,
+ VkShaderStageFlags stages)
+{
+ stages &= cmd_buffer->pipeline_shader_stages;
+
+ if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
+ vk_graphics_pipeline_cmd_bind(cmd_buffer, NULL);
+
+ if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
+ vk_compute_pipeline_cmd_bind(cmd_buffer, NULL);
+}
diff --git a/src/vulkan/runtime/vk_pipeline.h b/src/vulkan/runtime/vk_pipeline.h
new file mode 100644
index 00000000000..ed05d567a8f
--- /dev/null
+++ b/src/vulkan/runtime/vk_pipeline.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_PIPELINE_H
+#define VK_PIPELINE_H
+
+#include "vk_object.h"
+#include "vk_util.h"
+
+#include <stdbool.h>
+
+struct nir_shader;
+struct nir_shader_compiler_options;
+struct spirv_to_nir_options;
+struct vk_command_buffer;
+struct vk_device;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA \
+ (VkStructureType)1000290001
+
+#define VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA_cast \
+ VkPipelineShaderStageNirCreateInfoMESA
+
+typedef struct VkPipelineShaderStageNirCreateInfoMESA {
+ VkStructureType sType;
+ const void *pNext;
+ struct nir_shader *nir;
+} VkPipelineShaderStageNirCreateInfoMESA;
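+
+/* A minimal usage sketch (an assumption about typical driver usage, not a
+ * requirement of the runtime): chain this struct into
+ * VkPipelineShaderStageCreateInfo::pNext to hand an already-built nir_shader
+ * to the common pipeline code instead of a SPIR-V module:
+ *
+ *    const VkPipelineShaderStageNirCreateInfoMESA nir_info = {
+ *       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA,
+ *       .nir = nir,
+ *    };
+ *    const VkPipelineShaderStageCreateInfo stage_info = {
+ *       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ *       .pNext = &nir_info,
+ *       .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+ *       .pName = "main",
+ *    };
+ */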
+
+bool
+vk_pipeline_shader_stage_is_null(const VkPipelineShaderStageCreateInfo *info);
+
+VkResult
+vk_pipeline_shader_stage_to_nir(struct vk_device *device,
+ const VkPipelineShaderStageCreateInfo *info,
+ const struct spirv_to_nir_options *spirv_options,
+ const struct nir_shader_compiler_options *nir_options,
+ void *mem_ctx, struct nir_shader **nir_out);
+
+enum gl_subgroup_size
+vk_get_subgroup_size(uint32_t spirv_version,
+ gl_shader_stage stage,
+ const void *info_pNext,
+ bool allow_varying,
+ bool require_full);
+
+struct vk_pipeline_robustness_state {
+ VkPipelineRobustnessBufferBehaviorEXT storage_buffers;
+ VkPipelineRobustnessBufferBehaviorEXT uniform_buffers;
+ VkPipelineRobustnessBufferBehaviorEXT vertex_inputs;
+ VkPipelineRobustnessImageBehaviorEXT images;
+};
+
+/** Hash VkPipelineShaderStageCreateInfo info
+ *
+ * Returns the hash of a VkPipelineShaderStageCreateInfo:
+ * SHA1(info->module->sha1,
+ * info->pName,
+ * vk_stage_to_mesa_stage(info->stage),
+ * info->pSpecializationInfo)
+ *
+ * Can only be used if VkPipelineShaderStageCreateInfo::module is a
+ * vk_shader_module object.
+ */
+void
+vk_pipeline_hash_shader_stage(const VkPipelineShaderStageCreateInfo *info,
+ const struct vk_pipeline_robustness_state *rstate,
+ unsigned char *stage_sha1);
+
+void
+vk_pipeline_robustness_state_fill(const struct vk_device *device,
+ struct vk_pipeline_robustness_state *rs,
+ const void *pipeline_pNext,
+ const void *shader_stage_pNext);
+
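+/* The vk_*_pipeline_create_flags() helpers below return the effective
+ * VkPipelineCreateFlags2KHR for a pipeline create info: flags from a chained
+ * VkPipelineCreateFlags2CreateInfoKHR take precedence over the legacy flags
+ * field in the create info itself.
+ */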
+static inline VkPipelineCreateFlags2KHR
+vk_compute_pipeline_create_flags(const VkComputePipelineCreateInfo *info)
+{
+ const VkPipelineCreateFlags2CreateInfoKHR *flags2 =
+ vk_find_struct_const(info->pNext,
+ PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR);
+ if (flags2)
+ return flags2->flags;
+ else
+ return info->flags;
+}
+
+static inline VkPipelineCreateFlags2KHR
+vk_graphics_pipeline_create_flags(const VkGraphicsPipelineCreateInfo *info)
+{
+ const VkPipelineCreateFlags2CreateInfoKHR *flags2 =
+ vk_find_struct_const(info->pNext,
+ PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR);
+ if (flags2)
+ return flags2->flags;
+ else
+ return info->flags;
+}
+
+static inline VkPipelineCreateFlags2KHR
+vk_rt_pipeline_create_flags(const VkRayTracingPipelineCreateInfoKHR *info)
+{
+ const VkPipelineCreateFlags2CreateInfoKHR *flags2 =
+ vk_find_struct_const(info->pNext,
+ PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR);
+ if (flags2)
+ return flags2->flags;
+ else
+ return info->flags;
+}
+
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static inline VkPipelineCreateFlags2KHR
+vk_graph_pipeline_create_flags(const VkExecutionGraphPipelineCreateInfoAMDX *info)
+{
+ const VkPipelineCreateFlags2CreateInfoKHR *flags2 =
+ vk_find_struct_const(info->pNext,
+ PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR);
+ if (flags2)
+ return flags2->flags;
+ else
+ return info->flags;
+}
+#endif
+
+struct vk_pipeline_ops;
+
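+/* Base object for pipelines built on the common vk_shader runtime.  Concrete
+ * pipeline types (e.g. the graphics and compute pipelines in vk_pipeline.c)
+ * embed this as their first member and dispatch through vk_pipeline_ops.
+ */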
+struct vk_pipeline {
+ struct vk_object_base base;
+
+ const struct vk_pipeline_ops *ops;
+
+ VkPipelineBindPoint bind_point;
+ VkPipelineCreateFlags2KHR flags;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_pipeline, base, VkPipeline,
+ VK_OBJECT_TYPE_PIPELINE);
+
+struct vk_pipeline_ops {
+ void (*destroy)(struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ const VkAllocationCallbacks *pAllocator);
+
+ VkResult (*get_executable_properties)(struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t *executable_count,
+ VkPipelineExecutablePropertiesKHR *properties);
+
+ VkResult (*get_executable_statistics)(struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t executable_index,
+ uint32_t *statistic_count,
+ VkPipelineExecutableStatisticKHR *statistics);
+
+ VkResult (*get_internal_representations)(
+ struct vk_device *device,
+ struct vk_pipeline *pipeline,
+ uint32_t executable_index,
+ uint32_t *internal_representation_count,
+ VkPipelineExecutableInternalRepresentationKHR* internal_representations);
+
+ void (*cmd_bind)(struct vk_command_buffer *cmd_buffer,
+ struct vk_pipeline *pipeline);
+};
+
+void *vk_pipeline_zalloc(struct vk_device *device,
+ const struct vk_pipeline_ops *ops,
+ VkPipelineBindPoint bind_point,
+ VkPipelineCreateFlags2KHR flags,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+
+void vk_pipeline_free(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_pipeline *pipeline);
+
+void
+vk_cmd_unbind_pipelines_for_stages(struct vk_command_buffer *cmd_buffer,
+ VkShaderStageFlags stages);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_PIPELINE_H */
diff --git a/src/vulkan/runtime/vk_pipeline_cache.c b/src/vulkan/runtime/vk_pipeline_cache.c
new file mode 100644
index 00000000000..71471dd0239
--- /dev/null
+++ b/src/vulkan/runtime/vk_pipeline_cache.c
@@ -0,0 +1,852 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_pipeline_cache.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+
+#include "compiler/nir/nir_serialize.h"
+
+#include "util/blob.h"
+#include "util/u_debug.h"
+#include "util/disk_cache.h"
+#include "util/hash_table.h"
+#include "util/set.h"
+
+#define vk_pipeline_cache_log(cache, ...) \
+ if (cache->base.client_visible) \
+ vk_logw(VK_LOG_OBJS(cache), __VA_ARGS__)
+
+static bool
+vk_raw_data_cache_object_serialize(struct vk_pipeline_cache_object *object,
+ struct blob *blob)
+{
+ struct vk_raw_data_cache_object *data_obj =
+ container_of(object, struct vk_raw_data_cache_object, base);
+
+ blob_write_bytes(blob, data_obj->data, data_obj->data_size);
+
+ return true;
+}
+
+static struct vk_pipeline_cache_object *
+vk_raw_data_cache_object_deserialize(struct vk_pipeline_cache *cache,
+ const void *key_data,
+ size_t key_size,
+ struct blob_reader *blob)
+{
+ /* We consume the entire blob_reader. Each call to ops->deserialize()
+ * happens with a brand new blob reader for error checking anyway so we
+ * can assume the blob consumes the entire reader and we don't need to
+ * serialize the data size separately.
+ */
+ assert(blob->current < blob->end);
+ size_t data_size = blob->end - blob->current;
+ const void *data = blob_read_bytes(blob, data_size);
+
+ struct vk_raw_data_cache_object *data_obj =
+ vk_raw_data_cache_object_create(cache->base.device, key_data, key_size,
+ data, data_size);
+
+ return data_obj ? &data_obj->base : NULL;
+}
+
+static void
+vk_raw_data_cache_object_destroy(struct vk_device *device,
+ struct vk_pipeline_cache_object *object)
+{
+ struct vk_raw_data_cache_object *data_obj =
+ container_of(object, struct vk_raw_data_cache_object, base);
+
+ vk_free(&device->alloc, data_obj);
+}
+
+const struct vk_pipeline_cache_object_ops vk_raw_data_cache_object_ops = {
+ .serialize = vk_raw_data_cache_object_serialize,
+ .deserialize = vk_raw_data_cache_object_deserialize,
+ .destroy = vk_raw_data_cache_object_destroy,
+};
+
+struct vk_raw_data_cache_object *
+vk_raw_data_cache_object_create(struct vk_device *device,
+ const void *key_data, size_t key_size,
+ const void *data, size_t data_size)
+{
+ VK_MULTIALLOC(ma);
+ VK_MULTIALLOC_DECL(&ma, struct vk_raw_data_cache_object, data_obj, 1);
+ VK_MULTIALLOC_DECL_SIZE(&ma, char, obj_key_data, key_size);
+ VK_MULTIALLOC_DECL_SIZE(&ma, char, obj_data, data_size);
+
+ if (!vk_multialloc_alloc(&ma, &device->alloc,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
+ return NULL;
+
+ vk_pipeline_cache_object_init(device, &data_obj->base,
+ &vk_raw_data_cache_object_ops,
+ obj_key_data, key_size);
+ data_obj->data = obj_data;
+ data_obj->data_size = data_size;
+
+ memcpy(obj_key_data, key_data, key_size);
+ memcpy(obj_data, data, data_size);
+
+ return data_obj;
+}
+
+static bool
+object_keys_equal(const void *void_a, const void *void_b)
+{
+ const struct vk_pipeline_cache_object *a = void_a, *b = void_b;
+ if (a->key_size != b->key_size)
+ return false;
+
+ return memcmp(a->key_data, b->key_data, a->key_size) == 0;
+}
+
+static uint32_t
+object_key_hash(const void *void_object)
+{
+ const struct vk_pipeline_cache_object *object = void_object;
+ return _mesa_hash_data(object->key_data, object->key_size);
+}
+
+static void
+vk_pipeline_cache_lock(struct vk_pipeline_cache *cache)
+{
+ if (!(cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT))
+ simple_mtx_lock(&cache->lock);
+}
+
+static void
+vk_pipeline_cache_unlock(struct vk_pipeline_cache *cache)
+{
+ if (!(cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT))
+ simple_mtx_unlock(&cache->lock);
+}
+
+/* cache->lock must be held when calling */
+static void
+vk_pipeline_cache_remove_object(struct vk_pipeline_cache *cache,
+ uint32_t hash,
+ struct vk_pipeline_cache_object *object)
+{
+ struct set_entry *entry =
+ _mesa_set_search_pre_hashed(cache->object_cache, hash, object);
+ if (entry && entry->key == (const void *)object) {
+ /* Drop the reference owned by the cache */
+ if (!cache->weak_ref)
+ vk_pipeline_cache_object_unref(cache->base.device, object);
+
+ _mesa_set_remove(cache->object_cache, entry);
+ }
+}
+
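+/* A cache created with weak_ref set does not hold its own reference on the
+ * objects it contains.  Instead, each object records the cache as its
+ * weak_owner and, when the last strong reference goes away,
+ * vk_pipeline_cache_object_unref() removes it from that cache under the
+ * cache lock before destroying it.
+ */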
+static inline struct vk_pipeline_cache_object *
+vk_pipeline_cache_object_weak_ref(struct vk_pipeline_cache *cache,
+ struct vk_pipeline_cache_object *object)
+{
+ assert(!object->weak_owner);
+ p_atomic_set(&object->weak_owner, cache);
+ return object;
+}
+
+void
+vk_pipeline_cache_object_unref(struct vk_device *device, struct vk_pipeline_cache_object *object)
+{
+ assert(object && p_atomic_read(&object->ref_cnt) >= 1);
+
+ struct vk_pipeline_cache *weak_owner = p_atomic_read(&object->weak_owner);
+ if (!weak_owner) {
+ if (p_atomic_dec_zero(&object->ref_cnt))
+ object->ops->destroy(device, object);
+ } else {
+ vk_pipeline_cache_lock(weak_owner);
+ bool destroy = p_atomic_dec_zero(&object->ref_cnt);
+ if (destroy) {
+ uint32_t hash = object_key_hash(object);
+ vk_pipeline_cache_remove_object(weak_owner, hash, object);
+ }
+ vk_pipeline_cache_unlock(weak_owner);
+ if (destroy)
+ object->ops->destroy(device, object);
+ }
+}
+
+static bool
+vk_pipeline_cache_object_serialize(struct vk_pipeline_cache *cache,
+ struct vk_pipeline_cache_object *object,
+ struct blob *blob, uint32_t *data_size)
+{
+ if (object->ops->serialize == NULL)
+ return false;
+
+ assert(blob->size == align64(blob->size, VK_PIPELINE_CACHE_BLOB_ALIGN));
+ size_t start = blob->size;
+
+ /* Special case for if we're writing to a NULL blob (just to get the size)
+ * and we already know the data size of the allocation. This should make
+ * the first GetPipelineCacheData() call to get the data size faster in the
+ * common case where a bunch of our objects were loaded from a previous
+ * cache or where we've already serialized the cache once.
+ */
+ if (blob->data == NULL && blob->fixed_allocation) {
+ *data_size = p_atomic_read(&object->data_size);
+ if (*data_size > 0) {
+ blob_write_bytes(blob, NULL, *data_size);
+ return true;
+ }
+ }
+
+ if (!object->ops->serialize(object, blob)) {
+ vk_pipeline_cache_log(cache, "Failed to serialize pipeline cache object");
+ return false;
+ }
+
+ size_t size = blob->size - start;
+ if (size > UINT32_MAX) {
+ vk_pipeline_cache_log(cache, "Skipping giant (4 GiB or larger) object");
+ return false;
+ }
+
+ if (blob->out_of_memory) {
+ vk_pipeline_cache_log(cache,
+ "Insufficient memory for pipeline cache data");
+ return false;
+ }
+
+ *data_size = (uint32_t)size;
+ p_atomic_set(&object->data_size, *data_size);
+
+ return true;
+}
+
+static struct vk_pipeline_cache_object *
+vk_pipeline_cache_object_deserialize(struct vk_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size,
+ const void *data, size_t data_size,
+ const struct vk_pipeline_cache_object_ops *ops)
+{
+ if (ops == NULL)
+ ops = &vk_raw_data_cache_object_ops;
+
+ if (unlikely(ops->deserialize == NULL)) {
+ vk_pipeline_cache_log(cache,
+ "Pipeline cache object cannot be deserialized");
+ return NULL;
+ }
+
+ struct blob_reader reader;
+ blob_reader_init(&reader, data, data_size);
+
+ struct vk_pipeline_cache_object *object =
+ ops->deserialize(cache, key_data, key_size, &reader);
+
+ if (object == NULL)
+ return NULL;
+
+ assert(reader.current == reader.end && !reader.overrun);
+ assert(object->ops == ops);
+ assert(object->ref_cnt == 1);
+ assert(object->key_size == key_size);
+ assert(memcmp(object->key_data, key_data, key_size) == 0);
+
+ return object;
+}
+
+static struct vk_pipeline_cache_object *
+vk_pipeline_cache_insert_object(struct vk_pipeline_cache *cache,
+ struct vk_pipeline_cache_object *object)
+{
+ assert(object->ops != NULL);
+
+ if (cache->object_cache == NULL)
+ return object;
+
+ uint32_t hash = object_key_hash(object);
+
+ vk_pipeline_cache_lock(cache);
+ bool found = false;
+ struct set_entry *entry = _mesa_set_search_or_add_pre_hashed(
+ cache->object_cache, hash, object, &found);
+
+ struct vk_pipeline_cache_object *result = NULL;
+ /* add reference to either the found or inserted object */
+ if (found) {
+ struct vk_pipeline_cache_object *found_object = (void *)entry->key;
+ if (found_object->ops != object->ops) {
+ /* The found object in the cache isn't fully formed. Replace it. */
+ assert(!cache->weak_ref);
+ assert(found_object->ops == &vk_raw_data_cache_object_ops);
+ assert(object->ref_cnt == 1);
+ entry->key = object;
+ object = found_object;
+ }
+
+ result = vk_pipeline_cache_object_ref((void *)entry->key);
+ } else {
+ result = object;
+ if (!cache->weak_ref)
+ vk_pipeline_cache_object_ref(result);
+ else
+ vk_pipeline_cache_object_weak_ref(cache, result);
+ }
+ vk_pipeline_cache_unlock(cache);
+
+ if (found) {
+ vk_pipeline_cache_object_unref(cache->base.device, object);
+ }
+ return result;
+}
+
+struct vk_pipeline_cache_object *
+vk_pipeline_cache_lookup_object(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ const struct vk_pipeline_cache_object_ops *ops,
+ bool *cache_hit)
+{
+ assert(key_size <= UINT32_MAX);
+ assert(ops != NULL);
+
+ if (cache_hit != NULL)
+ *cache_hit = false;
+
+ struct vk_pipeline_cache_object key = {
+ .key_data = key_data,
+ .key_size = key_size,
+ };
+ uint32_t hash = object_key_hash(&key);
+
+ struct vk_pipeline_cache_object *object = NULL;
+
+ if (cache != NULL && cache->object_cache != NULL) {
+ vk_pipeline_cache_lock(cache);
+ struct set_entry *entry =
+ _mesa_set_search_pre_hashed(cache->object_cache, hash, &key);
+ if (entry) {
+ object = vk_pipeline_cache_object_ref((void *)entry->key);
+ if (cache_hit != NULL)
+ *cache_hit = true;
+ }
+ vk_pipeline_cache_unlock(cache);
+ }
+
+ if (object == NULL) {
+ struct disk_cache *disk_cache = cache->base.device->physical->disk_cache;
+ if (!cache->skip_disk_cache && disk_cache && cache->object_cache) {
+ cache_key cache_key;
+ disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
+
+ size_t data_size;
+ uint8_t *data = disk_cache_get(disk_cache, cache_key, &data_size);
+ if (data) {
+ object = vk_pipeline_cache_object_deserialize(cache,
+ key_data, key_size,
+ data, data_size,
+ ops);
+ free(data);
+ if (object != NULL) {
+ return vk_pipeline_cache_insert_object(cache, object);
+ }
+ }
+ }
+
+ /* No disk cache or not found in the disk cache */
+ return NULL;
+ }
+
+ if (object->ops == &vk_raw_data_cache_object_ops &&
+ ops != &vk_raw_data_cache_object_ops) {
+ /* The object isn't fully formed yet and we need to deserialize it into
+ * a real object before it can be used.
+ */
+ struct vk_raw_data_cache_object *data_obj =
+ container_of(object, struct vk_raw_data_cache_object, base);
+
+ struct vk_pipeline_cache_object *real_object =
+ vk_pipeline_cache_object_deserialize(cache,
+ data_obj->base.key_data,
+ data_obj->base.key_size,
+ data_obj->data,
+ data_obj->data_size, ops);
+ if (real_object == NULL) {
+ vk_pipeline_cache_log(cache,
+ "Deserializing pipeline cache object failed");
+
+ vk_pipeline_cache_lock(cache);
+ vk_pipeline_cache_remove_object(cache, hash, object);
+ vk_pipeline_cache_unlock(cache);
+ vk_pipeline_cache_object_unref(cache->base.device, object);
+ return NULL;
+ }
+
+ vk_pipeline_cache_object_unref(cache->base.device, object);
+ object = vk_pipeline_cache_insert_object(cache, real_object);
+ }
+
+ assert(object->ops == ops);
+
+ return object;
+}
+
+struct vk_pipeline_cache_object *
+vk_pipeline_cache_add_object(struct vk_pipeline_cache *cache,
+ struct vk_pipeline_cache_object *object)
+{
+ struct vk_pipeline_cache_object *inserted =
+ vk_pipeline_cache_insert_object(cache, object);
+
+ if (object == inserted) {
+ /* If it wasn't in the object cache, it might not be in the disk cache
+ * either. Better try and add it.
+ */
+
+ struct disk_cache *disk_cache = cache->base.device->physical->disk_cache;
+ if (!cache->skip_disk_cache && object->ops->serialize && disk_cache) {
+ struct blob blob;
+ blob_init(&blob);
+
+ if (object->ops->serialize(object, &blob) && !blob.out_of_memory) {
+ cache_key cache_key;
+ disk_cache_compute_key(disk_cache, object->key_data,
+ object->key_size, cache_key);
+
+ disk_cache_put(disk_cache, cache_key, blob.data, blob.size, NULL);
+ }
+
+ blob_finish(&blob);
+ }
+ }
+
+ return inserted;
+}
+
+struct vk_pipeline_cache_object *
+vk_pipeline_cache_create_and_insert_object(struct vk_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size,
+ const void *data, size_t data_size,
+ const struct vk_pipeline_cache_object_ops *ops)
+{
+ struct disk_cache *disk_cache = cache->base.device->physical->disk_cache;
+ if (!cache->skip_disk_cache && disk_cache) {
+ cache_key cache_key;
+ disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
+ disk_cache_put(disk_cache, cache_key, data, data_size, NULL);
+ }
+
+ struct vk_pipeline_cache_object *object =
+ vk_pipeline_cache_object_deserialize(cache, key_data, key_size, data,
+ data_size, ops);
+
+ if (object)
+ object = vk_pipeline_cache_insert_object(cache, object);
+
+ return object;
+}
+
+nir_shader *
+vk_pipeline_cache_lookup_nir(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ const struct nir_shader_compiler_options *nir_options,
+ bool *cache_hit, void *mem_ctx)
+{
+ struct vk_pipeline_cache_object *object =
+ vk_pipeline_cache_lookup_object(cache, key_data, key_size,
+ &vk_raw_data_cache_object_ops,
+ cache_hit);
+ if (object == NULL)
+ return NULL;
+
+ struct vk_raw_data_cache_object *data_obj =
+ container_of(object, struct vk_raw_data_cache_object, base);
+
+ struct blob_reader blob;
+ blob_reader_init(&blob, data_obj->data, data_obj->data_size);
+
+ nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
+ vk_pipeline_cache_object_unref(cache->base.device, object);
+
+ if (blob.overrun) {
+ ralloc_free(nir);
+ return NULL;
+ }
+
+ return nir;
+}
+
+void
+vk_pipeline_cache_add_nir(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ const nir_shader *nir)
+{
+ struct blob blob;
+ blob_init(&blob);
+
+ nir_serialize(&blob, nir, false);
+ if (blob.out_of_memory) {
+ vk_pipeline_cache_log(cache, "Ran out of memory serializing NIR shader");
+ blob_finish(&blob);
+ return;
+ }
+
+ struct vk_raw_data_cache_object *data_obj =
+ vk_raw_data_cache_object_create(cache->base.device,
+ key_data, key_size,
+ blob.data, blob.size);
+ blob_finish(&blob);
+
+ struct vk_pipeline_cache_object *cached =
+ vk_pipeline_cache_add_object(cache, &data_obj->base);
+ vk_pipeline_cache_object_unref(cache->base.device, cached);
+}
+
+static int32_t
+find_type_for_ops(const struct vk_physical_device *pdevice,
+ const struct vk_pipeline_cache_object_ops *ops)
+{
+ const struct vk_pipeline_cache_object_ops *const *import_ops =
+ pdevice->pipeline_cache_import_ops;
+
+ if (import_ops == NULL)
+ return -1;
+
+ for (int32_t i = 0; import_ops[i]; i++) {
+ if (import_ops[i] == ops)
+ return i;
+ }
+
+ return -1;
+}
+
+static const struct vk_pipeline_cache_object_ops *
+find_ops_for_type(const struct vk_physical_device *pdevice,
+ int32_t type)
+{
+ const struct vk_pipeline_cache_object_ops *const *import_ops =
+ pdevice->pipeline_cache_import_ops;
+
+ if (import_ops == NULL || type < 0)
+ return NULL;
+
+ return import_ops[type];
+}
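+
+/* Informal sketch of the client-visible cache data layout, as written by
+ * vk_common_GetPipelineCacheData() and read back by vk_pipeline_cache_load()
+ * below. This is not a stable ABI; the header comparison rejects data from
+ * other builds or devices.
+ *
+ *    struct vk_pipeline_cache_header header;
+ *    uint32_t object_count;
+ *    object_count times:
+ *       uint32_t type;       // index into pipeline_cache_import_ops, or -1
+ *       uint32_t key_size;
+ *       uint32_t data_size;
+ *       uint8_t  key_data[key_size];
+ *       <pad to VK_PIPELINE_CACHE_BLOB_ALIGN>
+ *       uint8_t  data[data_size];
+ */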
+
+static void
+vk_pipeline_cache_load(struct vk_pipeline_cache *cache,
+ const void *data, size_t size)
+{
+ struct blob_reader blob;
+ blob_reader_init(&blob, data, size);
+
+ struct vk_pipeline_cache_header header;
+ blob_copy_bytes(&blob, &header, sizeof(header));
+ uint32_t count = blob_read_uint32(&blob);
+ if (blob.overrun)
+ return;
+
+ if (memcmp(&header, &cache->header, sizeof(header)) != 0)
+ return;
+
+ for (uint32_t i = 0; i < count; i++) {
+ int32_t type = blob_read_uint32(&blob);
+ uint32_t key_size = blob_read_uint32(&blob);
+ uint32_t data_size = blob_read_uint32(&blob);
+ const void *key_data = blob_read_bytes(&blob, key_size);
+ blob_reader_align(&blob, VK_PIPELINE_CACHE_BLOB_ALIGN);
+ const void *data = blob_read_bytes(&blob, data_size);
+ if (blob.overrun)
+ break;
+
+ const struct vk_pipeline_cache_object_ops *ops =
+ find_ops_for_type(cache->base.device->physical, type);
+
+ struct vk_pipeline_cache_object *object =
+ vk_pipeline_cache_create_and_insert_object(cache, key_data, key_size,
+ data, data_size, ops);
+
+ if (object == NULL) {
+ vk_pipeline_cache_log(cache, "Failed to load pipeline cache object");
+ continue;
+ }
+
+ vk_pipeline_cache_object_unref(cache->base.device, object);
+ }
+}
+
+struct vk_pipeline_cache *
+vk_pipeline_cache_create(struct vk_device *device,
+ const struct vk_pipeline_cache_create_info *info,
+ const VkAllocationCallbacks *pAllocator)
+{
+ static const struct VkPipelineCacheCreateInfo default_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
+ };
+ struct vk_pipeline_cache *cache;
+
+ const struct VkPipelineCacheCreateInfo *pCreateInfo =
+ info->pCreateInfo != NULL ? info->pCreateInfo : &default_create_info;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
+
+ cache = vk_object_zalloc(device, pAllocator, sizeof(*cache),
+ VK_OBJECT_TYPE_PIPELINE_CACHE);
+ if (cache == NULL)
+ return NULL;
+
+ cache->flags = pCreateInfo->flags;
+ cache->weak_ref = info->weak_ref;
+#ifndef ENABLE_SHADER_CACHE
+ cache->skip_disk_cache = true;
+#else
+ cache->skip_disk_cache = info->skip_disk_cache;
+#endif
+
+ struct VkPhysicalDeviceProperties pdevice_props;
+ device->physical->dispatch_table.GetPhysicalDeviceProperties(
+ vk_physical_device_to_handle(device->physical), &pdevice_props);
+
+ cache->header = (struct vk_pipeline_cache_header) {
+ .header_size = sizeof(struct vk_pipeline_cache_header),
+ .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ .vendor_id = pdevice_props.vendorID,
+ .device_id = pdevice_props.deviceID,
+ };
+ memcpy(cache->header.uuid, pdevice_props.pipelineCacheUUID, VK_UUID_SIZE);
+
+ simple_mtx_init(&cache->lock, mtx_plain);
+
+ if (info->force_enable ||
+ debug_get_bool_option("VK_ENABLE_PIPELINE_CACHE", true)) {
+ cache->object_cache = _mesa_set_create(NULL, object_key_hash,
+ object_keys_equal);
+ }
+
+ if (cache->object_cache && pCreateInfo->initialDataSize > 0) {
+ vk_pipeline_cache_load(cache, pCreateInfo->pInitialData,
+ pCreateInfo->initialDataSize);
+ }
+
+ return cache;
+}
+
+void
+vk_pipeline_cache_destroy(struct vk_pipeline_cache *cache,
+ const VkAllocationCallbacks *pAllocator)
+{
+ if (cache->object_cache) {
+ if (!cache->weak_ref) {
+ set_foreach(cache->object_cache, entry) {
+ vk_pipeline_cache_object_unref(cache->base.device, (void *)entry->key);
+ }
+ } else {
+ assert(cache->object_cache->entries == 0);
+ }
+ _mesa_set_destroy(cache->object_cache, NULL);
+ }
+ simple_mtx_destroy(&cache->lock);
+ vk_object_free(cache->base.device, pAllocator, cache);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreatePipelineCache(VkDevice _device,
+ const VkPipelineCacheCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipelineCache *pPipelineCache)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_pipeline_cache *cache;
+
+ struct vk_pipeline_cache_create_info info = {
+ .pCreateInfo = pCreateInfo,
+ };
+ cache = vk_pipeline_cache_create(device, &info, pAllocator);
+ if (cache == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ *pPipelineCache = vk_pipeline_cache_to_handle(cache);
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyPipelineCache(VkDevice device,
+ VkPipelineCache pipelineCache,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
+
+ if (cache == NULL)
+ return;
+
+ assert(cache->base.device == vk_device_from_handle(device));
+ vk_pipeline_cache_destroy(cache, pAllocator);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetPipelineCacheData(VkDevice _device,
+ VkPipelineCache pipelineCache,
+ size_t *pDataSize,
+ void *pData)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
+
+ struct blob blob;
+ if (pData) {
+ blob_init_fixed(&blob, pData, *pDataSize);
+ } else {
+ blob_init_fixed(&blob, NULL, SIZE_MAX);
+ }
+
+ blob_write_bytes(&blob, &cache->header, sizeof(cache->header));
+
+ uint32_t count = 0;
+ intptr_t count_offset = blob_reserve_uint32(&blob);
+ if (count_offset < 0) {
+ *pDataSize = 0;
+ blob_finish(&blob);
+ return VK_INCOMPLETE;
+ }
+
+ vk_pipeline_cache_lock(cache);
+
+ VkResult result = VK_SUCCESS;
+ if (cache->object_cache != NULL) {
+ set_foreach(cache->object_cache, entry) {
+ struct vk_pipeline_cache_object *object = (void *)entry->key;
+
+ if (object->ops->serialize == NULL)
+ continue;
+
+ size_t blob_size_save = blob.size;
+
+ int32_t type = find_type_for_ops(device->physical, object->ops);
+ blob_write_uint32(&blob, type);
+ blob_write_uint32(&blob, object->key_size);
+ intptr_t data_size_resv = blob_reserve_uint32(&blob);
+ blob_write_bytes(&blob, object->key_data, object->key_size);
+
+ if (!blob_align(&blob, VK_PIPELINE_CACHE_BLOB_ALIGN)) {
+ result = VK_INCOMPLETE;
+ break;
+ }
+
+ uint32_t data_size;
+ if (!vk_pipeline_cache_object_serialize(cache, object,
+ &blob, &data_size)) {
+ blob.size = blob_size_save;
+ if (blob.out_of_memory) {
+ result = VK_INCOMPLETE;
+ break;
+ }
+
+ /* Failed for some other reason; keep going */
+ continue;
+ }
+
+      /* If the blob had run out of memory, vk_pipeline_cache_object_serialize
+       * would have failed above, so out_of_memory cannot be set here.
+       */
+ assert(!blob.out_of_memory);
+
+ assert(data_size_resv >= 0);
+ blob_overwrite_uint32(&blob, data_size_resv, data_size);
+
+ count++;
+ }
+ }
+
+ vk_pipeline_cache_unlock(cache);
+
+ blob_overwrite_uint32(&blob, count_offset, count);
+
+ *pDataSize = blob.size;
+
+ blob_finish(&blob);
+
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_MergePipelineCaches(VkDevice _device,
+ VkPipelineCache dstCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache *pSrcCaches)
+{
+ VK_FROM_HANDLE(vk_pipeline_cache, dst, dstCache);
+ VK_FROM_HANDLE(vk_device, device, _device);
+ assert(dst->base.device == device);
+ assert(!dst->weak_ref);
+
+ if (!dst->object_cache)
+ return VK_SUCCESS;
+
+ vk_pipeline_cache_lock(dst);
+
+ for (uint32_t i = 0; i < srcCacheCount; i++) {
+ VK_FROM_HANDLE(vk_pipeline_cache, src, pSrcCaches[i]);
+ assert(src->base.device == device);
+
+ if (!src->object_cache)
+ continue;
+
+ assert(src != dst);
+ if (src == dst)
+ continue;
+
+ vk_pipeline_cache_lock(src);
+
+ set_foreach(src->object_cache, src_entry) {
+ struct vk_pipeline_cache_object *src_object = (void *)src_entry->key;
+
+ bool found_in_dst = false;
+ struct set_entry *dst_entry =
+ _mesa_set_search_or_add_pre_hashed(dst->object_cache,
+ src_entry->hash,
+ src_object, &found_in_dst);
+ if (found_in_dst) {
+ struct vk_pipeline_cache_object *dst_object = (void *)dst_entry->key;
+ if (dst_object->ops == &vk_raw_data_cache_object_ops &&
+ src_object->ops != &vk_raw_data_cache_object_ops) {
+ /* Even though dst has the object, it only has the blob version
+ * which isn't as useful. Replace it with the real object.
+ */
+ vk_pipeline_cache_object_unref(device, dst_object);
+ dst_entry->key = vk_pipeline_cache_object_ref(src_object);
+ }
+ } else {
+ /* We inserted src_object in dst so it needs a reference */
+ assert(dst_entry->key == (const void *)src_object);
+ vk_pipeline_cache_object_ref(src_object);
+ }
+ }
+
+ vk_pipeline_cache_unlock(src);
+ }
+
+ vk_pipeline_cache_unlock(dst);
+
+ return VK_SUCCESS;
+}
diff --git a/src/vulkan/runtime/vk_pipeline_cache.h b/src/vulkan/runtime/vk_pipeline_cache.h
new file mode 100644
index 00000000000..993bbabb0be
--- /dev/null
+++ b/src/vulkan/runtime/vk_pipeline_cache.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_PIPELINE_CACHE_H
+#define VK_PIPELINE_CACHE_H
+
+#include "vk_object.h"
+#include "vk_util.h"
+
+#include "util/simple_mtx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* #include "util/blob.h" */
+struct blob;
+struct blob_reader;
+
+/* #include "util/set.h" */
+struct set;
+
+/* #include "compiler/nir/nir.h" */
+struct nir_shader;
+struct nir_shader_compiler_options;
+
+struct vk_pipeline_cache;
+struct vk_pipeline_cache_object;
+
+#define VK_PIPELINE_CACHE_BLOB_ALIGN 8
+
+struct vk_pipeline_cache_object_ops {
+ /** Writes this cache object to the given blob
+ *
+ * Because the cache works with both raw blob data and driver object data
+ * and can't always tell the difference between the two, we have to be very
+ * careful about alignments when [de]serializing. When serialize() is
+ * called, the blob will be aligned to VK_PIPELINE_CACHE_BLOB_ALIGN. The
+ * driver must be careful to not [de]serialize any data types which require
+ * a higher alignment. When deserialize() is called, the blob_reader is
+ * also guaranteed to be aligned to VK_PIPELINE_CACHE_BLOB_ALIGN.
+ *
+ * Returns true on success
+ *
+ * This function is optional. Objects without [de]serialization support
+ * will still be cached in memory but will not be placed in the disk cache
+ * and will not be exported to the client when vkGetPipelineCacheData() is
+ * called.
+ */
+ bool (*serialize)(struct vk_pipeline_cache_object *object,
+ struct blob *blob);
+
+ /** Constructs an object from cached data
+ *
+ * See serialize() for details about data alignment.
+ *
+ * Returns the created object on success, or NULL on failure.
+ *
+ * This function is optional.
+ */
+ struct vk_pipeline_cache_object *(*deserialize)(struct vk_pipeline_cache *cache,
+ const void *key_data,
+ size_t key_size,
+ struct blob_reader *blob);
+
+ /** Destroys the object
+ *
+ * Called when vk_pipeline_cache_object.ref_cnt hits 0.
+ */
+ void (*destroy)(struct vk_device *device,
+ struct vk_pipeline_cache_object *object);
+};
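+
+/* As a purely illustrative sketch (not part of the runtime), a driver that
+ * caches compiled shaders might wire up its ops table roughly like this;
+ * the drv_shader type and the deserialize/destroy helpers are hypothetical:
+ *
+ *    static bool
+ *    drv_shader_serialize(struct vk_pipeline_cache_object *object,
+ *                         struct blob *blob)
+ *    {
+ *       struct drv_shader *shader =
+ *          container_of(object, struct drv_shader, base);
+ *
+ *       blob_write_bytes(blob, shader->code, shader->code_size);
+ *       return !blob->out_of_memory;
+ *    }
+ *
+ *    const struct vk_pipeline_cache_object_ops drv_shader_ops = {
+ *       .serialize = drv_shader_serialize,
+ *       .deserialize = drv_shader_deserialize,
+ *       .destroy = drv_shader_destroy,
+ *    };
+ */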
+
+/** Base struct for cached objects
+ *
+ * A vk_pipeline_cache stores any number of vk_pipeline_cache_object's, each
+ * of which has an associated key of arbitrary size. Cached objects are
+ * reference counted so that they can exist in multiple caches (for example,
+ * when vkMergePipelineCaches() is called) and so that they can persist after
+ * the pipeline cache is destroyed. Each object also has a pointer to a
+ * vk_pipeline_cache_object_ops table which the pipeline cache uses to
+ * [de]serialize the object and clean it up when the reference count hits 0.
+ *
+ * The rest of the details of any given object are entirely up to the driver.
+ * The driver may even have multiple types of objects (distinguished by their
+ * vk_pipeline_cache_object_ops table) in the cache so long as it guarantees
+ * it never has two objects of different types with the same key.
+ */
+struct vk_pipeline_cache_object {
+ const struct vk_pipeline_cache_object_ops *ops;
+ struct vk_pipeline_cache *weak_owner;
+ uint32_t ref_cnt;
+
+ uint32_t data_size;
+ const void *key_data;
+ uint32_t key_size;
+};
+
+static inline void
+vk_pipeline_cache_object_init(struct vk_device *device,
+ struct vk_pipeline_cache_object *object,
+ const struct vk_pipeline_cache_object_ops *ops,
+ const void *key_data, uint32_t key_size)
+{
+ memset(object, 0, sizeof(*object));
+ object->ops = ops;
+ p_atomic_set(&object->ref_cnt, 1);
+ object->data_size = 0; /* Unknown */
+ object->key_data = key_data;
+ object->key_size = key_size;
+}
+
+static inline void
+vk_pipeline_cache_object_finish(struct vk_pipeline_cache_object *object)
+{
+ assert(p_atomic_read(&object->ref_cnt) <= 1);
+}
+
+static inline struct vk_pipeline_cache_object *
+vk_pipeline_cache_object_ref(struct vk_pipeline_cache_object *object)
+{
+ assert(object && p_atomic_read(&object->ref_cnt) >= 1);
+ p_atomic_inc(&object->ref_cnt);
+ return object;
+}
+
+void
+vk_pipeline_cache_object_unref(struct vk_device *device,
+ struct vk_pipeline_cache_object *object);
+
+/** A generic implementation of VkPipelineCache */
+struct vk_pipeline_cache {
+ struct vk_object_base base;
+
+ /* pCreateInfo::flags */
+ VkPipelineCacheCreateFlags flags;
+ bool weak_ref;
+ bool skip_disk_cache;
+
+ struct vk_pipeline_cache_header header;
+
+ /** Protects object_cache */
+ simple_mtx_t lock;
+
+ struct set *object_cache;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_pipeline_cache, base, VkPipelineCache,
+ VK_OBJECT_TYPE_PIPELINE_CACHE)
+
+struct vk_pipeline_cache_create_info {
+ /* The pCreateInfo for this pipeline cache, if any.
+ *
+ * For driver-internal caches, this is allowed to be NULL.
+ */
+ const VkPipelineCacheCreateInfo *pCreateInfo;
+
+ /** If true, ignore VK_ENABLE_PIPELINE_CACHE and enable anyway */
+ bool force_enable;
+
+ /** If true, the cache operates in weak reference mode.
+ *
+ * The weak reference mode is designed for device-global caches for the
+ * purpose of de-duplicating identical shaders and pipelines. In the weak
+ * reference mode, an object's reference count is not incremented when it is
+ * added to the cache. Therefore the object will be destroyed as soon as
+ * there are no external references to it, and the runtime will perform the
+ * necessary bookkeeping to remove the dead reference from this cache's table.
+ *
+ * As the weak reference mode is designed for driver-internal use, it has
+ * several limitations:
+ * - Merging against a weak reference mode cache is not supported.
+ * - Lazy deserialization from vk_raw_data_cache_object_ops is not supported.
+ * - An object can belong to at most one weak reference mode cache.
+ * - The cache must outlive the object, as the object will try to access its
+ * owner when it's destroyed.
+ */
+ bool weak_ref;
+
+ /** If true, do not attempt to use the disk cache */
+ bool skip_disk_cache;
+};
+
+struct vk_pipeline_cache *
+vk_pipeline_cache_create(struct vk_device *device,
+ const struct vk_pipeline_cache_create_info *info,
+ const VkAllocationCallbacks *pAllocator);
+void
+vk_pipeline_cache_destroy(struct vk_pipeline_cache *cache,
+ const VkAllocationCallbacks *pAllocator);
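+
+/* A minimal, hypothetical example of creating a driver-internal cache: pass
+ * a NULL pCreateInfo and set force_enable so the cache is used even when the
+ * client-visible pipeline cache is disabled via the environment.
+ *
+ *    struct vk_pipeline_cache_create_info info = {
+ *       .force_enable = true,
+ *    };
+ *    struct vk_pipeline_cache *internal_cache =
+ *       vk_pipeline_cache_create(device, &info, NULL);
+ */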
+
+/** Attempts to look up an object in the cache by key
+ *
+ * If an object is found in the cache matching the given key, *cache_hit is
+ * set to true and a reference to that object is returned.
+ *
+ * If the physical device has a disk_cache set, we attempt to look up any
+ * missing objects in the disk cache before declaring failure. If an object
+ * is found in the disk cache but not the in-memory cache, *cache_hit is set
+ * to false.
+ *
+ * The deserialization of pipeline cache objects found in the cache data
+ * provided via VkPipelineCacheCreateInfo::pInitialData happens during
+ * vk_pipeline_cache_lookup() rather than during vkCreatePipelineCache().
+ * Prior to the first vk_pipeline_cache_lookup() of a given object, it is
+ * stored as an internal raw data object with the same hash. This allows us
+ * to avoid any complex object type tagging in the serialized cache. It does,
+ * however, mean that drivers need to be careful to ensure that objects with
+ * different types (ops) have different keys.
+ *
+ * Returns a reference to the object, if found
+ */
+struct vk_pipeline_cache_object * MUST_CHECK
+vk_pipeline_cache_lookup_object(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ const struct vk_pipeline_cache_object_ops *ops,
+ bool *cache_hit);
+
+/** Adds an object to the pipeline cache
+ *
+ * This function adds the given object to the pipeline cache. We do not
+ * specify a key here because the key is part of the object. See also
+ * vk_pipeline_cache_object_init().
+ *
+ * This function consumes a reference to the object and returns a reference to
+ * the (possibly different) object in the cache. The intended usage pattern
+ * is as follows:
+ *
+ * key = compute_key();
+ * struct vk_pipeline_cache_object *object =
+ * vk_pipeline_cache_lookup_object(cache, &key, sizeof(key),
+ * &driver_type_ops, &cache_hit);
+ * if (object != NULL)
+ * return container_of(object, driver_type, base);
+ *
+ * object = do_compile();
+ * assert(object != NULL);
+ *
+ * object = vk_pipeline_cache_add_object(cache, object);
+ * return container_of(object, driver_type, base);
+ */
+struct vk_pipeline_cache_object * MUST_CHECK
+vk_pipeline_cache_add_object(struct vk_pipeline_cache *cache,
+ struct vk_pipeline_cache_object *object);
+
+/** Creates and inserts an object into the pipeline cache
+ *
+ * This function takes serialized data and emplaces the deserialized object
+ * into the pipeline cache. It is the responsibility of the caller to
+ * specify a deserialize() function that properly initializes the object.
+ *
+ * This function can be used to avoid an extra serialize() step for
+ * disk-cache insertion. For the intended usage pattern, see
+ * vk_pipeline_cache_add_object().
+ */
+struct vk_pipeline_cache_object *
+vk_pipeline_cache_create_and_insert_object(struct vk_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size,
+ const void *data, size_t data_size,
+ const struct vk_pipeline_cache_object_ops *ops);
+
+struct nir_shader *
+vk_pipeline_cache_lookup_nir(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ const struct nir_shader_compiler_options *nir_options,
+ bool *cache_hit, void *mem_ctx);
+void
+vk_pipeline_cache_add_nir(struct vk_pipeline_cache *cache,
+ const void *key_data, size_t key_size,
+ const struct nir_shader *nir);
+
+/** Specialized type of vk_pipeline_cache_object for raw data objects.
+ *
+ * This cache object implementation, together with vk_raw_data_cache_object_ops,
+ * can be used to cache plain objects as well as already serialized data.
+ */
+struct vk_raw_data_cache_object {
+ struct vk_pipeline_cache_object base;
+
+ const void *data;
+ size_t data_size;
+};
+
+struct vk_raw_data_cache_object *
+vk_raw_data_cache_object_create(struct vk_device *device,
+ const void *key_data, size_t key_size,
+ const void *data, size_t data_size);
+
+extern const struct vk_pipeline_cache_object_ops vk_raw_data_cache_object_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_PIPELINE_CACHE_H */
diff --git a/src/vulkan/runtime/vk_pipeline_layout.c b/src/vulkan/runtime/vk_pipeline_layout.c
new file mode 100644
index 00000000000..77653464835
--- /dev/null
+++ b/src/vulkan/runtime/vk_pipeline_layout.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_pipeline_layout.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_descriptor_set_layout.h"
+#include "vk_device.h"
+#include "vk_log.h"
+
+#include "util/mesa-sha1.h"
+
+static void
+vk_pipeline_layout_init(struct vk_device *device,
+ struct vk_pipeline_layout *layout,
+ const VkPipelineLayoutCreateInfo *pCreateInfo)
+{
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
+ assert(pCreateInfo->setLayoutCount <= MESA_VK_MAX_DESCRIPTOR_SETS);
+
+ vk_object_base_init(device, &layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT);
+
+ layout->ref_cnt = 1;
+ layout->create_flags = pCreateInfo->flags;
+ layout->set_count = pCreateInfo->setLayoutCount;
+ layout->destroy = vk_pipeline_layout_destroy;
+
+ for (uint32_t s = 0; s < pCreateInfo->setLayoutCount; s++) {
+ VK_FROM_HANDLE(vk_descriptor_set_layout, set_layout,
+ pCreateInfo->pSetLayouts[s]);
+
+ if (set_layout != NULL)
+ layout->set_layouts[s] = vk_descriptor_set_layout_ref(set_layout);
+ else
+ layout->set_layouts[s] = NULL;
+ }
+
+ assert(pCreateInfo->pushConstantRangeCount <=
+ MESA_VK_MAX_PUSH_CONSTANT_RANGES);
+ layout->push_range_count = pCreateInfo->pushConstantRangeCount;
+ for (uint32_t r = 0; r < pCreateInfo->pushConstantRangeCount; r++)
+ layout->push_ranges[r] = pCreateInfo->pPushConstantRanges[r];
+}
+
+void *
+vk_pipeline_layout_zalloc(struct vk_device *device, size_t size,
+ const VkPipelineLayoutCreateInfo *pCreateInfo)
+{
+ /* Because we're reference counting and lifetimes may not be what the
+ * client expects, these have to be allocated off the device and not as
+ * their own object.
+ */
+ struct vk_pipeline_layout *layout =
+ vk_zalloc(&device->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (layout == NULL)
+ return NULL;
+
+ vk_pipeline_layout_init(device, layout, pCreateInfo);
+ return layout;
+}
+
+void *
+vk_pipeline_layout_multizalloc(struct vk_device *device,
+ struct vk_multialloc *ma,
+ const VkPipelineLayoutCreateInfo *pCreateInfo)
+{
+ struct vk_pipeline_layout *layout =
+ vk_multialloc_zalloc(ma, &device->alloc,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (layout == NULL)
+ return NULL;
+
+ vk_pipeline_layout_init(device, layout, pCreateInfo);
+ return layout;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreatePipelineLayout(VkDevice _device,
+ const VkPipelineLayoutCreateInfo *pCreateInfo,
+ UNUSED const VkAllocationCallbacks *pAllocator,
+ VkPipelineLayout *pPipelineLayout)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ struct vk_pipeline_layout *layout =
+ vk_pipeline_layout_zalloc(device, sizeof(struct vk_pipeline_layout),
+ pCreateInfo);
+ if (layout == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *pPipelineLayout = vk_pipeline_layout_to_handle(layout);
+
+ return VK_SUCCESS;
+}
+
+void
+vk_pipeline_layout_destroy(struct vk_device *device,
+ struct vk_pipeline_layout *layout)
+{
+ assert(layout && layout->ref_cnt == 0);
+
+ for (uint32_t s = 0; s < layout->set_count; s++) {
+ if (layout->set_layouts[s] != NULL)
+ vk_descriptor_set_layout_unref(device, layout->set_layouts[s]);
+ }
+
+ vk_object_free(device, NULL, layout);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyPipelineLayout(VkDevice _device,
+ VkPipelineLayout pipelineLayout,
+ UNUSED const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_pipeline_layout, layout, pipelineLayout);
+
+ if (layout == NULL)
+ return;
+
+ vk_pipeline_layout_unref(device, layout);
+}
diff --git a/src/vulkan/runtime/vk_pipeline_layout.h b/src/vulkan/runtime/vk_pipeline_layout.h
new file mode 100644
index 00000000000..f71110c20a5
--- /dev/null
+++ b/src/vulkan/runtime/vk_pipeline_layout.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright © 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_PIPELINE_LAYOUT_H
+#define VK_PIPELINE_LAYOUT_H
+
+#include "vk_limits.h"
+#include "vk_object.h"
+
+#include "util/u_atomic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_descriptor_set_layout;
+
+struct vk_pipeline_layout {
+ struct vk_object_base base;
+
+ /** Reference count
+ *
+ * It's often necessary to store a pointer to the pipeline layout in the
+ * pipeline, or in other driver objects created from it, so that any
+ * entrypoint which has access to those objects also has the layout.
+ * However, the client may destroy its VkPipelineLayout before the objects
+ * created from it (with VK_KHR_maintenance4, immediately after creation),
+ * so we cannot simply borrow the client's handle.
+ *
+ * Because pipeline layout lifetimes and the lifetimes of the objects that
+ * use them are not guaranteed to coincide, we have to reference count if
+ * we're going to hold on to a pointer.
+ */
+ uint32_t ref_cnt;
+
+ /** VkPipelineLayoutCreateInfo::flags */
+ VkPipelineLayoutCreateFlagBits create_flags;
+
+ /** Number of descriptor set layouts in this pipeline layout */
+ uint32_t set_count;
+
+ /** Array of pointers to descriptor set layouts, indexed by set index */
+ struct vk_descriptor_set_layout *set_layouts[MESA_VK_MAX_DESCRIPTOR_SETS];
+
+ /** Number of push constant ranges in this pipeline layout */
+ uint32_t push_range_count;
+
+ /** Array of push constant ranges */
+ VkPushConstantRange push_ranges[MESA_VK_MAX_PUSH_CONSTANT_RANGES];
+
+ /** Destroy callback
+ *
+ * Will be initially set to vk_pipeline_layout_destroy() but may be set to
+ * a driver-specific callback which does driver-specific clean-up and then
+ * calls vk_pipeline_layout_destroy().
+ */
+ void (*destroy)(struct vk_device *device,
+ struct vk_pipeline_layout *layout);
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_pipeline_layout, base, VkPipelineLayout,
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT);
+
+void *
+vk_pipeline_layout_zalloc(struct vk_device *device, size_t size,
+ const VkPipelineLayoutCreateInfo *pCreateInfo);
+
+void *
+vk_pipeline_layout_multizalloc(struct vk_device *device,
+ struct vk_multialloc *ma,
+ const VkPipelineLayoutCreateInfo *pCreateInfo);
+
+void vk_pipeline_layout_destroy(struct vk_device *device,
+ struct vk_pipeline_layout *layout);
+
+static inline struct vk_pipeline_layout *
+vk_pipeline_layout_ref(struct vk_pipeline_layout *layout)
+{
+ assert(layout && layout->ref_cnt >= 1);
+ p_atomic_inc(&layout->ref_cnt);
+ return layout;
+}
+
+static inline void
+vk_pipeline_layout_unref(struct vk_device *device,
+ struct vk_pipeline_layout *layout)
+{
+ assert(layout && layout->ref_cnt >= 1);
+ if (p_atomic_dec_zero(&layout->ref_cnt))
+ layout->destroy(device, layout);
+}
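+
+/* A minimal sketch of the intended ref-counting pattern (the pipeline names
+ * are hypothetical): a pipeline takes a reference on its layout at creation
+ * time and drops it on destruction, so the layout data stays valid even if
+ * the client destroys the VkPipelineLayout first.
+ *
+ *    pipeline->layout = vk_pipeline_layout_ref(layout);
+ *    ...
+ *    vk_pipeline_layout_unref(device, pipeline->layout);
+ */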
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_PIPELINE_LAYOUT_H */
+
diff --git a/src/vulkan/runtime/vk_query_pool.c b/src/vulkan/runtime/vk_query_pool.c
new file mode 100644
index 00000000000..59294f414f3
--- /dev/null
+++ b/src/vulkan/runtime/vk_query_pool.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright © 2022 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_query_pool.h"
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+
+void
+vk_query_pool_init(struct vk_device *device,
+ struct vk_query_pool *query_pool,
+ const VkQueryPoolCreateInfo *pCreateInfo)
+{
+ vk_object_base_init(device, &query_pool->base, VK_OBJECT_TYPE_QUERY_POOL);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
+
+ query_pool->query_type = pCreateInfo->queryType;
+ query_pool->query_count = pCreateInfo->queryCount;
+ query_pool->pipeline_statistics =
+ pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS ?
+ pCreateInfo->pipelineStatistics : 0;
+}
+
+void *
+vk_query_pool_create(struct vk_device *device,
+ const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_query_pool *query_pool =
+ vk_zalloc2(&device->alloc, alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (query_pool == NULL)
+ return NULL;
+
+ vk_query_pool_init(device, query_pool, pCreateInfo);
+
+ return query_pool;
+}
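+
+/* A minimal, hypothetical sketch of how a driver embeds vk_query_pool in a
+ * larger driver-specific struct and allocates it with this helper:
+ *
+ *    struct drv_query_pool {
+ *       struct vk_query_pool vk;
+ *       // driver-specific fields
+ *    };
+ *
+ *    struct drv_query_pool *pool =
+ *       vk_query_pool_create(device, pCreateInfo, pAllocator,
+ *                            sizeof(struct drv_query_pool));
+ */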
+
+void
+vk_query_pool_finish(struct vk_query_pool *query_pool)
+{
+ vk_object_base_finish(&query_pool->base);
+}
+
+void
+vk_query_pool_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_query_pool *query_pool)
+{
+ vk_object_free(device, alloc, query_pool);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBeginQuery(VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdEndQuery(VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
+}
diff --git a/src/vulkan/util/vk_device.h b/src/vulkan/runtime/vk_query_pool.h
index e31688475c8..ee04eee9ff6 100644
--- a/src/vulkan/util/vk_device.h
+++ b/src/vulkan/runtime/vk_query_pool.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2020 Intel Corporation
+ * Copyright © 2022 Collabora, Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,54 +20,45 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef VK_DEVICE_H
-#define VK_DEVICE_H
+#ifndef VK_QUERY_POOL_H
+#define VK_QUERY_POOL_H
-#include "vk_dispatch_table.h"
-#include "vk_extensions.h"
#include "vk_object.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct vk_device {
+struct vk_query_pool {
struct vk_object_base base;
- VkAllocationCallbacks alloc;
- struct vk_physical_device *physical;
- struct vk_device_extension_table enabled_extensions;
+ /** VkQueryPoolCreateInfo::queryType */
+ VkQueryType query_type;
- struct vk_device_dispatch_table dispatch_table;
+ /** VkQueryPoolCreateInfo::queryCount */
+ uint32_t query_count;
- /* For VK_EXT_private_data */
- uint32_t private_data_next_index;
-
-#ifdef ANDROID
- mtx_t swapchain_private_mtx;
- struct hash_table *swapchain_private;
-#endif
+ /** VkQueryPoolCreateInfo::pipelineStatistics
+ *
+ * If query_type != VK_QUERY_TYPE_PIPELINE_STATISTICS, this will be zero.
+ */
+ VkQueryPipelineStatisticFlags pipeline_statistics;
};
-VK_DEFINE_HANDLE_CASTS(vk_device, base, VkDevice,
- VK_OBJECT_TYPE_DEVICE)
-
-VkResult MUST_CHECK
-vk_device_init(struct vk_device *device,
- struct vk_physical_device *physical_device,
- const struct vk_device_dispatch_table *dispatch_table,
- const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc);
-
-void
-vk_device_finish(struct vk_device *device);
-
-PFN_vkVoidFunction
-vk_device_get_proc_addr(const struct vk_device *device,
- const char *name);
+void vk_query_pool_init(struct vk_device *device,
+ struct vk_query_pool *query_pool,
+ const VkQueryPoolCreateInfo *pCreateInfo);
+void vk_query_pool_finish(struct vk_query_pool *query_pool);
+void *vk_query_pool_create(struct vk_device *device,
+ const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+void vk_query_pool_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_query_pool *query_pool);
#ifdef __cplusplus
}
#endif
-#endif /* VK_DEVICE_H */
+#endif /* VK_QUERY_POOL_H */
diff --git a/src/vulkan/runtime/vk_queue.c b/src/vulkan/runtime/vk_queue.c
new file mode 100644
index 00000000000..c8b55b58b0a
--- /dev/null
+++ b/src/vulkan/runtime/vk_queue.c
@@ -0,0 +1,1339 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_queue.h"
+
+#include "util/perf/cpu_trace.h"
+#include "util/u_debug.h"
+#include <inttypes.h>
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_command_pool.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_fence.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_semaphore.h"
+#include "vk_sync.h"
+#include "vk_sync_binary.h"
+#include "vk_sync_dummy.h"
+#include "vk_sync_timeline.h"
+#include "vk_util.h"
+
+#include "vulkan/wsi/wsi_common.h"
+
+static VkResult
+vk_queue_start_submit_thread(struct vk_queue *queue);
+
+VkResult
+vk_queue_init(struct vk_queue *queue, struct vk_device *device,
+ const VkDeviceQueueCreateInfo *pCreateInfo,
+ uint32_t index_in_family)
+{
+ VkResult result = VK_SUCCESS;
+ int ret;
+
+ memset(queue, 0, sizeof(*queue));
+ vk_object_base_init(device, &queue->base, VK_OBJECT_TYPE_QUEUE);
+
+ list_addtail(&queue->link, &device->queues);
+
+ queue->flags = pCreateInfo->flags;
+ queue->queue_family_index = pCreateInfo->queueFamilyIndex;
+
+ assert(index_in_family < pCreateInfo->queueCount);
+ queue->index_in_family = index_in_family;
+
+ queue->submit.mode = device->submit_mode;
+ if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND)
+ queue->submit.mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
+
+ list_inithead(&queue->submit.submits);
+
+ ret = mtx_init(&queue->submit.mutex, mtx_plain);
+ if (ret == thrd_error) {
+ result = vk_errorf(queue, VK_ERROR_UNKNOWN, "mtx_init failed");
+ goto fail_mutex;
+ }
+
+ ret = cnd_init(&queue->submit.push);
+ if (ret == thrd_error) {
+ result = vk_errorf(queue, VK_ERROR_UNKNOWN, "cnd_init failed");
+ goto fail_push;
+ }
+
+ ret = cnd_init(&queue->submit.pop);
+ if (ret == thrd_error) {
+ result = vk_errorf(queue, VK_ERROR_UNKNOWN, "cnd_init failed");
+ goto fail_pop;
+ }
+
+ if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED) {
+ result = vk_queue_start_submit_thread(queue);
+ if (result != VK_SUCCESS)
+ goto fail_thread;
+ }
+
+ util_dynarray_init(&queue->labels, NULL);
+ queue->region_begin = true;
+
+ return VK_SUCCESS;
+
+fail_thread:
+ cnd_destroy(&queue->submit.pop);
+fail_pop:
+ cnd_destroy(&queue->submit.push);
+fail_push:
+ mtx_destroy(&queue->submit.mutex);
+fail_mutex:
+ return result;
+}
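+
+/* A minimal, hypothetical sketch of driver usage: initialize the common
+ * queue state and hook up the driver's submit callback, which the runtime
+ * invokes from vk_queue_submit_final().
+ *
+ *    VkResult result = vk_queue_init(&queue->vk, &device->vk,
+ *                                    pCreateInfo, index_in_family);
+ *    if (result != VK_SUCCESS)
+ *       return result;
+ *
+ *    queue->vk.driver_submit = drv_queue_driver_submit;
+ */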
+
+VkResult
+_vk_queue_set_lost(struct vk_queue *queue,
+ const char *file, int line,
+ const char *msg, ...)
+{
+ if (queue->_lost.lost)
+ return VK_ERROR_DEVICE_LOST;
+
+ queue->_lost.lost = true;
+ queue->_lost.error_file = file;
+ queue->_lost.error_line = line;
+
+ va_list ap;
+ va_start(ap, msg);
+ vsnprintf(queue->_lost.error_msg, sizeof(queue->_lost.error_msg), msg, ap);
+ va_end(ap);
+
+ p_atomic_inc(&queue->base.device->_lost.lost);
+
+ if (debug_get_bool_option("MESA_VK_ABORT_ON_DEVICE_LOSS", false)) {
+ _vk_device_report_lost(queue->base.device);
+ abort();
+ }
+
+ return VK_ERROR_DEVICE_LOST;
+}
+
+static struct vk_queue_submit *
+vk_queue_submit_alloc(struct vk_queue *queue,
+ uint32_t wait_count,
+ uint32_t command_buffer_count,
+ uint32_t buffer_bind_count,
+ uint32_t image_opaque_bind_count,
+ uint32_t image_bind_count,
+ uint32_t bind_entry_count,
+ uint32_t image_bind_entry_count,
+ uint32_t signal_count,
+ VkSparseMemoryBind **bind_entries,
+ VkSparseImageMemoryBind **image_bind_entries)
+{
+ VK_MULTIALLOC(ma);
+ VK_MULTIALLOC_DECL(&ma, struct vk_queue_submit, submit, 1);
+ VK_MULTIALLOC_DECL(&ma, struct vk_sync_wait, waits, wait_count);
+ VK_MULTIALLOC_DECL(&ma, struct vk_command_buffer *, command_buffers,
+ command_buffer_count);
+ VK_MULTIALLOC_DECL(&ma, VkSparseBufferMemoryBindInfo, buffer_binds,
+ buffer_bind_count);
+ VK_MULTIALLOC_DECL(&ma, VkSparseImageOpaqueMemoryBindInfo,
+ image_opaque_binds, image_opaque_bind_count);
+ VK_MULTIALLOC_DECL(&ma, VkSparseImageMemoryBindInfo, image_binds,
+ image_bind_count);
+ VK_MULTIALLOC_DECL(&ma, VkSparseMemoryBind,
+ bind_entries_local, bind_entry_count);
+ VK_MULTIALLOC_DECL(&ma, VkSparseImageMemoryBind, image_bind_entries_local,
+ image_bind_entry_count);
+ VK_MULTIALLOC_DECL(&ma, struct vk_sync_signal, signals, signal_count);
+ VK_MULTIALLOC_DECL(&ma, struct vk_sync *, wait_temps, wait_count);
+
+ struct vk_sync_timeline_point **wait_points = NULL, **signal_points = NULL;
+ if (queue->base.device->timeline_mode == VK_DEVICE_TIMELINE_MODE_EMULATED) {
+ vk_multialloc_add(&ma, &wait_points,
+ struct vk_sync_timeline_point *, wait_count);
+ vk_multialloc_add(&ma, &signal_points,
+ struct vk_sync_timeline_point *, signal_count);
+ }
+
+ if (!vk_multialloc_zalloc(&ma, &queue->base.device->alloc,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
+ return NULL;
+
+ submit->wait_count = wait_count;
+ submit->command_buffer_count = command_buffer_count;
+ submit->signal_count = signal_count;
+ submit->buffer_bind_count = buffer_bind_count;
+ submit->image_opaque_bind_count = image_opaque_bind_count;
+ submit->image_bind_count = image_bind_count;
+
+ submit->waits = waits;
+ submit->command_buffers = command_buffers;
+ submit->signals = signals;
+ submit->buffer_binds = buffer_binds;
+ submit->image_opaque_binds = image_opaque_binds;
+ submit->image_binds = image_binds;
+ submit->_wait_temps = wait_temps;
+ submit->_wait_points = wait_points;
+ submit->_signal_points = signal_points;
+
+ if (bind_entries)
+ *bind_entries = bind_entries_local;
+
+ if (image_bind_entries)
+ *image_bind_entries = image_bind_entries_local;
+
+ return submit;
+}
+
+static void
+vk_queue_submit_cleanup(struct vk_queue *queue,
+ struct vk_queue_submit *submit)
+{
+ for (uint32_t i = 0; i < submit->wait_count; i++) {
+ if (submit->_wait_temps[i] != NULL)
+ vk_sync_destroy(queue->base.device, submit->_wait_temps[i]);
+ }
+
+ if (submit->_mem_signal_temp != NULL)
+ vk_sync_destroy(queue->base.device, submit->_mem_signal_temp);
+
+ if (submit->_wait_points != NULL) {
+ for (uint32_t i = 0; i < submit->wait_count; i++) {
+ if (unlikely(submit->_wait_points[i] != NULL)) {
+ vk_sync_timeline_point_release(queue->base.device,
+ submit->_wait_points[i]);
+ }
+ }
+ }
+
+ if (submit->_signal_points != NULL) {
+ for (uint32_t i = 0; i < submit->signal_count; i++) {
+ if (unlikely(submit->_signal_points[i] != NULL)) {
+ vk_sync_timeline_point_free(queue->base.device,
+ submit->_signal_points[i]);
+ }
+ }
+ }
+}
+
+static void
+vk_queue_submit_free(struct vk_queue *queue,
+ struct vk_queue_submit *submit)
+{
+ vk_free(&queue->base.device->alloc, submit);
+}
+
+static void
+vk_queue_submit_destroy(struct vk_queue *queue,
+ struct vk_queue_submit *submit)
+{
+ vk_queue_submit_cleanup(queue, submit);
+ vk_queue_submit_free(queue, submit);
+}
+
+static void
+vk_queue_push_submit(struct vk_queue *queue,
+ struct vk_queue_submit *submit)
+{
+ mtx_lock(&queue->submit.mutex);
+ list_addtail(&submit->link, &queue->submit.submits);
+ cnd_signal(&queue->submit.push);
+ mtx_unlock(&queue->submit.mutex);
+}
+
+static VkResult
+vk_queue_drain(struct vk_queue *queue)
+{
+ VkResult result = VK_SUCCESS;
+
+ mtx_lock(&queue->submit.mutex);
+ while (!list_is_empty(&queue->submit.submits)) {
+ if (vk_device_is_lost(queue->base.device)) {
+ result = VK_ERROR_DEVICE_LOST;
+ break;
+ }
+
+ int ret = cnd_wait(&queue->submit.pop, &queue->submit.mutex);
+ if (ret == thrd_error) {
+ result = vk_queue_set_lost(queue, "cnd_wait failed");
+ break;
+ }
+ }
+ mtx_unlock(&queue->submit.mutex);
+
+ return result;
+}
+
+static VkResult
+vk_queue_submit_final(struct vk_queue *queue,
+ struct vk_queue_submit *submit)
+{
+ VkResult result;
+
+ /* Now that we know all our time points exist, fetch the time point syncs
+ * from any vk_sync_timelines. While we're here, also compact down the
+ * list of waits to get rid of any trivial timeline waits.
+ */
+ uint32_t wait_count = 0;
+ for (uint32_t i = 0; i < submit->wait_count; i++) {
+ /* A timeline wait on 0 is always a no-op */
+ if ((submit->waits[i].sync->flags & VK_SYNC_IS_TIMELINE) &&
+ submit->waits[i].wait_value == 0)
+ continue;
+
+ /* Waits on dummy vk_syncs are no-ops */
+ if (vk_sync_type_is_dummy(submit->waits[i].sync->type)) {
+ /* We are about to lose track of this wait. If it has a temporary,
+ * we need to destroy it now, as vk_queue_submit_cleanup will not
+ * know about it.
+ */
+ if (submit->_wait_temps[i] != NULL) {
+ vk_sync_destroy(queue->base.device, submit->_wait_temps[i]);
+ submit->waits[i].sync = NULL;
+ }
+ continue;
+ }
+
+ /* For emulated timelines, we have a binary vk_sync associated with
+ * each time point and pass the binary vk_sync to the driver.
+ */
+ struct vk_sync_timeline *timeline =
+ vk_sync_as_timeline(submit->waits[i].sync);
+ if (timeline) {
+ assert(queue->base.device->timeline_mode ==
+ VK_DEVICE_TIMELINE_MODE_EMULATED);
+ result = vk_sync_timeline_get_point(queue->base.device, timeline,
+ submit->waits[i].wait_value,
+ &submit->_wait_points[i]);
+ if (unlikely(result != VK_SUCCESS)) {
+ result = vk_queue_set_lost(queue,
+ "Time point >= %"PRIu64" not found",
+ submit->waits[i].wait_value);
+ }
+
+ /* This can happen if the point is long past */
+ if (submit->_wait_points[i] == NULL)
+ continue;
+
+ submit->waits[i].sync = &submit->_wait_points[i]->sync;
+ submit->waits[i].wait_value = 0;
+ }
+
+ struct vk_sync_binary *binary =
+ vk_sync_as_binary(submit->waits[i].sync);
+ if (binary) {
+ submit->waits[i].sync = &binary->timeline;
+ submit->waits[i].wait_value = binary->next_point;
+ }
+
+ assert((submit->waits[i].sync->flags & VK_SYNC_IS_TIMELINE) ||
+ submit->waits[i].wait_value == 0);
+
+ assert(wait_count <= i);
+ if (wait_count < i) {
+ submit->waits[wait_count] = submit->waits[i];
+ submit->_wait_temps[wait_count] = submit->_wait_temps[i];
+ if (submit->_wait_points)
+ submit->_wait_points[wait_count] = submit->_wait_points[i];
+ }
+ wait_count++;
+ }
+
+ assert(wait_count <= submit->wait_count);
+ submit->wait_count = wait_count;
+
+ for (uint32_t i = 0; i < submit->signal_count; i++) {
+ assert((submit->signals[i].sync->flags & VK_SYNC_IS_TIMELINE) ||
+ submit->signals[i].signal_value == 0);
+
+ struct vk_sync_binary *binary =
+ vk_sync_as_binary(submit->signals[i].sync);
+ if (binary) {
+ submit->signals[i].sync = &binary->timeline;
+ submit->signals[i].signal_value = ++binary->next_point;
+ }
+ }
+
+ result = queue->driver_submit(queue, submit);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ if (submit->_signal_points) {
+ for (uint32_t i = 0; i < submit->signal_count; i++) {
+ if (submit->_signal_points[i] == NULL)
+ continue;
+
+ vk_sync_timeline_point_install(queue->base.device,
+ submit->_signal_points[i]);
+ submit->_signal_points[i] = NULL;
+ }
+ }
+
+ return VK_SUCCESS;
+}
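+
+/* Flushes the deferred submit list for this queue.
+ *
+ * In VK_QUEUE_SUBMIT_MODE_DEFERRED, submits are queued on the vk_queue and
+ * only handed to the driver once all of their wait points have at least
+ * materialized (VK_SYNC_WAIT_PENDING). This helper walks the list in order
+ * and stops at the first submit that is not yet ready.
+ */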
+
+VkResult
+vk_queue_flush(struct vk_queue *queue, uint32_t *submit_count_out)
+{
+ VkResult result = VK_SUCCESS;
+
+ assert(queue->submit.mode == VK_QUEUE_SUBMIT_MODE_DEFERRED);
+
+ mtx_lock(&queue->submit.mutex);
+
+ uint32_t submit_count = 0;
+ while (!list_is_empty(&queue->submit.submits)) {
+ struct vk_queue_submit *submit =
+ list_first_entry(&queue->submit.submits,
+ struct vk_queue_submit, link);
+
+ for (uint32_t i = 0; i < submit->wait_count; i++) {
+ /* In emulated timeline mode, only emulated timelines are allowed */
+ if (!vk_sync_type_is_vk_sync_timeline(submit->waits[i].sync->type)) {
+ assert(!(submit->waits[i].sync->flags & VK_SYNC_IS_TIMELINE));
+ continue;
+ }
+
+ result = vk_sync_wait(queue->base.device,
+ submit->waits[i].sync,
+ submit->waits[i].wait_value,
+ VK_SYNC_WAIT_PENDING, 0);
+ if (result == VK_TIMEOUT) {
+ /* This one's not ready yet */
+ result = VK_SUCCESS;
+ goto done;
+ } else if (result != VK_SUCCESS) {
+ result = vk_queue_set_lost(queue, "Wait for time points failed");
+ goto done;
+ }
+ }
+
+ result = vk_queue_submit_final(queue, submit);
+ if (unlikely(result != VK_SUCCESS)) {
+ result = vk_queue_set_lost(queue, "queue::driver_submit failed");
+ goto done;
+ }
+
+ submit_count++;
+
+ list_del(&submit->link);
+
+ vk_queue_submit_destroy(queue, submit);
+ }
+
+done:
+ if (submit_count)
+ cnd_broadcast(&queue->submit.pop);
+
+ mtx_unlock(&queue->submit.mutex);
+
+ if (submit_count_out)
+ *submit_count_out = submit_count;
+
+ return result;
+}
+
+static int
+vk_queue_submit_thread_func(void *_data)
+{
+ struct vk_queue *queue = _data;
+ VkResult result;
+
+ mtx_lock(&queue->submit.mutex);
+
+ while (queue->submit.thread_run) {
+ if (list_is_empty(&queue->submit.submits)) {
+ int ret = cnd_wait(&queue->submit.push, &queue->submit.mutex);
+ if (ret == thrd_error) {
+ mtx_unlock(&queue->submit.mutex);
+ vk_queue_set_lost(queue, "cnd_wait failed");
+ return 1;
+ }
+ continue;
+ }
+
+ struct vk_queue_submit *submit =
+ list_first_entry(&queue->submit.submits,
+ struct vk_queue_submit, link);
+
+ /* Drop the lock while we wait */
+ mtx_unlock(&queue->submit.mutex);
+
+ result = vk_sync_wait_many(queue->base.device,
+ submit->wait_count, submit->waits,
+ VK_SYNC_WAIT_PENDING, UINT64_MAX);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_queue_set_lost(queue, "Wait for time points failed");
+ return 1;
+ }
+
+ result = vk_queue_submit_final(queue, submit);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_queue_set_lost(queue, "queue::driver_submit failed");
+ return 1;
+ }
+
+ /* Do all our cleanup of individual fences etc. outside the lock.
+ * We can't actually remove it from the list yet. We have to do
+ * that under the lock.
+ */
+ vk_queue_submit_cleanup(queue, submit);
+
+ mtx_lock(&queue->submit.mutex);
+
+ /* Only remove the submit from the list and free it after
+ * vk_queue_submit_final() has completed. This ensures that, when
+ * vk_queue_drain() completes, there are no more pending jobs.
+ */
+ list_del(&submit->link);
+ vk_queue_submit_free(queue, submit);
+
+ cnd_broadcast(&queue->submit.pop);
+ }
+
+ mtx_unlock(&queue->submit.mutex);
+ return 0;
+}
+
+static VkResult
+vk_queue_start_submit_thread(struct vk_queue *queue)
+{
+ int ret;
+
+ mtx_lock(&queue->submit.mutex);
+ queue->submit.thread_run = true;
+ mtx_unlock(&queue->submit.mutex);
+
+ ret = thrd_create(&queue->submit.thread,
+ vk_queue_submit_thread_func,
+ queue);
+ if (ret == thrd_error)
+ return vk_errorf(queue, VK_ERROR_UNKNOWN, "thrd_create failed");
+
+ return VK_SUCCESS;
+}
+
+static void
+vk_queue_stop_submit_thread(struct vk_queue *queue)
+{
+ vk_queue_drain(queue);
+
+ /* Kick the thread to disable it */
+ mtx_lock(&queue->submit.mutex);
+ queue->submit.thread_run = false;
+ cnd_signal(&queue->submit.push);
+ mtx_unlock(&queue->submit.mutex);
+
+ thrd_join(queue->submit.thread, NULL);
+
+ assert(list_is_empty(&queue->submit.submits));
+ queue->submit.mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
+}
+
+VkResult
+vk_queue_enable_submit_thread(struct vk_queue *queue)
+{
+ assert(vk_device_supports_threaded_submit(queue->base.device));
+
+ if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED)
+ return VK_SUCCESS;
+
+ VkResult result = vk_queue_start_submit_thread(queue);
+ if (result != VK_SUCCESS)
+ return result;
+
+ queue->submit.mode = VK_QUEUE_SUBMIT_MODE_THREADED;
+
+ return VK_SUCCESS;
+}
+
+struct vulkan_submit_info {
+ const void *pNext;
+
+ uint32_t command_buffer_count;
+ const VkCommandBufferSubmitInfo *command_buffers;
+
+ uint32_t wait_count;
+ const VkSemaphoreSubmitInfo *waits;
+
+ uint32_t signal_count;
+ const VkSemaphoreSubmitInfo *signals;
+
+ uint32_t buffer_bind_count;
+ const VkSparseBufferMemoryBindInfo *buffer_binds;
+
+ uint32_t image_opaque_bind_count;
+ const VkSparseImageOpaqueMemoryBindInfo *image_opaque_binds;
+
+ uint32_t image_bind_count;
+ const VkSparseImageMemoryBindInfo *image_binds;
+
+ struct vk_fence *fence;
+};
+
+static VkResult
+vk_queue_submit(struct vk_queue *queue,
+ const struct vulkan_submit_info *info)
+{
+ struct vk_device *device = queue->base.device;
+ VkResult result;
+ uint32_t sparse_memory_bind_entry_count = 0;
+ uint32_t sparse_memory_image_bind_entry_count = 0;
+ VkSparseMemoryBind *sparse_memory_bind_entries = NULL;
+ VkSparseImageMemoryBind *sparse_memory_image_bind_entries = NULL;
+
+ for (uint32_t i = 0; i < info->buffer_bind_count; ++i)
+ sparse_memory_bind_entry_count += info->buffer_binds[i].bindCount;
+
+ for (uint32_t i = 0; i < info->image_opaque_bind_count; ++i)
+ sparse_memory_bind_entry_count += info->image_opaque_binds[i].bindCount;
+
+ for (uint32_t i = 0; i < info->image_bind_count; ++i)
+ sparse_memory_image_bind_entry_count += info->image_binds[i].bindCount;
+
+ const struct wsi_memory_signal_submit_info *mem_signal =
+ vk_find_struct_const(info->pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
+ bool signal_mem_sync = mem_signal != NULL &&
+ mem_signal->memory != VK_NULL_HANDLE &&
+ queue->base.device->create_sync_for_memory != NULL;
+
+ struct vk_queue_submit *submit =
+ vk_queue_submit_alloc(queue, info->wait_count,
+ info->command_buffer_count,
+ info->buffer_bind_count,
+ info->image_opaque_bind_count,
+ info->image_bind_count,
+ sparse_memory_bind_entry_count,
+ sparse_memory_image_bind_entry_count,
+ info->signal_count +
+ signal_mem_sync + (info->fence != NULL),
+ &sparse_memory_bind_entries,
+ &sparse_memory_image_bind_entries);
+ if (unlikely(submit == NULL))
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "If the VkSubmitInfo::pNext chain does not include this structure,
+ * the batch defaults to use counter pass index 0."
+ */
+ const VkPerformanceQuerySubmitInfoKHR *perf_info =
+ vk_find_struct_const(info->pNext, PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
+ submit->perf_pass_index = perf_info ? perf_info->counterPassIndex : 0;
+
+ bool has_binary_permanent_semaphore_wait = false;
+ for (uint32_t i = 0; i < info->wait_count; i++) {
+ VK_FROM_HANDLE(vk_semaphore, semaphore,
+ info->waits[i].semaphore);
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Applications can import a semaphore payload into an existing
+ * semaphore using an external semaphore handle. The effects of the
+ * import operation will be either temporary or permanent, as
+ * specified by the application. If the import is temporary, the
+ * implementation must restore the semaphore to its prior permanent
+ * state after submitting the next semaphore wait operation."
+ *
+ * and
+ *
+ * VUID-VkImportSemaphoreFdInfoKHR-flags-03323
+ *
+ * "If flags contains VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, the
+ * VkSemaphoreTypeCreateInfo::semaphoreType field of the semaphore
+ * from which handle or name was exported must not be
+ * VK_SEMAPHORE_TYPE_TIMELINE"
+ */
+ struct vk_sync *sync;
+ if (semaphore->temporary) {
+ assert(semaphore->type == VK_SEMAPHORE_TYPE_BINARY);
+ sync = submit->_wait_temps[i] = semaphore->temporary;
+ semaphore->temporary = NULL;
+ } else {
+ if (semaphore->type == VK_SEMAPHORE_TYPE_BINARY) {
+ if (vk_device_supports_threaded_submit(device))
+ assert(semaphore->permanent.type->move);
+ has_binary_permanent_semaphore_wait = true;
+ }
+
+ sync = &semaphore->permanent;
+ }
+
+ uint64_t wait_value = semaphore->type == VK_SEMAPHORE_TYPE_TIMELINE ?
+ info->waits[i].value : 0;
+
+ submit->waits[i] = (struct vk_sync_wait) {
+ .sync = sync,
+ .stage_mask = info->waits[i].stageMask,
+ .wait_value = wait_value,
+ };
+ }
+
+ for (uint32_t i = 0; i < info->command_buffer_count; i++) {
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer,
+ info->command_buffers[i].commandBuffer);
+ assert(info->command_buffers[i].deviceMask == 0 ||
+ info->command_buffers[i].deviceMask == 1);
+ assert(cmd_buffer->pool->queue_family_index == queue->queue_family_index);
+
+ /* Some drivers don't call vk_command_buffer_begin/end() yet and, for
+ * those, we'll see the INITIAL state here. However, this is enough to catch
+ * command buffers which get submitted without calling EndCommandBuffer.
+ */
+ assert(cmd_buffer->state == MESA_VK_COMMAND_BUFFER_STATE_INITIAL ||
+ cmd_buffer->state == MESA_VK_COMMAND_BUFFER_STATE_EXECUTABLE ||
+ cmd_buffer->state == MESA_VK_COMMAND_BUFFER_STATE_PENDING);
+ cmd_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_PENDING;
+
+ submit->command_buffers[i] = cmd_buffer;
+ }
+
+ sparse_memory_bind_entry_count = 0;
+ sparse_memory_image_bind_entry_count = 0;
+
+ if (info->buffer_binds)
+ typed_memcpy(submit->buffer_binds, info->buffer_binds, info->buffer_bind_count);
+
+ for (uint32_t i = 0; i < info->buffer_bind_count; ++i) {
+ VkSparseMemoryBind *binds = sparse_memory_bind_entries +
+ sparse_memory_bind_entry_count;
+ submit->buffer_binds[i].pBinds = binds;
+ typed_memcpy(binds, info->buffer_binds[i].pBinds,
+ info->buffer_binds[i].bindCount);
+
+ sparse_memory_bind_entry_count += info->buffer_binds[i].bindCount;
+ }
+
+ if (info->image_opaque_binds)
+ typed_memcpy(submit->image_opaque_binds, info->image_opaque_binds,
+ info->image_opaque_bind_count);
+
+ for (uint32_t i = 0; i < info->image_opaque_bind_count; ++i) {
+ VkSparseMemoryBind *binds = sparse_memory_bind_entries +
+ sparse_memory_bind_entry_count;
+ submit->image_opaque_binds[i].pBinds = binds;
+ typed_memcpy(binds, info->image_opaque_binds[i].pBinds,
+ info->image_opaque_binds[i].bindCount);
+
+ sparse_memory_bind_entry_count += info->image_opaque_binds[i].bindCount;
+ }
+
+ if (info->image_binds)
+ typed_memcpy(submit->image_binds, info->image_binds, info->image_bind_count);
+
+ for (uint32_t i = 0; i < info->image_bind_count; ++i) {
+ VkSparseImageMemoryBind *binds = sparse_memory_image_bind_entries +
+ sparse_memory_image_bind_entry_count;
+ submit->image_binds[i].pBinds = binds;
+ typed_memcpy(binds, info->image_binds[i].pBinds,
+ info->image_binds[i].bindCount);
+
+ sparse_memory_image_bind_entry_count += info->image_binds[i].bindCount;
+ }
+
+ for (uint32_t i = 0; i < info->signal_count; i++) {
+ VK_FROM_HANDLE(vk_semaphore, semaphore,
+ info->signals[i].semaphore);
+
+ struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
+ uint64_t signal_value = info->signals[i].value;
+ if (semaphore->type == VK_SEMAPHORE_TYPE_TIMELINE) {
+ if (signal_value == 0) {
+ result = vk_queue_set_lost(queue,
+ "Tried to signal a timeline with value 0");
+ goto fail;
+ }
+ } else {
+ signal_value = 0;
+ }
+
+ /* For emulated timelines, we need to associate a binary vk_sync with
+ * each time point and pass the binary vk_sync to the driver. We could
+ * do this in vk_queue_submit_final but it might require doing memory
+ * allocation and we don't want to add extra failure paths there.
+ * Instead, allocate and replace the driver-visible vk_sync now and
+ * we'll insert it into the timeline in vk_queue_submit_final. The
+ * insert step is guaranteed to not fail.
+ */
+ struct vk_sync_timeline *timeline = vk_sync_as_timeline(sync);
+ if (timeline) {
+ assert(queue->base.device->timeline_mode ==
+ VK_DEVICE_TIMELINE_MODE_EMULATED);
+ result = vk_sync_timeline_alloc_point(queue->base.device, timeline,
+ signal_value,
+ &submit->_signal_points[i]);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+
+ sync = &submit->_signal_points[i]->sync;
+ signal_value = 0;
+ }
+
+ submit->signals[i] = (struct vk_sync_signal) {
+ .sync = sync,
+ .stage_mask = info->signals[i].stageMask,
+ .signal_value = signal_value,
+ };
+ }
+
+ uint32_t signal_count = info->signal_count;
+ if (signal_mem_sync) {
+ struct vk_sync *mem_sync;
+ result = queue->base.device->create_sync_for_memory(queue->base.device,
+ mem_signal->memory,
+ true, &mem_sync);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+
+ submit->_mem_signal_temp = mem_sync;
+
+ assert(submit->signals[signal_count].sync == NULL);
+ submit->signals[signal_count++] = (struct vk_sync_signal) {
+ .sync = mem_sync,
+ .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
+ };
+ }
+
+ if (info->fence != NULL) {
+ assert(submit->signals[signal_count].sync == NULL);
+ submit->signals[signal_count++] = (struct vk_sync_signal) {
+ .sync = vk_fence_get_active_sync(info->fence),
+ .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
+ };
+ }
+
+ assert(signal_count == submit->signal_count);
+
+ /* If this device supports threaded submit, we can't rely on the client
+ * ordering requirements to ensure submits happen in the right order. Even
+ * if this queue doesn't have a submit thread, another queue (possibly in a
+ * different process) may and that means our dependencies may not have
+ * been submitted to the kernel yet. Do a quick zero-timeout WAIT_PENDING
+ * on all the wait semaphores to see if we need to start up our own thread.
+ */
+ if (device->submit_mode == VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND &&
+ queue->submit.mode != VK_QUEUE_SUBMIT_MODE_THREADED) {
+ assert(queue->submit.mode == VK_QUEUE_SUBMIT_MODE_IMMEDIATE);
+
+ result = vk_sync_wait_many(queue->base.device,
+ submit->wait_count, submit->waits,
+ VK_SYNC_WAIT_PENDING, 0);
+ if (result == VK_TIMEOUT)
+ result = vk_queue_enable_submit_thread(queue);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+ }
+
+ switch (queue->submit.mode) {
+ case VK_QUEUE_SUBMIT_MODE_IMMEDIATE:
+ result = vk_queue_submit_final(queue, submit);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+
+ /* If threaded submit is possible on this device, we need to ensure that
+ * binary semaphore payloads get reset so that any other threads can
+ * properly wait on them for dependency checking. Because we don't
+ * currently have a submit thread, we can directly reset those binary
+ * semaphore payloads.
+ *
+ * If the vk_sync is in our signal set, we can consider it to have
+ * been both reset and signaled by vk_queue_submit_final(). A reset in
+ * this case would be wrong because it would throw away our signal
+ * operation. If we don't signal the vk_sync, then we need to reset it.
+ */
+ if (vk_device_supports_threaded_submit(device) &&
+ has_binary_permanent_semaphore_wait) {
+ for (uint32_t i = 0; i < submit->wait_count; i++) {
+ if ((submit->waits[i].sync->flags & VK_SYNC_IS_TIMELINE) ||
+ submit->_wait_temps[i] != NULL)
+ continue;
+
+ bool was_signaled = false;
+ for (uint32_t j = 0; j < submit->signal_count; j++) {
+ if (submit->signals[j].sync == submit->waits[i].sync) {
+ was_signaled = true;
+ break;
+ }
+ }
+
+ if (!was_signaled) {
+ result = vk_sync_reset(queue->base.device,
+ submit->waits[i].sync);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+ }
+ }
+ }
+
+ vk_queue_submit_destroy(queue, submit);
+ return result;
+
+ case VK_QUEUE_SUBMIT_MODE_DEFERRED:
+ vk_queue_push_submit(queue, submit);
+ return vk_device_flush(queue->base.device);
+
+ case VK_QUEUE_SUBMIT_MODE_THREADED:
+ if (has_binary_permanent_semaphore_wait) {
+ for (uint32_t i = 0; i < info->wait_count; i++) {
+ VK_FROM_HANDLE(vk_semaphore, semaphore,
+ info->waits[i].semaphore);
+
+ if (semaphore->type != VK_SEMAPHORE_TYPE_BINARY)
+ continue;
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "When a batch is submitted to a queue via a queue
+ * submission, and it includes semaphores to be waited on,
+ * it defines a memory dependency between prior semaphore
+ * signal operations and the batch, and defines semaphore
+ * wait operations.
+ *
+ * Such semaphore wait operations set the semaphores
+ * created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_BINARY to the unsignaled state."
+ *
+ * For threaded submit, we depend on tracking the unsignaled
+ * state of binary semaphores to determine when we can safely
+ * submit. The VK_SYNC_WAIT_PENDING check above as well as the
+ * one in the submit thread depend on all binary semaphores
+ * being reset when they're not in active use from the point
+ * of view of the client's CPU timeline. This means we need to
+ * reset them inside vkQueueSubmit and cannot wait until the
+ * actual submit which happens later in the thread.
+ *
+ * We've already stolen temporary semaphore payloads above as
+ * part of basic semaphore processing. We steal permanent
+ * semaphore payloads here by way of vk_sync_move. For shared
+ * semaphores, this can be a bit expensive (sync file import
+ * and export) but, for non-shared semaphores, it can be made
+ * fairly cheap. Also, we only do this semaphore swapping in
+ * the case where you have real timelines AND the client is
+ * using timeline semaphores with wait-before-signal (that's
+ * the only way to get a submit thread) AND mixing those with
+ * waits on binary semaphores AND said binary semaphore is
+ * using its permanent payload. In other words, this code
+ * should basically only ever get executed in CTS tests.
+ */
+ if (submit->_wait_temps[i] != NULL)
+ continue;
+
+ assert(submit->waits[i].sync == &semaphore->permanent);
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-vkQueueSubmit-pWaitSemaphores-03238
+ *
+ * "All elements of the pWaitSemaphores member of all
+ * elements of pSubmits created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_BINARY must reference a semaphore
+ * signal operation that has been submitted for execution
+ * and any semaphore signal operations on which it depends
+ * (if any) must have also been submitted for execution."
+ *
+ * Therefore, we can safely do a blocking wait here and it
+ * won't actually block for long. This ensures that the
+ * vk_sync_move below will succeed.
+ */
+ result = vk_sync_wait(queue->base.device,
+ submit->waits[i].sync, 0,
+ VK_SYNC_WAIT_PENDING, UINT64_MAX);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+
+ result = vk_sync_create(queue->base.device,
+ semaphore->permanent.type,
+ 0 /* flags */,
+ 0 /* initial value */,
+ &submit->_wait_temps[i]);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+
+ result = vk_sync_move(queue->base.device,
+ submit->_wait_temps[i],
+ &semaphore->permanent);
+ if (unlikely(result != VK_SUCCESS))
+ goto fail;
+
+ submit->waits[i].sync = submit->_wait_temps[i];
+ }
+ }
+
+ vk_queue_push_submit(queue, submit);
+
+ if (signal_mem_sync) {
+ /* If we're signaling a memory object, we have to ensure that
+ * vkQueueSubmit does not return until the kernel submission has
+ * happened. Otherwise, we may get a race between this process
+ * and whatever is going to wait on the object where the other
+ * process may wait before we've submitted our work. Drain the
+ * queue now to avoid this. It's the responsibility of the caller
+ * to ensure that any vkQueueSubmit which signals a memory object
+ * has fully resolved dependencies.
+ */
+ result = vk_queue_drain(queue);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ return VK_SUCCESS;
+
+ case VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND:
+ unreachable("Invalid vk_queue::submit.mode");
+ }
+ unreachable("Invalid submit mode");
+
+fail:
+ vk_queue_submit_destroy(queue, submit);
+ return result;
+}
+
+VkResult
+vk_queue_wait_before_present(struct vk_queue *queue,
+ const VkPresentInfoKHR *pPresentInfo)
+{
+ if (vk_device_is_lost(queue->base.device))
+ return VK_ERROR_DEVICE_LOST;
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-vkQueuePresentKHR-pWaitSemaphores-03268
+ *
+ * "All elements of the pWaitSemaphores member of pPresentInfo must
+ * reference a semaphore signal operation that has been submitted for
+ * execution and any semaphore signal operations on which it depends (if
+ * any) must have also been submitted for execution."
+ *
+ * As with vkQueueSubmit above, we need to ensure that any binary
+ * semaphores we use in this present actually exist. If we don't have
+ * timeline semaphores, this is a non-issue. If they're emulated, then
+ * this is ensured for us by the vk_device_flush() at the end of every
+ * vkQueueSubmit() and every vkSignalSemaphore(). For real timeline
+ * semaphores, however, we need to do a wait. Thanks to the above bit of
+ * spec text, that wait should never block for long.
+ */
+ if (!vk_device_supports_threaded_submit(queue->base.device))
+ return VK_SUCCESS;
+
+ const uint32_t wait_count = pPresentInfo->waitSemaphoreCount;
+
+ if (wait_count == 0)
+ return VK_SUCCESS;
+
+ STACK_ARRAY(struct vk_sync_wait, waits, wait_count);
+
+ for (uint32_t i = 0; i < wait_count; i++) {
+ VK_FROM_HANDLE(vk_semaphore, semaphore,
+ pPresentInfo->pWaitSemaphores[i]);
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-vkQueuePresentKHR-pWaitSemaphores-03267
+ *
+ * "All elements of the pWaitSemaphores member of pPresentInfo must
+ * be created with a VkSemaphoreType of VK_SEMAPHORE_TYPE_BINARY."
+ */
+ assert(semaphore->type == VK_SEMAPHORE_TYPE_BINARY);
+
+ waits[i] = (struct vk_sync_wait) {
+ .sync = vk_semaphore_get_active_sync(semaphore),
+ .stage_mask = ~(VkPipelineStageFlags2)0,
+ };
+ }
+
+ VkResult result = vk_sync_wait_many(queue->base.device, wait_count, waits,
+ VK_SYNC_WAIT_PENDING, UINT64_MAX);
+
+ STACK_ARRAY_FINISH(waits);
+
+ /* Check again, just in case */
+ if (vk_device_is_lost(queue->base.device))
+ return VK_ERROR_DEVICE_LOST;
+
+ return result;
+}
+
+static VkResult
+vk_queue_signal_sync(struct vk_queue *queue,
+ struct vk_sync *sync,
+ uint32_t signal_value)
+{
+ struct vk_queue_submit *submit = vk_queue_submit_alloc(queue, 0, 0, 0, 0, 0,
+ 0, 0, 1, NULL, NULL);
+ if (unlikely(submit == NULL))
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ submit->signals[0] = (struct vk_sync_signal) {
+ .sync = sync,
+ .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
+ .signal_value = signal_value,
+ };
+
+ VkResult result;
+ switch (queue->submit.mode) {
+ case VK_QUEUE_SUBMIT_MODE_IMMEDIATE:
+ result = vk_queue_submit_final(queue, submit);
+ vk_queue_submit_destroy(queue, submit);
+ return result;
+
+ case VK_QUEUE_SUBMIT_MODE_DEFERRED:
+ vk_queue_push_submit(queue, submit);
+ return vk_device_flush(queue->base.device);
+
+ case VK_QUEUE_SUBMIT_MODE_THREADED:
+ vk_queue_push_submit(queue, submit);
+ return VK_SUCCESS;
+
+ case VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND:
+ unreachable("Invalid vk_queue::submit.mode");
+ }
+ unreachable("Invalid timeline mode");
+}
+
+void
+vk_queue_finish(struct vk_queue *queue)
+{
+ if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED)
+ vk_queue_stop_submit_thread(queue);
+
+ while (!list_is_empty(&queue->submit.submits)) {
+ assert(vk_device_is_lost_no_report(queue->base.device));
+
+ struct vk_queue_submit *submit =
+ list_first_entry(&queue->submit.submits,
+ struct vk_queue_submit, link);
+
+ list_del(&submit->link);
+ vk_queue_submit_destroy(queue, submit);
+ }
+
+#if DETECT_OS_ANDROID
+ if (queue->anb_semaphore != VK_NULL_HANDLE) {
+ struct vk_device *device = queue->base.device;
+ device->dispatch_table.DestroySemaphore(vk_device_to_handle(device),
+ queue->anb_semaphore, NULL);
+ }
+#endif
+
+ cnd_destroy(&queue->submit.pop);
+ cnd_destroy(&queue->submit.push);
+ mtx_destroy(&queue->submit.mutex);
+
+ util_dynarray_fini(&queue->labels);
+ list_del(&queue->link);
+ vk_object_base_finish(&queue->base);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_QueueSubmit2(VkQueue _queue,
+ uint32_t submitCount,
+ const VkSubmitInfo2 *pSubmits,
+ VkFence _fence)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+ VK_FROM_HANDLE(vk_fence, fence, _fence);
+
+ if (vk_device_is_lost(queue->base.device))
+ return VK_ERROR_DEVICE_LOST;
+
+ if (submitCount == 0) {
+ if (fence == NULL) {
+ return VK_SUCCESS;
+ } else {
+ return vk_queue_signal_sync(queue, vk_fence_get_active_sync(fence), 0);
+ }
+ }
+
+ for (uint32_t i = 0; i < submitCount; i++) {
+ struct vulkan_submit_info info = {
+ .pNext = pSubmits[i].pNext,
+ .command_buffer_count = pSubmits[i].commandBufferInfoCount,
+ .command_buffers = pSubmits[i].pCommandBufferInfos,
+ .wait_count = pSubmits[i].waitSemaphoreInfoCount,
+ .waits = pSubmits[i].pWaitSemaphoreInfos,
+ .signal_count = pSubmits[i].signalSemaphoreInfoCount,
+ .signals = pSubmits[i].pSignalSemaphoreInfos,
+ .fence = i == submitCount - 1 ? fence : NULL
+ };
+ VkResult result = vk_queue_submit(queue, &info);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_QueueBindSparse(VkQueue _queue,
+ uint32_t bindInfoCount,
+ const VkBindSparseInfo *pBindInfo,
+ VkFence _fence)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+ VK_FROM_HANDLE(vk_fence, fence, _fence);
+
+ if (vk_device_is_lost(queue->base.device))
+ return VK_ERROR_DEVICE_LOST;
+
+ if (bindInfoCount == 0) {
+ if (fence == NULL) {
+ return VK_SUCCESS;
+ } else {
+ return vk_queue_signal_sync(queue, vk_fence_get_active_sync(fence), 0);
+ }
+ }
+
+ for (uint32_t i = 0; i < bindInfoCount; i++) {
+ const VkTimelineSemaphoreSubmitInfo *timeline_info =
+ vk_find_struct_const(pBindInfo[i].pNext, TIMELINE_SEMAPHORE_SUBMIT_INFO);
+ const uint64_t *wait_values = NULL;
+ const uint64_t *signal_values = NULL;
+
+ if (timeline_info && timeline_info->waitSemaphoreValueCount) {
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkBindSparseInfo-pNext-03247
+ *
+ * "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
+ * and any element of pWaitSemaphores was created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_TIMELINE, then its waitSemaphoreValueCount member must equal
+ * waitSemaphoreCount"
+ */
+ assert(timeline_info->waitSemaphoreValueCount == pBindInfo[i].waitSemaphoreCount);
+ wait_values = timeline_info->pWaitSemaphoreValues;
+ }
+
+ if (timeline_info && timeline_info->signalSemaphoreValueCount) {
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkBindSparseInfo-pNext-03248
+ *
+ * "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
+ * and any element of pSignalSemaphores was created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount member must equal
+ * signalSemaphoreCount"
+ */
+ assert(timeline_info->signalSemaphoreValueCount == pBindInfo[i].signalSemaphoreCount);
+ signal_values = timeline_info->pSignalSemaphoreValues;
+ }
+
+ STACK_ARRAY(VkSemaphoreSubmitInfo, wait_semaphore_infos,
+ pBindInfo[i].waitSemaphoreCount);
+ STACK_ARRAY(VkSemaphoreSubmitInfo, signal_semaphore_infos,
+ pBindInfo[i].signalSemaphoreCount);
+
+ if (!wait_semaphore_infos || !signal_semaphore_infos) {
+ STACK_ARRAY_FINISH(wait_semaphore_infos);
+ STACK_ARRAY_FINISH(signal_semaphore_infos);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ for (uint32_t j = 0; j < pBindInfo[i].waitSemaphoreCount; j++) {
+ wait_semaphore_infos[j] = (VkSemaphoreSubmitInfo) {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
+ .semaphore = pBindInfo[i].pWaitSemaphores[j],
+ .value = wait_values ? wait_values[j] : 0,
+ };
+ }
+
+ for (uint32_t j = 0; j < pBindInfo[i].signalSemaphoreCount; j++) {
+ signal_semaphore_infos[j] = (VkSemaphoreSubmitInfo) {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
+ .semaphore = pBindInfo[i].pSignalSemaphores[j],
+ .value = signal_values ? signal_values[j] : 0,
+ };
+ }
+ struct vulkan_submit_info info = {
+ .pNext = pBindInfo[i].pNext,
+ .wait_count = pBindInfo[i].waitSemaphoreCount,
+ .waits = wait_semaphore_infos,
+ .signal_count = pBindInfo[i].signalSemaphoreCount,
+ .signals = signal_semaphore_infos,
+ .buffer_bind_count = pBindInfo[i].bufferBindCount,
+ .buffer_binds = pBindInfo[i].pBufferBinds,
+ .image_opaque_bind_count = pBindInfo[i].imageOpaqueBindCount,
+ .image_opaque_binds = pBindInfo[i].pImageOpaqueBinds,
+ .image_bind_count = pBindInfo[i].imageBindCount,
+ .image_binds = pBindInfo[i].pImageBinds,
+ .fence = i == bindInfoCount - 1 ? fence : NULL
+ };
+ VkResult result = vk_queue_submit(queue, &info);
+
+ STACK_ARRAY_FINISH(wait_semaphore_infos);
+ STACK_ARRAY_FINISH(signal_semaphore_infos);
+
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+static const struct vk_sync_type *
+get_cpu_wait_type(struct vk_physical_device *pdevice)
+{
+ for (const struct vk_sync_type *const *t =
+ pdevice->supported_sync_types; *t; t++) {
+ if (((*t)->features & VK_SYNC_FEATURE_BINARY) &&
+ ((*t)->features & VK_SYNC_FEATURE_CPU_WAIT))
+ return *t;
+ }
+
+ unreachable("You must have a non-timeline CPU wait sync type");
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_QueueWaitIdle(VkQueue _queue)
+{
+ MESA_TRACE_FUNC();
+
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+ VkResult result;
+
+ if (vk_device_is_lost(queue->base.device))
+ return VK_ERROR_DEVICE_LOST;
+
+ const struct vk_sync_type *sync_type =
+ get_cpu_wait_type(queue->base.device->physical);
+
+ struct vk_sync *sync;
+ result = vk_sync_create(queue->base.device, sync_type, 0, 0, &sync);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ result = vk_queue_signal_sync(queue, sync, 0);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ result = vk_sync_wait(queue->base.device, sync, 0,
+ VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
+
+ vk_sync_destroy(queue->base.device, sync);
+
+ VkResult device_status = vk_device_check_status(queue->base.device);
+ if (device_status != VK_SUCCESS)
+ return device_status;
+
+ return result;
+}
diff --git a/src/vulkan/runtime/vk_queue.h b/src/vulkan/runtime/vk_queue.h
new file mode 100644
index 00000000000..814f9fefcdd
--- /dev/null
+++ b/src/vulkan/runtime/vk_queue.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_QUEUE_H
+#define VK_QUEUE_H
+
+#include "vk_device.h"
+
+#include "c11/threads.h"
+
+#include "util/list.h"
+#include "util/u_dynarray.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_command_buffer;
+struct vk_queue_submit;
+struct vk_sync;
+struct vk_sync_wait;
+struct vk_sync_signal;
+struct vk_sync_timeline_point;
+
+struct vk_queue {
+ struct vk_object_base base;
+
+ /* Link in vk_device::queues */
+ struct list_head link;
+
+ /* VkDeviceQueueCreateInfo::flags */
+ VkDeviceQueueCreateFlags flags;
+
+ /* VkDeviceQueueCreateInfo::queueFamilyIndex */
+ uint32_t queue_family_index;
+
+ /* Which queue this is within the queue family */
+ uint32_t index_in_family;
+
+ /** Driver queue submit hook
+ *
+ * When using the common implementation of vkQueueSubmit(), this function
+ * is called to do the final submit to the kernel driver after all
+ * semaphore dependencies have been resolved. Depending on the timeline
+ * mode and application usage, this function may be called directly from
+ * the client thread on which vkQueueSubmit was called or from a runtime-
+ * managed submit thread. We do, however, guarantee that as long as the
+ * client follows the Vulkan threading rules, this function will never be
+ * called by the runtime concurrently on the same queue.
+ */
+ VkResult (*driver_submit)(struct vk_queue *queue,
+ struct vk_queue_submit *submit);
+
+ struct {
+ /** Current submit mode
+ *
+ * This represents the exact current submit mode for this specific queue
+ * which may be different from `vk_device::submit_mode`. In particular,
+ * this will never be `VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND`.
+ * Instead, when the device submit mode is
+ * `VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND`, the queue submit mode
+ * will be one of `VK_QUEUE_SUBMIT_MODE_THREADED` or
+ * `VK_QUEUE_SUBMIT_MODE_IMMEDIATE` depending on whether or not a submit
+ * thread is currently running for this queue. If the device submit
+ * mode is `VK_QUEUE_SUBMIT_MODE_DEFERRED`, every queue in the device
+ * will use `VK_QUEUE_SUBMIT_MODE_DEFERRED` because the deferred submit
+ * model depends on regular flushing instead of independent threads.
+ */
+ enum vk_queue_submit_mode mode;
+
+ mtx_t mutex;
+ cnd_t push;
+ cnd_t pop;
+
+ struct list_head submits;
+
+ bool thread_run;
+ thrd_t thread;
+ } submit;
+
+ struct {
+ /* Only set once atomically by the queue */
+ int lost;
+ int error_line;
+ const char *error_file;
+ char error_msg[80];
+ } _lost;
+
+ /**
+ * VK_EXT_debug_utils
+ *
+ * The next two fields represent debug labels storage.
+ *
+ * VK_EXT_debug_utils spec requires that upon triggering a debug message
+ * with a queue attached to it, all "active" labels will also be provided
+ * to the callback. The spec describes two distinct ways of attaching a
+ * debug label to the queue: opening a label region and inserting a single
+ * label.
+ *
+ * Label region is active between the corresponding `*BeginDebugUtilsLabel`
+ * and `*EndDebugUtilsLabel` calls. The spec doesn't mention any limits on
+ * nestedness of label regions. This implementation assumes that there
+ * aren't any.
+ *
+ * The spec, however, doesn't explain the lifetime of a label submitted by
+ * an `*InsertDebugUtilsLabel` call. The LunarG whitepaper [1] (pp 12-15)
+ * provides a more detailed explanation along with some examples. According
+ * to those, such a label remains active until the next `*DebugUtilsLabel`
+ * call. This means that there can be no more than one such label at a
+ * time.
+ *
+ * ``labels`` contains all active labels at this point, in order of
+ * submission. ``region_begin`` denotes whether the most recent label opens
+ * a new region. If ``labels`` is empty, ``region_begin`` must be true.
+ *
+ * Anytime we modify labels, we first check for ``region_begin``. If it's
+ * false, it means that the most recent label was submitted by
+ * `*InsertDebugUtilsLabel` and we need to remove it before doing anything
+ * else (a minimal sketch of this check follows the struct definition below).
+ *
+ * See the discussion here:
+ * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10318#note_1061317
+ *
+ * [1] https://www.lunarg.com/wp-content/uploads/2018/05/Vulkan-Debug-Utils_05_18_v1.pdf
+ */
+ struct util_dynarray labels;
+ bool region_begin;
+
+#if DETECT_OS_ANDROID
+ /** SYNC_FD signal semaphore for vkQueueSignalReleaseImageANDROID
+ *
+ * VK_ANDROID_native_buffer enforces explicit fencing on the present API
+ * boundary. To avoid assuming that all waitSemaphores are exportable to
+ * sync files, and to capture pending cmds in the queue, we do a simple
+ * submission and signal a SYNC_FD handle type external semaphore for
+ * native fence export.
+ *
+ * This plays the same role as wsi_swapchain::dma_buf_semaphore for WSI.
+ * The VK_ANDROID_native_buffer spec hides the swapchain object from the
+ * icd, so we have to cache the semaphore in common vk_queue.
+ *
+ * This also makes it easier to add additional cmds to prepare the wsi
+ * image for implementations requiring such (e.g. for layout transition).
+ */
+ VkSemaphore anb_semaphore;
+#endif
+};
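+
+/* A minimal sketch, with a hypothetical helper name, of the label
+ * bookkeeping described above: before recording any new label, a pending
+ * single label (one submitted via `*InsertDebugUtilsLabel`) is dropped.
+ * Illustration only, not part of the runtime.
+ */
+#if 0
+static inline void
+example_drop_pending_label(struct vk_queue *queue)
+{
+ if (!queue->region_begin) {
+ /* The most recent label came from `*InsertDebugUtilsLabel` and stops
+ * being active as soon as the next label call arrives.
+ */
+ (void)util_dynarray_pop(&queue->labels, VkDebugUtilsLabelEXT);
+ }
+}
+#endif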
+
+VK_DEFINE_HANDLE_CASTS(vk_queue, base, VkQueue, VK_OBJECT_TYPE_QUEUE)
+
+VkResult MUST_CHECK
+vk_queue_init(struct vk_queue *queue, struct vk_device *device,
+ const VkDeviceQueueCreateInfo *pCreateInfo,
+ uint32_t index_in_family);
+
+void
+vk_queue_finish(struct vk_queue *queue);
+
+static inline bool
+vk_queue_is_empty(struct vk_queue *queue)
+{
+ return list_is_empty(&queue->submit.submits);
+}
+
+/** Enables threaded submit on this queue
+ *
+ * This should be called by the driver if it wants to be able to block inside
+ * `vk_queue::driver_submit`. Once this function has been called, the queue
+ * will always use a submit thread for all submissions. You must have called
+ * vk_device_enable_threaded_submit() before calling this function.
+ */
+VkResult vk_queue_enable_submit_thread(struct vk_queue *queue);
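+
+/* A minimal sketch, assuming a hypothetical "xyz" driver, of how this is
+ * typically wired up: initialize the queue, install the driver_submit hook
+ * and, because that hook may block, force a submit thread. It assumes
+ * vk_device_enable_threaded_submit() was already called at device-creation
+ * time. The xyz_* names are placeholders, not real code.
+ */
+#if 0
+static VkResult
+xyz_queue_init(struct xyz_device *dev, struct xyz_queue *queue,
+ const VkDeviceQueueCreateInfo *pCreateInfo,
+ uint32_t index_in_family)
+{
+ VkResult result = vk_queue_init(&queue->vk, &dev->vk, pCreateInfo,
+ index_in_family);
+ if (result != VK_SUCCESS)
+ return result;
+
+ queue->vk.driver_submit = xyz_driver_submit;
+
+ /* This driver blocks inside driver_submit, so always use a thread. */
+ return vk_queue_enable_submit_thread(&queue->vk);
+}
+#endif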
+
+VkResult vk_queue_flush(struct vk_queue *queue, uint32_t *submit_count_out);
+
+VkResult vk_queue_wait_before_present(struct vk_queue *queue,
+ const VkPresentInfoKHR *pPresentInfo);
+
+VkResult PRINTFLIKE(4, 5)
+_vk_queue_set_lost(struct vk_queue *queue,
+ const char *file, int line,
+ const char *msg, ...);
+
+#define vk_queue_set_lost(queue, ...) \
+ _vk_queue_set_lost(queue, __FILE__, __LINE__, __VA_ARGS__)
+
+static inline bool
+vk_queue_is_lost(struct vk_queue *queue)
+{
+ return queue->_lost.lost;
+}
+
+#define vk_foreach_queue(queue, device) \
+ list_for_each_entry(struct vk_queue, queue, &(device)->queues, link)
+
+#define vk_foreach_queue_safe(queue, device) \
+ list_for_each_entry_safe(struct vk_queue, queue, &(device)->queues, link)
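+
+/* For illustration only: the iterators above are convenient in device-wide
+ * paths, e.g. tearing down every queue when the device is destroyed. The
+ * _safe variant is required here because vk_queue_finish() unlinks the
+ * queue from vk_device::queues. The helper name is hypothetical.
+ */
+#if 0
+static void
+example_finish_all_queues(struct vk_device *device)
+{
+ vk_foreach_queue_safe(queue, device)
+ vk_queue_finish(queue);
+}
+#endif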
+
+struct vk_queue_submit {
+ struct list_head link;
+
+ uint32_t wait_count;
+ uint32_t command_buffer_count;
+ uint32_t signal_count;
+
+ uint32_t buffer_bind_count;
+ uint32_t image_opaque_bind_count;
+ uint32_t image_bind_count;
+
+ struct vk_sync_wait *waits;
+ struct vk_command_buffer **command_buffers;
+ struct vk_sync_signal *signals;
+
+ VkSparseBufferMemoryBindInfo *buffer_binds;
+ VkSparseImageOpaqueMemoryBindInfo *image_opaque_binds;
+ VkSparseImageMemoryBindInfo *image_binds;
+
+ uint32_t perf_pass_index;
+
+ /* Used internally; should be ignored by drivers */
+ struct vk_sync **_wait_temps;
+ struct vk_sync *_mem_signal_temp;
+ struct vk_sync_timeline_point **_wait_points;
+ struct vk_sync_timeline_point **_signal_points;
+};
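+
+/* A rough sketch, using hypothetical xyz_* kernel-interface helpers, of a
+ * driver_submit hook consuming the structure above: translate the waits,
+ * command buffers and signals into a single kernel submission (it assumes
+ * vk_sync.h is included for the vk_sync_wait/vk_sync_signal definitions).
+ * Illustration only, not a real driver.
+ */
+#if 0
+static VkResult
+xyz_driver_submit(struct vk_queue *queue, struct vk_queue_submit *submit)
+{
+ for (uint32_t i = 0; i < submit->wait_count; i++)
+ xyz_batch_add_wait(queue, submit->waits[i].sync,
+ submit->waits[i].wait_value);
+
+ for (uint32_t i = 0; i < submit->command_buffer_count; i++)
+ xyz_batch_add_cmd_buffer(queue, submit->command_buffers[i]);
+
+ for (uint32_t i = 0; i < submit->signal_count; i++)
+ xyz_batch_add_signal(queue, submit->signals[i].sync,
+ submit->signals[i].signal_value);
+
+ return xyz_batch_flush_to_kernel(queue);
+}
+#endif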
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_QUEUE_H */
diff --git a/src/vulkan/runtime/vk_render_pass.c b/src/vulkan/runtime/vk_render_pass.c
new file mode 100644
index 00000000000..9eb69987383
--- /dev/null
+++ b/src/vulkan/runtime/vk_render_pass.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright © 2020 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_render_pass.h"
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_framebuffer.h"
+#include "vk_image.h"
+#include "vk_util.h"
+
+#include "util/log.h"
+
+static void
+translate_references(VkAttachmentReference2 **reference_ptr,
+ uint32_t reference_count,
+ const VkAttachmentReference *reference,
+ const VkRenderPassCreateInfo *pass_info,
+ bool is_input_attachment)
+{
+ VkAttachmentReference2 *reference2 = *reference_ptr;
+ *reference_ptr += reference_count;
+ for (uint32_t i = 0; i < reference_count; i++) {
+ reference2[i] = (VkAttachmentReference2) {
+ .sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
+ .pNext = NULL,
+ .attachment = reference[i].attachment,
+ .layout = reference[i].layout,
+ };
+
+ if (is_input_attachment &&
+ reference2[i].attachment != VK_ATTACHMENT_UNUSED) {
+ assert(reference2[i].attachment < pass_info->attachmentCount);
+ const VkAttachmentDescription *att =
+ &pass_info->pAttachments[reference2[i].attachment];
+ reference2[i].aspectMask = vk_format_aspects(att->format);
+ }
+ }
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateRenderPass(VkDevice _device,
+ const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ uint32_t reference_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ reference_count += pCreateInfo->pSubpasses[i].inputAttachmentCount;
+ reference_count += pCreateInfo->pSubpasses[i].colorAttachmentCount;
+ if (pCreateInfo->pSubpasses[i].pResolveAttachments)
+ reference_count += pCreateInfo->pSubpasses[i].colorAttachmentCount;
+ if (pCreateInfo->pSubpasses[i].pDepthStencilAttachment)
+ reference_count += 1;
+ }
+
+ VK_MULTIALLOC(ma);
+ VK_MULTIALLOC_DECL(&ma, VkRenderPassCreateInfo2, create_info, 1);
+ VK_MULTIALLOC_DECL(&ma, VkSubpassDescription2, subpasses,
+ pCreateInfo->subpassCount);
+ VK_MULTIALLOC_DECL(&ma, VkAttachmentDescription2, attachments,
+ pCreateInfo->attachmentCount);
+ VK_MULTIALLOC_DECL(&ma, VkSubpassDependency2, dependencies,
+ pCreateInfo->dependencyCount);
+ VK_MULTIALLOC_DECL(&ma, VkAttachmentReference2, references,
+ reference_count);
+ if (!vk_multialloc_alloc2(&ma, &device->alloc, pAllocator,
+ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ VkAttachmentReference2 *reference_ptr = references;
+
+ const VkRenderPassMultiviewCreateInfo *multiview_info = NULL;
+ const VkRenderPassInputAttachmentAspectCreateInfo *aspect_info = NULL;
+ vk_foreach_struct_const(ext, pCreateInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
+ aspect_info = (const VkRenderPassInputAttachmentAspectCreateInfo *)ext;
+ /* Applied below, after the attachment references are translated */
+ break;
+
+ case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+ multiview_info = (const VkRenderPassMultiviewCreateInfo*) ext;
+ break;
+
+ case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+ /* pass this through to CreateRenderPass2 */
+ break;
+
+ default:
+ mesa_logd("%s: ignored VkStructureType %u\n", __func__, ext->sType);
+ break;
+ }
+ }
+
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+ attachments[i] = (VkAttachmentDescription2) {
+ .sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
+ .pNext = NULL,
+ .flags = pCreateInfo->pAttachments[i].flags,
+ .format = pCreateInfo->pAttachments[i].format,
+ .samples = pCreateInfo->pAttachments[i].samples,
+ .loadOp = pCreateInfo->pAttachments[i].loadOp,
+ .storeOp = pCreateInfo->pAttachments[i].storeOp,
+ .stencilLoadOp = pCreateInfo->pAttachments[i].stencilLoadOp,
+ .stencilStoreOp = pCreateInfo->pAttachments[i].stencilStoreOp,
+ .initialLayout = pCreateInfo->pAttachments[i].initialLayout,
+ .finalLayout = pCreateInfo->pAttachments[i].finalLayout,
+ };
+ }
+
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ subpasses[i] = (VkSubpassDescription2) {
+ .sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
+ .pNext = NULL,
+ .flags = pCreateInfo->pSubpasses[i].flags,
+ .pipelineBindPoint = pCreateInfo->pSubpasses[i].pipelineBindPoint,
+ .viewMask = 0,
+ .inputAttachmentCount = pCreateInfo->pSubpasses[i].inputAttachmentCount,
+ .colorAttachmentCount = pCreateInfo->pSubpasses[i].colorAttachmentCount,
+ .preserveAttachmentCount = pCreateInfo->pSubpasses[i].preserveAttachmentCount,
+ .pPreserveAttachments = pCreateInfo->pSubpasses[i].pPreserveAttachments,
+ };
+
+ if (multiview_info && multiview_info->subpassCount) {
+ assert(multiview_info->subpassCount == pCreateInfo->subpassCount);
+ subpasses[i].viewMask = multiview_info->pViewMasks[i];
+ }
+
+ subpasses[i].pInputAttachments = reference_ptr;
+ translate_references(&reference_ptr,
+ subpasses[i].inputAttachmentCount,
+ pCreateInfo->pSubpasses[i].pInputAttachments,
+ pCreateInfo, true);
+ subpasses[i].pColorAttachments = reference_ptr;
+ translate_references(&reference_ptr,
+ subpasses[i].colorAttachmentCount,
+ pCreateInfo->pSubpasses[i].pColorAttachments,
+ pCreateInfo, false);
+ subpasses[i].pResolveAttachments = NULL;
+ if (pCreateInfo->pSubpasses[i].pResolveAttachments) {
+ subpasses[i].pResolveAttachments = reference_ptr;
+ translate_references(&reference_ptr,
+ subpasses[i].colorAttachmentCount,
+ pCreateInfo->pSubpasses[i].pResolveAttachments,
+ pCreateInfo, false);
+ }
+ subpasses[i].pDepthStencilAttachment = NULL;
+ if (pCreateInfo->pSubpasses[i].pDepthStencilAttachment) {
+ subpasses[i].pDepthStencilAttachment = reference_ptr;
+ translate_references(&reference_ptr, 1,
+ pCreateInfo->pSubpasses[i].pDepthStencilAttachment,
+ pCreateInfo, false);
+ }
+ }
+
+ assert(reference_ptr == references + reference_count);
+
+ if (aspect_info != NULL) {
+ for (uint32_t i = 0; i < aspect_info->aspectReferenceCount; i++) {
+ const VkInputAttachmentAspectReference *ref =
+ &aspect_info->pAspectReferences[i];
+
+ assert(ref->subpass < pCreateInfo->subpassCount);
+ VkSubpassDescription2 *subpass = &subpasses[ref->subpass];
+
+ assert(ref->inputAttachmentIndex < subpass->inputAttachmentCount);
+ VkAttachmentReference2 *att = (VkAttachmentReference2 *)
+ &subpass->pInputAttachments[ref->inputAttachmentIndex];
+
+ att->aspectMask = ref->aspectMask;
+ }
+ }
+
+ for (uint32_t i = 0; i < pCreateInfo->dependencyCount; i++) {
+ dependencies[i] = (VkSubpassDependency2) {
+ .sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
+ .pNext = NULL,
+ .srcSubpass = pCreateInfo->pDependencies[i].srcSubpass,
+ .dstSubpass = pCreateInfo->pDependencies[i].dstSubpass,
+ .srcStageMask = pCreateInfo->pDependencies[i].srcStageMask,
+ .dstStageMask = pCreateInfo->pDependencies[i].dstStageMask,
+ .srcAccessMask = pCreateInfo->pDependencies[i].srcAccessMask,
+ .dstAccessMask = pCreateInfo->pDependencies[i].dstAccessMask,
+ .dependencyFlags = pCreateInfo->pDependencies[i].dependencyFlags,
+ .viewOffset = 0,
+ };
+
+ if (multiview_info && multiview_info->dependencyCount) {
+ assert(multiview_info->dependencyCount == pCreateInfo->dependencyCount);
+ dependencies[i].viewOffset = multiview_info->pViewOffsets[i];
+ }
+ }
+
+ *create_info = (VkRenderPassCreateInfo2) {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
+ .pNext = pCreateInfo->pNext,
+ .flags = pCreateInfo->flags,
+ .attachmentCount = pCreateInfo->attachmentCount,
+ .pAttachments = attachments,
+ .subpassCount = pCreateInfo->subpassCount,
+ .pSubpasses = subpasses,
+ .dependencyCount = pCreateInfo->dependencyCount,
+ .pDependencies = dependencies,
+ };
+
+ if (multiview_info && multiview_info->correlationMaskCount > 0) {
+ create_info->correlatedViewMaskCount = multiview_info->correlationMaskCount;
+ create_info->pCorrelatedViewMasks = multiview_info->pCorrelationMasks;
+ }
+
+ VkResult result =
+ device->dispatch_table.CreateRenderPass2(_device, create_info,
+ pAllocator, pRenderPass);
+
+ vk_free2(&device->alloc, pAllocator, create_info);
+
+ return result;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents)
+{
+ /* We don't have a vk_command_buffer object but we can assume, since we're
+ * using common dispatch, that it's a vk_object of some sort.
+ */
+ struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
+
+ VkSubpassBeginInfo info = {
+ .sType = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,
+ .contents = contents,
+ };
+
+ disp->device->dispatch_table.CmdBeginRenderPass2(commandBuffer,
+ pRenderPassBegin, &info);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdEndRenderPass(VkCommandBuffer commandBuffer)
+{
+ /* We don't have a vk_command_buffer object but we can assume, since we're
+ * using common dispatch, that it's a vk_object of some sort.
+ */
+ struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
+
+ VkSubpassEndInfo info = {
+ .sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
+ };
+
+ disp->device->dispatch_table.CmdEndRenderPass2(commandBuffer, &info);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdNextSubpass(VkCommandBuffer commandBuffer,
+ VkSubpassContents contents)
+{
+ /* We don't have a vk_command_buffer object but we can assume, since we're
+ * using common dispatch, that it's a vk_object of some sort.
+ */
+ struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
+
+ VkSubpassBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,
+ .contents = contents,
+ };
+
+ VkSubpassEndInfo end_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
+ };
+
+ disp->device->dispatch_table.CmdNextSubpass2(commandBuffer, &begin_info,
+ &end_info);
+}
+
+static unsigned
+num_subpass_attachments2(const VkSubpassDescription2 *desc)
+{
+ bool has_depth_stencil_attachment =
+ desc->pDepthStencilAttachment != NULL &&
+ desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED;
+
+ const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
+ vk_find_struct_const(desc->pNext,
+ SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE);
+
+ bool has_depth_stencil_resolve_attachment =
+ ds_resolve != NULL && ds_resolve->pDepthStencilResolveAttachment &&
+ ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED;
+
+ const VkFragmentShadingRateAttachmentInfoKHR *fsr_att_info =
+ vk_find_struct_const(desc->pNext,
+ FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR);
+
+ bool has_fragment_shading_rate_attachment =
+ fsr_att_info && fsr_att_info->pFragmentShadingRateAttachment &&
+ fsr_att_info->pFragmentShadingRateAttachment->attachment != VK_ATTACHMENT_UNUSED;
+
+ return desc->inputAttachmentCount +
+ desc->colorAttachmentCount +
+ (desc->pResolveAttachments ? desc->colorAttachmentCount : 0) +
+ has_depth_stencil_attachment +
+ has_depth_stencil_resolve_attachment +
+ has_fragment_shading_rate_attachment;
+}
+
+static void
+vk_render_pass_attachment_init(struct vk_render_pass_attachment *att,
+ const VkAttachmentDescription2 *desc)
+{
+ *att = (struct vk_render_pass_attachment) {
+ .format = desc->format,
+ .aspects = vk_format_aspects(desc->format),
+ .samples = desc->samples,
+ .view_mask = 0,
+ .load_op = desc->loadOp,
+ .store_op = desc->storeOp,
+ .stencil_load_op = desc->stencilLoadOp,
+ .stencil_store_op = desc->stencilStoreOp,
+ .initial_layout = desc->initialLayout,
+ .final_layout = desc->finalLayout,
+ .initial_stencil_layout = vk_att_desc_stencil_layout(desc, false),
+ .final_stencil_layout = vk_att_desc_stencil_layout(desc, true),
+ };
+}
+
+static void
+vk_subpass_attachment_init(struct vk_subpass_attachment *att,
+ struct vk_render_pass *pass,
+ uint32_t subpass_idx,
+ const VkAttachmentReference2 *ref,
+ const VkAttachmentDescription2 *attachments,
+ VkImageUsageFlagBits usage)
+{
+ if (ref->attachment >= pass->attachment_count) {
+ assert(ref->attachment == VK_ATTACHMENT_UNUSED);
+ *att = (struct vk_subpass_attachment) {
+ .attachment = VK_ATTACHMENT_UNUSED,
+ };
+ return;
+ }
+
+ struct vk_render_pass_attachment *pass_att =
+ &pass->attachments[ref->attachment];
+
+ *att = (struct vk_subpass_attachment) {
+ .attachment = ref->attachment,
+ .aspects = vk_format_aspects(pass_att->format),
+ .usage = usage,
+ .layout = ref->layout,
+ .stencil_layout = vk_att_ref_stencil_layout(ref, attachments),
+ };
+
+ switch (usage) {
+ case VK_IMAGE_USAGE_TRANSFER_DST_BIT:
+ break; /* No special aspect requirements */
+
+ case VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT:
+ /* From the Vulkan 1.2.184 spec:
+ *
+ * "aspectMask is ignored when this structure is used to describe
+ * anything other than an input attachment reference."
+ */
+ assert(!(ref->aspectMask & ~att->aspects));
+ att->aspects = ref->aspectMask;
+ break;
+
+ case VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT:
+ case VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR:
+ assert(att->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
+ break;
+
+ case VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT:
+ assert(!(att->aspects & ~(VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)));
+ break;
+
+ default:
+ unreachable("Invalid subpass attachment usage");
+ }
+}
+
+static void
+vk_subpass_attachment_link_resolve(struct vk_subpass_attachment *att,
+ struct vk_subpass_attachment *resolve,
+ const VkRenderPassCreateInfo2 *info)
+{
+ if (resolve->attachment == VK_ATTACHMENT_UNUSED)
+ return;
+
+ assert(att->attachment != VK_ATTACHMENT_UNUSED);
+ att->resolve = resolve;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateRenderPass2(VkDevice _device,
+ const VkRenderPassCreateInfo2 *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2);
+
+ VK_MULTIALLOC(ma);
+ VK_MULTIALLOC_DECL(&ma, struct vk_render_pass, pass, 1);
+ VK_MULTIALLOC_DECL(&ma, struct vk_render_pass_attachment, attachments,
+ pCreateInfo->attachmentCount);
+ VK_MULTIALLOC_DECL(&ma, struct vk_subpass, subpasses,
+ pCreateInfo->subpassCount);
+ VK_MULTIALLOC_DECL(&ma, struct vk_subpass_dependency, dependencies,
+ pCreateInfo->dependencyCount);
+
+ uint32_t subpass_attachment_count = 0;
+ uint32_t subpass_color_attachment_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+ subpass_attachment_count +=
+ num_subpass_attachments2(&pCreateInfo->pSubpasses[i]);
+ subpass_color_attachment_count +=
+ pCreateInfo->pSubpasses[i].colorAttachmentCount;
+ }
+ VK_MULTIALLOC_DECL(&ma, struct vk_subpass_attachment, subpass_attachments,
+ subpass_attachment_count);
+ VK_MULTIALLOC_DECL(&ma, VkFormat, subpass_color_formats,
+ subpass_color_attachment_count);
+ VK_MULTIALLOC_DECL(&ma, VkSampleCountFlagBits, subpass_color_samples,
+ subpass_color_attachment_count);
+
+ if (!vk_object_multizalloc(device, &ma, pAllocator,
+ VK_OBJECT_TYPE_RENDER_PASS))
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ pass->attachment_count = pCreateInfo->attachmentCount;
+ pass->attachments = attachments;
+ pass->subpass_count = pCreateInfo->subpassCount;
+ pass->subpasses = subpasses;
+ pass->dependency_count = pCreateInfo->dependencyCount;
+ pass->dependencies = dependencies;
+
+ for (uint32_t a = 0; a < pCreateInfo->attachmentCount; a++) {
+ vk_render_pass_attachment_init(&pass->attachments[a],
+ &pCreateInfo->pAttachments[a]);
+ }
+
+ struct vk_subpass_attachment *next_subpass_attachment = subpass_attachments;
+ VkFormat *next_subpass_color_format = subpass_color_formats;
+ VkSampleCountFlagBits *next_subpass_color_samples = subpass_color_samples;
+ for (uint32_t s = 0; s < pCreateInfo->subpassCount; s++) {
+ const VkSubpassDescription2 *desc = &pCreateInfo->pSubpasses[s];
+ struct vk_subpass *subpass = &pass->subpasses[s];
+ const VkMultisampledRenderToSingleSampledInfoEXT *mrtss =
+ vk_find_struct_const(desc->pNext, MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT);
+ if (mrtss && !mrtss->multisampledRenderToSingleSampledEnable)
+ mrtss = NULL;
+
+ subpass->attachment_count = num_subpass_attachments2(desc);
+ subpass->attachments = next_subpass_attachment;
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkRenderPassCreateInfo2-viewMask-03058
+ *
+ * "The VkSubpassDescription2::viewMask member of all elements of
+ * pSubpasses must either all be 0, or all not be 0"
+ */
+ if (desc->viewMask)
+ pass->is_multiview = true;
+ assert(pass->is_multiview == (desc->viewMask != 0));
+
+ /* For all view masks in the vk_render_pass data structure, we use a
+ * mask of 1 for non-multiview instead of a mask of 0.
+ */
+ subpass->view_mask = desc->viewMask ? desc->viewMask : 1;
+ pass->view_mask |= subpass->view_mask;
+
+ subpass->input_count = desc->inputAttachmentCount;
+ if (desc->inputAttachmentCount > 0) {
+ subpass->input_attachments = next_subpass_attachment;
+ next_subpass_attachment += desc->inputAttachmentCount;
+
+ for (uint32_t a = 0; a < desc->inputAttachmentCount; a++) {
+ vk_subpass_attachment_init(&subpass->input_attachments[a],
+ pass, s,
+ &desc->pInputAttachments[a],
+ pCreateInfo->pAttachments,
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
+ }
+ }
+
+ subpass->color_count = desc->colorAttachmentCount;
+ if (desc->colorAttachmentCount > 0) {
+ subpass->color_attachments = next_subpass_attachment;
+ next_subpass_attachment += desc->colorAttachmentCount;
+
+ for (uint32_t a = 0; a < desc->colorAttachmentCount; a++) {
+ vk_subpass_attachment_init(&subpass->color_attachments[a],
+ pass, s,
+ &desc->pColorAttachments[a],
+ pCreateInfo->pAttachments,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+ }
+ }
+
+ if (desc->pResolveAttachments) {
+ subpass->color_resolve_count = desc->colorAttachmentCount;
+ subpass->color_resolve_attachments = next_subpass_attachment;
+ next_subpass_attachment += desc->colorAttachmentCount;
+
+ for (uint32_t a = 0; a < desc->colorAttachmentCount; a++) {
+ vk_subpass_attachment_init(&subpass->color_resolve_attachments[a],
+ pass, s,
+ &desc->pResolveAttachments[a],
+ pCreateInfo->pAttachments,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT);
+ vk_subpass_attachment_link_resolve(&subpass->color_attachments[a],
+ &subpass->color_resolve_attachments[a],
+ pCreateInfo);
+ }
+ }
+
+ if (desc->pDepthStencilAttachment &&
+ desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
+ subpass->depth_stencil_attachment = next_subpass_attachment++;
+
+ vk_subpass_attachment_init(subpass->depth_stencil_attachment,
+ pass, s,
+ desc->pDepthStencilAttachment,
+ pCreateInfo->pAttachments,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
+ }
+
+ const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
+ vk_find_struct_const(desc->pNext,
+ SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE);
+
+ if (ds_resolve) {
+ if (ds_resolve->pDepthStencilResolveAttachment &&
+ ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) {
+ subpass->depth_stencil_resolve_attachment = next_subpass_attachment++;
+
+ vk_subpass_attachment_init(subpass->depth_stencil_resolve_attachment,
+ pass, s,
+ ds_resolve->pDepthStencilResolveAttachment,
+ pCreateInfo->pAttachments,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT);
+ vk_subpass_attachment_link_resolve(subpass->depth_stencil_attachment,
+ subpass->depth_stencil_resolve_attachment,
+ pCreateInfo);
+ }
+ if (subpass->depth_stencil_resolve_attachment || mrtss) {
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178
+ *
+ * "If pDepthStencilResolveAttachment is not NULL and does not
+ * have the value VK_ATTACHMENT_UNUSED, depthResolveMode and
+ * stencilResolveMode must not both be VK_RESOLVE_MODE_NONE"
+ */
+ assert(ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE ||
+ ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE);
+
+ subpass->depth_resolve_mode = ds_resolve->depthResolveMode;
+ subpass->stencil_resolve_mode = ds_resolve->stencilResolveMode;
+ }
+ }
+
+ const VkFragmentShadingRateAttachmentInfoKHR *fsr_att_info =
+ vk_find_struct_const(desc->pNext,
+ FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR);
+
+ if (fsr_att_info && fsr_att_info->pFragmentShadingRateAttachment &&
+ fsr_att_info->pFragmentShadingRateAttachment->attachment != VK_ATTACHMENT_UNUSED) {
+ subpass->fragment_shading_rate_attachment = next_subpass_attachment++;
+ vk_subpass_attachment_init(subpass->fragment_shading_rate_attachment,
+ pass, s,
+ fsr_att_info->pFragmentShadingRateAttachment,
+ pCreateInfo->pAttachments,
+ VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR);
+ subpass->fragment_shading_rate_attachment_texel_size =
+ fsr_att_info->shadingRateAttachmentTexelSize;
+ subpass->pipeline_flags |=
+ VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR;
+ }
+
+ /* Figure out any self-dependencies */
+ assert(desc->colorAttachmentCount <= 32);
+ for (uint32_t a = 0; a < desc->inputAttachmentCount; a++) {
+ if (desc->pInputAttachments[a].attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ for (uint32_t c = 0; c < desc->colorAttachmentCount; c++) {
+ if (desc->pColorAttachments[c].attachment ==
+ desc->pInputAttachments[a].attachment) {
+ subpass->input_attachments[a].layout =
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
+ subpass->color_attachments[c].layout =
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
+ subpass->pipeline_flags |=
+ VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ }
+ }
+
+ if (desc->pDepthStencilAttachment != NULL &&
+ desc->pDepthStencilAttachment->attachment ==
+ desc->pInputAttachments[a].attachment) {
+ VkImageAspectFlags aspects =
+ subpass->input_attachments[a].aspects;
+ if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ subpass->input_attachments[a].layout =
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
+ subpass->depth_stencil_attachment->layout =
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
+ subpass->pipeline_flags |=
+ VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ }
+ if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ subpass->input_attachments[a].stencil_layout =
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
+ subpass->depth_stencil_attachment->stencil_layout =
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT;
+ subpass->pipeline_flags |=
+ VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+ }
+ }
+ }
+
+ VkFormat *color_formats = NULL;
+ VkSampleCountFlagBits *color_samples = NULL;
+ VkSampleCountFlagBits samples = 0;
+ if (desc->colorAttachmentCount > 0) {
+ color_formats = next_subpass_color_format;
+ color_samples = next_subpass_color_samples;
+ for (uint32_t a = 0; a < desc->colorAttachmentCount; a++) {
+ const VkAttachmentReference2 *ref = &desc->pColorAttachments[a];
+ if (ref->attachment >= pCreateInfo->attachmentCount) {
+ color_formats[a] = VK_FORMAT_UNDEFINED;
+ color_samples[a] = VK_SAMPLE_COUNT_1_BIT;
+ } else {
+ const VkAttachmentDescription2 *att =
+ &pCreateInfo->pAttachments[ref->attachment];
+
+ color_formats[a] = att->format;
+ color_samples[a] = att->samples;
+
+ samples |= att->samples;
+ }
+ }
+ next_subpass_color_format += desc->colorAttachmentCount;
+ next_subpass_color_samples += desc->colorAttachmentCount;
+ }
+
+ VkFormat depth_format = VK_FORMAT_UNDEFINED;
+ VkFormat stencil_format = VK_FORMAT_UNDEFINED;
+ VkSampleCountFlagBits depth_stencil_samples = VK_SAMPLE_COUNT_1_BIT;
+ if (desc->pDepthStencilAttachment != NULL) {
+ const VkAttachmentReference2 *ref = desc->pDepthStencilAttachment;
+ if (ref->attachment < pCreateInfo->attachmentCount) {
+ const VkAttachmentDescription2 *att =
+ &pCreateInfo->pAttachments[ref->attachment];
+
+ if (vk_format_has_depth(att->format))
+ depth_format = att->format;
+ if (vk_format_has_stencil(att->format))
+ stencil_format = att->format;
+
+ depth_stencil_samples = att->samples;
+
+ samples |= att->samples;
+ }
+ }
+
+ subpass->sample_count_info_amd = (VkAttachmentSampleCountInfoAMD) {
+ .sType = VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD,
+ .pNext = NULL,
+ .colorAttachmentCount = desc->colorAttachmentCount,
+ .pColorAttachmentSamples = color_samples,
+ .depthStencilAttachmentSamples = depth_stencil_samples,
+ };
+
+ subpass->pipeline_info = (VkPipelineRenderingCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
+ .pNext = &subpass->sample_count_info_amd,
+ .viewMask = desc->viewMask,
+ .colorAttachmentCount = desc->colorAttachmentCount,
+ .pColorAttachmentFormats = color_formats,
+ .depthAttachmentFormat = depth_format,
+ .stencilAttachmentFormat = stencil_format,
+ };
+
+ subpass->inheritance_info = (VkCommandBufferInheritanceRenderingInfo) {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO,
+ .pNext = &subpass->sample_count_info_amd,
+ /* If we're inheriting, the contents are clearly in secondaries */
+ .flags = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT,
+ .viewMask = desc->viewMask,
+ .colorAttachmentCount = desc->colorAttachmentCount,
+ .pColorAttachmentFormats = color_formats,
+ .depthAttachmentFormat = depth_format,
+ .stencilAttachmentFormat = stencil_format,
+ .rasterizationSamples = samples,
+ };
+
+ if (mrtss) {
+ assert(mrtss->multisampledRenderToSingleSampledEnable);
+ subpass->mrtss = (VkMultisampledRenderToSingleSampledInfoEXT) {
+ .sType = VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT,
+ .multisampledRenderToSingleSampledEnable = VK_TRUE,
+ .rasterizationSamples = mrtss->rasterizationSamples,
+ };
+ }
+ }
+ assert(next_subpass_attachment ==
+ subpass_attachments + subpass_attachment_count);
+ assert(next_subpass_color_format ==
+ subpass_color_formats + subpass_color_attachment_count);
+ assert(next_subpass_color_samples ==
+ subpass_color_samples + subpass_color_attachment_count);
+
+ /* Walk backwards over the subpasses to compute view masks and
+ * last_subpass masks for all attachments.
+ */
+ for (uint32_t s = 0; s < pCreateInfo->subpassCount; s++) {
+ struct vk_subpass *subpass =
+ &pass->subpasses[(pCreateInfo->subpassCount - 1) - s];
+
+ /* First, compute last_subpass for all the attachments */
+ for (uint32_t a = 0; a < subpass->attachment_count; a++) {
+ struct vk_subpass_attachment *att = &subpass->attachments[a];
+ if (att->attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ assert(att->attachment < pass->attachment_count);
+ const struct vk_render_pass_attachment *pass_att =
+ &pass->attachments[att->attachment];
+
+ att->last_subpass = subpass->view_mask & ~pass_att->view_mask;
+ }
+
+ /* Then compute pass_att->view_mask. We do the two separately so that
+ * we end up with the right last_subpass even if the same attachment is
+ * used twice within a subpass.
+ */
+ for (uint32_t a = 0; a < subpass->attachment_count; a++) {
+ const struct vk_subpass_attachment *att = &subpass->attachments[a];
+ if (att->attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ assert(att->attachment < pass->attachment_count);
+ struct vk_render_pass_attachment *pass_att =
+ &pass->attachments[att->attachment];
+
+ pass_att->view_mask |= subpass->view_mask;
+ }
+ }
+
+ pass->dependency_count = pCreateInfo->dependencyCount;
+ for (uint32_t d = 0; d < pCreateInfo->dependencyCount; d++) {
+ const VkSubpassDependency2 *dep = &pCreateInfo->pDependencies[d];
+
+ pass->dependencies[d] = (struct vk_subpass_dependency) {
+ .flags = dep->dependencyFlags,
+ .src_subpass = dep->srcSubpass,
+ .dst_subpass = dep->dstSubpass,
+ .src_stage_mask = (VkPipelineStageFlags2)dep->srcStageMask,
+ .dst_stage_mask = (VkPipelineStageFlags2)dep->dstStageMask,
+ .src_access_mask = (VkAccessFlags2)dep->srcAccessMask,
+ .dst_access_mask = (VkAccessFlags2)dep->dstAccessMask,
+ .view_offset = dep->viewOffset,
+ };
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * "If a VkMemoryBarrier2 is included in the pNext chain,
+ * srcStageMask, dstStageMask, srcAccessMask, and dstAccessMask
+ * parameters are ignored. The synchronization and access scopes
+ * instead are defined by the parameters of VkMemoryBarrier2."
+ */
+ const VkMemoryBarrier2 *barrier =
+ vk_find_struct_const(dep->pNext, MEMORY_BARRIER_2);
+ if (barrier != NULL) {
+ pass->dependencies[d].src_stage_mask = barrier->srcStageMask;
+ pass->dependencies[d].dst_stage_mask = barrier->dstStageMask;
+ pass->dependencies[d].src_access_mask = barrier->srcAccessMask;
+ pass->dependencies[d].dst_access_mask = barrier->dstAccessMask;
+ }
+ }
+
+ const VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT);
+ if (fdm_info) {
+ pass->fragment_density_map = fdm_info->fragmentDensityMapAttachment;
+ } else {
+ pass->fragment_density_map.attachment = VK_ATTACHMENT_UNUSED;
+ pass->fragment_density_map.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+
+ *pRenderPass = vk_render_pass_to_handle(pass);
+
+ return VK_SUCCESS;
+}
+
+const VkPipelineRenderingCreateInfo *
+vk_get_pipeline_rendering_create_info(const VkGraphicsPipelineCreateInfo *info)
+{
+ VK_FROM_HANDLE(vk_render_pass, render_pass, info->renderPass);
+ if (render_pass != NULL) {
+ assert(info->subpass < render_pass->subpass_count);
+ return &render_pass->subpasses[info->subpass].pipeline_info;
+ }
+
+ return vk_find_struct_const(info->pNext, PIPELINE_RENDERING_CREATE_INFO);
+}
+
+VkPipelineCreateFlags2KHR
+vk_get_pipeline_rendering_flags(const VkGraphicsPipelineCreateInfo *info)
+{
+ VkPipelineCreateFlags2KHR rendering_flags = 0;
+
+ VK_FROM_HANDLE(vk_render_pass, render_pass, info->renderPass);
+ if (render_pass != NULL) {
+ rendering_flags |= render_pass->subpasses[info->subpass].pipeline_flags;
+ if (render_pass->fragment_density_map.attachment != VK_ATTACHMENT_UNUSED)
+ rendering_flags |=
+ VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT;
+ }
+
+ return rendering_flags;
+}
+
+const VkAttachmentSampleCountInfoAMD *
+vk_get_pipeline_sample_count_info_amd(const VkGraphicsPipelineCreateInfo *info)
+{
+ VK_FROM_HANDLE(vk_render_pass, render_pass, info->renderPass);
+ if (render_pass != NULL) {
+ assert(info->subpass < render_pass->subpass_count);
+ return &render_pass->subpasses[info->subpass].sample_count_info_amd;
+ }
+
+ return vk_find_struct_const(info->pNext, ATTACHMENT_SAMPLE_COUNT_INFO_AMD);
+}
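+/* Minimal usage sketch for the getters above, assuming a hypothetical driver
+ * helper emit_color_target_format() (illustrative only):
+ *
+ *    const VkPipelineRenderingCreateInfo *ri =
+ *       vk_get_pipeline_rendering_create_info(pCreateInfo);
+ *    if (ri != NULL) {
+ *       for (uint32_t i = 0; i < ri->colorAttachmentCount; i++)
+ *          emit_color_target_format(pipeline, i,
+ *                                   ri->pColorAttachmentFormats[i]);
+ *    }
+ *
+ * The same call works whether the application supplied a legacy render pass
+ * or chained a VkPipelineRenderingCreateInfo for dynamic rendering.
+ */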
+
+const VkCommandBufferInheritanceRenderingInfo *
+vk_get_command_buffer_inheritance_rendering_info(
+ VkCommandBufferLevel level,
+ const VkCommandBufferBeginInfo *pBeginInfo)
+{
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT specifies that a
+ * secondary command buffer is considered to be entirely inside a render
+ * pass. If this is a primary command buffer, then this bit is ignored."
+ *
+ * Since we're only concerned with the continue case here, we can ignore
+ * any primary command buffers.
+ */
+ if (level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ return NULL;
+
+ if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))
+ return NULL;
+
+ const VkCommandBufferInheritanceInfo *inheritance =
+ pBeginInfo->pInheritanceInfo;
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * "If VkCommandBufferInheritanceInfo::renderPass is not VK_NULL_HANDLE,
+ * or VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT is not specified
+ * in VkCommandBufferBeginInfo::flags, parameters of this structure are
+ * ignored."
+ *
+    * If we have a render pass, that wins, even if a
+    * VkCommandBufferInheritanceRenderingInfo struct is included in the pNext
+    * chain.
+ */
+ VK_FROM_HANDLE(vk_render_pass, render_pass, inheritance->renderPass);
+ if (render_pass != NULL) {
+ assert(inheritance->subpass < render_pass->subpass_count);
+ return &render_pass->subpasses[inheritance->subpass].inheritance_info;
+ }
+
+ return vk_find_struct_const(inheritance->pNext,
+ COMMAND_BUFFER_INHERITANCE_RENDERING_INFO);
+}
+
+const VkRenderingInfo *
+vk_get_command_buffer_inheritance_as_rendering_resume(
+ VkCommandBufferLevel level,
+ const VkCommandBufferBeginInfo *pBeginInfo,
+ void *stack_data)
+{
+ struct vk_gcbiarr_data *data = stack_data;
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT specifies that a
+ * secondary command buffer is considered to be entirely inside a render
+ * pass. If this is a primary command buffer, then this bit is ignored."
+ *
+ * Since we're only concerned with the continue case here, we can ignore
+ * any primary command buffers.
+ */
+ if (level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ return NULL;
+
+ if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))
+ return NULL;
+
+ const VkCommandBufferInheritanceInfo *inheritance =
+ pBeginInfo->pInheritanceInfo;
+
+ VK_FROM_HANDLE(vk_render_pass, pass, inheritance->renderPass);
+ if (pass == NULL)
+ return NULL;
+
+ assert(inheritance->subpass < pass->subpass_count);
+ const struct vk_subpass *subpass = &pass->subpasses[inheritance->subpass];
+
+ VK_FROM_HANDLE(vk_framebuffer, fb, inheritance->framebuffer);
+ if (fb == NULL || (fb->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT))
+ return NULL;
+
+ data->rendering = (VkRenderingInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
+ .flags = VK_RENDERING_RESUMING_BIT,
+ .renderArea = {
+ .offset = { 0, 0 },
+ .extent = { fb->width, fb->height },
+ },
+ .layerCount = fb->layers,
+ .viewMask = pass->is_multiview ? subpass->view_mask : 0,
+ };
+
+ VkRenderingAttachmentInfo *attachments = data->attachments;
+
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ const struct vk_subpass_attachment *sp_att =
+ &subpass->color_attachments[i];
+ if (sp_att->attachment == VK_ATTACHMENT_UNUSED) {
+ attachments[i] = (VkRenderingAttachmentInfo) {
+ .imageView = VK_NULL_HANDLE,
+ };
+ continue;
+ }
+
+ assert(sp_att->attachment < pass->attachment_count);
+ attachments[i] = (VkRenderingAttachmentInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = fb->attachments[sp_att->attachment],
+ .imageLayout = sp_att->layout,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ };
+ }
+ data->rendering.colorAttachmentCount = subpass->color_count;
+ data->rendering.pColorAttachments = attachments;
+ attachments += subpass->color_count;
+
+ if (subpass->depth_stencil_attachment) {
+ const struct vk_subpass_attachment *sp_att =
+ subpass->depth_stencil_attachment;
+ assert(sp_att->attachment < pass->attachment_count);
+
+ VK_FROM_HANDLE(vk_image_view, iview, fb->attachments[sp_att->attachment]);
+ if (iview->image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ *attachments = (VkRenderingAttachmentInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = vk_image_view_to_handle(iview),
+ .imageLayout = sp_att->layout,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ };
+ data->rendering.pDepthAttachment = attachments++;
+ }
+
+ if (iview->image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ *attachments = (VkRenderingAttachmentInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = vk_image_view_to_handle(iview),
+ .imageLayout = sp_att->stencil_layout,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ };
+ data->rendering.pStencilAttachment = attachments++;
+ }
+ }
+
+ if (subpass->fragment_shading_rate_attachment) {
+ const struct vk_subpass_attachment *sp_att =
+ subpass->fragment_shading_rate_attachment;
+ assert(sp_att->attachment < pass->attachment_count);
+
+ data->fsr_att = (VkRenderingFragmentShadingRateAttachmentInfoKHR) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR,
+ .imageView = fb->attachments[sp_att->attachment],
+ .imageLayout = sp_att->layout,
+ .shadingRateAttachmentTexelSize =
+ subpass->fragment_shading_rate_attachment_texel_size,
+ };
+ __vk_append_struct(&data->rendering, &data->fsr_att);
+ }
+
+   /* Append this one last because it lives in the subpass and we don't want
+    * it to be changed by appending other structures later.
+    */
+ if (subpass->mrtss.multisampledRenderToSingleSampledEnable)
+ __vk_append_struct(&data->rendering, (void *)&subpass->mrtss);
+
+ return &data->rendering;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyRenderPass(VkDevice _device,
+ VkRenderPass renderPass,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_render_pass, pass, renderPass);
+
+ if (!pass)
+ return;
+
+ vk_object_free(device, pAllocator, pass);
+}
+
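+/* Since this runtime implements render passes on top of dynamic rendering,
+ * there is no tile-based granularity to report; a 1x1 granularity is always
+ * a valid (and maximally permissive) answer.
+ */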
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetRenderAreaGranularity(VkDevice device,
+ VkRenderPass renderPass,
+ VkExtent2D *pGranularity)
+{
+ *pGranularity = (VkExtent2D){1, 1};
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetRenderingAreaGranularityKHR(
+ VkDevice _device, const VkRenderingAreaInfoKHR *pRenderingAreaInfo,
+ VkExtent2D *pGranularity)
+{
+ *pGranularity = (VkExtent2D) { 1, 1 };
+}
+
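+/* Deep-copies a VkRenderPassSampleLocationsBeginInfoEXT, including the
+ * nested sample-location arrays, into a single allocation so the command
+ * buffer can keep referring to it after vkCmdBeginRenderPass2 returns.
+ */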
+static VkRenderPassSampleLocationsBeginInfoEXT *
+clone_rp_sample_locations(const VkRenderPassSampleLocationsBeginInfoEXT *loc)
+{
+ uint32_t sl_count = 0;
+
+ for (uint32_t i = 0; i < loc->attachmentInitialSampleLocationsCount; i++) {
+ const VkAttachmentSampleLocationsEXT *att_sl_in =
+ &loc->pAttachmentInitialSampleLocations[i];
+ sl_count += att_sl_in->sampleLocationsInfo.sampleLocationsCount;
+ }
+ for (uint32_t i = 0; i < loc->postSubpassSampleLocationsCount; i++) {
+ const VkSubpassSampleLocationsEXT *sp_sl_in =
+ &loc->pPostSubpassSampleLocations[i];
+ sl_count += sp_sl_in->sampleLocationsInfo.sampleLocationsCount;
+ }
+
+ VK_MULTIALLOC(ma);
+ VK_MULTIALLOC_DECL(&ma, VkRenderPassSampleLocationsBeginInfoEXT, new_loc, 1);
+ VK_MULTIALLOC_DECL(&ma, VkAttachmentSampleLocationsEXT, new_att_sl,
+ loc->attachmentInitialSampleLocationsCount);
+ VK_MULTIALLOC_DECL(&ma, VkSubpassSampleLocationsEXT, new_sp_sl,
+ loc->postSubpassSampleLocationsCount);
+ VK_MULTIALLOC_DECL(&ma, VkSampleLocationEXT, sl, sl_count);
+ if (!vk_multialloc_alloc(&ma, vk_default_allocator(),
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
+ return NULL;
+
+ VkSampleLocationEXT *next_sl = sl;
+ for (uint32_t i = 0; i < loc->attachmentInitialSampleLocationsCount; i++) {
+ const VkAttachmentSampleLocationsEXT *att_sl_in =
+ &loc->pAttachmentInitialSampleLocations[i];
+ const VkSampleLocationsInfoEXT *sli_in = &att_sl_in->sampleLocationsInfo;
+
+ typed_memcpy(next_sl, sli_in->pSampleLocations,
+ sli_in->sampleLocationsCount);
+
+ new_att_sl[i] = (VkAttachmentSampleLocationsEXT) {
+ .attachmentIndex = att_sl_in->attachmentIndex,
+ .sampleLocationsInfo = {
+ .sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,
+ .sampleLocationsPerPixel = sli_in->sampleLocationsPerPixel,
+ .sampleLocationGridSize = sli_in->sampleLocationGridSize,
+ .sampleLocationsCount = sli_in->sampleLocationsCount,
+ .pSampleLocations = next_sl,
+ },
+ };
+
+ next_sl += sli_in->sampleLocationsCount;
+ }
+
+ for (uint32_t i = 0; i < loc->postSubpassSampleLocationsCount; i++) {
+ const VkSubpassSampleLocationsEXT *sp_sl_in =
+ &loc->pPostSubpassSampleLocations[i];
+ const VkSampleLocationsInfoEXT *sli_in = &sp_sl_in->sampleLocationsInfo;
+
+ typed_memcpy(next_sl, sli_in->pSampleLocations,
+ sli_in->sampleLocationsCount);
+
+ new_sp_sl[i] = (VkSubpassSampleLocationsEXT) {
+ .subpassIndex = sp_sl_in->subpassIndex,
+ .sampleLocationsInfo = {
+ .sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,
+ .sampleLocationsPerPixel = sli_in->sampleLocationsPerPixel,
+ .sampleLocationGridSize = sli_in->sampleLocationGridSize,
+ .sampleLocationsCount = sli_in->sampleLocationsCount,
+ .pSampleLocations = next_sl,
+ },
+ };
+
+ next_sl += sli_in->sampleLocationsCount;
+ }
+
+ assert(next_sl == sl + sl_count);
+
+ *new_loc = (VkRenderPassSampleLocationsBeginInfoEXT) {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT,
+ .attachmentInitialSampleLocationsCount = loc->attachmentInitialSampleLocationsCount,
+ .pAttachmentInitialSampleLocations = new_att_sl,
+ .postSubpassSampleLocationsCount = loc->postSubpassSampleLocationsCount,
+ .pPostSubpassSampleLocations = new_sp_sl,
+ };
+
+ return new_loc;
+}
+
+static const VkSampleLocationsInfoEXT *
+get_subpass_sample_locations(const VkRenderPassSampleLocationsBeginInfoEXT *loc,
+ uint32_t subpass_idx)
+{
+ for (uint32_t i = 0; i < loc->postSubpassSampleLocationsCount; i++) {
+ if (loc->pPostSubpassSampleLocations[i].subpassIndex == subpass_idx)
+ return &loc->pPostSubpassSampleLocations[i].sampleLocationsInfo;
+ }
+
+ return NULL;
+}
+
+static bool
+vk_image_layout_supports_input_attachment(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_GENERAL:
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
+ case VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct stage_access {
+ VkPipelineStageFlagBits2 stages;
+ VkAccessFlagBits2 access;
+};
+
+static bool
+vk_image_layout_are_all_aspects_read_only(VkImageLayout layout,
+ VkImageAspectFlags aspects)
+{
+ u_foreach_bit(a, aspects) {
+ VkImageAspectFlagBits aspect = 1u << a;
+ if (!vk_image_layout_is_read_only(layout, aspect))
+ return false;
+ }
+ return true;
+}
+
+static struct stage_access
+stage_access_for_layout(VkImageLayout layout, VkImageAspectFlags aspects)
+{
+ VkPipelineStageFlagBits2 stages = 0;
+ VkAccessFlagBits2 access = 0;
+
+ if (vk_image_layout_supports_input_attachment(layout)) {
+ stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
+ access |= VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT;
+ }
+
+ if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
+      stages |= VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT |
+                VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
+ access |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+ if (!vk_image_layout_are_all_aspects_read_only(layout, aspects)) {
+ access |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+
+ /* It might be a resolve attachment */
+ stages |= VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+ access |= VK_ACCESS_2_TRANSFER_WRITE_BIT;
+ }
+ } else {
+ /* Color */
+ if (!vk_image_layout_are_all_aspects_read_only(layout, aspects)) {
+ /* There are no read-only color attachments */
+         stages |= VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+ access |= VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
+
+ /* It might be a resolve attachment */
+ stages |= VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+ access |= VK_ACCESS_2_TRANSFER_WRITE_BIT;
+ }
+ }
+
+ return (struct stage_access) {
+ .stages = stages,
+ .access = access,
+ };
+}
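+/* For example (layouts chosen for illustration):
+ * VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL with a color aspect yields the
+ * color-attachment-output stage with color read/write access plus transfer
+ * write for a possible resolve, while VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL
+ * with a depth aspect yields only the fragment-shader and fragment-test
+ * stages with input-attachment and depth/stencil read access.
+ */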
+
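+/* Emits the VkImageMemoryBarrier2s needed for a layout transition, splitting
+ * a combined depth/stencil range when the two aspects need different
+ * transitions. E.g. (made-up layouts) depth going UNDEFINED ->
+ * DEPTH_ATTACHMENT_OPTIMAL while stencil stays in STENCIL_READ_ONLY_OPTIMAL
+ * produces a single depth-only barrier and no stencil barrier, since the
+ * stencil layout does not change.
+ */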
+static void
+transition_image_range(const struct vk_image_view *image_view,
+ VkImageSubresourceRange range,
+ VkImageLayout old_layout,
+ VkImageLayout new_layout,
+ VkImageLayout old_stencil_layout,
+ VkImageLayout new_stencil_layout,
+ const VkSampleLocationsInfoEXT *sample_locations,
+ uint32_t *barrier_count,
+ uint32_t max_barrier_count,
+ VkImageMemoryBarrier2 *barriers)
+{
+ VkImageAspectFlags aspects_left = range.aspectMask;
+ while (aspects_left) {
+ range.aspectMask = aspects_left;
+
+ /* If we have a depth/stencil image and one of the layouts doesn't match
+ * between depth and stencil, we need two barriers. Restrict to depth
+ * and we'll pick up stencil on the next iteration.
+ */
+ if (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ (old_layout != old_stencil_layout ||
+ new_layout != new_stencil_layout))
+ range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+
+ if (range.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ /* We're down to a single aspect bit so this is going to be the last
+ * iteration and it's fine to stomp the input variables here.
+ */
+ old_layout = old_stencil_layout;
+ new_layout = new_stencil_layout;
+ }
+
+ if (new_layout != old_layout) {
+ /* We could go about carefully calculating every possible way the
+ * attachment may have been used in the render pass or we can break
+ * out the big hammer and throw in any stage and access flags
+ * possible for the given layouts.
+ */
+ struct stage_access src_sa, dst_sa;
+ src_sa = stage_access_for_layout(old_layout, range.aspectMask);
+ dst_sa = stage_access_for_layout(new_layout, range.aspectMask);
+
+ assert(*barrier_count < max_barrier_count);
+ barriers[(*barrier_count)++] = (VkImageMemoryBarrier2) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
+ .pNext = sample_locations,
+ .srcStageMask = src_sa.stages,
+ .srcAccessMask = src_sa.access,
+ .dstStageMask = dst_sa.stages,
+ .dstAccessMask = dst_sa.access,
+ .oldLayout = old_layout,
+ .newLayout = new_layout,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = vk_image_to_handle(image_view->image),
+ .subresourceRange = range,
+ };
+ }
+
+ aspects_left &= ~range.aspectMask;
+ }
+}
+
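+/* Returns true when the attachment's initial layout can be folded into
+ * vkCmdBeginRendering (via VkRenderingAttachmentInitialLayoutInfoMESA)
+ * instead of an explicit image barrier: every aspect present must be
+ * cleared, the render area must cover the whole view, and every view
+ * selected by view_mask must currently be in the same layout (with extra
+ * restrictions for 3D images).
+ */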
+static bool
+can_use_attachment_initial_layout(struct vk_command_buffer *cmd_buffer,
+ uint32_t att_idx,
+ uint32_t view_mask,
+ VkImageLayout *layout_out,
+ VkImageLayout *stencil_layout_out)
+{
+ const struct vk_render_pass *pass = cmd_buffer->render_pass;
+ const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
+ const struct vk_render_pass_attachment *rp_att = &pass->attachments[att_idx];
+ struct vk_attachment_state *att_state = &cmd_buffer->attachments[att_idx];
+ const struct vk_image_view *image_view = att_state->image_view;
+
+ if ((rp_att->aspects & ~VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ rp_att->load_op != VK_ATTACHMENT_LOAD_OP_CLEAR)
+ return false;
+
+ if ((rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ rp_att->stencil_load_op != VK_ATTACHMENT_LOAD_OP_CLEAR)
+ return false;
+
+ if (cmd_buffer->render_area.offset.x != 0 ||
+ cmd_buffer->render_area.offset.y != 0 ||
+ cmd_buffer->render_area.extent.width != image_view->extent.width ||
+ cmd_buffer->render_area.extent.height != image_view->extent.height)
+ return false;
+
+ if (image_view->image->image_type == VK_IMAGE_TYPE_3D) {
+ /* For 3D images, the view has to be the whole thing */
+ if (image_view->base_array_layer != 0)
+ return false;
+
+ if (pass->is_multiview) {
+ if (!util_is_power_of_two_or_zero(view_mask + 1) ||
+ util_last_bit(view_mask) != image_view->layer_count)
+ return false;
+ } else {
+ if (framebuffer->layers != image_view->layer_count)
+ return false;
+ }
+ }
+
+ /* Finally, check if the entire thing is undefined. It's ok to smash the
+ * view_mask now as the only thing using it will be the loop below.
+ */
+
+ /* 3D is stupidly special. See transition_attachment() */
+ if (image_view->image->image_type == VK_IMAGE_TYPE_3D)
+ view_mask = 1;
+
+ VkImageLayout layout = VK_IMAGE_LAYOUT_MAX_ENUM;
+ VkImageLayout stencil_layout = VK_IMAGE_LAYOUT_MAX_ENUM;
+
+ assert(view_mask != 0);
+ u_foreach_bit(view, view_mask) {
+ assert(view >= 0 && view < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT);
+ struct vk_attachment_view_state *att_view_state = &att_state->views[view];
+
+ if (rp_att->aspects & ~VK_IMAGE_ASPECT_STENCIL_BIT) {
+ if (layout == VK_IMAGE_LAYOUT_MAX_ENUM)
+ layout = att_view_state->layout;
+ else if (layout != att_view_state->layout)
+ return false;
+ }
+
+ if (rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ if (stencil_layout == VK_IMAGE_LAYOUT_MAX_ENUM)
+ stencil_layout = att_view_state->stencil_layout;
+ else if (stencil_layout != att_view_state->stencil_layout)
+ return false;
+ }
+ }
+
+ if (layout != VK_IMAGE_LAYOUT_MAX_ENUM)
+ *layout_out = layout;
+ else if (layout_out != NULL)
+ *layout_out = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ if (stencil_layout != VK_IMAGE_LAYOUT_MAX_ENUM)
+ *stencil_layout_out = stencil_layout;
+ else if (stencil_layout_out != NULL)
+ *stencil_layout_out = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ return true;
+}
+
+uint32_t
+vk_command_buffer_get_attachment_layout(const struct vk_command_buffer *cmd_buffer,
+ const struct vk_image *image,
+ VkImageLayout *out_layout,
+ VkImageLayout *out_stencil_layout)
+{
+ const struct vk_render_pass *render_pass = cmd_buffer->render_pass;
+ assert(render_pass != NULL);
+
+ const struct vk_subpass *subpass =
+ &render_pass->subpasses[cmd_buffer->subpass_idx];
+ int first_view = ffs(subpass->view_mask) - 1;
+
+ for (uint32_t a = 0; a < render_pass->attachment_count; a++) {
+ if (cmd_buffer->attachments[a].image_view->image == image) {
+ *out_layout = cmd_buffer->attachments[a].views[first_view].layout;
+ *out_stencil_layout =
+ cmd_buffer->attachments[a].views[first_view].stencil_layout;
+ return a;
+ }
+ }
+ unreachable("Image not found in attachments");
+}
+
+void
+vk_command_buffer_set_attachment_layout(struct vk_command_buffer *cmd_buffer,
+ uint32_t att_idx,
+ VkImageLayout layout,
+ VkImageLayout stencil_layout)
+{
+ const struct vk_render_pass *render_pass = cmd_buffer->render_pass;
+ const struct vk_subpass *subpass =
+ &render_pass->subpasses[cmd_buffer->subpass_idx];
+ uint32_t view_mask = subpass->view_mask;
+ struct vk_attachment_state *att_state = &cmd_buffer->attachments[att_idx];
+
+ u_foreach_bit(view, view_mask) {
+ assert(view >= 0 && view < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT);
+ struct vk_attachment_view_state *att_view_state = &att_state->views[view];
+
+ att_view_state->layout = layout;
+ att_view_state->stencil_layout = stencil_layout;
+ }
+}
+
+static void
+transition_attachment(struct vk_command_buffer *cmd_buffer,
+ uint32_t att_idx,
+ uint32_t view_mask,
+ VkImageLayout layout,
+ VkImageLayout stencil_layout,
+ uint32_t *barrier_count,
+ uint32_t max_barrier_count,
+ VkImageMemoryBarrier2 *barriers)
+{
+ const struct vk_render_pass *pass = cmd_buffer->render_pass;
+ const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
+ const struct vk_render_pass_attachment *pass_att =
+ &pass->attachments[att_idx];
+ struct vk_attachment_state *att_state = &cmd_buffer->attachments[att_idx];
+ const struct vk_image_view *image_view = att_state->image_view;
+
+ /* 3D is stupidly special. From the Vulkan 1.3.204 spec:
+ *
+ * "When the VkImageSubresourceRange structure is used to select a
+ * subset of the slices of a 3D image’s mip level in order to create
+ * a 2D or 2D array image view of a 3D image created with
+ * VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, baseArrayLayer and
+ * layerCount specify the first slice index and the number of slices
+ * to include in the created image view. Such an image view can be
+ * used as a framebuffer attachment that refers only to the specified
+ * range of slices of the selected mip level. However, any layout
+ * transitions performed on such an attachment view during a render
+ * pass instance still apply to the entire subresource referenced
+ * which includes all the slices of the selected mip level."
+ *
+ * To deal with this, we expand out the layer range to include the
+ * entire 3D image and treat them as having only a single view even when
+ * multiview is enabled. This later part means that we effectively only
+ * track one image layout for the entire attachment rather than one per
+ * view like we do for all the others.
+ */
+ if (image_view->image->image_type == VK_IMAGE_TYPE_3D)
+ view_mask = 1;
+
+ u_foreach_bit(view, view_mask) {
+ assert(view >= 0 && view < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT);
+ struct vk_attachment_view_state *att_view_state = &att_state->views[view];
+
+ /* First, check to see if we even need a transition */
+ if (att_view_state->layout == layout &&
+ att_view_state->stencil_layout == stencil_layout)
+ continue;
+
+ VkImageSubresourceRange range = {
+ .aspectMask = pass_att->aspects,
+ .baseMipLevel = image_view->base_mip_level,
+ .levelCount = 1,
+ };
+
+ /* From the Vulkan 1.3.207 spec:
+ *
+ * "Automatic layout transitions apply to the entire image
+ * subresource attached to the framebuffer. If multiview is not
+ * enabled and the attachment is a view of a 1D or 2D image, the
+ * automatic layout transitions apply to the number of layers
+ * specified by VkFramebufferCreateInfo::layers. If multiview is
+ * enabled and the attachment is a view of a 1D or 2D image, the
+ * automatic layout transitions apply to the layers corresponding to
+ * views which are used by some subpass in the render pass, even if
+ * that subpass does not reference the given attachment. If the
+ * attachment view is a 2D or 2D array view of a 3D image, even if
+ * the attachment view only refers to a subset of the slices of the
+ * selected mip level of the 3D image, automatic layout transitions
+ * apply to the entire subresource referenced which is the entire mip
+ * level in this case."
+ */
+ if (image_view->image->image_type == VK_IMAGE_TYPE_3D) {
+ assert(view == 0);
+ range.baseArrayLayer = 0;
+ range.layerCount = image_view->extent.depth;
+ } else if (pass->is_multiview) {
+ range.baseArrayLayer = image_view->base_array_layer + view;
+ range.layerCount = 1;
+ } else {
+ assert(view == 0);
+ range.baseArrayLayer = image_view->base_array_layer;
+ range.layerCount = framebuffer->layers;
+ }
+
+ transition_image_range(image_view, range,
+ att_view_state->layout, layout,
+ att_view_state->stencil_layout, stencil_layout,
+ att_view_state->sample_locations,
+ barrier_count, max_barrier_count, barriers);
+
+ att_view_state->layout = layout;
+ att_view_state->stencil_layout = stencil_layout;
+ }
+}
+
+static void
+load_attachment(struct vk_command_buffer *cmd_buffer,
+ uint32_t att_idx, uint32_t view_mask,
+ VkImageLayout layout, VkImageLayout stencil_layout)
+{
+ const struct vk_render_pass *pass = cmd_buffer->render_pass;
+ const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
+ const struct vk_render_pass_attachment *rp_att = &pass->attachments[att_idx];
+ struct vk_attachment_state *att_state = &cmd_buffer->attachments[att_idx];
+ struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ /* Don't load any views we've already loaded */
+ view_mask &= ~att_state->views_loaded;
+ if (view_mask == 0)
+ return;
+
+ /* From here on, if we return, we loaded the views */
+ att_state->views_loaded |= view_mask;
+
+ /* We only need to load/store if there's a clear */
+ bool need_load_store = false;
+ if ((rp_att->aspects & ~VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ rp_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
+ need_load_store = true;
+
+ if ((rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ rp_att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
+ need_load_store = true;
+
+ if (!need_load_store)
+ return;
+
+ const VkRenderingAttachmentInfo att = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = vk_image_view_to_handle(att_state->image_view),
+ .imageLayout = layout,
+ .loadOp = rp_att->load_op,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .clearValue = att_state->clear_value,
+ };
+
+ const VkRenderingAttachmentInfo stencil_att = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = vk_image_view_to_handle(att_state->image_view),
+ .imageLayout = stencil_layout,
+ .loadOp = rp_att->stencil_load_op,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .clearValue = att_state->clear_value,
+ };
+
+ VkRenderingInfo render = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
+ .renderArea = cmd_buffer->render_area,
+ .layerCount = pass->is_multiview ? 1 : framebuffer->layers,
+ .viewMask = pass->is_multiview ? view_mask : 0,
+ };
+
+ if (rp_att->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ if (rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
+ render.pDepthAttachment = &att;
+ if (rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
+ render.pStencilAttachment = &stencil_att;
+ } else {
+ render.colorAttachmentCount = 1;
+ render.pColorAttachments = &att;
+ }
+
+ disp->CmdBeginRendering(vk_command_buffer_to_handle(cmd_buffer), &render);
+ disp->CmdEndRendering(vk_command_buffer_to_handle(cmd_buffer));
+}
+
+static void
+begin_subpass(struct vk_command_buffer *cmd_buffer,
+ const VkSubpassBeginInfo *begin_info)
+{
+ const struct vk_render_pass *pass = cmd_buffer->render_pass;
+ const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
+ const uint32_t subpass_idx = cmd_buffer->subpass_idx;
+ assert(subpass_idx < pass->subpass_count);
+ const struct vk_subpass *subpass = &pass->subpasses[subpass_idx];
+ struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ /* First, we figure out all our attachments and attempt to handle image
+ * layout transitions and load ops as part of vkCmdBeginRendering if we
+ * can. For any we can't handle this way, we'll need explicit barriers
+ * or quick vkCmdBegin/EndRendering to do the load op.
+ */
+
+ STACK_ARRAY(VkRenderingAttachmentInfo, color_attachments,
+ subpass->color_count);
+ STACK_ARRAY(VkRenderingAttachmentInitialLayoutInfoMESA,
+ color_attachment_initial_layouts,
+ subpass->color_count);
+
+ for (uint32_t i = 0; i < subpass->color_count; i++) {
+ const struct vk_subpass_attachment *sp_att =
+ &subpass->color_attachments[i];
+ VkRenderingAttachmentInfo *color_attachment = &color_attachments[i];
+
+ if (sp_att->attachment == VK_ATTACHMENT_UNUSED) {
+ *color_attachment = (VkRenderingAttachmentInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = VK_NULL_HANDLE,
+ };
+ continue;
+ }
+
+ assert(sp_att->attachment < pass->attachment_count);
+ const struct vk_render_pass_attachment *rp_att =
+ &pass->attachments[sp_att->attachment];
+ struct vk_attachment_state *att_state =
+ &cmd_buffer->attachments[sp_att->attachment];
+
+ *color_attachment = (VkRenderingAttachmentInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ .imageView = vk_image_view_to_handle(att_state->image_view),
+ .imageLayout = sp_att->layout,
+ };
+
+ if (!(subpass->view_mask & att_state->views_loaded)) {
+ /* None of these views have been used before */
+ color_attachment->loadOp = rp_att->load_op;
+ color_attachment->clearValue = att_state->clear_value;
+ att_state->views_loaded |= subpass->view_mask;
+
+ VkImageLayout initial_layout;
+ if (can_use_attachment_initial_layout(cmd_buffer,
+ sp_att->attachment,
+ subpass->view_mask,
+ &initial_layout, NULL) &&
+ sp_att->layout != initial_layout) {
+ assert(color_attachment->loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);
+
+ VkRenderingAttachmentInitialLayoutInfoMESA *color_initial_layout =
+ &color_attachment_initial_layouts[i];
+ *color_initial_layout = (VkRenderingAttachmentInitialLayoutInfoMESA) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA,
+ .initialLayout = initial_layout,
+ };
+ __vk_append_struct(color_attachment, color_initial_layout);
+
+ vk_command_buffer_set_attachment_layout(cmd_buffer,
+ sp_att->attachment,
+ sp_att->layout,
+ VK_IMAGE_LAYOUT_UNDEFINED);
+ }
+ } else {
+ /* We've seen at least one of the views of this attachment before so
+ * we need to LOAD_OP_LOAD.
+ */
+ color_attachment->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ }
+
+ if (!(subpass->view_mask & ~sp_att->last_subpass)) {
+ /* This is the last subpass for every view */
+ color_attachment->storeOp = rp_att->store_op;
+ } else {
+ /* For at least one of our views, this isn't the last subpass
+ *
+          * In the edge case where we have lots of weird overlap between the
+          * view masks of different subpasses, this may mean that we get
+          * STORE_OP_STORE in some places where it may have wanted
+          * STORE_OP_NONE, but that should be harmless.
+ */
+ color_attachment->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ }
+
+ if (sp_att->resolve != NULL) {
+ assert(sp_att->resolve->attachment < pass->attachment_count);
+ struct vk_attachment_state *res_att_state =
+ &cmd_buffer->attachments[sp_att->resolve->attachment];
+
+ /* Resolve attachments are entirely overwritten by the resolve
+ * operation so the load op really doesn't matter. We can consider
+ * the resolve as being the load.
+ */
+ res_att_state->views_loaded |= subpass->view_mask;
+
+ if (vk_format_is_int(res_att_state->image_view->format))
+ color_attachment->resolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT;
+ else
+ color_attachment->resolveMode = VK_RESOLVE_MODE_AVERAGE_BIT;
+
+ color_attachment->resolveImageView =
+ vk_image_view_to_handle(res_att_state->image_view);
+ color_attachment->resolveImageLayout = sp_att->resolve->layout;
+ } else if (subpass->mrtss.multisampledRenderToSingleSampledEnable &&
+ rp_att->samples == VK_SAMPLE_COUNT_1_BIT) {
+ if (vk_format_is_int(att_state->image_view->format))
+ color_attachment->resolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT;
+ else
+ color_attachment->resolveMode = VK_RESOLVE_MODE_AVERAGE_BIT;
+ }
+ }
+
+ VkRenderingAttachmentInfo depth_attachment = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ };
+ VkRenderingAttachmentInfo stencil_attachment = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ };
+ VkRenderingAttachmentInitialLayoutInfoMESA depth_initial_layout = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA,
+ };
+ VkRenderingAttachmentInitialLayoutInfoMESA stencil_initial_layout = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA,
+ };
+
+ const VkSampleLocationsInfoEXT *sample_locations = NULL;
+ if (subpass->depth_stencil_attachment != NULL) {
+ const struct vk_subpass_attachment *sp_att =
+ subpass->depth_stencil_attachment;
+
+ assert(sp_att->attachment < pass->attachment_count);
+ const struct vk_render_pass_attachment *rp_att =
+ &pass->attachments[sp_att->attachment];
+ struct vk_attachment_state *att_state =
+ &cmd_buffer->attachments[sp_att->attachment];
+
+ assert(sp_att->aspects == rp_att->aspects);
+ if (rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ depth_attachment.imageView =
+ vk_image_view_to_handle(att_state->image_view);
+ depth_attachment.imageLayout = sp_att->layout;
+ }
+
+ if (rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ stencil_attachment.imageView =
+ vk_image_view_to_handle(att_state->image_view);
+ stencil_attachment.imageLayout = sp_att->stencil_layout;
+ }
+
+ if (!(subpass->view_mask & att_state->views_loaded)) {
+ /* None of these views have been used before */
+ depth_attachment.loadOp = rp_att->load_op;
+ depth_attachment.clearValue = att_state->clear_value;
+ stencil_attachment.loadOp = rp_att->stencil_load_op;
+ stencil_attachment.clearValue = att_state->clear_value;
+ att_state->views_loaded |= subpass->view_mask;
+
+ VkImageLayout initial_layout, initial_stencil_layout;
+ if (can_use_attachment_initial_layout(cmd_buffer,
+ sp_att->attachment,
+ subpass->view_mask,
+ &initial_layout,
+ &initial_stencil_layout)) {
+ if ((rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ sp_att->layout != initial_layout) {
+ assert(depth_attachment.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);
+ depth_initial_layout.initialLayout = initial_layout;
+ __vk_append_struct(&depth_attachment,
+ &depth_initial_layout);
+ }
+
+ if ((rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ sp_att->stencil_layout != initial_stencil_layout) {
+ assert(stencil_attachment.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);
+ stencil_initial_layout.initialLayout = initial_stencil_layout;
+ __vk_append_struct(&stencil_attachment,
+ &stencil_initial_layout);
+ }
+
+ vk_command_buffer_set_attachment_layout(cmd_buffer,
+ sp_att->attachment,
+ sp_att->layout,
+ sp_att->stencil_layout);
+ }
+ } else {
+ /* We've seen at least one of the views of this attachment before so
+ * we need to LOAD_OP_LOAD.
+ */
+ depth_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ stencil_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ }
+
+ if (!(subpass->view_mask & ~sp_att->last_subpass)) {
+ /* This is the last subpass for every view */
+ depth_attachment.storeOp = rp_att->store_op;
+ stencil_attachment.storeOp = rp_att->stencil_store_op;
+ } else {
+ /* For at least one of our views, this isn't the last subpass
+ *
+          * In the edge case where we have lots of weird overlap between the
+          * view masks of different subpasses, this may mean that we get
+          * STORE_OP_STORE in some places where it may have wanted
+          * STORE_OP_NONE, but that should be harmless.
+ */
+ depth_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ stencil_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ }
+
+ /* From the Vulkan 1.3.212 spec:
+ *
+ * "If the current render pass does not use the attachment as a
+ * depth/stencil attachment in any subpass that happens-before, the
+ * automatic layout transition uses the sample locations state
+ * specified in the sampleLocationsInfo member of the element of the
+ * VkRenderPassSampleLocationsBeginInfoEXT::pAttachmentInitialSampleLocations
+ * array for which the attachmentIndex member equals the attachment
+ * index of the attachment, if one is specified. Otherwise, the
+ * automatic layout transition uses the sample locations state
+ * specified in the sampleLocationsInfo member of the element of the
+ * VkRenderPassSampleLocationsBeginInfoEXT::pPostSubpassSampleLocations
+ * array for which the subpassIndex member equals the index of the
+ * subpass that last used the attachment as a depth/stencil
+ * attachment, if one is specified."
+ *
+ * Unfortunately, this says nothing whatsoever about multiview.
+ * However, since multiview render passes are described as a single-view
+ * render pass repeated per-view, we assume this is per-view.
+ */
+ if (cmd_buffer->pass_sample_locations != NULL &&
+ (att_state->image_view->image->create_flags &
+ VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT)) {
+ sample_locations =
+ get_subpass_sample_locations(cmd_buffer->pass_sample_locations,
+ subpass_idx);
+
+ u_foreach_bit(view, subpass->view_mask)
+ att_state->views[view].sample_locations = sample_locations;
+ }
+
+ if (sp_att->resolve != NULL ||
+ (subpass->mrtss.multisampledRenderToSingleSampledEnable &&
+ rp_att->samples == VK_SAMPLE_COUNT_1_BIT)) {
+ const struct vk_subpass_attachment *res_sp_att = sp_att->resolve ? sp_att->resolve : sp_att;
+ assert(res_sp_att->attachment < pass->attachment_count);
+ const struct vk_render_pass_attachment *res_rp_att =
+ &pass->attachments[res_sp_att->attachment];
+ struct vk_attachment_state *res_att_state =
+ &cmd_buffer->attachments[res_sp_att->attachment];
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * "VkSubpassDescriptionDepthStencilResolve::depthResolveMode is
+ * ignored if the VkFormat of the pDepthStencilResolveAttachment
+ * does not have a depth component. Similarly,
+ * VkSubpassDescriptionDepthStencilResolve::stencilResolveMode is
+ * ignored if the VkFormat of the pDepthStencilResolveAttachment
+ * does not have a stencil component."
+ *
+ * TODO: Should we handle this here or when we create the render
+ * pass? Handling it here makes load ops "correct" in the sense
+ * that, if we resolve to the wrong aspect, we will still consider
+ * it bound and clear it if requested.
+ */
+ VkResolveModeFlagBits depth_resolve_mode = VK_RESOLVE_MODE_NONE;
+ if (res_rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
+ depth_resolve_mode = subpass->depth_resolve_mode;
+
+ VkResolveModeFlagBits stencil_resolve_mode = VK_RESOLVE_MODE_NONE;
+ if (res_rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
+ stencil_resolve_mode = subpass->stencil_resolve_mode;
+
+ VkImageAspectFlags resolved_aspects = 0;
+
+ if (depth_resolve_mode != VK_RESOLVE_MODE_NONE) {
+ depth_attachment.resolveMode = depth_resolve_mode;
+ if (sp_att->resolve) {
+ depth_attachment.resolveImageView =
+ vk_image_view_to_handle(res_att_state->image_view);
+ depth_attachment.resolveImageLayout =
+ sp_att->resolve->layout;
+ }
+
+ resolved_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ }
+
+ if (stencil_resolve_mode != VK_RESOLVE_MODE_NONE) {
+ stencil_attachment.resolveMode = stencil_resolve_mode;
+ if (sp_att->resolve) {
+ stencil_attachment.resolveImageView =
+ vk_image_view_to_handle(res_att_state->image_view);
+ stencil_attachment.resolveImageLayout =
+ sp_att->resolve->stencil_layout;
+ }
+
+ resolved_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+
+ if (sp_att->resolve && resolved_aspects == rp_att->aspects) {
+ /* The resolve attachment is entirely overwritten by the
+ * resolve operation so the load op really doesn't matter.
+ * We can consider the resolve as being the load.
+ */
+ res_att_state->views_loaded |= subpass->view_mask;
+ }
+ }
+ }
+
+ /* Next, handle any barriers we need. This may include a general
+ * VkMemoryBarrier for subpass dependencies and it may include some
+ * number of VkImageMemoryBarriers for layout transitions.
+ */
+
+ bool needs_mem_barrier = false;
+ VkMemoryBarrier2 mem_barrier = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ };
+ for (uint32_t d = 0; d < pass->dependency_count; d++) {
+ const struct vk_subpass_dependency *dep = &pass->dependencies[d];
+ if (dep->dst_subpass != subpass_idx)
+ continue;
+
+ if (dep->flags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkSubpassDependency2-dependencyFlags-03091
+ *
+ * "If dependencyFlags includes VK_DEPENDENCY_VIEW_LOCAL_BIT,
+ * dstSubpass must not be equal to VK_SUBPASS_EXTERNAL"
+ */
+ assert(dep->src_subpass != VK_SUBPASS_EXTERNAL);
+
+ assert(dep->src_subpass < pass->subpass_count);
+ const struct vk_subpass *src_subpass =
+ &pass->subpasses[dep->src_subpass];
+
+ /* Figure out the set of views in the source subpass affected by this
+ * dependency.
+ */
+ uint32_t src_dep_view_mask = subpass->view_mask;
+ if (dep->view_offset >= 0)
+ src_dep_view_mask <<= dep->view_offset;
+ else
+ src_dep_view_mask >>= -dep->view_offset;
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * "If the dependency is view-local, then each view (dstView) in
+ * the destination subpass depends on the view dstView +
+ * pViewOffsets[dependency] in the source subpass. If there is not
+ * such a view in the source subpass, then this dependency does
+ * not affect that view in the destination subpass."
+ */
+ if (!(src_subpass->view_mask & src_dep_view_mask))
+ continue;
+ }
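+      /* Worked example with made-up masks: if this subpass's view_mask is
+       * 0x6 (views 1 and 2) and the dependency's viewOffset is -1, the
+       * source views of interest are 0x3 (views 0 and 1); if the source
+       * subpass's view_mask does not intersect 0x3, the continue above
+       * skips the dependency entirely.
+       */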
+
+ needs_mem_barrier = true;
+ mem_barrier.srcStageMask |= dep->src_stage_mask;
+ mem_barrier.srcAccessMask |= dep->src_access_mask;
+ mem_barrier.dstStageMask |= dep->dst_stage_mask;
+ mem_barrier.dstAccessMask |= dep->dst_access_mask;
+ }
+
+ if (subpass_idx == 0) {
+ /* From the Vulkan 1.3.232 spec:
+ *
+ * "If there is no subpass dependency from VK_SUBPASS_EXTERNAL to the
+ * first subpass that uses an attachment, then an implicit subpass
+ * dependency exists from VK_SUBPASS_EXTERNAL to the first subpass it
+ * is used in. The implicit subpass dependency only exists if there
+ * exists an automatic layout transition away from initialLayout. The
+ * subpass dependency operates as if defined with the following
+ * parameters:
+ *
+ * VkSubpassDependency implicitDependency = {
+ * .srcSubpass = VK_SUBPASS_EXTERNAL;
+ * .dstSubpass = firstSubpass; // First subpass attachment is used in
+ * .srcStageMask = VK_PIPELINE_STAGE_NONE;
+ * .dstStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ * .srcAccessMask = 0;
+ * .dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ * VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ * VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ * VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ * VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ * .dependencyFlags = 0;
+ * };"
+ *
+ * We could track individual subpasses and attachments and views to make
+ * sure we only insert this barrier when it's absolutely necessary.
+ * However, this is only going to happen for the first subpass and
+ * you're probably going to take a stall in BeginRenderPass() anyway.
+ * If this is ever a perf problem, we can re-evaluate and do something
+       * more intelligent at that time.
+ */
+ needs_mem_barrier = true;
+ mem_barrier.dstStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ mem_barrier.dstAccessMask |= VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ }
+
+ uint32_t max_image_barrier_count = 0;
+ for (uint32_t a = 0; a < subpass->attachment_count; a++) {
+ const struct vk_subpass_attachment *sp_att = &subpass->attachments[a];
+ if (sp_att->attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ assert(sp_att->attachment < pass->attachment_count);
+ const struct vk_render_pass_attachment *rp_att =
+ &pass->attachments[sp_att->attachment];
+
+ max_image_barrier_count += util_bitcount(subpass->view_mask) *
+ util_bitcount(rp_att->aspects);
+ }
+ if (pass->fragment_density_map.attachment != VK_ATTACHMENT_UNUSED)
+ max_image_barrier_count += util_bitcount(subpass->view_mask);
+ STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, max_image_barrier_count);
+ uint32_t image_barrier_count = 0;
+
+ for (uint32_t a = 0; a < subpass->attachment_count; a++) {
+ const struct vk_subpass_attachment *sp_att = &subpass->attachments[a];
+ if (sp_att->attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ /* If we're using an initial layout, the attachment will already be
+ * marked as transitioned and this will be a no-op.
+ */
+ transition_attachment(cmd_buffer, sp_att->attachment,
+ subpass->view_mask,
+ sp_att->layout, sp_att->stencil_layout,
+ &image_barrier_count,
+ max_image_barrier_count,
+ image_barriers);
+ }
+ if (pass->fragment_density_map.attachment != VK_ATTACHMENT_UNUSED) {
+ transition_attachment(cmd_buffer, pass->fragment_density_map.attachment,
+ subpass->view_mask,
+ pass->fragment_density_map.layout,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ &image_barrier_count,
+ max_image_barrier_count,
+ image_barriers);
+ }
+ assert(image_barrier_count <= max_image_barrier_count);
+
+ if (needs_mem_barrier || image_barrier_count > 0) {
+ const VkDependencyInfo dependency_info = {
+ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ .dependencyFlags = 0,
+ .memoryBarrierCount = needs_mem_barrier ? 1 : 0,
+ .pMemoryBarriers = needs_mem_barrier ? &mem_barrier : NULL,
+ .imageMemoryBarrierCount = image_barrier_count,
+ .pImageMemoryBarriers = image_barrier_count > 0 ?
+ image_barriers : NULL,
+ };
+ cmd_buffer->runtime_rp_barrier = true;
+ disp->CmdPipelineBarrier2(vk_command_buffer_to_handle(cmd_buffer),
+ &dependency_info);
+ cmd_buffer->runtime_rp_barrier = false;
+ }
+
+ STACK_ARRAY_FINISH(image_barriers);
+
+ /* Next, handle any VK_ATTACHMENT_LOAD_OP_CLEAR that we couldn't handle
+ * directly by emitting a quick vkCmdBegin/EndRendering to do the load.
+ */
+ for (uint32_t a = 0; a < subpass->attachment_count; a++) {
+ const struct vk_subpass_attachment *sp_att = &subpass->attachments[a];
+ if (sp_att->attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ load_attachment(cmd_buffer, sp_att->attachment, subpass->view_mask,
+ sp_att->layout, sp_att->stencil_layout);
+ }
+
+ /* TODO: Handle preserve attachments
+ *
+ * For immediate renderers, this isn't a big deal as LOAD_OP_LOAD and
+ * STORE_OP_STORE are effectively free. However, before this gets used on
+ * a tiling GPU, we should really hook up preserve attachments and use them
+ * to determine when we can use LOAD/STORE_OP_DONT_CARE between subpasses.
+ */
+
+ VkRenderingInfo rendering = {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
+ .renderArea = cmd_buffer->render_area,
+ .layerCount = pass->is_multiview ? 1 : framebuffer->layers,
+ .viewMask = pass->is_multiview ? subpass->view_mask : 0,
+ .colorAttachmentCount = subpass->color_count,
+ .pColorAttachments = color_attachments,
+ .pDepthAttachment = &depth_attachment,
+ .pStencilAttachment = &stencil_attachment,
+ };
+
+ VkRenderingFragmentShadingRateAttachmentInfoKHR fsr_attachment;
+ if (subpass->fragment_shading_rate_attachment) {
+ const struct vk_subpass_attachment *sp_att =
+ subpass->fragment_shading_rate_attachment;
+
+ assert(sp_att->attachment < pass->attachment_count);
+ struct vk_attachment_state *att_state =
+ &cmd_buffer->attachments[sp_att->attachment];
+
+ /* Fragment shading rate attachments have no loadOp (it's implicitly
+ * LOAD_OP_LOAD) so we need to ensure the load op happens.
+ */
+ load_attachment(cmd_buffer, sp_att->attachment, subpass->view_mask,
+ sp_att->layout, sp_att->stencil_layout);
+
+ fsr_attachment = (VkRenderingFragmentShadingRateAttachmentInfoKHR) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR,
+ .imageView = vk_image_view_to_handle(att_state->image_view),
+ .imageLayout = sp_att->layout,
+ .shadingRateAttachmentTexelSize =
+ subpass->fragment_shading_rate_attachment_texel_size,
+ };
+ __vk_append_struct(&rendering, &fsr_attachment);
+ }
+
+ VkRenderingFragmentDensityMapAttachmentInfoEXT fdm_attachment;
+ if (pass->fragment_density_map.attachment != VK_ATTACHMENT_UNUSED) {
+ assert(pass->fragment_density_map.attachment < pass->attachment_count);
+ struct vk_attachment_state *att_state =
+ &cmd_buffer->attachments[pass->fragment_density_map.attachment];
+
+ /* From the Vulkan 1.3.125 spec:
+ *
+ * VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550
+ *
+ * If fragmentDensityMapAttachment is not VK_ATTACHMENT_UNUSED,
+ * fragmentDensityMapAttachment must reference an attachment with a
+ * loadOp equal to VK_ATTACHMENT_LOAD_OP_LOAD or
+ * VK_ATTACHMENT_LOAD_OP_DONT_CARE
+ *
+ * This means we don't have to implement the load op.
+ */
+
+ fdm_attachment = (VkRenderingFragmentDensityMapAttachmentInfoEXT) {
+ .sType = VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT,
+ .imageView = vk_image_view_to_handle(att_state->image_view),
+ .imageLayout = pass->fragment_density_map.layout,
+ };
+ __vk_append_struct(&rendering, &fdm_attachment);
+ }
+
+ VkSampleLocationsInfoEXT sample_locations_tmp;
+ if (sample_locations) {
+ sample_locations_tmp = *sample_locations;
+ __vk_append_struct(&rendering, &sample_locations_tmp);
+ }
+
+   /* Append this one last because it lives in the subpass and we don't want
+    * it to be changed by appending other structures later.
+    */
+ if (subpass->mrtss.multisampledRenderToSingleSampledEnable)
+ __vk_append_struct(&rendering, (void *)&subpass->mrtss);
+
+ disp->CmdBeginRendering(vk_command_buffer_to_handle(cmd_buffer),
+ &rendering);
+
+ STACK_ARRAY_FINISH(color_attachments);
+ STACK_ARRAY_FINISH(color_attachment_initial_layouts);
+}
+
+static void
+end_subpass(struct vk_command_buffer *cmd_buffer,
+ const VkSubpassEndInfo *end_info)
+{
+ const struct vk_render_pass *pass = cmd_buffer->render_pass;
+ const uint32_t subpass_idx = cmd_buffer->subpass_idx;
+ struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ disp->CmdEndRendering(vk_command_buffer_to_handle(cmd_buffer));
+
+ bool needs_mem_barrier = false;
+ VkMemoryBarrier2 mem_barrier = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ };
+ for (uint32_t d = 0; d < pass->dependency_count; d++) {
+ const struct vk_subpass_dependency *dep = &pass->dependencies[d];
+ if (dep->src_subpass != subpass_idx)
+ continue;
+
+ if (dep->dst_subpass != VK_SUBPASS_EXTERNAL)
+ continue;
+
+ needs_mem_barrier = true;
+ mem_barrier.srcStageMask |= dep->src_stage_mask;
+ mem_barrier.srcAccessMask |= dep->src_access_mask;
+ mem_barrier.dstStageMask |= dep->dst_stage_mask;
+ mem_barrier.dstAccessMask |= dep->dst_access_mask;
+ }
+
+ if (subpass_idx == pass->subpass_count - 1) {
+ /* From the Vulkan 1.3.232 spec:
+ *
+ * "Similarly, if there is no subpass dependency from the last
+ * subpass that uses an attachment to VK_SUBPASS_EXTERNAL, then an
+ * implicit subpass dependency exists from the last subpass it is
+ * used in to VK_SUBPASS_EXTERNAL. The implicit subpass dependency
+ * only exists if there exists an automatic layout transition into
+ * finalLayout. The subpass dependency operates as if defined with
+ * the following parameters:
+ *
+ * VkSubpassDependency implicitDependency = {
+ * .srcSubpass = lastSubpass; // Last subpass attachment is used in
+ * .dstSubpass = VK_SUBPASS_EXTERNAL;
+ * .srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ * .dstStageMask = VK_PIPELINE_STAGE_NONE;
+ * .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ * VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ * .dstAccessMask = 0;
+ * .dependencyFlags = 0;
+ * };"
+ *
+ * We could track individual subpasses and attachments and views to make
+ * sure we only insert this barrier when it's absolutely necessary.
+ * However, this is only going to happen for the last subpass and
+ * you're probably going to take a stall in EndRenderPass() anyway.
+ * If this is ever a perf problem, we can re-evaluate and do something
+       * more intelligent at that time.
+ */
+ needs_mem_barrier = true;
+ mem_barrier.srcStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ mem_barrier.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ }
+
+ if (needs_mem_barrier) {
+ const VkDependencyInfo dependency_info = {
+ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ .dependencyFlags = 0,
+ .memoryBarrierCount = 1,
+ .pMemoryBarriers = &mem_barrier,
+ };
+ cmd_buffer->runtime_rp_barrier = true;
+ disp->CmdPipelineBarrier2(vk_command_buffer_to_handle(cmd_buffer),
+ &dependency_info);
+ cmd_buffer->runtime_rp_barrier = false;
+ }
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBeginInfo,
+ const VkSubpassBeginInfo *pSubpassBeginInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(vk_render_pass, pass, pRenderPassBeginInfo->renderPass);
+ VK_FROM_HANDLE(vk_framebuffer, framebuffer,
+ pRenderPassBeginInfo->framebuffer);
+
+ assert(cmd_buffer->render_pass == NULL);
+ cmd_buffer->render_pass = pass;
+ cmd_buffer->subpass_idx = 0;
+
+ assert(cmd_buffer->framebuffer == NULL);
+ cmd_buffer->framebuffer = framebuffer;
+
+ cmd_buffer->render_area = pRenderPassBeginInfo->renderArea;
+
+ assert(cmd_buffer->attachments == NULL);
+ if (pass->attachment_count > ARRAY_SIZE(cmd_buffer->_attachments)) {
+ cmd_buffer->attachments = malloc(pass->attachment_count *
+ sizeof(*cmd_buffer->attachments));
+ } else {
+ cmd_buffer->attachments = cmd_buffer->_attachments;
+ }
+
+ const VkRenderPassAttachmentBeginInfo *attach_begin =
+ vk_find_struct_const(pRenderPassBeginInfo,
+ RENDER_PASS_ATTACHMENT_BEGIN_INFO);
+ if (!attach_begin)
+ assert(pass->attachment_count == framebuffer->attachment_count);
+
+ const VkImageView *image_views;
+ if (attach_begin && attach_begin->attachmentCount != 0) {
+ assert(attach_begin->attachmentCount == pass->attachment_count);
+ image_views = attach_begin->pAttachments;
+ } else {
+ assert(framebuffer->attachment_count >= pass->attachment_count);
+ image_views = framebuffer->attachments;
+ }
+
+ for (uint32_t a = 0; a < pass->attachment_count; ++a) {
+ VK_FROM_HANDLE(vk_image_view, image_view, image_views[a]);
+ const struct vk_render_pass_attachment *pass_att = &pass->attachments[a];
+ struct vk_attachment_state *att_state = &cmd_buffer->attachments[a];
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkFramebufferCreateInfo-pAttachments-00880
+ *
+ * "If renderpass is not VK_NULL_HANDLE and flags does not include
+ * VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, each element of pAttachments
+ * must have been created with a VkFormat value that matches the
+ * VkFormat specified by the corresponding VkAttachmentDescription in
+ * renderPass"
+ *
+ * and
+ *
+ * VUID-VkRenderPassBeginInfo-framebuffer-03216
+ *
+ * "If framebuffer was created with a VkFramebufferCreateInfo::flags
+ * value that included VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, each
+ * element of the pAttachments member of a
+ * VkRenderPassAttachmentBeginInfo structure included in the pNext
+ * chain must be a VkImageView of an image created with a value of
+ * VkImageViewCreateInfo::format equal to the corresponding value of
+ * VkAttachmentDescription::format in renderPass"
+ */
+ assert(image_view->format == pass_att->format);
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkFramebufferCreateInfo-pAttachments-00881
+ *
+ * "If renderpass is not VK_NULL_HANDLE and flags does not include
+ * VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, each element of pAttachments
+ * must have been created with a samples value that matches the
+ * samples value specified by the corresponding
+ * VkAttachmentDescription in renderPass"
+ *
+ * and
+ *
+ * VUID-VkRenderPassBeginInfo-framebuffer-03217
+ *
+ * "If framebuffer was created with a VkFramebufferCreateInfo::flags
+ * value that included VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, each
+ * element of the pAttachments member of a
+ * VkRenderPassAttachmentBeginInfo structure included in the pNext
+ * chain must be a VkImageView of an image created with a value of
+ * VkImageCreateInfo::samples equal to the corresponding value of
+ * VkAttachmentDescription::samples in renderPass"
+ */
+ assert(image_view->image->samples == pass_att->samples);
+
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * If multiview is enabled and the shading rate attachment has
+ * multiple layers, the shading rate attachment texel is selected
+ * from the layer determined by the ViewIndex built-in. If multiview
+ * is disabled, and both the shading rate attachment and the
+ * framebuffer have multiple layers, the shading rate attachment
+ * texel is selected from the layer determined by the Layer built-in.
+ * Otherwise, the texel is unconditionally selected from the first
+ * layer of the attachment.
+ */
+ if (!(image_view->usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR))
+ assert(util_last_bit(pass_att->view_mask) <= image_view->layer_count);
+
+ *att_state = (struct vk_attachment_state) {
+ .image_view = image_view,
+ .views_loaded = 0,
+ };
+
+ for (uint32_t v = 0; v < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT; v++) {
+ att_state->views[v] = (struct vk_attachment_view_state) {
+ .layout = pass_att->initial_layout,
+ .stencil_layout = pass_att->initial_stencil_layout,
+ };
+ }
+
+ if (a < pRenderPassBeginInfo->clearValueCount)
+ att_state->clear_value = pRenderPassBeginInfo->pClearValues[a];
+ }
+
+ const VkRenderPassSampleLocationsBeginInfoEXT *rp_sl_info =
+ vk_find_struct_const(pRenderPassBeginInfo->pNext,
+ RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT);
+ if (rp_sl_info) {
+ cmd_buffer->pass_sample_locations = clone_rp_sample_locations(rp_sl_info);
+ assert(cmd_buffer->pass_sample_locations);
+
+ for (uint32_t i = 0; i < rp_sl_info->attachmentInitialSampleLocationsCount; i++) {
+ const VkAttachmentSampleLocationsEXT *att_sl =
+ &rp_sl_info->pAttachmentInitialSampleLocations[i];
+
+ assert(att_sl->attachmentIndex < pass->attachment_count);
+ struct vk_attachment_state *att_state =
+ &cmd_buffer->attachments[att_sl->attachmentIndex];
+
+ /* Sample locations only matter for depth/stencil images created with
+ * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT
+ */
+ if (vk_format_is_depth_or_stencil(att_state->image_view->format) &&
+ (att_state->image_view->image->create_flags &
+ VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT)) {
+ for (uint32_t v = 0; v < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT; v++)
+ att_state->views[v].sample_locations = &att_sl->sampleLocationsInfo;
+ }
+ }
+ }
+
+ begin_subpass(cmd_buffer, pSubpassBeginInfo);
+}
+
+void
+vk_command_buffer_reset_render_pass(struct vk_command_buffer *cmd_buffer)
+{
+ cmd_buffer->render_pass = NULL;
+ cmd_buffer->subpass_idx = 0;
+ cmd_buffer->framebuffer = NULL;
+ if (cmd_buffer->attachments != cmd_buffer->_attachments)
+ free(cmd_buffer->attachments);
+ cmd_buffer->attachments = NULL;
+ if (cmd_buffer->pass_sample_locations != NULL)
+ vk_free(vk_default_allocator(), cmd_buffer->pass_sample_locations);
+ cmd_buffer->pass_sample_locations = NULL;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdNextSubpass2(VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfo *pSubpassBeginInfo,
+ const VkSubpassEndInfo *pSubpassEndInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ end_subpass(cmd_buffer, pSubpassEndInfo);
+ cmd_buffer->subpass_idx++;
+ begin_subpass(cmd_buffer, pSubpassBeginInfo);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfo *pSubpassEndInfo)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ const struct vk_render_pass *pass = cmd_buffer->render_pass;
+ struct vk_device_dispatch_table *disp =
+ &cmd_buffer->base.device->dispatch_table;
+
+ end_subpass(cmd_buffer, pSubpassEndInfo);
+
+ /* Make sure all our attachments end up in their finalLayout */
+
+ uint32_t max_image_barrier_count = 0;
+ for (uint32_t a = 0; a < pass->attachment_count; a++) {
+ const struct vk_render_pass_attachment *rp_att = &pass->attachments[a];
+
+ max_image_barrier_count += util_bitcount(pass->view_mask) *
+ util_bitcount(rp_att->aspects);
+ }
+ STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, max_image_barrier_count);
+ uint32_t image_barrier_count = 0;
+
+ for (uint32_t a = 0; a < pass->attachment_count; a++) {
+ const struct vk_render_pass_attachment *rp_att = &pass->attachments[a];
+
+ transition_attachment(cmd_buffer, a, pass->view_mask,
+ rp_att->final_layout,
+ rp_att->final_stencil_layout,
+ &image_barrier_count,
+ max_image_barrier_count,
+ image_barriers);
+ }
+ assert(image_barrier_count <= max_image_barrier_count);
+
+ if (image_barrier_count > 0) {
+ const VkDependencyInfo dependency_info = {
+ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ .dependencyFlags = 0,
+ .imageMemoryBarrierCount = image_barrier_count,
+ .pImageMemoryBarriers = image_barriers,
+ };
+ cmd_buffer->runtime_rp_barrier = true;
+ disp->CmdPipelineBarrier2(vk_command_buffer_to_handle(cmd_buffer),
+ &dependency_info);
+ cmd_buffer->runtime_rp_barrier = false;
+ }
+
+ STACK_ARRAY_FINISH(image_barriers);
+
+ vk_command_buffer_reset_render_pass(cmd_buffer);
+}
diff --git a/src/vulkan/runtime/vk_render_pass.h b/src/vulkan/runtime/vk_render_pass.h
new file mode 100644
index 00000000000..9acd65aa3ad
--- /dev/null
+++ b/src/vulkan/runtime/vk_render_pass.h
@@ -0,0 +1,461 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_RENDER_PASS_H
+#define VK_RENDER_PASS_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_command_buffer;
+struct vk_image;
+
+/**
+ * Pseudo-extension struct that may be chained into VkRenderingAttachmentInfo
+ * to indicate an initial layout for the attachment. This is only allowed if
+ * all of the following conditions are met:
+ *
+ * 1. VkRenderingAttachmentInfo::loadOp == LOAD_OP_CLEAR
+ *
+ * 2. VkRenderingInfo::renderArea is the entire image view LOD
+ *
+ * 3. For 3D image attachments, VkRenderingInfo::viewMask == 0 AND
+ * VkRenderingInfo::layerCount references the entire bound image view
+ * OR VkRenderingInfo::viewMask is dense (no holes) and references the
+ * entire bound image view. (2D and 2D array images have no such
+ * requirement.)
+ *
+ * If this struct is included in the pNext chain of a
+ * VkRenderingAttachmentInfo, the driver is responsible for transitioning the
+ * bound region of the image from
+ * VkRenderingAttachmentInitialLayoutInfoMESA::initialLayout to
+ * VkRenderingAttachmentInfo::imageLayout prior to rendering.
+ */
+typedef struct VkRenderingAttachmentInitialLayoutInfoMESA {
+ VkStructureType sType;
+#define VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA (VkStructureType)1000044901
+#define VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA_cast VkRenderingAttachmentInitialLayoutInfoMESA
+ const void* pNext;
+
+ /** Initial layout of the attachment */
+ VkImageLayout initialLayout;
+} VkRenderingAttachmentInitialLayoutInfoMESA;
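+
+/* A minimal consumer-side sketch (the drv_* helper below is hypothetical):
+ * a driver's CmdBeginRendering implementation can look for this struct in
+ * each attachment's pNext chain and fold the extra transition into its own
+ * layout handling.
+ *
+ *    const VkRenderingAttachmentInitialLayoutInfoMESA *initial_layout_info =
+ *       vk_find_struct_const(att->pNext,
+ *                            RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA);
+ *    const VkImageLayout initial_layout = initial_layout_info != NULL ?
+ *       initial_layout_info->initialLayout : att->imageLayout;
+ *    drv_transition_attachment(cmd_buffer, att->imageView,
+ *                              initial_layout, att->imageLayout);
+ */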
+
+/***/
+struct vk_subpass_attachment {
+ /** VkAttachmentReference2::attachment */
+ uint32_t attachment;
+
+ /** Aspects referenced by this attachment
+ *
+ * For an input attachment, this is VkAttachmentReference2::aspectMask.
+ * For all others, it's equal to vk_render_pass_attachment::aspects.
+ */
+ VkImageAspectFlags aspects;
+
+ /** Usage for this attachment
+ *
+ * This is a single VK_IMAGE_USAGE_* describing the usage of this subpass
+ * attachment. Resolve attachments are VK_IMAGE_USAGE_TRANSFER_DST_BIT.
+ */
+ VkImageUsageFlagBits usage;
+
+ /** VkAttachmentReference2::layout */
+ VkImageLayout layout;
+
+ /** VkAttachmentReferenceStencilLayout::stencilLayout
+ *
+ * If VK_KHR_separate_depth_stencil_layouts is not used, this will be
+ * layout if the attachment contains stencil and VK_IMAGE_LAYOUT_UNDEFINED
+ * otherwise.
+ */
+ VkImageLayout stencil_layout;
+
+ /** A per-view mask of whether this is the last use of this attachment
+ *
+ * If the same render pass attachment is used multiple ways within a
+ * subpass, corresponding last_subpass bits will be set in all of them.
+ * For the non-multiview case, only the first bit is used.
+ */
+ uint32_t last_subpass;
+
+ /** Resolve attachment, if any */
+ struct vk_subpass_attachment *resolve;
+};
+
+/***/
+struct vk_subpass {
+ /** Count of all attachments referenced by this subpass */
+ uint32_t attachment_count;
+
+ /** Array of all attachments referenced by this subpass */
+ struct vk_subpass_attachment *attachments;
+
+ /** VkSubpassDescription2::inputAttachmentCount */
+ uint32_t input_count;
+
+ /** VkSubpassDescription2::pInputAttachments */
+ struct vk_subpass_attachment *input_attachments;
+
+ /** VkSubpassDescription2::colorAttachmentCount */
+ uint32_t color_count;
+
+ /** VkSubpassDescription2::pColorAttachments */
+ struct vk_subpass_attachment *color_attachments;
+
+ /** VkSubpassDescription2::colorAttachmentCount or zero */
+ uint32_t color_resolve_count;
+
+ /** VkSubpassDescription2::pResolveAttachments */
+ struct vk_subpass_attachment *color_resolve_attachments;
+
+ /** VkSubpassDescription2::pDepthStencilAttachment */
+ struct vk_subpass_attachment *depth_stencil_attachment;
+
+ /** VkSubpassDescriptionDepthStencilResolve::pDepthStencilResolveAttachment */
+ struct vk_subpass_attachment *depth_stencil_resolve_attachment;
+
+ /** VkFragmentShadingRateAttachmentInfoKHR::pFragmentShadingRateAttachment */
+ struct vk_subpass_attachment *fragment_shading_rate_attachment;
+
+ /** VkSubpassDescription2::viewMask or 1 for non-multiview
+ *
+ * For all view masks in the vk_render_pass data structure, we use a mask
+ * of 1 for non-multiview instead of a mask of 0. To determine if the
+ * render pass is multiview or not, see vk_render_pass::is_multiview.
+ */
+ uint32_t view_mask;
+
+ /** VkSubpassDescriptionDepthStencilResolve::depthResolveMode */
+ VkResolveModeFlagBits depth_resolve_mode;
+
+ /** VkSubpassDescriptionDepthStencilResolve::stencilResolveMode */
+ VkResolveModeFlagBits stencil_resolve_mode;
+
+ /** VkFragmentShadingRateAttachmentInfoKHR::shadingRateAttachmentTexelSize */
+ VkExtent2D fragment_shading_rate_attachment_texel_size;
+
+ /** Extra VkPipelineCreateFlags for this subpass */
+ VkPipelineCreateFlagBits2KHR pipeline_flags;
+
+ /** VkAttachmentSampleCountInfoAMD for this subpass
+ *
+ * This is in the pNext chain of pipeline_info and inheritance_info.
+ */
+ VkAttachmentSampleCountInfoAMD sample_count_info_amd;
+
+ /** VkPipelineRenderingCreateInfo for this subpass
+ *
+ * Returned by vk_get_pipeline_rendering_create_info() if
+ * VkGraphicsPipelineCreateInfo::renderPass != VK_NULL_HANDLE.
+ */
+ VkPipelineRenderingCreateInfo pipeline_info;
+
+ /** VkCommandBufferInheritanceRenderingInfo for this subpass
+ *
+ * Returned by vk_get_command_buffer_inheritance_rendering_info() if
+ * VkCommandBufferInheritanceInfo::renderPass != VK_NULL_HANDLE.
+ */
+ VkCommandBufferInheritanceRenderingInfo inheritance_info;
+
+ /** VkMultisampledRenderToSingleSampledInfoEXT for this subpass */
+ VkMultisampledRenderToSingleSampledInfoEXT mrtss;
+};
+
+/***/
+struct vk_render_pass_attachment {
+ /** VkAttachmentDescription2::format */
+ VkFormat format;
+
+ /** Aspects contained in format */
+ VkImageAspectFlags aspects;
+
+ /** VkAttachmentDescription2::samples */
+ uint32_t samples;
+
+ /** Views in which this attachment is used, 0 for unused
+ *
+ * For non-multiview, this will be 1 if the attachment is used.
+ */
+ uint32_t view_mask;
+
+ /** VkAttachmentDescription2::loadOp */
+ VkAttachmentLoadOp load_op;
+
+ /** VkAttachmentDescription2::storeOp */
+ VkAttachmentStoreOp store_op;
+
+ /** VkAttachmentDescription2::stencilLoadOp */
+ VkAttachmentLoadOp stencil_load_op;
+
+ /** VkAttachmentDescription2::stencilStoreOp */
+ VkAttachmentStoreOp stencil_store_op;
+
+ /** VkAttachmentDescription2::initialLayout */
+ VkImageLayout initial_layout;
+
+ /** VkAttachmentDescription2::finalLayout */
+ VkImageLayout final_layout;
+
+ /** VkAttachmentDescriptionStencilLayout::stencilInitialLayout
+ *
+ * If VK_KHR_separate_depth_stencil_layouts is not used, this will be
+ * initial_layout if format contains stencil and VK_IMAGE_LAYOUT_UNDEFINED
+ * otherwise.
+ */
+ VkImageLayout initial_stencil_layout;
+
+ /** VkAttachmentDescriptionStencilLayout::stencilFinalLayout
+ *
+ * If VK_KHR_separate_depth_stencil_layouts is not used, this will be
+ * final_layout if format contains stencil and VK_IMAGE_LAYOUT_UNDEFINED
+ * otherwise.
+ */
+ VkImageLayout final_stencil_layout;
+};
+
+/***/
+struct vk_subpass_dependency {
+ /** VkSubpassDependency2::dependencyFlags */
+ VkDependencyFlags flags;
+
+ /** VkSubpassDependency2::srcSubpass */
+ uint32_t src_subpass;
+
+ /** VkSubpassDependency2::dstSubpass */
+ uint32_t dst_subpass;
+
+ /** VkSubpassDependency2::srcStageMask */
+ VkPipelineStageFlags2 src_stage_mask;
+
+ /** VkSubpassDependency2::dstStageMask */
+ VkPipelineStageFlags2 dst_stage_mask;
+
+ /** VkSubpassDependency2::srcAccessMask */
+ VkAccessFlags2 src_access_mask;
+
+ /** VkSubpassDependency2::dstAccessMask */
+ VkAccessFlags2 dst_access_mask;
+
+ /** VkSubpassDependency2::viewOffset */
+ int32_t view_offset;
+};
+
+/***/
+struct vk_render_pass {
+ struct vk_object_base base;
+
+ /** True if this render pass uses multiview
+ *
+ * This is true if all subpasses have viewMask != 0.
+ */
+ bool is_multiview;
+
+ /** Views used by this render pass or 1 for non-multiview */
+ uint32_t view_mask;
+
+ /** VkRenderPassCreateInfo2::attachmentCount */
+ uint32_t attachment_count;
+
+ /** VkRenderPassCreateInfo2::pAttachments */
+ struct vk_render_pass_attachment *attachments;
+
+ /** VkRenderPassCreateInfo2::subpassCount */
+ uint32_t subpass_count;
+
+ /** VkRenderPassCreateInfo2::subpasses */
+ struct vk_subpass *subpasses;
+
+ /** VkRenderPassCreateInfo2::dependencyCount */
+ uint32_t dependency_count;
+
+ /** VkRenderPassFragmentDensityMapCreateInfoEXT::fragmentDensityMapAttachment */
+ VkAttachmentReference fragment_density_map;
+
+ /** VkRenderPassCreateInfo2::pDependencies */
+ struct vk_subpass_dependency *dependencies;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_render_pass, base, VkRenderPass,
+ VK_OBJECT_TYPE_RENDER_PASS);
+
+/** Returns the VkPipelineRenderingCreateInfo for a graphics pipeline
+ *
+ * For render-pass-free drivers, this can be used in the implementation of
+ * vkCreateGraphicsPipelines to get the VkPipelineRenderingCreateInfo. If
+ * VkGraphicsPipelineCreateInfo::renderPass is not VK_NULL_HANDLE, it will
+ * return a representation of the specified subpass as a
+ * VkPipelineRenderingCreateInfo. If VkGraphicsPipelineCreateInfo::renderPass
+ * is VK_NULL_HANDLE and there is a VkPipelineRenderingCreateInfo in the pNext
+ * chain of VkGraphicsPipelineCreateInfo, it will return that.
+ *
+ * :param info: |in| One of the pCreateInfos from vkCreateGraphicsPipelines
+ */
+const VkPipelineRenderingCreateInfo *
+vk_get_pipeline_rendering_create_info(const VkGraphicsPipelineCreateInfo *info);
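+
+/* A minimal usage sketch (drv_setup_color_target is hypothetical): in a
+ * render-pass-free driver's vkCreateGraphicsPipelines implementation, the
+ * returned struct stands in for the render pass when configuring render
+ * targets.
+ *
+ *    const VkPipelineRenderingCreateInfo *rendering_info =
+ *       vk_get_pipeline_rendering_create_info(pCreateInfo);
+ *    if (rendering_info != NULL) {
+ *       for (uint32_t i = 0; i < rendering_info->colorAttachmentCount; i++) {
+ *          drv_setup_color_target(pipeline, i,
+ *                                 rendering_info->pColorAttachmentFormats[i]);
+ *       }
+ *    }
+ */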
+
+/** Returns any extra VkPipelineCreateFlags from the render pass
+ *
+ * For render-pass-free drivers, this can be used to get any extra pipeline
+ * create flags implied by the render pass. In particular, a render pass may
+ * want to add one or both of the following:
+ *
+ * - VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT
+ * - VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT
+ * - VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR
+ * - VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT
+ *
+ * If VkGraphicsPipelineCreateInfo::renderPass is VK_NULL_HANDLE, the relevant
+ * flags from VkGraphicsPipelineCreateInfo::flags will be returned.
+ *
+ * :param info: |in| One of the pCreateInfos from vkCreateGraphicsPipelines
+ */
+VkPipelineCreateFlags2KHR
+vk_get_pipeline_rendering_flags(const VkGraphicsPipelineCreateInfo *info);
+
+/** Returns the VkAttachmentSampleCountInfoAMD for a graphics pipeline
+ *
+ * For render-pass-free drivers, this can be used in the implementation of
+ * vkCreateGraphicsPipelines to get the VkAttachmentSampleCountInfoAMD. If
+ * VkGraphicsPipelineCreateInfo::renderPass is not VK_NULL_HANDLE, it will
+ * return the sample counts from the specified subpass as a
+ * VkAttachmentSampleCountInfoAMD. If VkGraphicsPipelineCreateInfo::renderPass
+ * is VK_NULL_HANDLE and there is a VkAttachmentSampleCountInfoAMD in the pNext
+ * chain of VkGraphicsPipelineCreateInfo, it will return that.
+ *
+ * :param info: |in| One of the pCreateInfos from vkCreateGraphicsPipelines
+ */
+const VkAttachmentSampleCountInfoAMD *
+vk_get_pipeline_sample_count_info_amd(const VkGraphicsPipelineCreateInfo *info);
+
+/**
+ * Returns the VkCommandBufferInheritanceRenderingInfo for secondary command
+ * buffer execution
+ *
+ * For render-pass-free drivers, this can be used in the implementation of
+ * vkCmdExecuteCommands to get the VkCommandBufferInheritanceRenderingInfo.
+ * If VkCommandBufferInheritanceInfo::renderPass is not VK_NULL_HANDLE, it
+ * will return a representation of the specified subpass as a
+ * VkCommandBufferInheritanceRenderingInfo. If
+ * VkCommandBufferInheritanceInfo::renderPass is VK_NULL_HANDLE and there
+ * is a VkCommandBufferInheritanceRenderingInfo in the pNext chain of
+ * VkCommandBufferBeginInfo, it will return that.
+ *
+ * :param level: |in| The nesting level of this command buffer
+ * :param pBeginInfo: |in| The pBeginInfo from vkBeginCommandBuffer
+ */
+const VkCommandBufferInheritanceRenderingInfo *
+vk_get_command_buffer_inheritance_rendering_info(
+ VkCommandBufferLevel level,
+ const VkCommandBufferBeginInfo *pBeginInfo);
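+
+/* Sketch of the intended call pattern (drv_set_inherited_formats is
+ * hypothetical): when beginning a secondary command buffer recorded with
+ * VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, a driver can pull the
+ * dynamic-rendering view of the inherited subpass from here.
+ *
+ *    const VkCommandBufferInheritanceRenderingInfo *inheritance =
+ *       vk_get_command_buffer_inheritance_rendering_info(
+ *          cmd_buffer->vk.level, pBeginInfo);
+ *    if (inheritance != NULL)
+ *       drv_set_inherited_formats(cmd_buffer, inheritance);
+ */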
+
+struct vk_gcbiarr_data {
+ VkRenderingInfo rendering;
+ VkRenderingFragmentShadingRateAttachmentInfoKHR fsr_att;
+ VkRenderingAttachmentInfo attachments[];
+};
+
+#define VK_GCBIARR_DATA_SIZE(max_color_rts) (\
+ sizeof(struct vk_gcbiarr_data) + \
+ sizeof(VkRenderingAttachmentInfo) * ((max_color_rts) + 2) \
+)
+
+/**
+ * Constructs a VkRenderingInfo for the inheritance rendering info
+ *
+ * For render-pass-free drivers, this can be used in the implementation of
+ * vkCmdExecuteCommands to get a VkRenderingInfo representing the subpass and
+ * framebuffer provided via the inheritance info for a command buffer created
+ * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT. The mental model
+ * here is that vkCmdExecuteCommands() implicitly suspends the render pass
+ * and vkBeginCommandBuffer() resumes it. If a VkRenderingInfo cannot be
+ * constructed due to a missing framebuffer or similar, NULL will be
+ * returned.
+ *
+ * :param level: |in| The nesting level of this command buffer
+ * :param pBeginInfo: |in| The pBeginInfo from vkBeginCommandBuffer
+ * :param stack_data: |out| An opaque blob of data which will be overwritten by
+ * this function, passed in from the caller to avoid
+ * heap allocations. It must be at least
+ * VK_GCBIARR_DATA_SIZE(max_color_rts) bytes.
+ */
+const VkRenderingInfo *
+vk_get_command_buffer_inheritance_as_rendering_resume(
+ VkCommandBufferLevel level,
+ const VkCommandBufferBeginInfo *pBeginInfo,
+ void *stack_data);
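+
+/* Sketch of the intended call pattern in vkCmdExecuteCommands, assuming the
+ * driver stashed the secondary's pBeginInfo at vkBeginCommandBuffer time and
+ * that MAX_RTS and drv_resume_rendering() are driver-specific:
+ *
+ *    uint8_t stack_data[VK_GCBIARR_DATA_SIZE(MAX_RTS)];
+ *    const VkRenderingInfo *resume_info =
+ *       vk_get_command_buffer_inheritance_as_rendering_resume(
+ *          secondary->vk.level, &secondary->begin_info, stack_data);
+ *    if (resume_info != NULL)
+ *       drv_resume_rendering(primary, resume_info);
+ */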
+
+/**
+ * Return true if the subpass dependency is framebuffer-local.
+ */
+static inline bool
+vk_subpass_dependency_is_fb_local(const VkSubpassDependency2 *dep,
+ VkPipelineStageFlags2 src_stage_mask,
+ VkPipelineStageFlags2 dst_stage_mask)
+{
+ if (dep->srcSubpass == VK_SUBPASS_EXTERNAL ||
+ dep->dstSubpass == VK_SUBPASS_EXTERNAL)
+ return true;
+
+ /* This is straight from the Vulkan 1.2 spec, section 7.1.4 "Framebuffer
+ * Region Dependencies":
+ */
+ const VkPipelineStageFlags2 framebuffer_space_stages =
+ VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT |
+ VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT |
+ VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+
+ const VkPipelineStageFlags2 src_framebuffer_space_stages =
+ framebuffer_space_stages | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
+ const VkPipelineStageFlags2 dst_framebuffer_space_stages =
+ framebuffer_space_stages | VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
+
+ /* Check for framebuffer-space dependency. */
+ if ((src_stage_mask & ~src_framebuffer_space_stages) ||
+ (dst_stage_mask & ~dst_framebuffer_space_stages))
+ return false;
+
+ /* Check for framebuffer-local dependency. */
+ return dep->dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT;
+}
+
+uint32_t
+vk_command_buffer_get_attachment_layout(const struct vk_command_buffer *cmd_buffer,
+ const struct vk_image *image,
+ VkImageLayout *out_layout,
+ VkImageLayout *out_stencil_layout);
+
+void
+vk_command_buffer_set_attachment_layout(struct vk_command_buffer *cmd_buffer,
+ uint32_t att_idx,
+ VkImageLayout layout,
+ VkImageLayout stencil_layout);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_RENDER_PASS_H */
diff --git a/src/vulkan/runtime/vk_sampler.c b/src/vulkan/runtime/vk_sampler.c
new file mode 100644
index 00000000000..bda852ebf90
--- /dev/null
+++ b/src/vulkan/runtime/vk_sampler.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_sampler.h"
+
+#include "vk_format.h"
+#include "vk_util.h"
+#include "vk_ycbcr_conversion.h"
+
+VkClearColorValue
+vk_border_color_value(VkBorderColor color)
+{
+ switch (color) {
+ case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
+ return (VkClearColorValue) { .float32 = { 0, 0, 0, 0 } };
+ case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
+ return (VkClearColorValue) { .int32 = { 0, 0, 0, 0 } };
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
+ return (VkClearColorValue) { .float32 = { 0, 0, 0, 1 } };
+ case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
+ return (VkClearColorValue) { .int32 = { 0, 0, 0, 1 } };
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
+ return (VkClearColorValue) { .float32 = { 1, 1, 1, 1 } };
+ case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
+ return (VkClearColorValue) { .int32 = { 1, 1, 1, 1 } };
+ default:
+ unreachable("Invalid or custom border color enum");
+ }
+}
+
+bool
+vk_border_color_is_int(VkBorderColor color)
+{
+ switch (color) {
+ case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
+ case VK_BORDER_COLOR_FLOAT_CUSTOM_EXT:
+ return false;
+ case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
+ case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
+ case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
+ case VK_BORDER_COLOR_INT_CUSTOM_EXT:
+ return true;
+ default:
+ unreachable("Invalid border color enum");
+ }
+}
+
+VkClearColorValue
+vk_sampler_border_color_value(const VkSamplerCreateInfo *pCreateInfo,
+ VkFormat *format_out)
+{
+ if (vk_border_color_is_custom(pCreateInfo->borderColor)) {
+ const VkSamplerCustomBorderColorCreateInfoEXT *border_color_info =
+ vk_find_struct_const(pCreateInfo->pNext,
+ SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT);
+ if (format_out)
+ *format_out = border_color_info->format;
+
+ return border_color_info->customBorderColor;
+ } else {
+ if (format_out)
+ *format_out = VK_FORMAT_UNDEFINED;
+
+ return vk_border_color_value(pCreateInfo->borderColor);
+ }
+}
+
+void *
+vk_sampler_create(struct vk_device *device,
+ const VkSamplerCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ struct vk_sampler *sampler;
+
+ sampler = vk_object_zalloc(device, alloc, size, VK_OBJECT_TYPE_SAMPLER);
+ if (!sampler)
+ return NULL;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
+
+ sampler->format = VK_FORMAT_UNDEFINED;
+ sampler->border_color = pCreateInfo->borderColor;
+ sampler->reduction_mode = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE;
+
+ if (!vk_border_color_is_custom(pCreateInfo->borderColor)) {
+ sampler->border_color_value =
+ vk_border_color_value(pCreateInfo->borderColor);
+ }
+
+ vk_foreach_struct_const(ext, pCreateInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: {
+ const VkSamplerCustomBorderColorCreateInfoEXT *cbc_info = (void *)ext;
+ if (!vk_border_color_is_custom(pCreateInfo->borderColor))
+ break;
+
+ sampler->border_color_value = cbc_info->customBorderColor;
+ if (cbc_info->format != VK_FORMAT_UNDEFINED)
+ sampler->format = cbc_info->format;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO: {
+ const VkSamplerReductionModeCreateInfo *rm_info = (void *)ext;
+ sampler->reduction_mode = rm_info->reductionMode;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
+ const VkSamplerYcbcrConversionInfo *yc_info = (void *)ext;
+ VK_FROM_HANDLE(vk_ycbcr_conversion, conversion, yc_info->conversion);
+
+ /* From the Vulkan 1.2.259 spec:
+ *
+ * "A VkSamplerYcbcrConversionInfo must be provided for samplers
+ * to be used with image views that access
+ * VK_IMAGE_ASPECT_COLOR_BIT if the format is one of the formats
+ * that require a sampler YCbCr conversion, or if the image view
+ * has an external format."
+ *
+ * This means that on Android we can end up with one of these even if
+ * YCbCr isn't being used at all. Leave sampler->ycbcr_conversion NULL
+ * if it isn't a YCbCr format.
+ */
+ if (vk_format_get_ycbcr_info(conversion->state.format) == NULL)
+ break;
+
+ sampler->ycbcr_conversion = conversion;
+ sampler->format = conversion->state.format;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return sampler;
+}
+
+void
+vk_sampler_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_sampler *sampler)
+{
+ vk_object_free(device, alloc, sampler);
+}
diff --git a/src/vulkan/runtime/vk_sampler.h b/src/vulkan/runtime/vk_sampler.h
new file mode 100644
index 00000000000..541b02916c2
--- /dev/null
+++ b/src/vulkan/runtime/vk_sampler.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright © 2022 Collabora, LTD
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_SAMPLER_H
+#define VK_SAMPLER_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline bool
+vk_border_color_is_custom(VkBorderColor color)
+{
+ return color == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT ||
+ color == VK_BORDER_COLOR_INT_CUSTOM_EXT;
+}
+
+VkClearColorValue vk_border_color_value(VkBorderColor color);
+bool vk_border_color_is_int(VkBorderColor color);
+
+VkClearColorValue
+vk_sampler_border_color_value(const VkSamplerCreateInfo *pCreateInfo,
+ VkFormat *format_out);
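+
+/* Sketch (drv_pack_border_color is hypothetical): when filling hardware
+ * sampler state, the border color value and its format hint can be resolved
+ * in one call, with vk_border_color_is_int() selecting the integer vs. float
+ * encoding.
+ *
+ *    VkFormat border_format;
+ *    const VkClearColorValue border =
+ *       vk_sampler_border_color_value(pCreateInfo, &border_format);
+ *    drv_pack_border_color(desc, &border, border_format,
+ *                          vk_border_color_is_int(pCreateInfo->borderColor));
+ */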
+
+struct vk_sampler {
+ struct vk_object_base base;
+
+ /** Format of paired image views or VK_FORMAT_UNDEFINED
+ *
+ * This is taken either from VkSamplerYcbcrConversionCreateInfo::format or
+ * VkSamplerCustomBorderColorCreateInfoEXT::format.
+ */
+ VkFormat format;
+
+ /** VkSamplerCreateInfo::borderColor */
+ VkBorderColor border_color;
+
+ /** Border color value
+ *
+ * If VkSamplerCreateInfo::borderColor is one of the Vulkan 1.0 enumerated
+ * border colors, this will be the VkClearColorValue representation of that
+ * value. If VkSamplerCreateInfo::borderColor is VK_BORDER_COLOR_*_CUSTOM_EXT,
+ * this is VkSamplerCustomBorderColorCreateInfoEXT::customBorderColor.
+ */
+ VkClearColorValue border_color_value;
+
+ /**
+ * VkSamplerReductionModeCreateInfo::reductionMode or
+ * VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.
+ */
+ VkSamplerReductionMode reduction_mode;
+
+ /** VkSamplerYcbcrConversionInfo::conversion or NULL
+ *
+ * We ensure that this is always NULL whenever vk_sampler::format is not a
+ * YCbCr format. This is important on Android where YCbCr conversion
+ * objects are required for all EXTERNAL formats, even if they are not
+ * YCbCr formats.
+ */
+ struct vk_ycbcr_conversion *ycbcr_conversion;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_sampler, base, VkSampler,
+ VK_OBJECT_TYPE_SAMPLER);
+
+void *vk_sampler_create(struct vk_device *device,
+ const VkSamplerCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+void vk_sampler_destroy(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_sampler *sampler);
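+
+/* A minimal driver-side sketch (struct drv_sampler and the surrounding code
+ * are hypothetical): drivers embed struct vk_sampler as the first member of
+ * their own sampler object and let vk_sampler_create() allocate it and parse
+ * the common pNext structures.
+ *
+ *    struct drv_sampler {
+ *       struct vk_sampler vk;
+ *       uint32_t hw_desc[8];
+ *    };
+ *
+ *    struct drv_sampler *sampler =
+ *       vk_sampler_create(&device->vk, pCreateInfo, pAllocator,
+ *                         sizeof(*sampler));
+ *    if (sampler == NULL)
+ *       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ */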
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SAMPLER_H */
diff --git a/src/vulkan/runtime/vk_semaphore.c b/src/vulkan/runtime/vk_semaphore.c
new file mode 100644
index 00000000000..7044ed9aea2
--- /dev/null
+++ b/src/vulkan/runtime/vk_semaphore.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_semaphore.h"
+
+#include "util/os_time.h"
+#include "util/perf/cpu_trace.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_util.h"
+
+static VkExternalSemaphoreHandleTypeFlags
+vk_sync_semaphore_import_types(const struct vk_sync_type *type,
+ VkSemaphoreType semaphore_type)
+{
+ VkExternalSemaphoreHandleTypeFlags handle_types = 0;
+
+ if (type->import_opaque_fd)
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+ if (type->export_sync_file && semaphore_type == VK_SEMAPHORE_TYPE_BINARY)
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ if (type->import_win32_handle) {
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT;
+ if (type->features & VK_SYNC_FEATURE_TIMELINE)
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT;
+ }
+
+ return handle_types;
+}
+
+static VkExternalSemaphoreHandleTypeFlags
+vk_sync_semaphore_export_types(const struct vk_sync_type *type,
+ VkSemaphoreType semaphore_type)
+{
+ VkExternalSemaphoreHandleTypeFlags handle_types = 0;
+
+ if (type->export_opaque_fd)
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+ if (type->export_sync_file && semaphore_type == VK_SEMAPHORE_TYPE_BINARY)
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ if (type->export_win32_handle) {
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT;
+ if (type->features & VK_SYNC_FEATURE_TIMELINE)
+ handle_types |= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT;
+ }
+
+ return handle_types;
+}
+
+static VkExternalSemaphoreHandleTypeFlags
+vk_sync_semaphore_handle_types(const struct vk_sync_type *type,
+ VkSemaphoreType semaphore_type)
+{
+ return vk_sync_semaphore_export_types(type, semaphore_type) &
+ vk_sync_semaphore_import_types(type, semaphore_type);
+}
+
+static const struct vk_sync_type *
+get_semaphore_sync_type(struct vk_physical_device *pdevice,
+ VkSemaphoreType semaphore_type,
+ VkExternalSemaphoreHandleTypeFlags handle_types)
+{
+ assert(semaphore_type == VK_SEMAPHORE_TYPE_BINARY ||
+ semaphore_type == VK_SEMAPHORE_TYPE_TIMELINE);
+
+ enum vk_sync_features req_features = VK_SYNC_FEATURE_GPU_WAIT;
+ if (semaphore_type == VK_SEMAPHORE_TYPE_TIMELINE) {
+ req_features |= VK_SYNC_FEATURE_TIMELINE |
+ VK_SYNC_FEATURE_CPU_WAIT;
+ } else {
+ req_features |= VK_SYNC_FEATURE_BINARY;
+ }
+
+ for (const struct vk_sync_type *const *t =
+ pdevice->supported_sync_types; *t; t++) {
+ if (req_features & ~(*t)->features)
+ continue;
+
+ if (handle_types & ~vk_sync_semaphore_handle_types(*t, semaphore_type))
+ continue;
+
+ return *t;
+ }
+
+ return NULL;
+}
+
+static VkSemaphoreType
+get_semaphore_type(const void *pNext, uint64_t *initial_value)
+{
+ const VkSemaphoreTypeCreateInfo *type_info =
+ vk_find_struct_const(pNext, SEMAPHORE_TYPE_CREATE_INFO);
+
+ if (!type_info)
+ return VK_SEMAPHORE_TYPE_BINARY;
+
+ if (initial_value)
+ *initial_value = type_info->initialValue;
+ return type_info->semaphoreType;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateSemaphore(VkDevice _device,
+ const VkSemaphoreCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSemaphore *pSemaphore)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_semaphore *semaphore;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);
+
+ uint64_t initial_value = 0;
+ const VkSemaphoreType semaphore_type =
+ get_semaphore_type(pCreateInfo->pNext, &initial_value);
+
+ if (semaphore_type == VK_SEMAPHORE_TYPE_TIMELINE)
+ assert(device->timeline_mode != VK_DEVICE_TIMELINE_MODE_NONE);
+
+ const VkExportSemaphoreCreateInfo *export =
+ vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
+ VkExternalSemaphoreHandleTypeFlags handle_types =
+ export ? export->handleTypes : 0;
+
+ const struct vk_sync_type *sync_type =
+ get_semaphore_sync_type(device->physical, semaphore_type, handle_types);
+ if (sync_type == NULL) {
+ /* We should always be able to get a semaphore type for internal use */
+ assert(get_semaphore_sync_type(device->physical, semaphore_type, 0) != NULL);
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "Combination of external handle types is unsupported "
+ "for VkSemaphore creation.");
+ }
+
+ /* If the timeline mode is ASSISTED, then any permanent binary semaphore
+ * types need to be able to support move. We don't require this for
+ * temporary unless that temporary is also used as a semaphore signal
+ * operation which is much trickier to assert early.
+ */
+ if (semaphore_type == VK_SEMAPHORE_TYPE_BINARY &&
+ vk_device_supports_threaded_submit(device))
+ assert(sync_type->move);
+
+ /* Allocate a vk_semaphore + vk_sync implementation. Because the permanent
+ * field of vk_semaphore is the base field of the vk_sync implementation,
+ * we can make the 2 structures overlap.
+ */
+ size_t size = offsetof(struct vk_semaphore, permanent) + sync_type->size;
+ semaphore = vk_object_zalloc(device, pAllocator, size,
+ VK_OBJECT_TYPE_SEMAPHORE);
+ if (semaphore == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ semaphore->type = semaphore_type;
+
+ enum vk_sync_flags sync_flags = 0;
+ if (semaphore_type == VK_SEMAPHORE_TYPE_TIMELINE)
+ sync_flags |= VK_SYNC_IS_TIMELINE;
+ if (handle_types)
+ sync_flags |= VK_SYNC_IS_SHAREABLE;
+
+ VkResult result = vk_sync_init(device, &semaphore->permanent,
+ sync_type, sync_flags, initial_value);
+ if (result != VK_SUCCESS) {
+ vk_object_free(device, pAllocator, semaphore);
+ return result;
+ }
+
+#ifdef _WIN32
+ const VkExportSemaphoreWin32HandleInfoKHR *export_win32 =
+ vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR);
+ if (export_win32) {
+ result = vk_sync_set_win32_export_params(device, &semaphore->permanent, export_win32->pAttributes,
+ export_win32->dwAccess, export_win32->name);
+ if (result != VK_SUCCESS) {
+ vk_sync_finish(device, &semaphore->permanent);
+ vk_object_free(device, pAllocator, semaphore);
+ return result;
+ }
+ }
+#endif
+
+ *pSemaphore = vk_semaphore_to_handle(semaphore);
+
+ return VK_SUCCESS;
+}
+
+void
+vk_semaphore_reset_temporary(struct vk_device *device,
+ struct vk_semaphore *semaphore)
+{
+ if (semaphore->temporary == NULL)
+ return;
+
+ vk_sync_destroy(device, semaphore->temporary);
+ semaphore->temporary = NULL;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroySemaphore(VkDevice _device,
+ VkSemaphore _semaphore,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
+
+ if (semaphore == NULL)
+ return;
+
+ vk_semaphore_reset_temporary(device, semaphore);
+ vk_sync_finish(device, &semaphore->permanent);
+
+ vk_object_free(device, pAllocator, semaphore);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetPhysicalDeviceExternalSemaphoreProperties(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
+ VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+
+ assert(pExternalSemaphoreInfo->sType ==
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO);
+ const VkExternalSemaphoreHandleTypeFlagBits handle_type =
+ pExternalSemaphoreInfo->handleType;
+
+ const VkSemaphoreType semaphore_type =
+ get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);
+
+ const struct vk_sync_type *sync_type =
+ get_semaphore_sync_type(pdevice, semaphore_type, handle_type);
+ if (sync_type == NULL) {
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
+ pExternalSemaphoreProperties->compatibleHandleTypes = 0;
+ pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
+ return;
+ }
+
+ VkExternalSemaphoreHandleTypeFlagBits import =
+ vk_sync_semaphore_import_types(sync_type, semaphore_type);
+ VkExternalSemaphoreHandleTypeFlagBits export =
+ vk_sync_semaphore_export_types(sync_type, semaphore_type);
+
+ VkExternalSemaphoreHandleTypeFlagBits opaque_types[] = {
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+ };
+ for (uint32_t i = 0; i < ARRAY_SIZE(opaque_types); ++i) {
+ if (handle_type != opaque_types[i]) {
+ const struct vk_sync_type *opaque_sync_type =
+ get_semaphore_sync_type(pdevice, semaphore_type, opaque_types[i]);
+
+ /* If we're a different vk_sync_type than the one selected when only
+ * an opaque type is set, then we can't import/export that opaque type. Put
+ * differently, there can only be one OPAQUE_FD/WIN32_HANDLE sync type.
+ */
+ if (sync_type != opaque_sync_type) {
+ import &= ~opaque_types[i];
+ export &= ~opaque_types[i];
+ }
+ }
+ }
+
+ VkExternalSemaphoreHandleTypeFlags compatible = import & export;
+ VkExternalSemaphoreFeatureFlags features = 0;
+ if (handle_type & export)
+ features |= VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT;
+ if (handle_type & import)
+ features |= VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
+
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes = export;
+ pExternalSemaphoreProperties->compatibleHandleTypes = compatible;
+ pExternalSemaphoreProperties->externalSemaphoreFeatures = features;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetSemaphoreCounterValue(VkDevice _device,
+ VkSemaphore _semaphore,
+ uint64_t *pValue)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
+
+ if (vk_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
+ struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
+ return vk_sync_get_value(device, sync, pValue);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_WaitSemaphores(VkDevice _device,
+ const VkSemaphoreWaitInfo *pWaitInfo,
+ uint64_t timeout)
+{
+ MESA_TRACE_FUNC();
+
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ if (vk_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
+ if (pWaitInfo->semaphoreCount == 0)
+ return VK_SUCCESS;
+
+ uint64_t abs_timeout_ns = os_time_get_absolute_timeout(timeout);
+
+ const uint32_t wait_count = pWaitInfo->semaphoreCount;
+ STACK_ARRAY(struct vk_sync_wait, waits, pWaitInfo->semaphoreCount);
+
+ for (uint32_t i = 0; i < wait_count; i++) {
+ VK_FROM_HANDLE(vk_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
+ assert(semaphore->type == VK_SEMAPHORE_TYPE_TIMELINE);
+
+ waits[i] = (struct vk_sync_wait) {
+ .sync = vk_semaphore_get_active_sync(semaphore),
+ .stage_mask = ~(VkPipelineStageFlags2)0,
+ .wait_value = pWaitInfo->pValues[i],
+ };
+ }
+
+ enum vk_sync_wait_flags wait_flags = VK_SYNC_WAIT_COMPLETE;
+ if (pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT)
+ wait_flags |= VK_SYNC_WAIT_ANY;
+
+ VkResult result = vk_sync_wait_many(device, wait_count, waits,
+ wait_flags, abs_timeout_ns);
+
+ STACK_ARRAY_FINISH(waits);
+
+ VkResult device_status = vk_device_check_status(device);
+ if (device_status != VK_SUCCESS)
+ return device_status;
+
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_SignalSemaphore(VkDevice _device,
+ const VkSemaphoreSignalInfo *pSignalInfo)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, pSignalInfo->semaphore);
+ struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
+ VkResult result;
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-VkSemaphoreSignalInfo-semaphore-03257
+ *
+ * "semaphore must have been created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_TIMELINE."
+ */
+ assert(semaphore->type == VK_SEMAPHORE_TYPE_TIMELINE);
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-VkSemaphoreSignalInfo-value-03258
+ *
+ * "value must have a value greater than the current value of the
+ * semaphore"
+ *
+ * Since 0 is the lowest possible semaphore timeline value, we can assert
+ * that a non-zero signal value is provided.
+ */
+ if (unlikely(pSignalInfo->value == 0)) {
+ return vk_device_set_lost(device,
+ "Tried to signal a timeline with value 0");
+ }
+
+ result = vk_sync_signal(device, sync, pSignalInfo->value);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ if (device->submit_mode == VK_QUEUE_SUBMIT_MODE_DEFERRED) {
+ result = vk_device_flush(device);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+#ifdef _WIN32
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_ImportSemaphoreWin32HandleKHR(VkDevice _device,
+ const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, pImportSemaphoreWin32HandleInfo->semaphore);
+
+ assert(pImportSemaphoreWin32HandleInfo->sType ==
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR);
+
+ const HANDLE handle = pImportSemaphoreWin32HandleInfo->handle;
+ const wchar_t *name = pImportSemaphoreWin32HandleInfo->name;
+ const VkExternalSemaphoreHandleTypeFlagBits handle_type =
+ pImportSemaphoreWin32HandleInfo->handleType;
+
+ struct vk_sync *temporary = NULL, *sync;
+ if (pImportSemaphoreWin32HandleInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-VkImportSemaphoreWin32HandleInfoKHR-flags-03322
+ *
+ * "If flags contains VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, the
+ * VkSemaphoreTypeCreateInfo::semaphoreType field of the semaphore
+ * from which handle or name was exported must not be
+ * VK_SEMAPHORE_TYPE_TIMELINE"
+ */
+ if (unlikely(semaphore->type == VK_SEMAPHORE_TYPE_TIMELINE)) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "Cannot temporarily import into a timeline "
+ "semaphore");
+ }
+
+ const struct vk_sync_type *sync_type =
+ get_semaphore_sync_type(device->physical, semaphore->type, handle_type);
+
+ VkResult result = vk_sync_create(device, sync_type, 0 /* flags */,
+ 0 /* initial_value */, &temporary);
+ if (result != VK_SUCCESS)
+ return result;
+
+ sync = temporary;
+ } else {
+ sync = &semaphore->permanent;
+ }
+ assert(handle_type &
+ vk_sync_semaphore_handle_types(sync->type, semaphore->type));
+
+ VkResult result;
+ switch (pImportSemaphoreWin32HandleInfo->handleType) {
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT:
+ result = vk_sync_import_win32_handle(device, sync, handle, name);
+ break;
+
+ default:
+ result = vk_error(semaphore, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ }
+
+ if (result != VK_SUCCESS) {
+ if (temporary != NULL)
+ vk_sync_destroy(device, temporary);
+ return result;
+ }
+
+ /* From a spec correctness point of view, we could probably replace the
+ * semaphore's temporary payload with the new vk_sync at the top. However,
+ * we choose to be nice to applications and only replace the semaphore if
+ * the import succeeded.
+ */
+ if (temporary) {
+ vk_semaphore_reset_temporary(device, semaphore);
+ semaphore->temporary = temporary;
+ }
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetSemaphoreWin32HandleKHR(VkDevice _device,
+ const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
+ HANDLE *pHandle)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, pGetWin32HandleInfo->semaphore);
+
+ assert(pGetWin32HandleInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR);
+
+ struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
+
+ VkResult result;
+ switch (pGetWin32HandleInfo->handleType) {
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT:
+ result = vk_sync_export_win32_handle(device, sync, pHandle);
+ if (result != VK_SUCCESS)
+ return result;
+ break;
+
+ default:
+ unreachable("Invalid semaphore export handle type");
+ }
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Export operations have the same transference as the specified
+ * handle type’s import operations. [...] If the semaphore was using
+ * a temporarily imported payload, the semaphore’s prior permanent
+ * payload will be restored."
+ */
+ vk_semaphore_reset_temporary(device, semaphore);
+
+ return VK_SUCCESS;
+}
+
+#else
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_ImportSemaphoreFdKHR(VkDevice _device,
+ const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
+
+ assert(pImportSemaphoreFdInfo->sType ==
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR);
+
+ const int fd = pImportSemaphoreFdInfo->fd;
+ const VkExternalSemaphoreHandleTypeFlagBits handle_type =
+ pImportSemaphoreFdInfo->handleType;
+
+ struct vk_sync *temporary = NULL, *sync;
+ if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-VkImportSemaphoreFdInfoKHR-flags-03323
+ *
+ * "If flags contains VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, the
+ * VkSemaphoreTypeCreateInfo::semaphoreType field of the semaphore
+ * from which handle or name was exported must not be
+ * VK_SEMAPHORE_TYPE_TIMELINE"
+ */
+ if (unlikely(semaphore->type == VK_SEMAPHORE_TYPE_TIMELINE)) {
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
+ "Cannot temporarily import into a timeline "
+ "semaphore");
+ }
+
+ const struct vk_sync_type *sync_type =
+ get_semaphore_sync_type(device->physical, semaphore->type, handle_type);
+
+ VkResult result = vk_sync_create(device, sync_type, 0 /* flags */,
+ 0 /* initial_value */, &temporary);
+ if (result != VK_SUCCESS)
+ return result;
+
+ sync = temporary;
+ } else {
+ sync = &semaphore->permanent;
+ }
+ assert(handle_type &
+ vk_sync_semaphore_handle_types(sync->type, semaphore->type));
+
+ VkResult result;
+ switch (pImportSemaphoreFdInfo->handleType) {
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ result = vk_sync_import_opaque_fd(device, sync, fd);
+ break;
+
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ result = vk_sync_import_sync_file(device, sync, fd);
+ break;
+
+ default:
+ result = vk_error(semaphore, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ }
+
+ if (result != VK_SUCCESS) {
+ if (temporary != NULL)
+ vk_sync_destroy(device, temporary);
+ return result;
+ }
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Importing a semaphore payload from a file descriptor transfers
+ * ownership of the file descriptor from the application to the Vulkan
+ * implementation. The application must not perform any operations on
+ * the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ if (fd != -1)
+ close(fd);
+
+ /* From a spec correctness point of view, we could probably replace the
+ * semaphore's temporary payload with the new vk_sync at the top. However,
+ * we choose to be nice to applications and only replace the semaphore if
+ * the import succeeded.
+ */
+ if (temporary) {
+ vk_semaphore_reset_temporary(device, semaphore);
+ semaphore->temporary = temporary;
+ }
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetSemaphoreFdKHR(VkDevice _device,
+ const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
+ int *pFd)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_semaphore, semaphore, pGetFdInfo->semaphore);
+
+ assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);
+
+ struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
+
+ VkResult result;
+ switch (pGetFdInfo->handleType) {
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ result = vk_sync_export_opaque_fd(device, sync, pFd);
+ if (result != VK_SUCCESS)
+ return result;
+ break;
+
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * VUID-VkSemaphoreGetFdInfoKHR-handleType-03253
+ *
+ * "If handleType refers to a handle type with copy payload
+ * transference semantics, semaphore must have been created with a
+ * VkSemaphoreType of VK_SEMAPHORE_TYPE_BINARY."
+ */
+ if (unlikely(semaphore->type != VK_SEMAPHORE_TYPE_BINARY)) {
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "Cannot export a timeline semaphore as SYNC_FD");
+ }
+
+ /* From the Vulkan 1.2.194 spec:
+ * VUID-VkSemaphoreGetFdInfoKHR-handleType-03254
+ *
+ * "If handleType refers to a handle type with copy payload
+ * transference semantics, semaphore must have an associated
+ * semaphore signal operation that has been submitted for execution
+ * and any semaphore signal operations on which it depends (if any)
+ * must have also been submitted for execution."
+ *
+ * If we have real timelines, it's possible that the time point doesn't
+ * exist yet and is waiting for one of our submit threads to trigger.
+ * However, thanks to the above bit of spec text, that wait should never
+ * block for long.
+ */
+ if (vk_device_supports_threaded_submit(device)) {
+ result = vk_sync_wait(device, sync, 0,
+ VK_SYNC_WAIT_PENDING,
+ UINT64_MAX);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ result = vk_sync_export_sync_file(device, sync, pFd);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Export operations have the same transference as the specified
+ * handle type’s import operations. Additionally, exporting a
+ * semaphore payload to a handle with copy transference has the same
+ * side effects on the source semaphore’s payload as executing a
+ * semaphore wait operation."
+ *
+ * In other words, exporting a sync file also resets the semaphore. We
+ * only care about this for the permanent payload because the temporary
+ * payload will be destroyed below.
+ */
+ if (sync == &semaphore->permanent) {
+ result = vk_sync_reset(device, sync);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+ break;
+
+ default:
+ unreachable("Invalid semaphore export handle type");
+ }
+
+ /* From the Vulkan 1.2.194 spec:
+ *
+ * "Export operations have the same transference as the specified
+ * handle type’s import operations. [...] If the semaphore was using
+ * a temporarily imported payload, the semaphore’s prior permanent
+ * payload will be restored."
+ */
+ vk_semaphore_reset_temporary(device, semaphore);
+
+ return VK_SUCCESS;
+}
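The export side mirrors this from the application's point of view; a minimal sketch, assuming `device` and `sem` are valid and a signal operation for `sem` has already been submitted, as the VUs quoted above require:

   #include <vulkan/vulkan.h>

   /* Export the pending signal as a sync file; per the spec text above this
    * also behaves like a wait, i.e. it resets the binary semaphore.
    */
   static VkResult
   export_sync_fd(VkDevice device, VkSemaphore sem, int *fd_out)
   {
      const VkSemaphoreGetFdInfoKHR get_fd_info = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
         .semaphore = sem,
         .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      };
      return vkGetSemaphoreFdKHR(device, &get_fd_info, fd_out);
   }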
+
+#endif /* !defined(_WIN32) */
diff --git a/src/vulkan/runtime/vk_semaphore.h b/src/vulkan/runtime/vk_semaphore.h
new file mode 100644
index 00000000000..141f39a3f4d
--- /dev/null
+++ b/src/vulkan/runtime/vk_semaphore.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_SEMAPHORE_H
+#define VK_SEMAPHORE_H
+
+#include "vk_object.h"
+#include "vk_sync.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_sync;
+
+struct vk_semaphore {
+ struct vk_object_base base;
+
+ /** VkSemaphoreTypeCreateInfo::semaphoreType */
+ VkSemaphoreType type;
+
+ /** Temporary semaphore state.
+ *
+ * A semaphore *may* have temporary state. That state is added to the
+ * semaphore by an import operation and is reset back to NULL when the
+ * semaphore is reset. A semaphore with temporary state cannot be signaled
+ * because the semaphore must already be signaled before the temporary
+ * state can be exported from the semaphore in the other process and
+ * imported here.
+ */
+ struct vk_sync *temporary;
+
+ /** Permanent semaphore state.
+ *
+ * Every semaphore has some form of permanent state.
+ *
+ * This field must be last
+ */
+ alignas(8) struct vk_sync permanent;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_semaphore, base, VkSemaphore,
+ VK_OBJECT_TYPE_SEMAPHORE);
+
+void vk_semaphore_reset_temporary(struct vk_device *device,
+ struct vk_semaphore *semaphore);
+
+static inline struct vk_sync *
+vk_semaphore_get_active_sync(struct vk_semaphore *semaphore)
+{
+ return semaphore->temporary ? semaphore->temporary : &semaphore->permanent;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SEMAPHORE_H */
+
diff --git a/src/vulkan/runtime/vk_shader.c b/src/vulkan/runtime/vk_shader.c
new file mode 100644
index 00000000000..d124113a0cc
--- /dev/null
+++ b/src/vulkan/runtime/vk_shader.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright © 2024 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_shader.h"
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_common_entrypoints.h"
+#include "vk_descriptor_set_layout.h"
+#include "vk_device.h"
+#include "vk_nir.h"
+#include "vk_physical_device.h"
+#include "vk_pipeline.h"
+
+#include "util/mesa-sha1.h"
+
+void *
+vk_shader_zalloc(struct vk_device *device,
+ const struct vk_shader_ops *ops,
+ gl_shader_stage stage,
+ const VkAllocationCallbacks *alloc,
+ size_t size)
+{
+ /* For internal allocations, we need to allocate from the device scope
+ * because they might be put in pipeline caches. Importantly, it is
+ * impossible for the client to get at this pointer and we apply this
+ * heuristic before we account for allocation fallbacks so this will only
+ * ever happen for internal shader objects.
+ */
+ const VkSystemAllocationScope alloc_scope =
+ alloc == &device->alloc ? VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
+ : VK_SYSTEM_ALLOCATION_SCOPE_OBJECT;
+
+ struct vk_shader *shader = vk_zalloc2(&device->alloc, alloc, size, 8,
+ alloc_scope);
+ if (shader == NULL)
+ return NULL;
+
+ vk_object_base_init(device, &shader->base, VK_OBJECT_TYPE_SHADER_EXT);
+ shader->ops = ops;
+ shader->stage = stage;
+
+ return shader;
+}
+
+void
+vk_shader_free(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_shader *shader)
+{
+ vk_object_base_finish(&shader->base);
+ vk_free2(&device->alloc, alloc, shader);
+}
+
+int
+vk_shader_cmp_graphics_stages(gl_shader_stage a, gl_shader_stage b)
+{
+ static const int stage_order[MESA_SHADER_MESH + 1] = {
+ [MESA_SHADER_VERTEX] = 1,
+ [MESA_SHADER_TESS_CTRL] = 2,
+ [MESA_SHADER_TESS_EVAL] = 3,
+ [MESA_SHADER_GEOMETRY] = 4,
+ [MESA_SHADER_TASK] = 5,
+ [MESA_SHADER_MESH] = 6,
+ [MESA_SHADER_FRAGMENT] = 7,
+ };
+
+ assert(a < ARRAY_SIZE(stage_order) && stage_order[a] > 0);
+ assert(b < ARRAY_SIZE(stage_order) && stage_order[b] > 0);
+
+ return stage_order[a] - stage_order[b];
+}
+
+struct stage_idx {
+ gl_shader_stage stage;
+ uint32_t idx;
+};
+
+static int
+cmp_stage_idx(const void *_a, const void *_b)
+{
+ const struct stage_idx *a = _a, *b = _b;
+ return vk_shader_cmp_graphics_stages(a->stage, b->stage);
+}
+
+static nir_shader *
+vk_shader_to_nir(struct vk_device *device,
+ const VkShaderCreateInfoEXT *info,
+ const struct vk_pipeline_robustness_state *rs)
+{
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+
+ const gl_shader_stage stage = vk_to_mesa_shader_stage(info->stage);
+ const nir_shader_compiler_options *nir_options =
+ ops->get_nir_options(device->physical, stage, rs);
+ struct spirv_to_nir_options spirv_options =
+ ops->get_spirv_options(device->physical, stage, rs);
+
+ enum gl_subgroup_size subgroup_size = vk_get_subgroup_size(
+ vk_spirv_version(info->pCode, info->codeSize),
+ stage, info->pNext,
+ info->flags & VK_SHADER_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT,
+ info->flags & VK_SHADER_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+
+ nir_shader *nir = vk_spirv_to_nir(device,
+ info->pCode, info->codeSize,
+ stage, info->pName,
+ subgroup_size,
+ info->pSpecializationInfo,
+ &spirv_options, nir_options,
+ false /* internal */, NULL);
+ if (nir == NULL)
+ return NULL;
+
+ if (ops->preprocess_nir != NULL)
+ ops->preprocess_nir(device->physical, nir);
+
+ return nir;
+}
+
+struct set_layouts {
+ struct vk_descriptor_set_layout *set_layouts[MESA_VK_MAX_DESCRIPTOR_SETS];
+};
+
+static void
+vk_shader_compile_info_init(struct vk_shader_compile_info *info,
+ struct set_layouts *set_layouts,
+ const VkShaderCreateInfoEXT *vk_info,
+ const struct vk_pipeline_robustness_state *rs,
+ nir_shader *nir)
+{
+ for (uint32_t sl = 0; sl < vk_info->setLayoutCount; sl++) {
+ set_layouts->set_layouts[sl] =
+ vk_descriptor_set_layout_from_handle(vk_info->pSetLayouts[sl]);
+ }
+
+ *info = (struct vk_shader_compile_info) {
+ .stage = nir->info.stage,
+ .flags = vk_info->flags,
+ .next_stage_mask = vk_info->nextStage,
+ .nir = nir,
+ .robustness = rs,
+ .set_layout_count = vk_info->setLayoutCount,
+ .set_layouts = set_layouts->set_layouts,
+ .push_constant_range_count = vk_info->pushConstantRangeCount,
+ .push_constant_ranges = vk_info->pPushConstantRanges,
+ };
+}
+
+PRAGMA_DIAGNOSTIC_PUSH
+PRAGMA_DIAGNOSTIC_ERROR(-Wpadded)
+struct vk_shader_bin_header {
+ char mesavkshaderbin[16];
+ VkDriverId driver_id;
+ uint8_t uuid[VK_UUID_SIZE];
+ uint32_t version;
+ uint64_t size;
+ uint8_t sha1[SHA1_DIGEST_LENGTH];
+ uint32_t _pad;
+};
+PRAGMA_DIAGNOSTIC_POP
+static_assert(sizeof(struct vk_shader_bin_header) == 72,
+ "This struct has no holes");
+
+static void
+vk_shader_bin_header_init(struct vk_shader_bin_header *header,
+ struct vk_physical_device *device)
+{
+ *header = (struct vk_shader_bin_header) {
+ .mesavkshaderbin = "MesaVkShaderBin",
+ .driver_id = device->properties.driverID,
+ };
+
+ memcpy(header->uuid, device->properties.shaderBinaryUUID, VK_UUID_SIZE);
+ header->version = device->properties.shaderBinaryVersion;
+}
+
+static VkResult
+vk_shader_serialize(struct vk_device *device,
+ struct vk_shader *shader,
+ struct blob *blob)
+{
+ struct vk_shader_bin_header header;
+ vk_shader_bin_header_init(&header, device->physical);
+
+ ASSERTED intptr_t header_offset = blob_reserve_bytes(blob, sizeof(header));
+ assert(header_offset == 0);
+
+ bool success = shader->ops->serialize(device, shader, blob);
+ if (!success || blob->out_of_memory)
+ return VK_INCOMPLETE;
+
+ /* Finalize and write the header */
+ header.size = blob->size;
+ if (blob->data != NULL) {
+ assert(sizeof(header) <= blob->size);
+
+ struct mesa_sha1 sha1_ctx;
+ _mesa_sha1_init(&sha1_ctx);
+
+ /* Hash the header with a zero SHA1 */
+ _mesa_sha1_update(&sha1_ctx, &header, sizeof(header));
+
+ /* Hash the serialized data */
+ _mesa_sha1_update(&sha1_ctx, blob->data + sizeof(header),
+ blob->size - sizeof(header));
+
+ _mesa_sha1_final(&sha1_ctx, header.sha1);
+
+ blob_overwrite_bytes(blob, header_offset, &header, sizeof(header));
+ }
+
+ return VK_SUCCESS;
+}
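A driver's serialize callback only has to append its own payload after the reserved header; a hypothetical sketch, in which `struct drv_shader` and its fields are invented for illustration and only the util/blob.h helpers are real:

   /* Hypothetical driver shader that embeds vk_shader as its first member. */
   struct drv_shader {
      struct vk_shader vk;
      uint32_t code_size;
      uint32_t *code;
   };

   static bool
   drv_shader_serialize(struct vk_device *device,
                        const struct vk_shader *vk_shader,
                        struct blob *blob)
   {
      const struct drv_shader *shader = (const struct drv_shader *)vk_shader;

      /* The common code has already reserved vk_shader_bin_header space and
       * fills in the size and SHA-1 afterwards; just write the payload.
       */
      blob_write_uint32(blob, shader->code_size);
      blob_write_bytes(blob, shader->code, shader->code_size);

      return !blob->out_of_memory;
   }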
+
+static VkResult
+vk_shader_deserialize(struct vk_device *device,
+ size_t data_size, const void *data,
+ const VkAllocationCallbacks* pAllocator,
+ struct vk_shader **shader_out)
+{
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+
+ struct blob_reader blob;
+ blob_reader_init(&blob, data, data_size);
+
+ struct vk_shader_bin_header header, ref_header;
+ blob_copy_bytes(&blob, &header, sizeof(header));
+ if (blob.overrun)
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ vk_shader_bin_header_init(&ref_header, device->physical);
+
+ if (memcmp(header.mesavkshaderbin, ref_header.mesavkshaderbin,
+ sizeof(header.mesavkshaderbin)))
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ if (header.driver_id != ref_header.driver_id)
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ if (memcmp(header.uuid, ref_header.uuid, sizeof(header.uuid)))
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ /* From the Vulkan 1.3.276 spec:
+ *
+ * "Guaranteed compatibility of shader binaries is expressed through a
+ * combination of the shaderBinaryUUID and shaderBinaryVersion members
+ * of the VkPhysicalDeviceShaderObjectPropertiesEXT structure queried
+ * from a physical device. Binary shaders retrieved from a physical
+ * device with a certain shaderBinaryUUID are guaranteed to be
+ * compatible with all other physical devices reporting the same
+ * shaderBinaryUUID and the same or higher shaderBinaryVersion."
+ *
+ * We handle the version check here on behalf of the driver and then pass
+ * the version into the driver's deserialize callback.
+ *
+ * If a driver doesn't want to mess with versions, they can always make the
+ * UUID a hash and always report version 0 and that will make this check
+ * effectively a no-op.
+ */
+ if (header.version > ref_header.version)
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ /* Reject shader binaries that are the wrong size. */
+ if (header.size != data_size)
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ assert(blob.current == (uint8_t *)data + sizeof(header));
+ blob.end = (uint8_t *)data + data_size;
+
+ struct mesa_sha1 sha1_ctx;
+ _mesa_sha1_init(&sha1_ctx);
+
+ /* Hash the header with a zero SHA1 */
+ struct vk_shader_bin_header sha1_header = header;
+ memset(sha1_header.sha1, 0, sizeof(sha1_header.sha1));
+ _mesa_sha1_update(&sha1_ctx, &sha1_header, sizeof(sha1_header));
+
+ /* Hash the serialized data */
+ _mesa_sha1_update(&sha1_ctx, (uint8_t *)data + sizeof(header),
+ data_size - sizeof(header));
+
+ _mesa_sha1_final(&sha1_ctx, ref_header.sha1);
+ if (memcmp(header.sha1, ref_header.sha1, sizeof(header.sha1)))
+ return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
+
+ /* We've now verified that the header matches and that the data has the
+ * right SHA1 hash so it's safe to call into the driver.
+ */
+ return ops->deserialize(device, &blob, header.version,
+ pAllocator, shader_out);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_GetShaderBinaryDataEXT(VkDevice _device,
+ VkShaderEXT _shader,
+ size_t *pDataSize,
+ void *pData)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_shader, shader, _shader);
+ VkResult result;
+
+ /* From the Vulkan 1.3.275 spec:
+ *
+ * "If pData is NULL, then the size of the binary shader code of the
+ * shader object, in bytes, is returned in pDataSize. Otherwise,
+ * pDataSize must point to a variable set by the user to the size of the
+ * buffer, in bytes, pointed to by pData, and on return the variable is
+ * overwritten with the amount of data actually written to pData. If
+ * pDataSize is less than the size of the binary shader code, nothing is
+ * written to pData, and VK_INCOMPLETE will be returned instead of
+ * VK_SUCCESS."
+ *
+ * This is annoying. Unlike basically every other Vulkan data return
+ * method, we're not allowed to overwrite the client-provided memory region
+ * on VK_INCOMPLETE. This means we either need to query the blob size
+ * up-front by serializing twice or we need to serialize into temporary
+ * memory and memcpy into the client-provided region. We choose the first
+ * approach.
+ *
+ * In the common case, this means that vk_shader_ops::serialize will get
+ * called 3 times: Once for the client to get the size, once for us to
+ * validate the client's size, and once to actually write the data. It's a
+ * bit heavy-weight but this shouldn't be in a hot path and this is better
+ * for memory efficiency. Also, the vk_shader_ops::serialize should be
+ * pretty fast on a null blob.
+ */
+ struct blob blob;
+ blob_init_fixed(&blob, NULL, SIZE_MAX);
+ result = vk_shader_serialize(device, shader, &blob);
+ assert(result == VK_SUCCESS);
+
+ if (result != VK_SUCCESS) {
+ *pDataSize = 0;
+ return result;
+ } else if (pData == NULL) {
+ *pDataSize = blob.size;
+ return VK_SUCCESS;
+ } else if (blob.size > *pDataSize) {
+ /* No data written */
+ *pDataSize = 0;
+ return VK_INCOMPLETE;
+ }
+
+ blob_init_fixed(&blob, pData, *pDataSize);
+ result = vk_shader_serialize(device, shader, &blob);
+ assert(result == VK_SUCCESS);
+
+ *pDataSize = blob.size;
+
+ return result;
+}
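The spec behavior quoted above leads to the usual two-call pattern on the application side; a minimal sketch with allocation and error handling left to the caller:

   #include <stdlib.h>
   #include <vulkan/vulkan.h>

   /* Query the size first, then fetch into a buffer of exactly that size so
    * VK_INCOMPLETE cannot occur.
    */
   static void *
   get_shader_binary(VkDevice device, VkShaderEXT shader, size_t *size_out)
   {
      size_t size = 0;
      if (vkGetShaderBinaryDataEXT(device, shader, &size, NULL) != VK_SUCCESS)
         return NULL;

      void *data = malloc(size);
      if (data == NULL)
         return NULL;

      if (vkGetShaderBinaryDataEXT(device, shader, &size, data) != VK_SUCCESS) {
         free(data);
         return NULL;
      }

      *size_out = size;
      return data;
   }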
+
+/* The only place where we have "real" linking is graphics shaders and there
+ * is a limit as to how many of them can be linked together at one time.
+ */
+#define VK_MAX_LINKED_SHADER_STAGES MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateShadersEXT(VkDevice _device,
+ uint32_t createInfoCount,
+ const VkShaderCreateInfoEXT *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator,
+ VkShaderEXT *pShaders)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+ VkResult first_fail_or_success = VK_SUCCESS;
+
+ struct vk_pipeline_robustness_state rs = {
+ .storage_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT,
+ .uniform_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT,
+ .vertex_inputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT,
+ .images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT,
+ };
+
+ /* From the Vulkan 1.3.274 spec:
+ *
+ * "When this function returns, whether or not it succeeds, it is
+ * guaranteed that every element of pShaders will have been overwritten
+ * by either VK_NULL_HANDLE or a valid VkShaderEXT handle."
+ *
+ * Zeroing up-front makes the error path easier.
+ */
+ memset(pShaders, 0, createInfoCount * sizeof(*pShaders));
+
+ bool has_linked_spirv = false;
+ for (uint32_t i = 0; i < createInfoCount; i++) {
+ if (pCreateInfos[i].codeType == VK_SHADER_CODE_TYPE_SPIRV_EXT &&
+ (pCreateInfos[i].flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT))
+ has_linked_spirv = true;
+ }
+
+ uint32_t linked_count = 0;
+ struct stage_idx linked[VK_MAX_LINKED_SHADER_STAGES];
+
+ for (uint32_t i = 0; i < createInfoCount; i++) {
+ const VkShaderCreateInfoEXT *vk_info = &pCreateInfos[i];
+ VkResult result = VK_SUCCESS;
+
+ switch (vk_info->codeType) {
+ case VK_SHADER_CODE_TYPE_BINARY_EXT: {
+ /* This isn't required by Vulkan but we're allowed to fail binary
+ * import for basically any reason. This seems like a pretty good
+ * reason.
+ */
+ if (has_linked_spirv &&
+ (vk_info->flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT)) {
+ result = vk_errorf(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT,
+ "Cannot mix linked binary and SPIR-V");
+ break;
+ }
+
+ struct vk_shader *shader;
+ result = vk_shader_deserialize(device, vk_info->codeSize,
+ vk_info->pCode, pAllocator,
+ &shader);
+ if (result != VK_SUCCESS)
+ break;
+
+ pShaders[i] = vk_shader_to_handle(shader);
+ break;
+ }
+
+ case VK_SHADER_CODE_TYPE_SPIRV_EXT: {
+ if (vk_info->flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT) {
+ /* Stash it and compile later */
+ assert(linked_count < ARRAY_SIZE(linked));
+ linked[linked_count++] = (struct stage_idx) {
+ .stage = vk_to_mesa_shader_stage(vk_info->stage),
+ .idx = i,
+ };
+ } else {
+ nir_shader *nir = vk_shader_to_nir(device, vk_info, &rs);
+ if (nir == NULL) {
+ result = vk_errorf(device, VK_ERROR_UNKNOWN,
+ "Failed to compile shader to NIR");
+ break;
+ }
+
+ struct vk_shader_compile_info info;
+ struct set_layouts set_layouts;
+ vk_shader_compile_info_init(&info, &set_layouts,
+ vk_info, &rs, nir);
+
+ struct vk_shader *shader;
+ result = ops->compile(device, 1, &info, NULL /* state */,
+ pAllocator, &shader);
+ if (result != VK_SUCCESS)
+ break;
+
+ pShaders[i] = vk_shader_to_handle(shader);
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unknown shader code type");
+ }
+
+ if (first_fail_or_success == VK_SUCCESS)
+ first_fail_or_success = result;
+ }
+
+ if (linked_count > 0) {
+ struct set_layouts set_layouts[VK_MAX_LINKED_SHADER_STAGES];
+ struct vk_shader_compile_info infos[VK_MAX_LINKED_SHADER_STAGES];
+ VkResult result = VK_SUCCESS;
+
+ /* Sort so we guarantee the driver always gets them in-order */
+ qsort(linked, linked_count, sizeof(*linked), cmp_stage_idx);
+
+ /* Memset for easy error handling */
+ memset(infos, 0, sizeof(infos));
+
+ for (uint32_t l = 0; l < linked_count; l++) {
+ const VkShaderCreateInfoEXT *vk_info = &pCreateInfos[linked[l].idx];
+
+ nir_shader *nir = vk_shader_to_nir(device, vk_info, &rs);
+ if (nir == NULL) {
+ result = vk_errorf(device, VK_ERROR_UNKNOWN,
+ "Failed to compile shader to NIR");
+ break;
+ }
+
+ vk_shader_compile_info_init(&infos[l], &set_layouts[l],
+ vk_info, &rs, nir);
+ }
+
+ if (result == VK_SUCCESS) {
+ struct vk_shader *shaders[VK_MAX_LINKED_SHADER_STAGES];
+
+ result = ops->compile(device, linked_count, infos, NULL /* state */,
+ pAllocator, shaders);
+ if (result == VK_SUCCESS) {
+ for (uint32_t l = 0; l < linked_count; l++)
+ pShaders[linked[l].idx] = vk_shader_to_handle(shaders[l]);
+ }
+ } else {
+ for (uint32_t l = 0; l < linked_count; l++) {
+ if (infos[l].nir != NULL)
+ ralloc_free(infos[l].nir);
+ }
+ }
+
+ if (first_fail_or_success == VK_SUCCESS)
+ first_fail_or_success = result;
+ }
+
+ return first_fail_or_success;
+}
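As a usage illustration of the linking path, an application creating a linked vertex/fragment pair from SPIR-V might look roughly like the sketch below; `vs_spirv`/`fs_spirv` and their sizes are assumed to exist, and set layouts and push constants are omitted:

   const VkShaderCreateInfoEXT infos[2] = {
      {
         .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT,
         .flags = VK_SHADER_CREATE_LINK_STAGE_BIT_EXT,
         .stage = VK_SHADER_STAGE_VERTEX_BIT,
         .nextStage = VK_SHADER_STAGE_FRAGMENT_BIT,
         .codeType = VK_SHADER_CODE_TYPE_SPIRV_EXT,
         .codeSize = vs_spirv_size,
         .pCode = vs_spirv,
         .pName = "main",
      },
      {
         .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT,
         .flags = VK_SHADER_CREATE_LINK_STAGE_BIT_EXT,
         .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
         .codeType = VK_SHADER_CODE_TYPE_SPIRV_EXT,
         .codeSize = fs_spirv_size,
         .pCode = fs_spirv,
         .pName = "main",
      },
   };
   VkShaderEXT shaders[2];
   VkResult result = vkCreateShadersEXT(device, 2, infos, NULL, shaders);

Both stages carry VK_SHADER_CREATE_LINK_STAGE_BIT_EXT, so the common code above sorts them into stage order and hands them to the driver's compile() in a single call.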
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyShaderEXT(VkDevice _device,
+ VkShaderEXT _shader,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_shader, shader, _shader);
+
+ if (shader == NULL)
+ return;
+
+ vk_shader_destroy(device, shader, pAllocator);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdBindShadersEXT(VkCommandBuffer commandBuffer,
+ uint32_t stageCount,
+ const VkShaderStageFlagBits *pStages,
+ const VkShaderEXT *pShaders)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+ const struct vk_device_shader_ops *ops = device->shader_ops;
+
+ STACK_ARRAY(gl_shader_stage, stages, stageCount);
+ STACK_ARRAY(struct vk_shader *, shaders, stageCount);
+
+ VkShaderStageFlags vk_stages = 0;
+ for (uint32_t i = 0; i < stageCount; i++) {
+ vk_stages |= pStages[i];
+ stages[i] = vk_to_mesa_shader_stage(pStages[i]);
+ shaders[i] = pShaders != NULL ? vk_shader_from_handle(pShaders[i]) : NULL;
+ }
+
+ vk_cmd_unbind_pipelines_for_stages(cmd_buffer, vk_stages);
+ if (vk_stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
+ vk_cmd_set_rp_attachments(cmd_buffer, ~0);
+
+ ops->cmd_bind_shaders(cmd_buffer, stageCount, stages, shaders);
+}
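On the application side the matching bind could look like this sketch, binding the pair created earlier and explicitly unbinding compute with VK_NULL_HANDLE (`cmd` and `shaders` are assumed to exist):

   const VkShaderStageFlagBits stages[] = {
      VK_SHADER_STAGE_VERTEX_BIT,
      VK_SHADER_STAGE_FRAGMENT_BIT,
      VK_SHADER_STAGE_COMPUTE_BIT,
   };
   const VkShaderEXT bind[] = {
      shaders[0],      /* vertex */
      shaders[1],      /* fragment */
      VK_NULL_HANDLE,  /* unbind compute */
   };
   vkCmdBindShadersEXT(cmd, 3, stages, bind);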
diff --git a/src/vulkan/runtime/vk_shader.h b/src/vulkan/runtime/vk_shader.h
new file mode 100644
index 00000000000..8fb5090b129
--- /dev/null
+++ b/src/vulkan/runtime/vk_shader.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright © 2024 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VK_SHADER_H
+#define VK_SHADER_H
+
+#include "compiler/spirv/nir_spirv.h"
+#include "vk_limits.h"
+#include "vk_pipeline_cache.h"
+
+#include "util/mesa-blake3.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct blob;
+struct nir_shader;
+struct vk_command_buffer;
+struct vk_device;
+struct vk_descriptor_set_layout;
+struct vk_dynamic_graphics_state;
+struct vk_graphics_pipeline_state;
+struct vk_physical_device;
+struct vk_pipeline;
+struct vk_pipeline_robustness_state;
+
+int vk_shader_cmp_graphics_stages(gl_shader_stage a, gl_shader_stage b);
+
+#define VK_SHADER_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_MESA 0x1000
+
+struct vk_shader_compile_info {
+ gl_shader_stage stage;
+ VkShaderCreateFlagsEXT flags;
+ VkShaderStageFlags next_stage_mask;
+ struct nir_shader *nir;
+
+ const struct vk_pipeline_robustness_state *robustness;
+
+ uint32_t set_layout_count;
+ struct vk_descriptor_set_layout * const *set_layouts;
+
+ uint32_t push_constant_range_count;
+ const VkPushConstantRange *push_constant_ranges;
+};
+
+struct vk_shader_ops;
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic error "-Wpadded"
+#endif
+struct vk_shader_pipeline_cache_key {
+ gl_shader_stage stage;
+ blake3_hash blake3;
+};
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+
+struct vk_shader {
+ struct vk_object_base base;
+
+ const struct vk_shader_ops *ops;
+
+ gl_shader_stage stage;
+
+ /* Used for the generic VkPipeline implementation */
+ struct {
+ struct vk_pipeline_cache_object cache_obj;
+ struct vk_shader_pipeline_cache_key cache_key;
+ } pipeline;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_shader, base, VkShaderEXT,
+ VK_OBJECT_TYPE_SHADER_EXT);
+
+struct vk_shader_ops {
+ /** Destroy a vk_shader_object */
+ void (*destroy)(struct vk_device *device,
+ struct vk_shader *shader,
+ const VkAllocationCallbacks* pAllocator);
+
+ /** Serialize a vk_shader to a blob
+ *
+ * The driver should only write its own payload here; it does not need to
+ * add any validation data of its own. The common code wraps the driver's
+ * data in a header carrying the shaderBinaryUUID, shaderBinaryVersion,
+ * total size, and a SHA-1 checksum, and verifies all of these in
+ * vkCreateShadersEXT before the driver's deserialize callback is invoked.
+ */
+ bool (*serialize)(struct vk_device *device,
+ const struct vk_shader *shader,
+ struct blob *blob);
+
+ /** Returns executable properties for this shader
+ *
+ * This is equivalent to vkGetPipelineExecutableProperties(), only for a
+ * single vk_shader.
+ */
+ VkResult (*get_executable_properties)(struct vk_device *device,
+ const struct vk_shader *shader,
+ uint32_t *executable_count,
+ VkPipelineExecutablePropertiesKHR *properties);
+
+ /** Returns executable statistics for this shader
+ *
+ * This is equivalent to vkGetPipelineExecutableStatistics(), only for a
+ * single vk_shader.
+ */
+ VkResult (*get_executable_statistics)(struct vk_device *device,
+ const struct vk_shader *shader,
+ uint32_t executable_index,
+ uint32_t *statistic_count,
+ VkPipelineExecutableStatisticKHR *statistics);
+
+ /** Returns executable internal representations for this shader
+ *
+ * This is equivalent to vkGetPipelineExecutableInternalRepresentations(),
+ * only for a single vk_shader.
+ */
+ VkResult (*get_executable_internal_representations)(
+ struct vk_device *device,
+ const struct vk_shader *shader,
+ uint32_t executable_index,
+ uint32_t *internal_representation_count,
+ VkPipelineExecutableInternalRepresentationKHR *internal_representations);
+};
+
+void *vk_shader_zalloc(struct vk_device *device,
+ const struct vk_shader_ops *ops,
+ gl_shader_stage stage,
+ const VkAllocationCallbacks *alloc,
+ size_t size);
+void vk_shader_free(struct vk_device *device,
+ const VkAllocationCallbacks *alloc,
+ struct vk_shader *shader);
+
+static inline void
+vk_shader_destroy(struct vk_device *device,
+ struct vk_shader *shader,
+ const VkAllocationCallbacks *alloc)
+{
+ shader->ops->destroy(device, shader, alloc);
+}
+
+struct vk_device_shader_ops {
+ /** Retrieves a NIR compiler options struct
+ *
+ * NIR compiler options are only allowed to vary based on physical device,
+ * stage, and robustness state.
+ */
+ const struct nir_shader_compiler_options *(*get_nir_options)(
+ struct vk_physical_device *device,
+ gl_shader_stage stage,
+ const struct vk_pipeline_robustness_state *rs);
+
+ /** Retrieves a SPIR-V options struct
+ *
+ * SPIR-V options are only allowed to vary based on physical device, stage,
+ * and robustness state.
+ */
+ struct spirv_to_nir_options (*get_spirv_options)(
+ struct vk_physical_device *device,
+ gl_shader_stage stage,
+ const struct vk_pipeline_robustness_state *rs);
+
+ /** Preprocesses a NIR shader
+ *
+ * This callback is optional.
+ *
+ * If non-NULL, this callback is invoked after the SPIR-V is parsed into
+ * NIR and before it is handed to compile(). The driver should do as much
+ * generic optimization and lowering as it can here. Importantly, the
+ * preprocess step only knows about the NIR input and the physical device,
+ * not any enabled device features or pipeline state. This allows us to
+ * potentially cache this shader and re-use it across pipelines.
+ */
+ void (*preprocess_nir)(struct vk_physical_device *device, nir_shader *nir);
+
+ /** True if the driver wants geometry stages linked
+ *
+ * If set to true, geometry stages will always be compiled with
+ * VK_SHADER_CREATE_LINK_STAGE_BIT_EXT when pipelines are used.
+ */
+ bool link_geom_stages;
+
+ /** Hash a vk_graphics_state object
+ *
+ * This callback hashes whatever bits of vk_graphics_pipeline_state might
+ * be used to compile a shader in one of the given stages.
+ */
+ void (*hash_graphics_state)(struct vk_physical_device *device,
+ const struct vk_graphics_pipeline_state *state,
+ VkShaderStageFlags stages,
+ blake3_hash blake3_out);
+
+ /** Compile (and potentially link) a set of shaders
+ *
+ * Unlike vkCreateShadersEXT, this callback will only ever be called with
+ * multiple shaders if VK_SHADER_CREATE_LINK_STAGE_BIT_EXT is set on all of
+ * them. We also guarantee that the shaders occur in the call in Vulkan
+ * pipeline stage order as dictated by vk_shader_cmp_graphics_stages().
+ *
+ * This callback consumes all input NIR shaders, regardless of whether or
+ * not it was successful.
+ */
+ VkResult (*compile)(struct vk_device *device,
+ uint32_t shader_count,
+ struct vk_shader_compile_info *infos,
+ const struct vk_graphics_pipeline_state *state,
+ const VkAllocationCallbacks* pAllocator,
+ struct vk_shader **shaders_out);
+
+ /** Create a vk_shader from a binary blob */
+ VkResult (*deserialize)(struct vk_device *device,
+ struct blob_reader *blob,
+ uint32_t binary_version,
+ const VkAllocationCallbacks* pAllocator,
+ struct vk_shader **shader_out);
+
+ /** Bind a set of shaders
+ *
+ * This is roughly equivalent to vkCmdBindShadersEXT()
+ */
+ void (*cmd_bind_shaders)(struct vk_command_buffer *cmd_buffer,
+ uint32_t stage_count,
+ const gl_shader_stage *stages,
+ struct vk_shader ** const shaders);
+
+ /** Sets dynamic state */
+ void (*cmd_set_dynamic_graphics_state)(struct vk_command_buffer *cmd_buffer,
+ const struct vk_dynamic_graphics_state *state);
+};
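A driver opting into this path fills the table in once; a hypothetical sketch in which every `drv_*` symbol is a placeholder for the driver's own implementation, and the `device->vk.shader_ops` assignment assumes the driver embeds vk_device as `vk`:

   static const struct vk_device_shader_ops drv_shader_device_ops = {
      .get_nir_options = drv_get_nir_options,
      .get_spirv_options = drv_get_spirv_options,
      .preprocess_nir = drv_preprocess_nir,
      .hash_graphics_state = drv_hash_graphics_state,
      .compile = drv_compile_shaders,
      .deserialize = drv_deserialize_shader,
      .cmd_bind_shaders = drv_cmd_bind_shaders,
      .cmd_set_dynamic_graphics_state = drv_cmd_set_dynamic_graphics_state,
   };

   /* Typically assigned during device creation: */
   device->vk.shader_ops = &drv_shader_device_ops;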
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SHADER_H */
diff --git a/src/vulkan/runtime/vk_shader_module.c b/src/vulkan/runtime/vk_shader_module.c
new file mode 100644
index 00000000000..556d4f0bf2a
--- /dev/null
+++ b/src/vulkan/runtime/vk_shader_module.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_shader_module.h"
+
+#include "vk_alloc.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_log.h"
+#include "vk_nir.h"
+#include "vk_pipeline.h"
+#include "vk_util.h"
+
+void vk_shader_module_init(struct vk_device *device,
+ struct vk_shader_module *module,
+ const VkShaderModuleCreateInfo *create_info)
+{
+ vk_object_base_init(device, &module->base, VK_OBJECT_TYPE_SHADER_MODULE);
+
+ module->nir = NULL;
+
+ module->size = create_info->codeSize;
+ memcpy(module->data, create_info->pCode, module->size);
+
+ _mesa_blake3_compute(module->data, module->size, module->hash);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateShaderModule(VkDevice _device,
+ const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkShaderModule *pShaderModule)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_shader_module *module;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
+ assert(pCreateInfo->flags == 0);
+
+ module = vk_alloc2(&device->alloc, pAllocator,
+ sizeof(*module) + pCreateInfo->codeSize, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (module == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ vk_shader_module_init(device, module, pCreateInfo);
+
+ *pShaderModule = vk_shader_module_to_handle(module);
+
+ return VK_SUCCESS;
+}
+
+const uint8_t vk_shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE] = "MESA-BLAKE3";
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetShaderModuleIdentifierEXT(VkDevice _device,
+ VkShaderModule _module,
+ VkShaderModuleIdentifierEXT *pIdentifier)
+{
+ VK_FROM_HANDLE(vk_shader_module, module, _module);
+ memcpy(pIdentifier->identifier, module->hash, sizeof(module->hash));
+ pIdentifier->identifierSize = sizeof(module->hash);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetShaderModuleCreateInfoIdentifierEXT(VkDevice _device,
+ const VkShaderModuleCreateInfo *pCreateInfo,
+ VkShaderModuleIdentifierEXT *pIdentifier)
+{
+ _mesa_blake3_compute(pCreateInfo->pCode, pCreateInfo->codeSize,
+ pIdentifier->identifier);
+ pIdentifier->identifierSize = sizeof(blake3_hash);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroyShaderModule(VkDevice _device,
+ VkShaderModule _module,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_shader_module, module, _module);
+
+ if (!module)
+ return;
+
+ /* NIR modules (which are only created internally by the driver) are not
+ * dynamically allocated so we should never call this for them.
+ * Instead the driver is responsible for freeing the NIR code when it is
+ * no longer needed.
+ */
+ assert(module->nir == NULL);
+
+ vk_object_free(device, pAllocator, module);
+}
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
+uint32_t
+vk_shader_module_spirv_version(const struct vk_shader_module *mod)
+{
+ if (mod->nir != NULL)
+ return 0;
+
+ return vk_spirv_version((uint32_t *)mod->data, mod->size);
+}
+
+VkResult
+vk_shader_module_to_nir(struct vk_device *device,
+ const struct vk_shader_module *mod,
+ gl_shader_stage stage,
+ const char *entrypoint_name,
+ const VkSpecializationInfo *spec_info,
+ const struct spirv_to_nir_options *spirv_options,
+ const nir_shader_compiler_options *nir_options,
+ void *mem_ctx, nir_shader **nir_out)
+{
+ const VkPipelineShaderStageCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = mesa_to_vk_shader_stage(stage),
+ .module = vk_shader_module_to_handle((struct vk_shader_module *)mod),
+ .pName = entrypoint_name,
+ .pSpecializationInfo = spec_info,
+ };
+ return vk_pipeline_shader_stage_to_nir(device, &info,
+ spirv_options, nir_options,
+ mem_ctx, nir_out);
+}
diff --git a/src/vulkan/util/vk_shader_module.h b/src/vulkan/runtime/vk_shader_module.h
index d4e64dfc35a..c5c81cf3778 100644
--- a/src/vulkan/util/vk_shader_module.h
+++ b/src/vulkan/runtime/vk_shader_module.h
@@ -24,7 +24,10 @@
#ifndef VK_SHADER_MODULE_H
#define VK_SHADER_MODULE_H
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "util/mesa-blake3.h"
+#include "compiler/shader_enums.h"
#include "vk_object.h"
#ifdef __cplusplus
@@ -32,21 +35,41 @@ extern "C" {
#endif
struct nir_shader;
+struct nir_shader_compiler_options;
+struct spirv_to_nir_options;
struct vk_shader_module {
struct vk_object_base base;
struct nir_shader *nir;
- unsigned char sha1[20];
+ blake3_hash hash;
uint32_t size;
char data[0];
};
+extern const uint8_t vk_shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE];
+
VK_DEFINE_NONDISP_HANDLE_CASTS(vk_shader_module, base, VkShaderModule,
VK_OBJECT_TYPE_SHADER_MODULE)
+void vk_shader_module_init(struct vk_device *device,
+ struct vk_shader_module *module,
+ const VkShaderModuleCreateInfo *create_info);
+
+uint32_t vk_shader_module_spirv_version(const struct vk_shader_module *mod);
+
+VkResult
+vk_shader_module_to_nir(struct vk_device *device,
+ const struct vk_shader_module *mod,
+ gl_shader_stage stage,
+ const char *entrypoint_name,
+ const VkSpecializationInfo *spec_info,
+ const struct spirv_to_nir_options *spirv_options,
+ const struct nir_shader_compiler_options *nir_options,
+ void *mem_ctx, struct nir_shader **nir_out);
+
/* this should only be used for stack-allocated, temporary objects */
#define vk_shader_module_handle_from_nir(_nir) \
- vk_shader_module_to_handle(&(struct vk_shader_module) { \
+ ((VkShaderModule)(uintptr_t)&(struct vk_shader_module) { \
.base.type = VK_OBJECT_TYPE_SHADER_MODULE, \
.nir = _nir, \
})
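A typical use of this macro is handing an internally generated NIR shader to code that expects a VkShaderModule without heap-allocating a module; a sketch where `build_meta_nir()` is a hypothetical helper returning a nir_shader:

   nir_shader *nir = build_meta_nir(device);

   const VkPipelineShaderStageCreateInfo stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      /* Stack-allocated, temporary module wrapping the NIR */
      .module = vk_shader_module_handle_from_nir(nir),
      .pName = "main",
   };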
diff --git a/src/vulkan/runtime/vk_standard_sample_locations.c b/src/vulkan/runtime/vk_standard_sample_locations.c
new file mode 100644
index 00000000000..de3fa499301
--- /dev/null
+++ b/src/vulkan/runtime/vk_standard_sample_locations.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_standard_sample_locations.h"
+
+#include "vk_graphics_state.h"
+
+/**
+ * 1x MSAA has a single sample at the center: (0.5, 0.5) -> (0x8, 0x8).
+ */
+static const struct vk_sample_locations_state sample_locations_state_1x = {
+ .per_pixel = VK_SAMPLE_COUNT_1_BIT,
+ .grid_size = { 1, 1 },
+ .locations = {
+ { 0.5, 0.5 },
+ },
+};
+
+
+/**
+ * 2x MSAA sample positions are (0.25, 0.25) and (0.75, 0.75):
+ * 4 c
+ * 4 0
+ * c 1
+ */
+static const struct vk_sample_locations_state sample_locations_state_2x = {
+ .per_pixel = VK_SAMPLE_COUNT_2_BIT,
+ .grid_size = { 1, 1 },
+ .locations = {
+ { 0.75, 0.75 },
+ { 0.25, 0.25 },
+ },
+};
+
+/**
+ * Sample positions:
+ * 2 6 a e
+ * 2 0
+ * 6 1
+ * a 2
+ * e 3
+ */
+static const struct vk_sample_locations_state sample_locations_state_4x = {
+ .per_pixel = VK_SAMPLE_COUNT_4_BIT,
+ .grid_size = { 1, 1 },
+ .locations = {
+ { 0.375, 0.125 },
+ { 0.875, 0.375 },
+ { 0.125, 0.625 },
+ { 0.625, 0.875 },
+ },
+};
+
+/**
+ * Sample positions:
+ * 1 3 5 7 9 b d f
+ * 1 7
+ * 3 3
+ * 5 0
+ * 7 5
+ * 9 2
+ * b 1
+ * d 4
+ * f 6
+ */
+static const struct vk_sample_locations_state sample_locations_state_8x = {
+ .per_pixel = VK_SAMPLE_COUNT_8_BIT,
+ .grid_size = { 1, 1 },
+ .locations = {
+ { 0.5625, 0.3125 },
+ { 0.4375, 0.6875 },
+ { 0.8125, 0.5625 },
+ { 0.3125, 0.1875 },
+ { 0.1875, 0.8125 },
+ { 0.0625, 0.4375 },
+ { 0.6875, 0.9375 },
+ { 0.9375, 0.0625 },
+ },
+};
+
+/**
+ * Sample positions:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 a b c d e f
+ * 0 15
+ * 1 9
+ * 2 10
+ * 3 7
+ * 4 13
+ * 5 1
+ * 6 4
+ * 7 3
+ * 8 12
+ * 9 0
+ * a 2
+ * b 6
+ * c 11
+ * d 5
+ * e 8
+ * f 14
+ */
+static const struct vk_sample_locations_state sample_locations_state_16x = {
+ .per_pixel = VK_SAMPLE_COUNT_16_BIT,
+ .grid_size = { 1, 1 },
+ .locations = {
+ { 0.5625, 0.5625 },
+ { 0.4375, 0.3125 },
+ { 0.3125, 0.6250 },
+ { 0.7500, 0.4375 },
+ { 0.1875, 0.3750 },
+ { 0.6250, 0.8125 },
+ { 0.8125, 0.6875 },
+ { 0.6875, 0.1875 },
+ { 0.3750, 0.8750 },
+ { 0.5000, 0.0625 },
+ { 0.2500, 0.1250 },
+ { 0.1250, 0.7500 },
+ { 0.0000, 0.5000 },
+ { 0.9375, 0.2500 },
+ { 0.8750, 0.9375 },
+ { 0.0625, 0.0000 },
+ },
+};
+
+const struct vk_sample_locations_state *
+vk_standard_sample_locations_state(VkSampleCountFlagBits sample_count)
+{
+ switch (sample_count) {
+ case 1: return &sample_locations_state_1x;
+ case 2: return &sample_locations_state_2x;
+ case 4: return &sample_locations_state_4x;
+ case 8: return &sample_locations_state_8x;
+ case 16: return &sample_locations_state_16x;
+ default: unreachable("Sample count has no standard locations");
+ }
+}
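As a small usage sketch, a driver can fetch the standard pattern and convert the positions to the 1/16-pixel hex grid used in the comments above (0.375 -> 0x6, 0.875 -> 0xe, and so on):

   const struct vk_sample_locations_state *sl =
      vk_standard_sample_locations_state(VK_SAMPLE_COUNT_4_BIT);

   for (uint32_t s = 0; s < 4; s++) {
      uint32_t x = (uint32_t)(sl->locations[s].x * 16.0f);
      uint32_t y = (uint32_t)(sl->locations[s].y * 16.0f);
      /* program (x, y) into the hardware's sample pattern for sample s */
   }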
diff --git a/src/vulkan/runtime/vk_standard_sample_locations.h b/src/vulkan/runtime/vk_standard_sample_locations.h
new file mode 100644
index 00000000000..8dc57a4acd2
--- /dev/null
+++ b/src/vulkan/runtime/vk_standard_sample_locations.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_STANDARD_SAMPLE_LOCATIONS_H
+#define VK_STANDARD_SAMPLE_LOCATIONS_H
+
+#include "vulkan/vulkan_core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_sample_locations_state;
+
+/** Returns standard sample locations for a given sample count
+ *
+ * These are the sample locations defined in the Vulkan spec for when
+ * standardSampleLocations is supported.
+ */
+const struct vk_sample_locations_state*
+vk_standard_sample_locations_state(VkSampleCountFlagBits sample_count);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_STANDARD_SAMPLE_LOCATIONS_H */
diff --git a/src/vulkan/runtime/vk_sync.c b/src/vulkan/runtime/vk_sync.c
new file mode 100644
index 00000000000..da680ca8a10
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_sync.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "util/u_debug.h"
+#include "util/macros.h"
+#include "util/os_time.h"
+
+#include "vk_alloc.h"
+#include "vk_device.h"
+#include "vk_log.h"
+
+static void
+vk_sync_type_validate(const struct vk_sync_type *type)
+{
+ assert(type->init);
+ assert(type->finish);
+
+ assert(type->features & (VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_TIMELINE));
+
+ if (type->features & VK_SYNC_FEATURE_TIMELINE) {
+ assert(type->features & VK_SYNC_FEATURE_GPU_WAIT);
+ assert(type->features & VK_SYNC_FEATURE_CPU_WAIT);
+ assert(type->features & VK_SYNC_FEATURE_CPU_SIGNAL);
+ assert(type->features & (VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL |
+ VK_SYNC_FEATURE_WAIT_PENDING));
+ assert(type->signal);
+ assert(type->get_value);
+ }
+
+ if (!(type->features & VK_SYNC_FEATURE_BINARY)) {
+ assert(!(type->features & (VK_SYNC_FEATURE_GPU_MULTI_WAIT |
+ VK_SYNC_FEATURE_CPU_RESET)));
+ assert(!type->import_sync_file);
+ assert(!type->export_sync_file);
+ }
+
+ if (type->features & VK_SYNC_FEATURE_CPU_WAIT) {
+ assert(type->wait || type->wait_many);
+ } else {
+ assert(!(type->features & (VK_SYNC_FEATURE_WAIT_ANY |
+ VK_SYNC_FEATURE_WAIT_PENDING)));
+ }
+
+ if (type->features & VK_SYNC_FEATURE_GPU_MULTI_WAIT)
+ assert(type->features & VK_SYNC_FEATURE_GPU_WAIT);
+
+ if (type->features & VK_SYNC_FEATURE_CPU_RESET)
+ assert(type->reset);
+
+ if (type->features & VK_SYNC_FEATURE_CPU_SIGNAL)
+ assert(type->signal);
+}
+
+VkResult
+vk_sync_init(struct vk_device *device,
+ struct vk_sync *sync,
+ const struct vk_sync_type *type,
+ enum vk_sync_flags flags,
+ uint64_t initial_value)
+{
+ vk_sync_type_validate(type);
+
+ if (flags & VK_SYNC_IS_TIMELINE)
+ assert(type->features & VK_SYNC_FEATURE_TIMELINE);
+ else
+ assert(type->features & VK_SYNC_FEATURE_BINARY);
+
+ assert(type->size >= sizeof(*sync));
+ memset(sync, 0, type->size);
+ sync->type = type;
+ sync->flags = flags;
+
+ return type->init(device, sync, initial_value);
+}
+
+void
+vk_sync_finish(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ sync->type->finish(device, sync);
+}
+
+VkResult
+vk_sync_create(struct vk_device *device,
+ const struct vk_sync_type *type,
+ enum vk_sync_flags flags,
+ uint64_t initial_value,
+ struct vk_sync **sync_out)
+{
+ struct vk_sync *sync;
+
+ sync = vk_alloc(&device->alloc, type->size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (sync == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ VkResult result = vk_sync_init(device, sync, type, flags, initial_value);
+ if (result != VK_SUCCESS) {
+ vk_free(&device->alloc, sync);
+ return result;
+ }
+
+ *sync_out = sync;
+
+ return VK_SUCCESS;
+}
+
+void
+vk_sync_destroy(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ vk_sync_finish(device, sync);
+ vk_free(&device->alloc, sync);
+}
+
+VkResult
+vk_sync_signal(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t value)
+{
+ assert(sync->type->features & VK_SYNC_FEATURE_CPU_SIGNAL);
+
+ if (sync->flags & VK_SYNC_IS_TIMELINE)
+ assert(value > 0);
+ else
+ assert(value == 0);
+
+ return sync->type->signal(device, sync, value);
+}
+
+VkResult
+vk_sync_get_value(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t *value)
+{
+ assert(sync->flags & VK_SYNC_IS_TIMELINE);
+ return sync->type->get_value(device, sync, value);
+}
+
+VkResult
+vk_sync_reset(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ assert(sync->type->features & VK_SYNC_FEATURE_CPU_RESET);
+ assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
+ return sync->type->reset(device, sync);
+}
+
+VkResult vk_sync_move(struct vk_device *device,
+ struct vk_sync *dst,
+ struct vk_sync *src)
+{
+ assert(!(dst->flags & VK_SYNC_IS_TIMELINE));
+ assert(!(src->flags & VK_SYNC_IS_TIMELINE));
+ assert(dst->type == src->type);
+
+ return src->type->move(device, dst, src);
+}
+
+static void
+assert_valid_wait(struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags)
+{
+ assert(sync->type->features & VK_SYNC_FEATURE_CPU_WAIT);
+
+ if (!(sync->flags & VK_SYNC_IS_TIMELINE))
+ assert(wait_value == 0);
+
+ if (wait_flags & VK_SYNC_WAIT_PENDING)
+ assert(sync->type->features & VK_SYNC_FEATURE_WAIT_PENDING);
+}
+
+static uint64_t
+get_max_abs_timeout_ns(void)
+{
+ static int max_timeout_ms = -1;
+ if (max_timeout_ms < 0)
+ max_timeout_ms = debug_get_num_option("MESA_VK_MAX_TIMEOUT", 0);
+
+ if (max_timeout_ms == 0)
+ return UINT64_MAX;
+ else
+ return os_time_get_absolute_timeout(max_timeout_ms * 1000000ull);
+}
+
+static VkResult
+__vk_sync_wait(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ assert_valid_wait(sync, wait_value, wait_flags);
+
+ /* This doesn't make sense for a single wait */
+ assert(!(wait_flags & VK_SYNC_WAIT_ANY));
+
+ if (sync->type->wait) {
+ return sync->type->wait(device, sync, wait_value,
+ wait_flags, abs_timeout_ns);
+ } else {
+ struct vk_sync_wait wait = {
+ .sync = sync,
+ .stage_mask = ~(VkPipelineStageFlags2)0,
+ .wait_value = wait_value,
+ };
+ return sync->type->wait_many(device, 1, &wait, wait_flags,
+ abs_timeout_ns);
+ }
+}
+
+VkResult
+vk_sync_wait(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ uint64_t max_abs_timeout_ns = get_max_abs_timeout_ns();
+ if (abs_timeout_ns > max_abs_timeout_ns) {
+ VkResult result =
+ __vk_sync_wait(device, sync, wait_value, wait_flags,
+ max_abs_timeout_ns);
+ if (unlikely(result == VK_TIMEOUT))
+ return vk_device_set_lost(device, "Maximum timeout exceeded!");
+ return result;
+ } else {
+ return __vk_sync_wait(device, sync, wait_value, wait_flags,
+ abs_timeout_ns);
+ }
+}
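Callers pass absolute timeouts in nanoseconds; a common pattern for a driver turning a relative client timeout (e.g. from vkWaitForFences) into one is sketched below, reusing the os_time helper already included by this file (VK_SYNC_WAIT_COMPLETE is the default wait-for-signal flag from vk_sync.h):

   static VkResult
   wait_relative(struct vk_device *device, struct vk_sync *sync,
                 uint64_t timeout_ns)
   {
      const uint64_t abs_timeout_ns = os_time_get_absolute_timeout(timeout_ns);

      /* wait_value is 0 for binary payloads */
      return vk_sync_wait(device, sync, 0, VK_SYNC_WAIT_COMPLETE,
                          abs_timeout_ns);
   }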
+
+static bool
+can_wait_many(uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags)
+{
+ if (waits[0].sync->type->wait_many == NULL)
+ return false;
+
+ if ((wait_flags & VK_SYNC_WAIT_ANY) &&
+ !(waits[0].sync->type->features & VK_SYNC_FEATURE_WAIT_ANY))
+ return false;
+
+ for (uint32_t i = 0; i < wait_count; i++) {
+ assert_valid_wait(waits[i].sync, waits[i].wait_value, wait_flags);
+ if (waits[i].sync->type != waits[0].sync->type)
+ return false;
+ }
+
+ return true;
+}
+
+static VkResult
+__vk_sync_wait_many(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ if (wait_count == 0)
+ return VK_SUCCESS;
+
+ if (wait_count == 1) {
+ return __vk_sync_wait(device, waits[0].sync, waits[0].wait_value,
+ wait_flags & ~VK_SYNC_WAIT_ANY, abs_timeout_ns);
+ }
+
+ if (can_wait_many(wait_count, waits, wait_flags)) {
+ return waits[0].sync->type->wait_many(device, wait_count, waits,
+ wait_flags, abs_timeout_ns);
+ } else if (wait_flags & VK_SYNC_WAIT_ANY) {
+ /* If we have multiple syncs and they don't support wait_any or they're
+ * not all the same type, there's nothing better we can do than spin.
+ */
+ do {
+ for (uint32_t i = 0; i < wait_count; i++) {
+ VkResult result = __vk_sync_wait(device, waits[i].sync,
+ waits[i].wait_value,
+ wait_flags & ~VK_SYNC_WAIT_ANY,
+ 0 /* abs_timeout_ns */);
+ if (result != VK_TIMEOUT)
+ return result;
+ }
+ } while (os_time_get_nano() < abs_timeout_ns);
+
+ return VK_TIMEOUT;
+ } else {
+ for (uint32_t i = 0; i < wait_count; i++) {
+ VkResult result = __vk_sync_wait(device, waits[i].sync,
+ waits[i].wait_value,
+ wait_flags, abs_timeout_ns);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ return VK_SUCCESS;
+ }
+}
+
+VkResult
+vk_sync_wait_many(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ uint64_t max_abs_timeout_ns = get_max_abs_timeout_ns();
+ if (abs_timeout_ns > max_abs_timeout_ns) {
+ VkResult result =
+ __vk_sync_wait_many(device, wait_count, waits, wait_flags,
+ max_abs_timeout_ns);
+ if (unlikely(result == VK_TIMEOUT))
+ return vk_device_set_lost(device, "Maximum timeout exceeded!");
+ return result;
+ } else {
+ return __vk_sync_wait_many(device, wait_count, waits, wait_flags,
+ abs_timeout_ns);
+ }
+}
+
+VkResult
+vk_sync_import_opaque_fd(struct vk_device *device,
+ struct vk_sync *sync,
+ int fd)
+{
+ VkResult result = sync->type->import_opaque_fd(device, sync, fd);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ sync->flags |= VK_SYNC_IS_SHAREABLE |
+ VK_SYNC_IS_SHARED;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_sync_export_opaque_fd(struct vk_device *device,
+ struct vk_sync *sync,
+ int *fd)
+{
+ assert(sync->flags & VK_SYNC_IS_SHAREABLE);
+
+ VkResult result = sync->type->export_opaque_fd(device, sync, fd);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ sync->flags |= VK_SYNC_IS_SHARED;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_sync_import_sync_file(struct vk_device *device,
+ struct vk_sync *sync,
+ int sync_file)
+{
+ assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
+
+ /* Silently handle negative file descriptors in case the driver doesn't
+ * want to bother.
+ */
+ if (sync_file < 0 && sync->type->signal)
+ return sync->type->signal(device, sync, 0);
+
+ return sync->type->import_sync_file(device, sync, sync_file);
+}
+
+VkResult
+vk_sync_export_sync_file(struct vk_device *device,
+ struct vk_sync *sync,
+ int *sync_file)
+{
+ assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
+ return sync->type->export_sync_file(device, sync, sync_file);
+}
+
+VkResult
+vk_sync_import_win32_handle(struct vk_device *device,
+ struct vk_sync *sync,
+ void *handle,
+ const wchar_t *name)
+{
+ VkResult result = sync->type->import_win32_handle(device, sync, handle, name);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ sync->flags |= VK_SYNC_IS_SHAREABLE |
+ VK_SYNC_IS_SHARED;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_sync_export_win32_handle(struct vk_device *device,
+ struct vk_sync *sync,
+ void **handle)
+{
+ assert(sync->flags & VK_SYNC_IS_SHAREABLE);
+
+ VkResult result = sync->type->export_win32_handle(device, sync, handle);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ sync->flags |= VK_SYNC_IS_SHARED;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_sync_set_win32_export_params(struct vk_device *device,
+ struct vk_sync *sync,
+ const void *security_attributes,
+ uint32_t access,
+ const wchar_t *name)
+{
+ assert(sync->flags & VK_SYNC_IS_SHARED);
+
+ return sync->type->set_win32_export_params(device, sync, security_attributes, access, name);
+}
diff --git a/src/vulkan/runtime/vk_sync.h b/src/vulkan/runtime/vk_sync.h
new file mode 100644
index 00000000000..15d85dc9253
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_SYNC_H
+#define VK_SYNC_H
+
+#include <stdbool.h>
+#include <vulkan/vulkan_core.h>
+
+#include "util/macros.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_device;
+struct vk_sync;
+
+enum vk_sync_features {
+ /** Set if a sync type supports the binary mode of operation
+ *
+ * In binary mode, a vk_sync has two modes: signaled and unsignaled. If
+ * it supports CPU_RESET, it can be changed from signaled to unsignaled on
+ * the CPU via vk_sync_reset(). If it supports CPU_SIGNAL, it can be
+ * changed from unsignaled to signaled on the CPU via vk_sync_signal().
+ *
+ * Binary vk_sync types may also support WAIT_PENDING, in which case they
+ * have a third hidden pending state. Once such a vk_sync has been submitted to
+ * the kernel driver for signaling, it is in the pending state and remains
+ * there until the work is complete at which point it enters the signaled
+ * state. This pending state is visible across processes for shared
+ * vk_sync types. This is used by the threaded submit mode to ensure
+ * that everything gets submitted to the kernel driver in-order.
+ *
+ * A vk_sync operates in binary mode if VK_SYNC_IS_TIMELINE is not set
+ * in vk_sync::flags.
+ */
+ VK_SYNC_FEATURE_BINARY = (1 << 0),
+
+ /** Set if a sync type supports the timeline mode of operation
+ *
+ * In timeline mode, a vk_sync has a monotonically increasing 64-bit value
+ * which represents the most recently signaled time point. Waits are relative
+ * to time points. Instead of waiting for the vk_sync to enter a signaled
+ * state, you wait for its 64-bit value to be at least some wait value.
+ *
+ * Timeline vk_sync types can also support WAIT_PENDING. In this case, the
+ * wait is not for a pending state, as such, but rather for someone to have
+ * submitted a kernel request which will signal a time point with at least
+ * that value. Logically, you can think of this as having two timelines,
+ * the real timeline and a pending timeline which runs slightly ahead of
+ * the real one. As with binary vk_sync types, this is used by threaded
+ * submit to re-order things so that the kernel requests happen in a valid
+ * linear order.
+ *
+ * A vk_sync operates in timeline mode if VK_SYNC_IS_TIMELINE is set in
+ * vk_sync::flags.
+ */
+ VK_SYNC_FEATURE_TIMELINE = (1 << 1),
+
+ /** Set if this sync supports GPU waits */
+ VK_SYNC_FEATURE_GPU_WAIT = (1 << 2),
+
+ /** Set if a sync type supports multiple GPU waits on one signal state
+ *
+ * The Vulkan spec for VkSemaphore requires GPU wait and signal operations
+ * to have a one-to-one relationship. This is formally described by saying
+ * that the VkSemaphore gets implicitly reset on wait. However, it is
+ * often useful to have well-defined multi-wait. If a binary vk_sync
+ * supports multi-wait, then any number of kernel requests can be submitted
+ * which wait on one signal operation. This also implies that you can
+ * signal twice back-to-back (there are 0 waits on the first signal).
+ *
+ * This feature only applies to binary vk_sync objects.
+ */
+ VK_SYNC_FEATURE_GPU_MULTI_WAIT = (1 << 3),
+
+ /** Set if a sync type supports vk_sync_wait() and vk_sync_wait_many() */
+ VK_SYNC_FEATURE_CPU_WAIT = (1 << 4),
+
+ /** Set if a sync type supports vk_sync_reset()
+ *
+ * This feature only applies to binary vk_sync objects.
+ */
+ VK_SYNC_FEATURE_CPU_RESET = (1 << 5),
+
+ /** Set if a sync type supports vk_sync_signal() */
+ VK_SYNC_FEATURE_CPU_SIGNAL = (1 << 6),
+
+ /** Set if sync_type::wait_many supports the VK_SYNC_WAIT_ANY bit
+ *
+ * vk_sync_wait_many() will support the bit regardless. If the sync type
+ * doesn't support it natively, it will be emulated.
+ */
+ VK_SYNC_FEATURE_WAIT_ANY = (1 << 7),
+
+ /** Set if a sync type supports the VK_SYNC_WAIT_PENDING bit
+ *
+ * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for descriptions
+ * of what this does in each case.
+ */
+ VK_SYNC_FEATURE_WAIT_PENDING = (1 << 8),
+
+ /** Set if a sync type natively supports wait-before-signal
+ *
+ * If this is set then the underlying OS primitive supports submitting
+ * kernel requests which wait on the vk_sync before submitting a kernel
+ * request which would cause that wait to unblock.
+ */
+ VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL = (1 << 9),
+};
+
+struct vk_sync_wait;
+
+enum vk_sync_wait_flags {
+ /** Placeholder for 0 to make vk_sync_wait() calls more clear */
+ VK_SYNC_WAIT_COMPLETE = 0,
+
+ /** If set, only wait for the vk_sync operation to be pending
+ *
+ * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for descriptions
+ * of what this does in each case.
+ */
+ VK_SYNC_WAIT_PENDING = (1 << 0),
+
+ /** If set, wait for any of the vk_sync operations to complete
+ *
+ * This is as opposed to waiting for all of them. There is no guarantee
+ * that vk_sync_wait_many() will return immediately after the first
+ * operation completes but it will make a best effort to return as soon as
+ * possible.
+ */
+ VK_SYNC_WAIT_ANY = (1 << 1),
+};
+
+struct vk_sync_type {
+ /** Size of this sync type */
+ size_t size;
+
+ /** Features supported by this sync type */
+ enum vk_sync_features features;
+
+ /** Initialize a vk_sync
+ *
+ * The base vk_sync will already be initialized and the sync type set
+ * before this function is called. If any OS primitives need to be
+ * allocated, that should be done here.
+ */
+ VkResult (*init)(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value);
+
+ /** Finish a vk_sync
+ *
+ * This should free any internal data stored in this vk_sync.
+ */
+ void (*finish)(struct vk_device *device,
+ struct vk_sync *sync);
+
+ /** Signal a vk_sync
+ *
+ * For non-timeline sync types, value == 0.
+ */
+ VkResult (*signal)(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t value);
+
+ /** Get the timeline value for a vk_sync */
+ VkResult (*get_value)(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t *value);
+
+ /** Reset a non-timeline vk_sync */
+ VkResult (*reset)(struct vk_device *device,
+ struct vk_sync *sync);
+
+ /** Moves the guts of one binary vk_sync to another
+ *
+ * This moves the current binary vk_sync event from src to dst and resets
+ * src. If dst contained an event, it is discarded.
+ *
+ * This is required for all binary vk_sync types that can be used for a
+ * semaphore wait in conjunction with real timeline semaphores.
+ */
+ VkResult (*move)(struct vk_device *device,
+ struct vk_sync *dst,
+ struct vk_sync *src);
+
+ /** Wait on a vk_sync
+ *
+ * For a timeline vk_sync, wait_value is the timeline value to wait for.
+ * This function should not return VK_SUCCESS until get_value on that
+ * vk_sync would return a value >= wait_value. A wait_value of zero is
+ * allowed in which case the wait is a no-op. For a non-timeline vk_sync,
+ * wait_value should be ignored.
+ *
+ * This function is optional. If the sync type needs to support CPU waits,
+ * at least one of wait or wait_many must be provided. If one is missing,
+ * it will be implemented in terms of the other.
+ */
+ VkResult (*wait)(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns);
+
+ /** Wait for multiple vk_sync events
+ *
+ * If VK_SYNC_WAIT_ANY is set, it will return after at least one of the
+ * wait events is complete instead of waiting for all of them.
+ *
+ * See wait for more details.
+ */
+ VkResult (*wait_many)(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns);
+
+ /** Permanently imports the given FD into this vk_sync
+ *
+ * This replaces the guts of the given vk_sync with whatever is in the FD.
+ * In a sense, this vk_sync now aliases whatever vk_sync the FD was
+ * exported from.
+ */
+ VkResult (*import_opaque_fd)(struct vk_device *device,
+ struct vk_sync *sync,
+ int fd);
+
+ /** Export the guts of this vk_sync to an FD */
+ VkResult (*export_opaque_fd)(struct vk_device *device,
+ struct vk_sync *sync,
+ int *fd);
+
+ /** Imports a sync file into this binary vk_sync
+ *
+ * If this completes successfully, the vk_sync will now signal whenever
+ * the sync file signals.
+ *
+ * If sync_file == -1, the vk_sync should be signaled immediately. If
+ * the vk_sync_type implements signal, sync_file will never be -1.
+ */
+ VkResult (*import_sync_file)(struct vk_device *device,
+ struct vk_sync *sync,
+ int sync_file);
+
+ /** Exports the current binary vk_sync state as a sync file.
+ *
+ * The resulting sync file will contain the current event stored in this
+ * binary vk_sync. If the vk_sync is later modified to contain a new event,
+ * the sync file is unaffected.
+ */
+ VkResult (*export_sync_file)(struct vk_device *device,
+ struct vk_sync *sync,
+ int *sync_file);
+
+ /** Permanently imports the given handle or name into this vk_sync
+ *
+ * This replaces the guts of the given vk_sync with whatever is in the object.
+ * In a sense, this vk_sync now aliases whatever vk_sync the handle was
+ * exported from.
+ */
+ VkResult (*import_win32_handle)(struct vk_device *device,
+ struct vk_sync *sync,
+ void *handle,
+ const wchar_t *name);
+
+ /** Export the guts of this vk_sync to a handle and/or name */
+ VkResult (*export_win32_handle)(struct vk_device *device,
+ struct vk_sync *sync,
+ void **handle);
+
+ /** Vulkan puts these as creation params instead of export params */
+ VkResult (*set_win32_export_params)(struct vk_device *device,
+ struct vk_sync *sync,
+ const void *security_attributes,
+ uint32_t access,
+ const wchar_t *name);
+};
+
+enum vk_sync_flags {
+ /** Set if the vk_sync is a timeline */
+ VK_SYNC_IS_TIMELINE = (1 << 0),
+
+ /** Set if the vk_sync can have its payload shared */
+ VK_SYNC_IS_SHAREABLE = (1 << 1),
+
+ /** Set if the vk_sync has a shared payload */
+ VK_SYNC_IS_SHARED = (1 << 2),
+};
+
+struct vk_sync {
+ const struct vk_sync_type *type;
+ enum vk_sync_flags flags;
+};
+
+/* See VkSemaphoreSubmitInfo */
+struct vk_sync_wait {
+ struct vk_sync *sync;
+ VkPipelineStageFlags2 stage_mask;
+ uint64_t wait_value;
+};
+
+/* See VkSemaphoreSubmitInfo */
+struct vk_sync_signal {
+ struct vk_sync *sync;
+ VkPipelineStageFlags2 stage_mask;
+ uint64_t signal_value;
+};
+
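+/* Illustrative usage sketch of the CPU-side entry points below. This is not
+ * part of the API contract; "my_sync_type" is a hypothetical vk_sync_type
+ * provided by the driver, assumed to advertise CPU_SIGNAL and CPU_WAIT:
+ *
+ *    struct vk_sync *sync;
+ *    VkResult result = vk_sync_create(device, &my_sync_type, 0, 0, &sync);
+ *    if (result == VK_SUCCESS) {
+ *       result = vk_sync_signal(device, sync, 0);
+ *       if (result == VK_SUCCESS)
+ *          result = vk_sync_wait(device, sync, 0, VK_SYNC_WAIT_COMPLETE,
+ *                                UINT64_MAX);
+ *       vk_sync_destroy(device, sync);
+ *    }
+ */
+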
+VkResult MUST_CHECK vk_sync_init(struct vk_device *device,
+ struct vk_sync *sync,
+ const struct vk_sync_type *type,
+ enum vk_sync_flags flags,
+ uint64_t initial_value);
+
+void vk_sync_finish(struct vk_device *device,
+ struct vk_sync *sync);
+
+VkResult MUST_CHECK vk_sync_create(struct vk_device *device,
+ const struct vk_sync_type *type,
+ enum vk_sync_flags flags,
+ uint64_t initial_value,
+ struct vk_sync **sync_out);
+
+void vk_sync_destroy(struct vk_device *device,
+ struct vk_sync *sync);
+
+VkResult MUST_CHECK vk_sync_signal(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t value);
+
+VkResult MUST_CHECK vk_sync_get_value(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t *value);
+
+VkResult MUST_CHECK vk_sync_reset(struct vk_device *device,
+ struct vk_sync *sync);
+
+VkResult MUST_CHECK vk_sync_wait(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns);
+
+VkResult MUST_CHECK vk_sync_wait_many(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns);
+
+VkResult MUST_CHECK vk_sync_import_opaque_fd(struct vk_device *device,
+ struct vk_sync *sync,
+ int fd);
+
+VkResult MUST_CHECK vk_sync_export_opaque_fd(struct vk_device *device,
+ struct vk_sync *sync,
+ int *fd);
+
+VkResult MUST_CHECK vk_sync_import_sync_file(struct vk_device *device,
+ struct vk_sync *sync,
+ int sync_file);
+
+VkResult MUST_CHECK vk_sync_export_sync_file(struct vk_device *device,
+ struct vk_sync *sync,
+ int *sync_file);
+
+VkResult MUST_CHECK vk_sync_import_win32_handle(struct vk_device *device,
+ struct vk_sync *sync,
+ void *handle,
+ const wchar_t *name);
+
+VkResult MUST_CHECK vk_sync_export_win32_handle(struct vk_device *device,
+ struct vk_sync *sync,
+ void **handle);
+
+VkResult MUST_CHECK vk_sync_set_win32_export_params(struct vk_device *device,
+ struct vk_sync *sync,
+ const void *security_attributes,
+ uint32_t access,
+ const wchar_t *name);
+
+VkResult MUST_CHECK vk_sync_move(struct vk_device *device,
+ struct vk_sync *dst,
+ struct vk_sync *src);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SYNC_H */
diff --git a/src/vulkan/runtime/vk_sync_binary.c b/src/vulkan/runtime/vk_sync_binary.c
new file mode 100644
index 00000000000..c10cabe348a
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync_binary.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_sync_binary.h"
+
+#include "vk_util.h"
+
+static struct vk_sync_binary *
+to_vk_sync_binary(struct vk_sync *sync)
+{
+ assert(sync->type->init == vk_sync_binary_init);
+
+ return container_of(sync, struct vk_sync_binary, sync);
+}
+
+VkResult
+vk_sync_binary_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value)
+{
+ struct vk_sync_binary *binary = to_vk_sync_binary(sync);
+
+ const struct vk_sync_binary_type *btype =
+ container_of(binary->sync.type, struct vk_sync_binary_type, sync);
+
+ assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
+ assert(!(sync->flags & VK_SYNC_IS_SHAREABLE));
+
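+ /* The binary payload is emulated as points on the underlying timeline:
+ * next_point is the timeline value the next signal will set. A vk_sync
+ * created signaled (initial_value != 0) starts with next_point == 0 so
+ * waits pass immediately against the timeline's initial value of 0; one
+ * created unsignaled starts at 1 so waits block until the first signal.
+ */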
+ binary->next_point = (initial_value == 0);
+
+ return vk_sync_init(device, &binary->timeline, btype->timeline_type,
+ VK_SYNC_IS_TIMELINE, 0 /* initial_value */);
+}
+
+static void
+vk_sync_binary_finish(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ struct vk_sync_binary *binary = to_vk_sync_binary(sync);
+
+ vk_sync_finish(device, &binary->timeline);
+}
+
+static VkResult
+vk_sync_binary_reset(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ struct vk_sync_binary *binary = to_vk_sync_binary(sync);
+
+ binary->next_point++;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_sync_binary_signal(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t value)
+{
+ struct vk_sync_binary *binary = to_vk_sync_binary(sync);
+
+ assert(value == 0);
+
+ return vk_sync_signal(device, &binary->timeline, binary->next_point);
+}
+
+static VkResult
+vk_sync_binary_wait_many(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ if (wait_count == 0)
+ return VK_SUCCESS;
+
+ STACK_ARRAY(struct vk_sync_wait, timeline_waits, wait_count);
+
+ for (uint32_t i = 0; i < wait_count; i++) {
+ struct vk_sync_binary *binary = to_vk_sync_binary(waits[i].sync);
+
+ timeline_waits[i] = (struct vk_sync_wait) {
+ .sync = &binary->timeline,
+ .stage_mask = waits[i].stage_mask,
+ .wait_value = binary->next_point,
+ };
+ }
+
+ VkResult result = vk_sync_wait_many(device, wait_count, timeline_waits,
+ wait_flags, abs_timeout_ns);
+
+ STACK_ARRAY_FINISH(timeline_waits);
+
+ return result;
+}
+
+struct vk_sync_binary_type
+vk_sync_binary_get_type(const struct vk_sync_type *timeline_type)
+{
+ assert(timeline_type->features & VK_SYNC_FEATURE_TIMELINE);
+
+ return (struct vk_sync_binary_type) {
+ .sync = {
+ .size = offsetof(struct vk_sync_binary, timeline) +
+ timeline_type->size,
+ .features = VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_GPU_WAIT |
+ VK_SYNC_FEATURE_CPU_WAIT |
+ VK_SYNC_FEATURE_CPU_RESET |
+ VK_SYNC_FEATURE_CPU_SIGNAL |
+ VK_SYNC_FEATURE_WAIT_ANY |
+ VK_SYNC_FEATURE_WAIT_PENDING,
+ .init = vk_sync_binary_init,
+ .finish = vk_sync_binary_finish,
+ .reset = vk_sync_binary_reset,
+ .signal = vk_sync_binary_signal,
+ .wait_many = vk_sync_binary_wait_many,
+ },
+ .timeline_type = timeline_type,
+ };
+}
diff --git a/src/vulkan/runtime/vk_sync_binary.h b/src/vulkan/runtime/vk_sync_binary.h
new file mode 100644
index 00000000000..8a4ceebd77d
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync_binary.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_SYNC_BINARY_H
+#define VK_SYNC_BINARY_H
+
+#include "vk_sync.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_sync_binary_type {
+ struct vk_sync_type sync;
+ const struct vk_sync_type *timeline_type;
+};
+
+struct vk_sync_binary_type
+vk_sync_binary_get_type(const struct vk_sync_type *timeline_type);
+
+/** Implements a binary vk_sync type on top of a timeline vk_sync
+ *
+ * This is useful when targeting Windows APIs such as D3D12 which only have
+ * timelines and have no concept of a binary synchronization object. Because
+ * binary vk_sync emulation requires tracking additional state (the next time
+ * point), fences and semaphores created from this type cannot support any of
+ * the sharing APIs.
+ */
+struct vk_sync_binary {
+ struct vk_sync sync;
+
+ uint64_t next_point;
+
+ struct vk_sync timeline;
+};
+
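+/* Illustrative sketch (not prescriptive): a driver whose only native sync
+ * primitive is a timeline can derive a binary type from it at init time.
+ * "my_timeline_type" and "my_binary_type" are hypothetical driver-owned
+ * objects; any vk_sync_type advertising VK_SYNC_FEATURE_TIMELINE works:
+ *
+ *    my_binary_type = vk_sync_binary_get_type(&my_timeline_type);
+ *
+ * my_binary_type.sync can then be advertised as one of the supported sync
+ * types. vk_sync_binary_get_type() is a runtime call, so the result needs
+ * to be stored somewhere long-lived (e.g. the physical device).
+ */
+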
+VkResult vk_sync_binary_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value);
+
+static inline bool
+vk_sync_type_is_vk_sync_binary(const struct vk_sync_type *type)
+{
+ return type->init == vk_sync_binary_init;
+}
+
+static inline struct vk_sync_binary *
+vk_sync_as_binary(struct vk_sync *sync)
+{
+ if (!vk_sync_type_is_vk_sync_binary(sync->type))
+ return NULL;
+
+ return container_of(sync, struct vk_sync_binary, sync);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SYNC_BINARY_H */
diff --git a/src/vulkan/runtime/vk_sync_dummy.c b/src/vulkan/runtime/vk_sync_dummy.c
new file mode 100644
index 00000000000..1cab72f491b
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync_dummy.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_sync_dummy.h"
+
+static VkResult
+vk_sync_dummy_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value)
+{
+ return VK_SUCCESS;
+}
+
+static void
+vk_sync_dummy_finish(struct vk_device *device,
+ struct vk_sync *sync)
+{ }
+
+static VkResult
+vk_sync_dummy_wait_many(struct vk_device *device,
+ uint32_t wait_count,
+ const struct vk_sync_wait *waits,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ return VK_SUCCESS;
+}
+
+const struct vk_sync_type vk_sync_dummy_type = {
+ .size = sizeof(struct vk_sync),
+ .features = VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_GPU_WAIT |
+ VK_SYNC_FEATURE_CPU_WAIT |
+ VK_SYNC_FEATURE_WAIT_ANY |
+ VK_SYNC_FEATURE_WAIT_PENDING,
+ .init = vk_sync_dummy_init,
+ .finish = vk_sync_dummy_finish,
+ .wait_many = vk_sync_dummy_wait_many,
+};
diff --git a/src/vulkan/wsi/wsi_common_wayland.h b/src/vulkan/runtime/vk_sync_dummy.h
index effba0ebba4..55c6f169d30 100644
--- a/src/vulkan/wsi/wsi_common_wayland.h
+++ b/src/vulkan/runtime/vk_sync_dummy.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2015 Intel Corporation
+ * Copyright © 2021 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,16 +20,25 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef WSI_COMMON_WAYLAND_H
-#define WSI_COMMON_WAYLAND_H
+#ifndef VK_SYNC_DUMMY_H
+#define VK_SYNC_DUMMY_H
-#include "wsi_common.h"
+#include "vk_sync.h"
-VkBool32
-wsi_wl_get_presentation_support(struct wsi_device *wsi_device,
- struct wl_display *wl_display);
+#ifdef __cplusplus
+extern "C" {
+#endif
+
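+/* A vk_sync type that does nothing: init and finish are no-ops and every
+ * wait succeeds immediately (see vk_sync_dummy.c), which makes it suitable
+ * as a placeholder when no real synchronization is required.
+ */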
+extern const struct vk_sync_type vk_sync_dummy_type;
-VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
- const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface);
+static inline bool
+vk_sync_type_is_dummy(const struct vk_sync_type *type)
+{
+ return type == &vk_sync_dummy_type;
+}
+
+#ifdef __cplusplus
+}
#endif
+
+#endif /* VK_SYNC_DUMMY_H */
diff --git a/src/vulkan/runtime/vk_sync_timeline.c b/src/vulkan/runtime/vk_sync_timeline.c
new file mode 100644
index 00000000000..d2d712daa84
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync_timeline.c
@@ -0,0 +1,541 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_sync_timeline.h"
+
+#include <inttypes.h>
+
+#include "util/os_time.h"
+#include "util/timespec.h"
+
+#include "vk_alloc.h"
+#include "vk_device.h"
+#include "vk_log.h"
+
+static struct vk_sync_timeline *
+to_vk_sync_timeline(struct vk_sync *sync)
+{
+ assert(sync->type->init == vk_sync_timeline_init);
+
+ return container_of(sync, struct vk_sync_timeline, sync);
+}
+
+static void
+vk_sync_timeline_type_validate(const struct vk_sync_timeline_type *ttype)
+{
+ ASSERTED const enum vk_sync_features req_features =
+ VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_GPU_WAIT |
+ VK_SYNC_FEATURE_GPU_MULTI_WAIT |
+ VK_SYNC_FEATURE_CPU_WAIT |
+ VK_SYNC_FEATURE_CPU_RESET;
+
+ assert(!(req_features & ~ttype->point_sync_type->features));
+}
+
+VkResult
+vk_sync_timeline_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value)
+{
+ struct vk_sync_timeline *timeline = to_vk_sync_timeline(sync);
+ int ret;
+
+ ASSERTED const struct vk_sync_timeline_type *ttype =
+ container_of(timeline->sync.type, struct vk_sync_timeline_type, sync);
+ vk_sync_timeline_type_validate(ttype);
+
+ ret = mtx_init(&timeline->mutex, mtx_plain);
+ if (ret != thrd_success)
+ return vk_errorf(device, VK_ERROR_UNKNOWN, "mtx_init failed");
+
+ ret = cnd_init(&timeline->cond);
+ if (ret != thrd_success) {
+ mtx_destroy(&timeline->mutex);
+ return vk_errorf(device, VK_ERROR_UNKNOWN, "cnd_init failed");
+ }
+
+ timeline->highest_past =
+ timeline->highest_pending = initial_value;
+ list_inithead(&timeline->pending_points);
+ list_inithead(&timeline->free_points);
+
+ return VK_SUCCESS;
+}
+
+static void
+vk_sync_timeline_finish(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ struct vk_sync_timeline *timeline = to_vk_sync_timeline(sync);
+
+ list_for_each_entry_safe(struct vk_sync_timeline_point, point,
+ &timeline->free_points, link) {
+ list_del(&point->link);
+ vk_sync_finish(device, &point->sync);
+ vk_free(&device->alloc, point);
+ }
+ list_for_each_entry_safe(struct vk_sync_timeline_point, point,
+ &timeline->pending_points, link) {
+ list_del(&point->link);
+ vk_sync_finish(device, &point->sync);
+ vk_free(&device->alloc, point);
+ }
+
+ cnd_destroy(&timeline->cond);
+ mtx_destroy(&timeline->mutex);
+}
+
+static struct vk_sync_timeline_point *
+vk_sync_timeline_first_point(struct vk_sync_timeline *timeline)
+{
+ struct vk_sync_timeline_point *point =
+ list_first_entry(&timeline->pending_points,
+ struct vk_sync_timeline_point, link);
+
+ assert(point->value <= timeline->highest_pending);
+ assert(point->value > timeline->highest_past);
+
+ return point;
+}
+
+static VkResult
+vk_sync_timeline_gc_locked(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ bool drain);
+
+static VkResult
+vk_sync_timeline_alloc_point_locked(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t value,
+ struct vk_sync_timeline_point **point_out)
+{
+ struct vk_sync_timeline_point *point;
+ VkResult result;
+
+ result = vk_sync_timeline_gc_locked(device, timeline, false);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ if (list_is_empty(&timeline->free_points)) {
+ const struct vk_sync_timeline_type *ttype =
+ container_of(timeline->sync.type, struct vk_sync_timeline_type, sync);
+ const struct vk_sync_type *point_sync_type = ttype->point_sync_type;
+
+ size_t size = offsetof(struct vk_sync_timeline_point, sync) +
+ point_sync_type->size;
+
+ point = vk_zalloc(&device->alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (!point)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ point->timeline = timeline;
+
+ result = vk_sync_init(device, &point->sync, point_sync_type,
+ 0 /* flags */, 0 /* initial_value */);
+ if (unlikely(result != VK_SUCCESS)) {
+ vk_free(&device->alloc, point);
+ return result;
+ }
+ } else {
+ point = list_first_entry(&timeline->free_points,
+ struct vk_sync_timeline_point, link);
+
+ if (point->sync.type->reset) {
+ result = vk_sync_reset(device, &point->sync);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+ }
+
+ list_del(&point->link);
+ }
+
+ point->value = value;
+ *point_out = point;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_sync_timeline_alloc_point(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t value,
+ struct vk_sync_timeline_point **point_out)
+{
+ VkResult result;
+
+ mtx_lock(&timeline->mutex);
+ result = vk_sync_timeline_alloc_point_locked(device, timeline, value, point_out);
+ mtx_unlock(&timeline->mutex);
+
+ return result;
+}
+
+static void
+vk_sync_timeline_point_free_locked(struct vk_sync_timeline *timeline,
+ struct vk_sync_timeline_point *point)
+{
+ assert(point->refcount == 0 && !point->pending);
+ list_add(&point->link, &timeline->free_points);
+}
+
+void
+vk_sync_timeline_point_free(struct vk_device *device,
+ struct vk_sync_timeline_point *point)
+{
+ struct vk_sync_timeline *timeline = point->timeline;
+
+ mtx_lock(&timeline->mutex);
+ vk_sync_timeline_point_free_locked(timeline, point);
+ mtx_unlock(&timeline->mutex);
+}
+
+static void
+vk_sync_timeline_point_ref(struct vk_sync_timeline_point *point)
+{
+ point->refcount++;
+}
+
+static void
+vk_sync_timeline_point_unref(struct vk_sync_timeline *timeline,
+ struct vk_sync_timeline_point *point)
+{
+ assert(point->refcount > 0);
+ point->refcount--;
+ if (point->refcount == 0 && !point->pending)
+ vk_sync_timeline_point_free_locked(timeline, point);
+}
+
+static void
+vk_sync_timeline_point_complete(struct vk_sync_timeline *timeline,
+ struct vk_sync_timeline_point *point)
+{
+ if (!point->pending)
+ return;
+
+ assert(timeline->highest_past < point->value);
+ timeline->highest_past = point->value;
+
+ point->pending = false;
+ list_del(&point->link);
+
+ if (point->refcount == 0)
+ vk_sync_timeline_point_free_locked(timeline, point);
+}
+
+static VkResult
+vk_sync_timeline_gc_locked(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ bool drain)
+{
+ list_for_each_entry_safe(struct vk_sync_timeline_point, point,
+ &timeline->pending_points, link) {
+ /* timeline->highest_pending is only incremented once submission has
+ * happened. If this point has a greater serial, it means the point
+ * hasn't been submitted yet.
+ */
+ if (point->value > timeline->highest_pending)
+ return VK_SUCCESS;
+
+ /* If someone is waiting on this time point, consider it busy and don't
+ * try to recycle it. There's a slim possibility that it's no longer
+ * busy by the time we look at it but we would be recycling it out from
+ * under a waiter and that can lead to weird races.
+ *
+ * We walk the list in-order, so if this time point is still busy, so is
+ * every following time point.
+ */
+ assert(point->refcount >= 0);
+ if (point->refcount > 0 && !drain)
+ return VK_SUCCESS;
+
+ /* Garbage collect any signaled point. */
+ VkResult result = vk_sync_wait(device, &point->sync, 0,
+ VK_SYNC_WAIT_COMPLETE,
+ 0 /* abs_timeout_ns */);
+ if (result == VK_TIMEOUT) {
+ /* We walk the list in-order, so if this time point is still busy, so
+ * is every following time point.
+ */
+ return VK_SUCCESS;
+ } else if (result != VK_SUCCESS) {
+ return result;
+ }
+
+ vk_sync_timeline_point_complete(timeline, point);
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_sync_timeline_point_install(struct vk_device *device,
+ struct vk_sync_timeline_point *point)
+{
+ struct vk_sync_timeline *timeline = point->timeline;
+
+ mtx_lock(&timeline->mutex);
+
+ assert(point->value > timeline->highest_pending);
+ timeline->highest_pending = point->value;
+
+ assert(point->refcount == 0);
+ point->pending = true;
+ list_addtail(&point->link, &timeline->pending_points);
+
+ int ret = cnd_broadcast(&timeline->cond);
+
+ mtx_unlock(&timeline->mutex);
+
+ if (ret == thrd_error)
+ return vk_errorf(device, VK_ERROR_UNKNOWN, "cnd_broadcast failed");
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_sync_timeline_get_point_locked(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t wait_value,
+ struct vk_sync_timeline_point **point_out)
+{
+ if (timeline->highest_past >= wait_value) {
+ /* Nothing to wait on */
+ *point_out = NULL;
+ return VK_SUCCESS;
+ }
+
+ list_for_each_entry(struct vk_sync_timeline_point, point,
+ &timeline->pending_points, link) {
+ if (point->value >= wait_value) {
+ vk_sync_timeline_point_ref(point);
+ *point_out = point;
+ return VK_SUCCESS;
+ }
+ }
+
+ return VK_NOT_READY;
+}
+
+VkResult
+vk_sync_timeline_get_point(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t wait_value,
+ struct vk_sync_timeline_point **point_out)
+{
+ mtx_lock(&timeline->mutex);
+ VkResult result = vk_sync_timeline_get_point_locked(device, timeline,
+ wait_value, point_out);
+ mtx_unlock(&timeline->mutex);
+
+ return result;
+}
+
+void
+vk_sync_timeline_point_release(struct vk_device *device,
+ struct vk_sync_timeline_point *point)
+{
+ struct vk_sync_timeline *timeline = point->timeline;
+
+ mtx_lock(&timeline->mutex);
+ vk_sync_timeline_point_unref(timeline, point);
+ mtx_unlock(&timeline->mutex);
+}
+
+static VkResult
+vk_sync_timeline_signal_locked(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t value)
+{
+ VkResult result = vk_sync_timeline_gc_locked(device, timeline, true);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ if (unlikely(value <= timeline->highest_past)) {
+ return vk_device_set_lost(device, "Timeline values must only ever "
+ "strictly increase.");
+ }
+
+ assert(list_is_empty(&timeline->pending_points));
+ assert(timeline->highest_pending == timeline->highest_past);
+ timeline->highest_pending = timeline->highest_past = value;
+
+ int ret = cnd_broadcast(&timeline->cond);
+ if (ret == thrd_error)
+ return vk_errorf(device, VK_ERROR_UNKNOWN, "cnd_broadcast failed");
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_sync_timeline_signal(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t value)
+{
+ struct vk_sync_timeline *timeline = to_vk_sync_timeline(sync);
+
+ mtx_lock(&timeline->mutex);
+ VkResult result = vk_sync_timeline_signal_locked(device, timeline, value);
+ mtx_unlock(&timeline->mutex);
+
+ return result;
+}
+
+static VkResult
+vk_sync_timeline_get_value(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t *value)
+{
+ struct vk_sync_timeline *timeline = to_vk_sync_timeline(sync);
+
+ mtx_lock(&timeline->mutex);
+ VkResult result = vk_sync_timeline_gc_locked(device, timeline, true);
+ mtx_unlock(&timeline->mutex);
+
+ if (result != VK_SUCCESS)
+ return result;
+
+ *value = timeline->highest_past;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_sync_timeline_wait_locked(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ /* Wait on the queue_submit condition variable until the timeline has a
+ * time point pending that's at least as high as wait_value.
+ */
+ uint64_t now_ns = os_time_get_nano();
+ while (timeline->highest_pending < wait_value) {
+ if (now_ns >= abs_timeout_ns)
+ return VK_TIMEOUT;
+
+ int ret;
+ if (abs_timeout_ns >= INT64_MAX) {
+ /* Common infinite wait case */
+ ret = cnd_wait(&timeline->cond, &timeline->mutex);
+ } else {
+ /* This is really annoying. The C11 threads API uses CLOCK_REALTIME
+ * while all our absolute timeouts are in CLOCK_MONOTONIC. Best
+ * thing we can do is to convert and hope the system admin doesn't
+ * change the time out from under us.
+ */
+ uint64_t rel_timeout_ns = abs_timeout_ns - now_ns;
+
+ struct timespec now_ts, abs_timeout_ts;
+ timespec_get(&now_ts, TIME_UTC);
+ if (timespec_add_nsec(&abs_timeout_ts, &now_ts, rel_timeout_ns)) {
+ /* Overflowed; may as well be infinite */
+ ret = cnd_wait(&timeline->cond, &timeline->mutex);
+ } else {
+ ret = cnd_timedwait(&timeline->cond, &timeline->mutex,
+ &abs_timeout_ts);
+ }
+ }
+ if (ret == thrd_error)
+ return vk_errorf(device, VK_ERROR_UNKNOWN, "cnd_timedwait failed");
+
+ /* We don't trust the timeout condition on cnd_timedwait() because of
+ * the potential clock issues caused by using CLOCK_REALTIME. Instead,
+ * update now_ns, go back to the top of the loop, and re-check.
+ */
+ now_ns = os_time_get_nano();
+ }
+
+ if (wait_flags & VK_SYNC_WAIT_PENDING)
+ return VK_SUCCESS;
+
+ VkResult result = vk_sync_timeline_gc_locked(device, timeline, false);
+ if (result != VK_SUCCESS)
+ return result;
+
+ while (timeline->highest_past < wait_value) {
+ struct vk_sync_timeline_point *point = vk_sync_timeline_first_point(timeline);
+
+ /* Drop the lock while we wait. */
+ vk_sync_timeline_point_ref(point);
+ mtx_unlock(&timeline->mutex);
+
+ result = vk_sync_wait(device, &point->sync, 0,
+ VK_SYNC_WAIT_COMPLETE,
+ abs_timeout_ns);
+
+ /* Pick the mutex back up */
+ mtx_lock(&timeline->mutex);
+ vk_sync_timeline_point_unref(timeline, point);
+
+ /* This covers both VK_TIMEOUT and VK_ERROR_DEVICE_LOST */
+ if (result != VK_SUCCESS)
+ return result;
+
+ vk_sync_timeline_point_complete(timeline, point);
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vk_sync_timeline_wait(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ struct vk_sync_timeline *timeline = to_vk_sync_timeline(sync);
+
+ mtx_lock(&timeline->mutex);
+ VkResult result = vk_sync_timeline_wait_locked(device, timeline,
+ wait_value, wait_flags,
+ abs_timeout_ns);
+ mtx_unlock(&timeline->mutex);
+
+ return result;
+}
+
+struct vk_sync_timeline_type
+vk_sync_timeline_get_type(const struct vk_sync_type *point_sync_type)
+{
+ return (struct vk_sync_timeline_type) {
+ .sync = {
+ .size = sizeof(struct vk_sync_timeline),
+ .features = VK_SYNC_FEATURE_TIMELINE |
+ VK_SYNC_FEATURE_GPU_WAIT |
+ VK_SYNC_FEATURE_CPU_WAIT |
+ VK_SYNC_FEATURE_CPU_SIGNAL |
+ VK_SYNC_FEATURE_WAIT_ANY |
+ VK_SYNC_FEATURE_WAIT_PENDING,
+ .init = vk_sync_timeline_init,
+ .finish = vk_sync_timeline_finish,
+ .signal = vk_sync_timeline_signal,
+ .get_value = vk_sync_timeline_get_value,
+ .wait = vk_sync_timeline_wait,
+ },
+ .point_sync_type = point_sync_type,
+ };
+}
diff --git a/src/vulkan/runtime/vk_sync_timeline.h b/src/vulkan/runtime/vk_sync_timeline.h
new file mode 100644
index 00000000000..d1fcf8c12ea
--- /dev/null
+++ b/src/vulkan/runtime/vk_sync_timeline.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_SYNC_TIMELINE_H
+#define VK_SYNC_TIMELINE_H
+
+#include "c11/threads.h"
+#include "util/list.h"
+#include "util/macros.h"
+
+#include "vk_sync.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_sync_timeline_type {
+ struct vk_sync_type sync;
+
+ /* Type of each individual time point */
+ const struct vk_sync_type *point_sync_type;
+};
+
+struct vk_sync_timeline_type
+vk_sync_timeline_get_type(const struct vk_sync_type *point_sync_type);
+
+struct vk_sync_timeline_point {
+ struct vk_sync_timeline *timeline;
+
+ struct list_head link;
+
+ uint64_t value;
+
+ int refcount;
+ bool pending;
+
+ struct vk_sync sync;
+};
+
+/** Implements a timeline vk_sync type on top of a binary vk_sync
+ *
+ * This is used for emulating VK_KHR_timeline_semaphore for implementations
+ * whose kernel drivers do not yet support timeline syncobj. Since timeline
+ * semaphores are a requirement for Vulkan 1.2, an emulation like this is useful.
+ *
+ * The driver should never see a vk_sync_timeline object. Instead, converting
+ * from vk_sync_timeline to a binary vk_sync for a particular time point is
+ * handled by common code. All a driver needs to do is declare its preferred
+ * binary vk_sync_type for emulation as follows:
+ *
+ * const struct vk_sync_type anv_bo_sync_type = {
+ * ...
+ * };
+ * anv_bo_timeline_sync_type = vk_sync_timeline_get_type(&anv_bo_sync_type);
+ *
+ * and then anv_bo_timeline_sync_type.sync can be used as a sync type to
+ * provide timelines.
+ */
+struct vk_sync_timeline {
+ struct vk_sync sync;
+
+ mtx_t mutex;
+ cnd_t cond;
+
+ uint64_t highest_past;
+ uint64_t highest_pending;
+
+ struct list_head pending_points;
+ struct list_head free_points;
+};
+
+VkResult vk_sync_timeline_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value);
+
+VkResult vk_sync_timeline_alloc_point(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t value,
+ struct vk_sync_timeline_point **point_out);
+
+void vk_sync_timeline_point_free(struct vk_device *device,
+ struct vk_sync_timeline_point *point);
+
+VkResult vk_sync_timeline_point_install(struct vk_device *device,
+ struct vk_sync_timeline_point *point);
+
+VkResult vk_sync_timeline_get_point(struct vk_device *device,
+ struct vk_sync_timeline *timeline,
+ uint64_t wait_value,
+ struct vk_sync_timeline_point **point_out);
+
+void vk_sync_timeline_point_release(struct vk_device *device,
+ struct vk_sync_timeline_point *point);
+
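+/* Sketch of the expected driver flow around the point helpers above (the
+ * details are driver-specific; this only illustrates the intended pairing):
+ *
+ *  - GPU signal of timeline value V:
+ *       vk_sync_timeline_alloc_point(device, timeline, V, &point);
+ *       ... submit kernel work that will signal point->sync ...
+ *       vk_sync_timeline_point_install(device, point);
+ *    If the submission fails before install, the point is returned with
+ *    vk_sync_timeline_point_free() instead.
+ *
+ *  - GPU wait for timeline value V:
+ *       vk_sync_timeline_get_point(device, timeline, V, &point);
+ *       if (point != NULL) {
+ *          ... submit kernel work that waits on point->sync ...
+ *          vk_sync_timeline_point_release(device, point);
+ *       }
+ *    A NULL point means the value has already been reached; VK_NOT_READY
+ *    means no pending point covers V yet (wait-before-signal).
+ */
+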
+static inline bool
+vk_sync_type_is_vk_sync_timeline(const struct vk_sync_type *type)
+{
+ return type->init == vk_sync_timeline_init;
+}
+
+static inline struct vk_sync_timeline *
+vk_sync_as_timeline(struct vk_sync *sync)
+{
+ if (!vk_sync_type_is_vk_sync_timeline(sync->type))
+ return NULL;
+
+ return container_of(sync, struct vk_sync_timeline, sync);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SYNC_TIMELINE_H */
diff --git a/src/vulkan/runtime/vk_synchronization.c b/src/vulkan/runtime/vk_synchronization.c
new file mode 100644
index 00000000000..701474164e4
--- /dev/null
+++ b/src/vulkan/runtime/vk_synchronization.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_synchronization.h"
+
+#include "vk_alloc.h"
+#include "vk_command_buffer.h"
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_queue.h"
+#include "vk_util.h"
+#include "../wsi/wsi_common.h"
+
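+/* vk_filter_src_access_flags2() and vk_filter_dst_access_flags2() expand the
+ * catch-all MEMORY and SHADER access bits and then restrict the mask to what
+ * the given stages can actually perform: src masks keep only write accesses
+ * and dst masks keep only read accesses.
+ */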
+VkAccessFlags2
+vk_filter_src_access_flags2(VkPipelineStageFlags2 stages,
+ VkAccessFlags2 access)
+{
+ const VkPipelineStageFlags2 all_write_access =
+ vk_write_access2_for_pipeline_stage_flags2(stages);
+
+ if (access & VK_ACCESS_2_MEMORY_WRITE_BIT)
+ access |= all_write_access;
+
+ if (access & VK_ACCESS_2_SHADER_WRITE_BIT)
+ access |= VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT;
+
+ /* We only care about write access in src flags */
+ return access & all_write_access;
+}
+
+VkAccessFlags2
+vk_filter_dst_access_flags2(VkPipelineStageFlags2 stages,
+ VkAccessFlags2 access)
+{
+ const VkPipelineStageFlags2 all_read_access =
+ vk_read_access2_for_pipeline_stage_flags2(stages);
+
+ if (access & VK_ACCESS_2_MEMORY_READ_BIT)
+ access |= all_read_access;
+
+ if (access & VK_ACCESS_2_SHADER_READ_BIT)
+ access |= VK_ACCESS_2_SHADER_SAMPLED_READ_BIT |
+ VK_ACCESS_2_SHADER_STORAGE_READ_BIT |
+ VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR;
+
+ /* We only care about read access in dst flags */
+ return access & all_read_access;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdWriteTimestamp(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+
+ device->dispatch_table.CmdWriteTimestamp2(commandBuffer,
+ (VkPipelineStageFlags2) pipelineStage,
+ queryPool,
+ query);
+}
+
+static VkMemoryBarrier2
+upgrade_memory_barrier(const VkMemoryBarrier *barrier,
+ VkPipelineStageFlags2 src_stage_mask2,
+ VkPipelineStageFlags2 dst_stage_mask2)
+{
+ return (VkMemoryBarrier2) {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ .pNext = barrier->pNext,
+ .srcStageMask = src_stage_mask2,
+ .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
+ .dstStageMask = dst_stage_mask2,
+ .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
+ };
+}
+
+static VkBufferMemoryBarrier2
+upgrade_buffer_memory_barrier(const VkBufferMemoryBarrier *barrier,
+ VkPipelineStageFlags2 src_stage_mask2,
+ VkPipelineStageFlags2 dst_stage_mask2)
+{
+ return (VkBufferMemoryBarrier2) {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
+ .pNext = barrier->pNext,
+ .srcStageMask = src_stage_mask2,
+ .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
+ .dstStageMask = dst_stage_mask2,
+ .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
+ .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
+ .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
+ .buffer = barrier->buffer,
+ .offset = barrier->offset,
+ .size = barrier->size,
+ };
+}
+
+static VkImageMemoryBarrier2
+upgrade_image_memory_barrier(const VkImageMemoryBarrier *barrier,
+ VkPipelineStageFlags2 src_stage_mask2,
+ VkPipelineStageFlags2 dst_stage_mask2)
+{
+ return (VkImageMemoryBarrier2) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
+ .pNext = barrier->pNext,
+ .srcStageMask = src_stage_mask2,
+ .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
+ .dstStageMask = dst_stage_mask2,
+ .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
+ .oldLayout = barrier->oldLayout,
+ .newLayout = barrier->newLayout,
+ .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
+ .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
+ .image = barrier->image,
+ .subresourceRange = barrier->subresourceRange,
+ };
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+
+ STACK_ARRAY(VkMemoryBarrier2, memory_barriers, memoryBarrierCount);
+ STACK_ARRAY(VkBufferMemoryBarrier2, buffer_barriers, bufferMemoryBarrierCount);
+ STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, imageMemoryBarrierCount);
+
+ VkPipelineStageFlags2 src_stage_mask2 = (VkPipelineStageFlags2) srcStageMask;
+ VkPipelineStageFlags2 dst_stage_mask2 = (VkPipelineStageFlags2) dstStageMask;
+
+ for (uint32_t i = 0; i < memoryBarrierCount; i++) {
+ memory_barriers[i] = upgrade_memory_barrier(&pMemoryBarriers[i],
+ src_stage_mask2,
+ dst_stage_mask2);
+ }
+ for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
+ buffer_barriers[i] = upgrade_buffer_memory_barrier(&pBufferMemoryBarriers[i],
+ src_stage_mask2,
+ dst_stage_mask2);
+ }
+ for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
+ image_barriers[i] = upgrade_image_memory_barrier(&pImageMemoryBarriers[i],
+ src_stage_mask2,
+ dst_stage_mask2);
+ }
+
+ VkDependencyInfo dep_info = {
+ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ .memoryBarrierCount = memoryBarrierCount,
+ .pMemoryBarriers = memory_barriers,
+ .bufferMemoryBarrierCount = bufferMemoryBarrierCount,
+ .pBufferMemoryBarriers = buffer_barriers,
+ .imageMemoryBarrierCount = imageMemoryBarrierCount,
+ .pImageMemoryBarriers = image_barriers,
+ };
+
+ device->dispatch_table.CmdPipelineBarrier2(commandBuffer, &dep_info);
+
+ STACK_ARRAY_FINISH(memory_barriers);
+ STACK_ARRAY_FINISH(buffer_barriers);
+ STACK_ARRAY_FINISH(image_barriers);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdSetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+
+ VkMemoryBarrier2 mem_barrier = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ .srcStageMask = (VkPipelineStageFlags2) stageMask,
+ .dstStageMask = (VkPipelineStageFlags2) stageMask,
+ };
+ VkDependencyInfo dep_info = {
+ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ .memoryBarrierCount = 1,
+ .pMemoryBarriers = &mem_barrier,
+ };
+
+ device->dispatch_table.CmdSetEvent2(commandBuffer, event, &dep_info);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdResetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+
+ device->dispatch_table.CmdResetEvent2(commandBuffer,
+ event,
+ (VkPipelineStageFlags2) stageMask);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdWaitEvents(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+
+ if (eventCount == 0)
+ return;
+
+ STACK_ARRAY(VkDependencyInfo, deps, eventCount);
+
+ /* Note that dstStageMask and srcStageMask in the CmdWaitEvent2() call
+ * are the same. This is to match the CmdSetEvent2() call from
+ * vk_common_CmdSetEvent(). The actual src->dst stage barrier will
+ * happen as part of the CmdPipelineBarrier() call below.
+ */
+ VkMemoryBarrier2 stage_barrier = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ .srcStageMask = srcStageMask,
+ .dstStageMask = srcStageMask,
+ };
+
+ for (uint32_t i = 0; i < eventCount; i++) {
+ deps[i] = (VkDependencyInfo) {
+ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ .memoryBarrierCount = 1,
+ .pMemoryBarriers = &stage_barrier,
+ };
+ }
+ device->dispatch_table.CmdWaitEvents2(commandBuffer, eventCount, pEvents, deps);
+
+ STACK_ARRAY_FINISH(deps);
+
+ /* Setting the dependency flags to 0 because:
+ *
+ * - For BY_REGION_BIT and VIEW_LOCAL_BIT, events are not allowed inside a
+ * render pass so these don't apply.
+ *
+ * - For DEVICE_GROUP_BIT, we have the following bit of spec text:
+ *
+ * "Semaphore and event dependencies are device-local and only
+ * execute on the one physical device that performs the
+ * dependency."
+ */
+ const VkDependencyFlags dep_flags = 0;
+
+ device->dispatch_table.CmdPipelineBarrier(commandBuffer,
+ srcStageMask, destStageMask,
+ dep_flags,
+ memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_CmdWriteBufferMarkerAMD(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ uint32_t marker)
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+ struct vk_device *device = cmd_buffer->base.device;
+
+ device->dispatch_table.CmdWriteBufferMarker2AMD(commandBuffer,
+ (VkPipelineStageFlags2) pipelineStage,
+ dstBuffer,
+ dstOffset,
+ marker);
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetQueueCheckpointDataNV(
+ VkQueue queue,
+ uint32_t* pCheckpointDataCount,
+ VkCheckpointDataNV* pCheckpointData)
+{
+ unreachable("Entrypoint not implemented");
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_QueueSubmit(
+ VkQueue _queue,
+ uint32_t submitCount,
+ const VkSubmitInfo* pSubmits,
+ VkFence fence)
+{
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+ struct vk_device *device = queue->base.device;
+
+ STACK_ARRAY(VkSubmitInfo2, submit_info_2, submitCount);
+ STACK_ARRAY(VkPerformanceQuerySubmitInfoKHR, perf_query_submit_info, submitCount);
+ STACK_ARRAY(struct wsi_memory_signal_submit_info, wsi_mem_submit_info, submitCount);
+
+ uint32_t n_wait_semaphores = 0;
+ uint32_t n_command_buffers = 0;
+ uint32_t n_signal_semaphores = 0;
+ for (uint32_t s = 0; s < submitCount; s++) {
+ n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
+ n_command_buffers += pSubmits[s].commandBufferCount;
+ n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
+ }
+
+ STACK_ARRAY(VkSemaphoreSubmitInfo, wait_semaphores, n_wait_semaphores);
+ STACK_ARRAY(VkCommandBufferSubmitInfo, command_buffers, n_command_buffers);
+ STACK_ARRAY(VkSemaphoreSubmitInfo, signal_semaphores, n_signal_semaphores);
+
+ n_wait_semaphores = 0;
+ n_command_buffers = 0;
+ n_signal_semaphores = 0;
+
+ for (uint32_t s = 0; s < submitCount; s++) {
+ const VkTimelineSemaphoreSubmitInfo *timeline_info =
+ vk_find_struct_const(pSubmits[s].pNext,
+ TIMELINE_SEMAPHORE_SUBMIT_INFO);
+ const uint64_t *wait_values = NULL;
+ const uint64_t *signal_values = NULL;
+
+ if (timeline_info && timeline_info->waitSemaphoreValueCount) {
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkSubmitInfo-pNext-03240
+ *
+ * "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
+ * and any element of pSignalSemaphores was created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount member must equal
+ * signalSemaphoreCount"
+ */
+ assert(timeline_info->waitSemaphoreValueCount == pSubmits[s].waitSemaphoreCount);
+ wait_values = timeline_info->pWaitSemaphoreValues;
+ }
+
+ if (timeline_info && timeline_info->signalSemaphoreValueCount) {
+ /* From the Vulkan 1.3.204 spec:
+ *
+ * VUID-VkSubmitInfo-pNext-03241
+ *
+ * "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
+ * and any element of pWaitSemaphores was created with a VkSemaphoreType of
+ * VK_SEMAPHORE_TYPE_TIMELINE, then its waitSemaphoreValueCount member must equal
+ * waitSemaphoreCount"
+ */
+ assert(timeline_info->signalSemaphoreValueCount == pSubmits[s].signalSemaphoreCount);
+ signal_values = timeline_info->pSignalSemaphoreValues;
+ }
+
+ const VkDeviceGroupSubmitInfo *group_info =
+ vk_find_struct_const(pSubmits[s].pNext, DEVICE_GROUP_SUBMIT_INFO);
+
+ for (uint32_t i = 0; i < pSubmits[s].waitSemaphoreCount; i++) {
+ wait_semaphores[n_wait_semaphores + i] = (VkSemaphoreSubmitInfo) {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
+ .semaphore = pSubmits[s].pWaitSemaphores[i],
+ .value = wait_values ? wait_values[i] : 0,
+ .stageMask = pSubmits[s].pWaitDstStageMask[i],
+ .deviceIndex = group_info ? group_info->pWaitSemaphoreDeviceIndices[i] : 0,
+ };
+ }
+ for (uint32_t i = 0; i < pSubmits[s].commandBufferCount; i++) {
+ command_buffers[n_command_buffers + i] = (VkCommandBufferSubmitInfo) {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
+ .commandBuffer = pSubmits[s].pCommandBuffers[i],
+ .deviceMask = group_info ? group_info->pCommandBufferDeviceMasks[i] : 0,
+ };
+ }
+ for (uint32_t i = 0; i < pSubmits[s].signalSemaphoreCount; i++) {
+ signal_semaphores[n_signal_semaphores + i] = (VkSemaphoreSubmitInfo) {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
+ .semaphore = pSubmits[s].pSignalSemaphores[i],
+ .value = signal_values ? signal_values[i] : 0,
+ .stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
+ .deviceIndex = group_info ? group_info->pSignalSemaphoreDeviceIndices[i] : 0,
+ };
+ }
+
+ const VkProtectedSubmitInfo *protected_info =
+ vk_find_struct_const(pSubmits[s].pNext, PROTECTED_SUBMIT_INFO);
+
+ submit_info_2[s] = (VkSubmitInfo2) {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
+ .flags = ((protected_info && protected_info->protectedSubmit) ?
+ VK_SUBMIT_PROTECTED_BIT : 0),
+ .waitSemaphoreInfoCount = pSubmits[s].waitSemaphoreCount,
+ .pWaitSemaphoreInfos = &wait_semaphores[n_wait_semaphores],
+ .commandBufferInfoCount = pSubmits[s].commandBufferCount,
+ .pCommandBufferInfos = &command_buffers[n_command_buffers],
+ .signalSemaphoreInfoCount = pSubmits[s].signalSemaphoreCount,
+ .pSignalSemaphoreInfos = &signal_semaphores[n_signal_semaphores],
+ };
+
+ const VkPerformanceQuerySubmitInfoKHR *query_info =
+ vk_find_struct_const(pSubmits[s].pNext,
+ PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
+ if (query_info) {
+ perf_query_submit_info[s] = *query_info;
+ perf_query_submit_info[s].pNext = NULL;
+ __vk_append_struct(&submit_info_2[s], &perf_query_submit_info[s]);
+ }
+
+ const struct wsi_memory_signal_submit_info *mem_signal_info =
+ vk_find_struct_const(pSubmits[s].pNext,
+ WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
+ if (mem_signal_info) {
+ wsi_mem_submit_info[s] = *mem_signal_info;
+ wsi_mem_submit_info[s].pNext = NULL;
+ __vk_append_struct(&submit_info_2[s], &wsi_mem_submit_info[s]);
+ }
+
+ n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
+ n_command_buffers += pSubmits[s].commandBufferCount;
+ n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
+ }
+
+ VkResult result = device->dispatch_table.QueueSubmit2(_queue,
+ submitCount,
+ submit_info_2,
+ fence);
+
+ STACK_ARRAY_FINISH(wait_semaphores);
+ STACK_ARRAY_FINISH(command_buffers);
+ STACK_ARRAY_FINISH(signal_semaphores);
+ STACK_ARRAY_FINISH(submit_info_2);
+ STACK_ARRAY_FINISH(perf_query_submit_info);
+ STACK_ARRAY_FINISH(wsi_mem_submit_info);
+
+ return result;
+}
diff --git a/src/vulkan/runtime/vk_synchronization.h b/src/vulkan/runtime/vk_synchronization.h
new file mode 100644
index 00000000000..5c3fd1f4992
--- /dev/null
+++ b/src/vulkan/runtime/vk_synchronization.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2023 Collabora, Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_SYNCHRONIZATION_H
+#define VK_SYNCHRONIZATION_H
+
+#include <vulkan/vulkan_core.h>
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline bool
+vk_pipeline_stage_flags2_has_graphics_shader(VkPipelineStageFlags2 stages)
+{
+ return stages & (VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT |
+ VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT |
+ VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT |
+ VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT);
+}
+
+static inline bool
+vk_pipeline_stage_flags2_has_compute_shader(VkPipelineStageFlags2 stages)
+{
+ return stages & (VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT |
+ VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT);
+}
+
+/** Expands pipeline stage group flags
+ *
+ * Some stages like VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT represent more
+ * than one stage. This helper expands any such bits out to the full set of
+ * individual stage bits they represent.
+ *
+ * Note: This helper does not handle BOTTOM/TOP_OF_PIPE. You probably want to
+ * use vk_expand_src/dst_stage_flags2() instead.
+ */
+VkPipelineStageFlags2
+vk_expand_pipeline_stage_flags2(VkPipelineStageFlags2 stages);
+
+static inline VkPipelineStageFlags2
+vk_expand_src_stage_flags2(VkPipelineStageFlags2 stages)
+{
+ if (stages & VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT)
+ stages |= VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
+
+ return vk_expand_pipeline_stage_flags2(stages);
+}
+
+static inline VkPipelineStageFlags2
+vk_expand_dst_stage_flags2(VkPipelineStageFlags2 stages)
+{
+ if (stages & VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT)
+ stages |= VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
+
+ return vk_expand_pipeline_stage_flags2(stages);
+}
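+
+/* A minimal usage sketch (the barrier variable below is hypothetical):
+ *
+ *    VkPipelineStageFlags2 src_stages =
+ *       vk_expand_src_stage_flags2(barrier.srcStageMask);
+ *    VkPipelineStageFlags2 dst_stages =
+ *       vk_expand_dst_stage_flags2(barrier.dstStageMask);
+ *
+ * Afterwards, group bits such as ALL_GRAPHICS and ALL_COMMANDS, as well as
+ * TOP/BOTTOM_OF_PIPE, have been resolved into individual stage bits.
+ */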
+
+/** Returns the set of read accesses allowed in the given stages */
+VkAccessFlags2
+vk_read_access2_for_pipeline_stage_flags2(VkPipelineStageFlags2 stages);
+
+/** Returns the set of write accesses allowed in the given stages */
+VkAccessFlags2
+vk_write_access2_for_pipeline_stage_flags2(VkPipelineStageFlags2 stages);
+
+VkAccessFlags2
+vk_filter_src_access_flags2(VkPipelineStageFlags2 stages,
+ VkAccessFlags2 access);
+
+VkAccessFlags2
+vk_filter_dst_access_flags2(VkPipelineStageFlags2 stages,
+ VkAccessFlags2 access);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_SYNCHRONIZATION_H */
diff --git a/src/vulkan/runtime/vk_texcompress_astc.c b/src/vulkan/runtime/vk_texcompress_astc.c
new file mode 100644
index 00000000000..a11bdca89f9
--- /dev/null
+++ b/src/vulkan/runtime/vk_texcompress_astc.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vk_texcompress_astc.h"
+#include "util/texcompress_astc_luts_wrap.h"
+#include "vk_alloc.h"
+#include "vk_buffer.h"
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_image.h"
+#include "vk_physical_device.h"
+
+/* Each bit set in type_indexes_mask marks a supported memory type index, as
+ * per struct VkPhysicalDeviceMemoryProperties.memoryTypes[].  Returns the
+ * first such index that also has all the requested property flags, or -1 if
+ * none does. */
+static uint32_t
+get_mem_type_index(struct vk_device *device, uint32_t type_indexes_mask,
+ VkMemoryPropertyFlags mem_property)
+{
+ const struct vk_physical_device_dispatch_table *disp = &device->physical->dispatch_table;
+ VkPhysicalDevice _phy_device = vk_physical_device_to_handle(device->physical);
+
+ VkPhysicalDeviceMemoryProperties2 props2 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,
+ .pNext = NULL,
+ };
+ disp->GetPhysicalDeviceMemoryProperties2(_phy_device, &props2);
+
+ for (uint32_t i = 0; i < props2.memoryProperties.memoryTypeCount; i++) {
+ if ((type_indexes_mask & (1 << i)) &&
+ ((props2.memoryProperties.memoryTypes[i].propertyFlags & mem_property) == mem_property)) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
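+/* Creates a buffer of the given size and usage, allocates backing memory
+ * with the requested property flags from a compatible memory type and binds
+ * it.  The caller owns both the buffer and the memory.
+ */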
+static VkResult
+vk_create_buffer(struct vk_device *device, VkAllocationCallbacks *allocator,
+ VkDeviceSize size, VkMemoryPropertyFlags mem_prop_flags,
+ VkBufferUsageFlags usage_flags, VkBuffer *vk_buf,
+ VkDeviceMemory *vk_mem)
+{
+ VkResult result;
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+
+ VkBufferCreateInfo buffer_create_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = size,
+ .usage = usage_flags,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ };
+ result =
+ disp->CreateBuffer(_device, &buffer_create_info, allocator, vk_buf);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ VkBufferMemoryRequirementsInfo2 mem_req_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
+ .buffer = *vk_buf,
+ };
+ VkMemoryRequirements2 mem_req = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+ };
+ disp->GetBufferMemoryRequirements2(_device, &mem_req_info, &mem_req);
+
+ uint32_t mem_type_index = get_mem_type_index(
+ device, mem_req.memoryRequirements.memoryTypeBits, mem_prop_flags);
+ if (mem_type_index == -1)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ VkMemoryAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .allocationSize = mem_req.memoryRequirements.size,
+ .memoryTypeIndex = mem_type_index,
+ };
+ result = disp->AllocateMemory(_device, &alloc_info, allocator, vk_mem);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ disp->BindBufferMemory(_device, *vk_buf, *vk_mem, 0);
+
+ return result;
+}
+
+static VkResult
+create_buffer_view(struct vk_device *device, VkAllocationCallbacks *allocator,
+ VkBufferView *buf_view, VkBuffer buf, VkFormat format, VkDeviceSize size,
+ VkDeviceSize offset)
+{
+ VkResult result;
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+
+ VkBufferViewCreateInfo buffer_view_create_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+ .buffer = buf,
+ .format = format,
+ .offset = offset,
+ .range = size,
+ };
+ result = disp->CreateBufferView(_device, &buffer_view_create_info,
+ allocator, buf_view);
+ return result;
+}
+
+static uint8_t
+get_partition_table_index(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ return 0;
+ case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ return 1;
+ case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ return 2;
+ case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ return 3;
+ case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ return 4;
+ case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ return 5;
+ case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ return 6;
+ case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ return 7;
+ case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ return 8;
+ case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ return 9;
+ case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ return 10;
+ case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ return 11;
+ case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ return 12;
+ case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ return 13;
+ default:
+ unreachable("bad astc format\n");
+ return 0;
+ }
+}
+
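+/* Lays out all the decoder LUTs and partition tables in a single buffer,
+ * respecting minTexelBufferOffsetAlignment between entries.  Called twice:
+ * first with single_buf_ptr == NULL to compute the required size, then with
+ * the mapped pointer to copy the data and create the buffer views.
+ */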
+static VkResult
+astc_prepare_buffer(struct vk_device *device,
+ struct vk_texcompress_astc_state *astc,
+ VkAllocationCallbacks *allocator,
+ VkDeviceSize minTexelBufferOffsetAlignment,
+ uint8_t *single_buf_ptr,
+ VkDeviceSize *single_buf_size)
+{
+   VkResult result = VK_SUCCESS;
+ astc_decoder_lut_holder astc_lut_holder;
+ VkDeviceSize offset = 0;
+
+ _mesa_init_astc_decoder_luts(&astc_lut_holder);
+
+ const astc_decoder_lut *luts[] = {
+ &astc_lut_holder.color_endpoint,
+ &astc_lut_holder.color_endpoint_unquant,
+ &astc_lut_holder.weights,
+ &astc_lut_holder.weights_unquant,
+ &astc_lut_holder.trits_quints,
+ };
+
+ for (unsigned i = 0; i < ARRAY_SIZE(luts); i++) {
+ offset = align(offset, minTexelBufferOffsetAlignment);
+ if (single_buf_ptr) {
+ memcpy(single_buf_ptr + offset, luts[i]->data, luts[i]->size_B);
+ result = create_buffer_view(device, allocator, &astc->luts_buf_view[i], astc->luts_buf,
+ vk_format_from_pipe_format(luts[i]->format), luts[i]->size_B,
+ offset);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ offset += luts[i]->size_B;
+ }
+
+ const VkFormat formats[] = {
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ };
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(formats); i++) {
+ unsigned lut_width;
+ unsigned lut_height;
+ const void *lut_data = _mesa_get_astc_decoder_partition_table(
+ vk_format_get_blockwidth(formats[i]),
+ vk_format_get_blockheight(formats[i]),
+ &lut_width, &lut_height);
+ const unsigned lut_size = lut_width * lut_height;
+
+ offset = align(offset, minTexelBufferOffsetAlignment);
+ if (single_buf_ptr) {
+ memcpy(single_buf_ptr + offset, lut_data, lut_width * lut_height);
+
+ result = create_buffer_view(device, allocator, &astc->partition_tbl_buf_view[i],
+ astc->luts_buf, VK_FORMAT_R8_UINT, lut_width * lut_height,
+ offset);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ offset += lut_size;
+ }
+
+ *single_buf_size = offset;
+ return result;
+}
+
+static VkResult
+create_fill_all_luts_vulkan(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc)
+{
+ VkResult result;
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkPhysicalDevice _phy_device = vk_physical_device_to_handle(device->physical);
+ const struct vk_physical_device_dispatch_table *phy_disp = &device->physical->dispatch_table;
+ VkDeviceSize single_buf_size;
+ uint8_t *single_buf_ptr;
+
+ VkPhysicalDeviceProperties2 phy_dev_prop = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+ .pNext = NULL,
+ };
+ phy_disp->GetPhysicalDeviceProperties2(_phy_device, &phy_dev_prop);
+
+ /* get the single_buf_size */
+ result = astc_prepare_buffer(device, astc, allocator,
+ phy_dev_prop.properties.limits.minTexelBufferOffsetAlignment,
+ NULL, &single_buf_size);
+
+ /* create gpu buffer for all the luts */
+ result = vk_create_buffer(device, allocator, single_buf_size,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
+ &astc->luts_buf, &astc->luts_mem);
+ if (unlikely(result != VK_SUCCESS))
+ return result;
+
+ disp->MapMemory(_device, astc->luts_mem, 0, VK_WHOLE_SIZE, 0, (void*)&single_buf_ptr);
+
+ /* fill all the luts and create views */
+ result = astc_prepare_buffer(device, astc, allocator,
+ phy_dev_prop.properties.limits.minTexelBufferOffsetAlignment,
+ single_buf_ptr, &single_buf_size);
+
+ disp->UnmapMemory(_device, astc->luts_mem);
+ return result;
+}
+
+static VkResult
+create_layout(struct vk_device *device, VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc)
+{
+ VkResult result;
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+
+ VkDescriptorSetLayoutBinding bindings[] = {
+ {
+ .binding = 0, /* OutputImage2DArray */
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 1, /* PayloadInput2DArray */
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 2, /* LUTRemainingBitsToEndpointQuantizer */
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 3, /* LUTEndpointUnquantize */
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 4, /* LUTWeightQuantizer */
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 5, /* LUTWeightUnquantize */
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 6, /* LUTTritQuintDecode */
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ {
+ .binding = 7, /* LUTPartitionTable */
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = NULL,
+ },
+ };
+
+ VkDescriptorSetLayoutCreateInfo ds_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
+ .bindingCount = ARRAY_SIZE(bindings),
+ .pBindings = bindings,
+ };
+
+ result = disp->CreateDescriptorSetLayout(_device, &ds_create_info,
+ allocator, &astc->ds_layout);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ VkPipelineLayoutCreateInfo pl_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = 1,
+ .pSetLayouts = &astc->ds_layout,
+ .pushConstantRangeCount = 1,
+ .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 20},
+ };
+ result = disp->CreatePipelineLayout(_device, &pl_create_info, allocator,
+ &astc->p_layout);
+fail:
+ return result;
+}
+
+static const uint32_t astc_spv[] = {
+#include "astc_spv.h"
+};
+
+static VkResult
+vk_astc_create_shader_module(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc)
+{
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+
+ VkShaderModuleCreateInfo shader_module_create_info = {
+ .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ .pNext = NULL,
+ .flags = 0,
+ .codeSize = sizeof(astc_spv),
+ .pCode = astc_spv,
+ };
+
+ return disp->CreateShaderModule(_device, &shader_module_create_info,
+ allocator, &astc->shader_module);
+}
+
+static VkResult
+create_astc_decode_pipeline(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc,
+ VkPipelineCache pipeline_cache, VkFormat format)
+{
+ VkResult result;
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkPipeline pipeline;
+ uint8_t t_i;
+
+ t_i = get_partition_table_index(format);
+
+ uint32_t special_data[3] = {
+ vk_format_get_blockwidth(format),
+ vk_format_get_blockheight(format),
+ true,
+ };
+ VkSpecializationMapEntry special_map_entry[3] = {{
+ .constantID = 0,
+ .offset = 0,
+ .size = 4,
+ },
+ {
+ .constantID = 1,
+ .offset = 4,
+ .size = 4,
+ },
+ {
+ .constantID = 2,
+ .offset = 8,
+ .size = 4,
+ }};
+
+ VkSpecializationInfo specialization_info = {
+ .mapEntryCount = 3,
+ .pMapEntries = special_map_entry,
+ .dataSize = 12,
+ .pData = special_data,
+ };
+
+ /* compute shader */
+ VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+ .module = astc->shader_module,
+ .pName = "main",
+ .pSpecializationInfo = &specialization_info,
+ };
+
+ VkComputePipelineCreateInfo vk_pipeline_info = {
+ .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ .stage = pipeline_shader_stage,
+ .flags = 0,
+ .layout = astc->p_layout,
+ };
+
+ result = disp->CreateComputePipelines(
+ _device, pipeline_cache, 1, &vk_pipeline_info, allocator, &pipeline);
+ if (result != VK_SUCCESS)
+ return result;
+
+ astc->pipeline[t_i] = pipeline;
+ astc->pipeline_mask |= (1 << t_i);
+
+ return result;
+}
+
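+/* Returns the decode pipeline for the given ASTC block size, creating it
+ * (and the shared shader module) lazily under the mutex on first use.
+ */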
+VkPipeline
+vk_texcompress_astc_get_decode_pipeline(struct vk_device *device, VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc, VkPipelineCache pipeline_cache,
+ VkFormat format)
+{
+ VkResult result;
+ uint8_t t_i = get_partition_table_index(format);
+
+ simple_mtx_lock(&astc->mutex);
+
+ if (astc->pipeline[t_i])
+ goto unlock;
+
+ if (!astc->shader_module) {
+ result = vk_astc_create_shader_module(device, allocator, astc);
+ if (result != VK_SUCCESS)
+ goto unlock;
+ }
+
+ create_astc_decode_pipeline(device, allocator, astc, pipeline_cache, format);
+
+unlock:
+ simple_mtx_unlock(&astc->mutex);
+ return astc->pipeline[t_i];
+}
+
+static inline void
+fill_desc_image_info_struct(VkDescriptorImageInfo *info, VkImageView img_view,
+ VkImageLayout img_layout)
+{
+ info->sampler = VK_NULL_HANDLE;
+ info->imageView = img_view;
+ info->imageLayout = img_layout;
+}
+
+static inline void
+fill_write_descriptor_set_image(VkWriteDescriptorSet *set, uint8_t bind_i,
+ VkDescriptorType desc_type, VkDescriptorImageInfo *image_info)
+{
+ set->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ set->pNext = NULL;
+ set->dstSet = VK_NULL_HANDLE;
+ set->dstBinding = bind_i;
+ set->dstArrayElement = 0;
+ set->descriptorCount = 1;
+ set->descriptorType = desc_type;
+ set->pImageInfo = image_info;
+ set->pBufferInfo = NULL;
+ set->pTexelBufferView = NULL;
+}
+
+static inline void
+fill_write_descriptor_set_uniform_texel(VkWriteDescriptorSet *set,
+ uint8_t bind_i,
+ VkBufferView *buf_view)
+{
+ set->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ set->pNext = NULL;
+ set->dstSet = VK_NULL_HANDLE;
+ set->dstBinding = bind_i;
+ set->dstArrayElement = 0;
+ set->descriptorCount = 1;
+ set->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ set->pImageInfo = NULL;
+ set->pBufferInfo = NULL;
+ set->pTexelBufferView = buf_view;
+}
+
+void
+vk_texcompress_astc_fill_write_descriptor_sets(struct vk_texcompress_astc_state *astc,
+ struct vk_texcompress_astc_write_descriptor_set *set,
+ VkImageView src_img_view, VkImageLayout src_img_layout,
+ VkImageView dst_img_view,
+ VkFormat format)
+{
+ unsigned desc_i;
+
+ desc_i = 0;
+ fill_desc_image_info_struct(&set->dst_desc_image_info, dst_img_view, VK_IMAGE_LAYOUT_GENERAL);
+ fill_write_descriptor_set_image(&set->descriptor_set[desc_i], desc_i,
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &set->dst_desc_image_info);
+ desc_i++;
+ fill_desc_image_info_struct(&set->src_desc_image_info, src_img_view, src_img_layout);
+ fill_write_descriptor_set_image(&set->descriptor_set[desc_i], desc_i,
+ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &set->src_desc_image_info);
+ /* fill luts descriptor */
+ desc_i++;
+ for (unsigned i = 0; i < VK_TEXCOMPRESS_ASTC_NUM_LUTS; i++) {
+ fill_write_descriptor_set_uniform_texel(&set->descriptor_set[desc_i + i], desc_i + i,
+ &astc->luts_buf_view[i]);
+ }
+ desc_i += VK_TEXCOMPRESS_ASTC_NUM_LUTS;
+ uint8_t t_i = get_partition_table_index(format);
+ fill_write_descriptor_set_uniform_texel(&set->descriptor_set[desc_i], desc_i,
+ &astc->partition_tbl_buf_view[t_i]);
+ desc_i++;
+ assert(desc_i == ARRAY_SIZE(set->descriptor_set));
+}
+
+VkResult
+vk_texcompress_astc_init(struct vk_device *device, VkAllocationCallbacks *allocator,
+ VkPipelineCache pipeline_cache,
+ struct vk_texcompress_astc_state **astc)
+{
+ VkResult result;
+
+   /* astc memory to be freed as part of vk_texcompress_astc_finish() */
+ *astc = vk_zalloc(allocator, sizeof(struct vk_texcompress_astc_state), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (*astc == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ simple_mtx_init(&(*astc)->mutex, mtx_plain);
+
+ result = create_fill_all_luts_vulkan(device, allocator, *astc);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ result = create_layout(device, allocator, *astc);
+
+fail:
+ return result;
+}
+
+void
+vk_texcompress_astc_finish(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc)
+{
+ VkDevice _device = vk_device_to_handle(device);
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+
+ while (astc->pipeline_mask) {
+ uint8_t t_i = u_bit_scan(&astc->pipeline_mask);
+ disp->DestroyPipeline(_device, astc->pipeline[t_i], allocator);
+ }
+
+ disp->DestroyPipelineLayout(_device, astc->p_layout, allocator);
+ disp->DestroyShaderModule(_device, astc->shader_module, allocator);
+ disp->DestroyDescriptorSetLayout(_device, astc->ds_layout, allocator);
+
+ for (unsigned i = 0; i < VK_TEXCOMPRESS_ASTC_NUM_LUTS; i++)
+ disp->DestroyBufferView(_device, astc->luts_buf_view[i], allocator);
+
+ for (unsigned i = 0; i < VK_TEXCOMPRESS_ASTC_NUM_PARTITION_TABLES; i++)
+ disp->DestroyBufferView(_device, astc->partition_tbl_buf_view[i], allocator);
+
+ disp->DestroyBuffer(_device, astc->luts_buf, allocator);
+ disp->FreeMemory(_device, astc->luts_mem, allocator);
+
+ vk_free(allocator, astc);
+}
diff --git a/src/vulkan/runtime/vk_texcompress_astc.h b/src/vulkan/runtime/vk_texcompress_astc.h
new file mode 100644
index 00000000000..e307af5c84e
--- /dev/null
+++ b/src/vulkan/runtime/vk_texcompress_astc.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VK_TEXCOMPRESS_ASTC_H
+#define VK_TEXCOMPRESS_ASTC_H
+
+#include "vk_device.h"
+
+/* LUT order, matching the bindings of the ASTC decode GLSL shader:
+ * 0 - color endpoint
+ * 1 - color endpoint unquant
+ * 2 - weights
+ * 3 - weights unquant
+ * 4 - trits quints
+ */
+#define VK_TEXCOMPRESS_ASTC_NUM_LUTS 5
+#define VK_TEXCOMPRESS_ASTC_NUM_PARTITION_TABLES 14
+#define VK_TEXCOMPRESS_ASTC_WRITE_DESC_SET_COUNT 8
+
+struct vk_texcompress_astc_state {
+ /* single buffer is allocated for all luts */
+ VkDeviceMemory luts_mem;
+ VkBuffer luts_buf;
+
+ VkBufferView luts_buf_view[VK_TEXCOMPRESS_ASTC_NUM_LUTS];
+ VkBufferView partition_tbl_buf_view[VK_TEXCOMPRESS_ASTC_NUM_PARTITION_TABLES];
+
+ simple_mtx_t mutex;
+ VkDescriptorSetLayout ds_layout;
+ VkPipelineLayout p_layout;
+ VkPipeline pipeline[VK_TEXCOMPRESS_ASTC_NUM_PARTITION_TABLES];
+ uint32_t pipeline_mask;
+ VkShaderModule shader_module;
+};
+
+struct vk_texcompress_astc_write_descriptor_set {
+ VkWriteDescriptorSet descriptor_set[VK_TEXCOMPRESS_ASTC_WRITE_DESC_SET_COUNT];
+ VkDescriptorImageInfo dst_desc_image_info;
+ VkDescriptorImageInfo src_desc_image_info;
+};
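+
+/* A rough usage sketch for drivers (anything outside this file is
+ * hypothetical):
+ *
+ *    - at device creation:
+ *         vk_texcompress_astc_init(device, alloc, cache, &astc);
+ *    - when decoding an image:
+ *         VkPipeline p = vk_texcompress_astc_get_decode_pipeline(device, alloc,
+ *                                                                astc, cache, format);
+ *         struct vk_texcompress_astc_write_descriptor_set ds;
+ *         vk_texcompress_astc_fill_write_descriptor_sets(astc, &ds, src_view,
+ *                                                        src_layout, dst_view, format);
+ *         // bind p, push ds.descriptor_set (the set layout is created with the
+ *         // push-descriptor flag, e.g. via vkCmdPushDescriptorSetKHR), then
+ *         // dispatch over the image extent
+ *    - at device destruction:
+ *         vk_texcompress_astc_finish(device, alloc, astc);
+ */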
+
+void
+vk_texcompress_astc_fill_write_descriptor_sets(struct vk_texcompress_astc_state *astc,
+ struct vk_texcompress_astc_write_descriptor_set *set,
+ VkImageView src_img_view, VkImageLayout src_img_layout,
+ VkImageView dst_img_view,
+ VkFormat format);
+VkPipeline vk_texcompress_astc_get_decode_pipeline(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc,
+ VkPipelineCache pipeline_cache,
+ VkFormat format);
+VkResult vk_texcompress_astc_init(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ VkPipelineCache pipeline_cache,
+ struct vk_texcompress_astc_state **astc);
+void vk_texcompress_astc_finish(struct vk_device *device,
+ VkAllocationCallbacks *allocator,
+ struct vk_texcompress_astc_state *astc);
+
+static inline VkFormat
+vk_texcompress_astc_emulation_format(VkFormat format)
+{
+   /* TODO: Per the VK_EXT_astc_decode_mode spec, VK_FORMAT_R16G16B16A16_SFLOAT is the
+    * default decode format; VK_FORMAT_R8G8B8A8_UNORM only offers acceptable image
+    * quality.
+    */
+ switch (format) {
+ case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ return VK_FORMAT_R8G8B8A8_SRGB;
+ default:
+ return VK_FORMAT_UNDEFINED;
+ }
+}
+
+#endif /* VK_TEXCOMPRESS_ASTC_H */
diff --git a/src/vulkan/runtime/vk_texcompress_etc2.c b/src/vulkan/runtime/vk_texcompress_etc2.c
new file mode 100644
index 00000000000..558d91e95dd
--- /dev/null
+++ b/src/vulkan/runtime/vk_texcompress_etc2.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2023 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "vk_texcompress_etc2.h"
+
+#include "compiler/nir/nir_builder.h"
+#include "vk_shader_module.h"
+
+/* Based on
+ * https://github.com/Themaister/Granite/blob/master/assets/shaders/decode/etc2.comp
+ * https://github.com/Themaister/Granite/blob/master/assets/shaders/decode/eac.comp
+ *
+ * With some differences:
+ * - Use the VkFormat to derive all the decode settings.
+ * - Combine the ETC2 and EAC shaders into a single shader.
+ * - Since the two are combined, reuse the EAC path for the ETC2 A8 component.
+ * - The original EAC shader doesn't handle SNORM correctly; that is fixed here.
+ */
+
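+/* ETC2/EAC blocks are specified as big-endian 64-bit words, but the payload
+ * is read back here as 32-bit uint channels, so each word is byte-swapped
+ * before the bit-field extraction below.
+ */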
+static nir_def *
+flip_endian(nir_builder *b, nir_def *src, unsigned cnt)
+{
+ nir_def *v[2];
+ for (unsigned i = 0; i < cnt; ++i) {
+ nir_def *intermediate[4];
+ nir_def *chan = cnt == 1 ? src : nir_channel(b, src, i);
+ for (unsigned j = 0; j < 4; ++j)
+ intermediate[j] = nir_ubfe_imm(b, chan, 8 * j, 8);
+ v[i] = nir_ior(b, nir_ior(b, nir_ishl_imm(b, intermediate[0], 24), nir_ishl_imm(b, intermediate[1], 16)),
+ nir_ior(b, nir_ishl_imm(b, intermediate[2], 8), nir_ishl_imm(b, intermediate[3], 0)));
+ }
+ return cnt == 1 ? v[0] : nir_vec(b, v, cnt);
+}
+
+static nir_def *
+etc1_color_modifier_lookup(nir_builder *b, nir_def *x, nir_def *y)
+{
+ const unsigned table[8][2] = {{2, 8}, {5, 17}, {9, 29}, {13, 42}, {18, 60}, {24, 80}, {33, 106}, {47, 183}};
+ nir_def *upper = nir_ieq_imm(b, y, 1);
+ nir_def *result = NULL;
+ for (unsigned i = 0; i < 8; ++i) {
+ nir_def *tmp = nir_bcsel(b, upper, nir_imm_int(b, table[i][1]), nir_imm_int(b, table[i][0]));
+ if (result)
+ result = nir_bcsel(b, nir_ieq_imm(b, x, i), tmp, result);
+ else
+ result = tmp;
+ }
+ return result;
+}
+
+static nir_def *
+etc2_distance_lookup(nir_builder *b, nir_def *x)
+{
+ const unsigned table[8] = {3, 6, 11, 16, 23, 32, 41, 64};
+ nir_def *result = NULL;
+ for (unsigned i = 0; i < 8; ++i) {
+ if (result)
+ result = nir_bcsel(b, nir_ieq_imm(b, x, i), nir_imm_int(b, table[i]), result);
+ else
+ result = nir_imm_int(b, table[i]);
+ }
+ return result;
+}
+
+static nir_def *
+etc1_alpha_modifier_lookup(nir_builder *b, nir_def *x, nir_def *y)
+{
+ const unsigned table[16] = {0xe852, 0xc962, 0xc741, 0xc531, 0xb752, 0xa862, 0xa763, 0xa742,
+ 0x9751, 0x9741, 0x9731, 0x9641, 0x9632, 0x9210, 0x8753, 0x8642};
+ nir_def *result = NULL;
+ for (unsigned i = 0; i < 16; ++i) {
+ nir_def *tmp = nir_imm_int(b, table[i]);
+ if (result)
+ result = nir_bcsel(b, nir_ieq_imm(b, x, i), tmp, result);
+ else
+ result = tmp;
+ }
+ return nir_ubfe(b, result, nir_imul_imm(b, y, 4), nir_imm_int(b, 4));
+}
+
+static nir_def *
+etc_extend(nir_builder *b, nir_def *v, int bits)
+{
+ if (bits == 4)
+ return nir_imul_imm(b, v, 0x11);
+ return nir_ior(b, nir_ishl_imm(b, v, 8 - bits), nir_ushr_imm(b, v, bits - (8 - bits)));
+}
+
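+/* Decodes one texel of an ETC2 alpha / EAC sub-block.  The 64-bit payload
+ * holds an 8-bit base, a 4-bit multiplier and a 4-bit modifier-table index,
+ * followed by 3-bit modifier indices per texel; for EAC the base and
+ * multiplier are rescaled to the 11-bit range and the signed variant is
+ * selected at runtime via is_signed.
+ */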
+static nir_def *
+decode_etc2_alpha(struct nir_builder *b, nir_def *alpha_payload, nir_def *linear_pixel, bool eac, nir_def *is_signed)
+{
+ alpha_payload = flip_endian(b, alpha_payload, 2);
+ nir_def *alpha_x = nir_channel(b, alpha_payload, 1);
+ nir_def *alpha_y = nir_channel(b, alpha_payload, 0);
+ nir_def *bit_offset = nir_isub_imm(b, 45, nir_imul_imm(b, linear_pixel, 3));
+ nir_def *base = nir_ubfe_imm(b, alpha_y, 24, 8);
+ nir_def *multiplier = nir_ubfe_imm(b, alpha_y, 20, 4);
+ nir_def *table = nir_ubfe_imm(b, alpha_y, 16, 4);
+
+ if (eac) {
+ nir_def *signed_base = nir_ibfe_imm(b, alpha_y, 24, 8);
+ signed_base = nir_imul_imm(b, signed_base, 8);
+ base = nir_iadd_imm(b, nir_imul_imm(b, base, 8), 4);
+ base = nir_bcsel(b, is_signed, signed_base, base);
+ multiplier = nir_imax(b, nir_imul_imm(b, multiplier, 8), nir_imm_int(b, 1));
+ }
+
+ nir_def *lsb_index = nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x),
+ nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 2));
+ bit_offset = nir_iadd_imm(b, bit_offset, 2);
+ nir_def *msb = nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x),
+ nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 1));
+ nir_def *mod = nir_ixor(b, etc1_alpha_modifier_lookup(b, table, lsb_index), nir_iadd_imm(b, msb, -1));
+ nir_def *a = nir_iadd(b, base, nir_imul(b, mod, multiplier));
+
+ nir_def *low_bound = nir_imm_int(b, 0);
+ nir_def *high_bound = nir_imm_int(b, 255);
+ nir_def *final_mult = nir_imm_float(b, 1 / 255.0);
+ if (eac) {
+ low_bound = nir_bcsel(b, is_signed, nir_imm_int(b, -1023), low_bound);
+ high_bound = nir_bcsel(b, is_signed, nir_imm_int(b, 1023), nir_imm_int(b, 2047));
+ final_mult = nir_bcsel(b, is_signed, nir_imm_float(b, 1 / 1023.0), nir_imm_float(b, 1 / 2047.0));
+ }
+
+ return nir_fmul(b, nir_i2f32(b, nir_iclamp(b, a, low_bound, high_bound)), final_mult);
+}
+
+static nir_def *
+get_global_ids(nir_builder *b, unsigned num_components)
+{
+ unsigned mask = BITFIELD_MASK(num_components);
+
+ nir_def *local_ids = nir_channels(b, nir_load_local_invocation_id(b), mask);
+ nir_def *block_ids = nir_channels(b, nir_load_workgroup_id(b), mask);
+ nir_def *block_size =
+ nir_channels(b,
+ nir_imm_ivec4(b, b->shader->info.workgroup_size[0], b->shader->info.workgroup_size[1],
+ b->shader->info.workgroup_size[2], 0),
+ mask);
+
+ return nir_iadd(b, nir_imul(b, block_ids, block_size), local_ids);
+}
+
+static nir_shader *
+etc2_build_shader(struct vk_device *dev, const struct nir_shader_compiler_options *nir_options)
+{
+ const struct glsl_type *sampler_type_2d = glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, true, GLSL_TYPE_UINT);
+ const struct glsl_type *sampler_type_3d = glsl_sampler_type(GLSL_SAMPLER_DIM_3D, false, false, GLSL_TYPE_UINT);
+ const struct glsl_type *img_type_2d = glsl_image_type(GLSL_SAMPLER_DIM_2D, true, GLSL_TYPE_FLOAT);
+ const struct glsl_type *img_type_3d = glsl_image_type(GLSL_SAMPLER_DIM_3D, false, GLSL_TYPE_FLOAT);
+ nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, nir_options, "meta_decode_etc");
+ b.shader->info.workgroup_size[0] = 8;
+ b.shader->info.workgroup_size[1] = 8;
+
+ nir_variable *input_img_2d = nir_variable_create(b.shader, nir_var_uniform, sampler_type_2d, "s_tex_2d");
+ input_img_2d->data.descriptor_set = 0;
+ input_img_2d->data.binding = 0;
+
+ nir_variable *input_img_3d = nir_variable_create(b.shader, nir_var_uniform, sampler_type_3d, "s_tex_3d");
+ input_img_3d->data.descriptor_set = 0;
+ input_img_3d->data.binding = 0;
+
+ nir_variable *output_img_2d = nir_variable_create(b.shader, nir_var_image, img_type_2d, "out_img_2d");
+ output_img_2d->data.descriptor_set = 0;
+ output_img_2d->data.binding = 1;
+
+ nir_variable *output_img_3d = nir_variable_create(b.shader, nir_var_image, img_type_3d, "out_img_3d");
+ output_img_3d->data.descriptor_set = 0;
+ output_img_3d->data.binding = 1;
+
+ nir_def *global_id = get_global_ids(&b, 3);
+
+ nir_def *consts = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+ nir_def *consts2 = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
+ nir_def *offset = nir_channels(&b, consts, 7);
+ nir_def *format = nir_channel(&b, consts, 3);
+ nir_def *image_type = nir_channel(&b, consts2, 0);
+ nir_def *is_3d = nir_ieq_imm(&b, image_type, VK_IMAGE_TYPE_3D);
+ nir_def *coord = nir_iadd(&b, global_id, offset);
+ nir_def *src_coord = nir_vec3(&b, nir_ushr_imm(&b, nir_channel(&b, coord, 0), 2),
+ nir_ushr_imm(&b, nir_channel(&b, coord, 1), 2), nir_channel(&b, coord, 2));
+
+ nir_variable *payload_var = nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "payload");
+ nir_push_if(&b, is_3d);
+ {
+ nir_def *color = nir_txf_deref(&b, nir_build_deref_var(&b, input_img_3d), src_coord, nir_imm_int(&b, 0));
+ nir_store_var(&b, payload_var, color, 0xf);
+ }
+ nir_push_else(&b, NULL);
+ {
+ nir_def *color = nir_txf_deref(&b, nir_build_deref_var(&b, input_img_2d), src_coord, nir_imm_int(&b, 0));
+ nir_store_var(&b, payload_var, color, 0xf);
+ }
+ nir_pop_if(&b, NULL);
+
+ nir_def *pixel_coord = nir_iand_imm(&b, nir_channels(&b, coord, 3), 3);
+ nir_def *linear_pixel =
+ nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, pixel_coord, 0), 4), nir_channel(&b, pixel_coord, 1));
+
+ nir_def *payload = nir_load_var(&b, payload_var);
+ nir_variable *color = nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "color");
+ nir_store_var(&b, color, nir_imm_vec4(&b, 1.0, 0.0, 0.0, 1.0), 0xf);
+ nir_push_if(&b, nir_ilt_imm(&b, format, VK_FORMAT_EAC_R11_UNORM_BLOCK));
+ {
+ nir_def *alpha_bits_8 = nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK);
+ nir_def *alpha_bits_1 = nir_iand(&b, nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK),
+ nir_ilt_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK));
+
+ nir_def *color_payload =
+ nir_bcsel(&b, alpha_bits_8, nir_channels(&b, payload, 0xC), nir_channels(&b, payload, 3));
+ color_payload = flip_endian(&b, color_payload, 2);
+ nir_def *color_y = nir_channel(&b, color_payload, 0);
+ nir_def *color_x = nir_channel(&b, color_payload, 1);
+ nir_def *flip = nir_test_mask(&b, color_y, 1);
+ nir_def *subblock =
+ nir_ushr_imm(&b, nir_bcsel(&b, flip, nir_channel(&b, pixel_coord, 1), nir_channel(&b, pixel_coord, 0)), 1);
+
+ nir_variable *punchthrough = nir_variable_create(b.shader, nir_var_shader_temp, glsl_bool_type(), "punchthrough");
+ nir_def *punchthrough_init = nir_iand(&b, alpha_bits_1, nir_inot(&b, nir_test_mask(&b, color_y, 2)));
+ nir_store_var(&b, punchthrough, punchthrough_init, 0x1);
+
+ nir_variable *etc1_compat = nir_variable_create(b.shader, nir_var_shader_temp, glsl_bool_type(), "etc1_compat");
+ nir_store_var(&b, etc1_compat, nir_imm_false(&b), 0x1);
+
+ nir_variable *alpha_result =
+ nir_variable_create(b.shader, nir_var_shader_temp, glsl_float_type(), "alpha_result");
+ nir_push_if(&b, alpha_bits_8);
+ {
+ nir_store_var(&b, alpha_result, decode_etc2_alpha(&b, nir_channels(&b, payload, 3), linear_pixel, false, NULL),
+ 1);
+ }
+ nir_push_else(&b, NULL);
+ {
+ nir_store_var(&b, alpha_result, nir_imm_float(&b, 1.0), 1);
+ }
+ nir_pop_if(&b, NULL);
+
+ const struct glsl_type *uvec3_type = glsl_vector_type(GLSL_TYPE_UINT, 3);
+ nir_variable *rgb_result = nir_variable_create(b.shader, nir_var_shader_temp, uvec3_type, "rgb_result");
+ nir_variable *base_rgb = nir_variable_create(b.shader, nir_var_shader_temp, uvec3_type, "base_rgb");
+ nir_store_var(&b, rgb_result, nir_imm_ivec3(&b, 255, 0, 0), 0x7);
+
+ nir_def *msb = nir_iand_imm(&b, nir_ushr(&b, color_x, nir_iadd_imm(&b, linear_pixel, 15)), 2);
+ nir_def *lsb = nir_iand_imm(&b, nir_ushr(&b, color_x, linear_pixel), 1);
+
+ nir_push_if(&b, nir_iand(&b, nir_inot(&b, alpha_bits_1), nir_inot(&b, nir_test_mask(&b, color_y, 2))));
+ {
+ nir_store_var(&b, etc1_compat, nir_imm_true(&b), 1);
+ nir_def *tmp[3];
+ for (unsigned i = 0; i < 3; ++i)
+ tmp[i] = etc_extend(
+ &b,
+ nir_iand_imm(&b, nir_ushr(&b, color_y, nir_isub_imm(&b, 28 - 8 * i, nir_imul_imm(&b, subblock, 4))),
+ 0xf),
+ 4);
+ nir_store_var(&b, base_rgb, nir_vec(&b, tmp, 3), 0x7);
+ }
+ nir_push_else(&b, NULL);
+ {
+ nir_def *rb = nir_ubfe_imm(&b, color_y, 27, 5);
+ nir_def *rd = nir_ibfe_imm(&b, color_y, 24, 3);
+ nir_def *gb = nir_ubfe_imm(&b, color_y, 19, 5);
+ nir_def *gd = nir_ibfe_imm(&b, color_y, 16, 3);
+ nir_def *bb = nir_ubfe_imm(&b, color_y, 11, 5);
+ nir_def *bd = nir_ibfe_imm(&b, color_y, 8, 3);
+ nir_def *r1 = nir_iadd(&b, rb, rd);
+ nir_def *g1 = nir_iadd(&b, gb, gd);
+ nir_def *b1 = nir_iadd(&b, bb, bd);
+
+ nir_push_if(&b, nir_ugt_imm(&b, r1, 31));
+ {
+ nir_def *r0 =
+ nir_ior(&b, nir_ubfe_imm(&b, color_y, 24, 2), nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 27, 2), 2));
+ nir_def *g0 = nir_ubfe_imm(&b, color_y, 20, 4);
+ nir_def *b0 = nir_ubfe_imm(&b, color_y, 16, 4);
+ nir_def *r2 = nir_ubfe_imm(&b, color_y, 12, 4);
+ nir_def *g2 = nir_ubfe_imm(&b, color_y, 8, 4);
+ nir_def *b2 = nir_ubfe_imm(&b, color_y, 4, 4);
+ nir_def *da =
+ nir_ior(&b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 2, 2), 1), nir_iand_imm(&b, color_y, 1));
+ nir_def *dist = etc2_distance_lookup(&b, da);
+ nir_def *index = nir_ior(&b, lsb, msb);
+
+ nir_store_var(&b, punchthrough,
+ nir_iand(&b, nir_load_var(&b, punchthrough), nir_ieq_imm(&b, nir_iadd(&b, lsb, msb), 2)),
+ 0x1);
+ nir_push_if(&b, nir_ieq_imm(&b, index, 0));
+ {
+ nir_store_var(&b, rgb_result, etc_extend(&b, nir_vec3(&b, r0, g0, b0), 4), 0x7);
+ }
+ nir_push_else(&b, NULL);
+ {
+
+ nir_def *tmp = nir_iadd(&b, etc_extend(&b, nir_vec3(&b, r2, g2, b2), 4),
+ nir_imul(&b, dist, nir_isub_imm(&b, 2, index)));
+ nir_store_var(&b, rgb_result, tmp, 0x7);
+ }
+ nir_pop_if(&b, NULL);
+ }
+ nir_push_else(&b, NULL);
+ nir_push_if(&b, nir_ugt_imm(&b, g1, 31));
+ {
+ nir_def *r0 = nir_ubfe_imm(&b, color_y, 27, 4);
+ nir_def *g0 = nir_ior(&b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 24, 3), 1),
+ nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 20), 1));
+ nir_def *b0 =
+ nir_ior(&b, nir_ubfe_imm(&b, color_y, 15, 3), nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 16), 8));
+ nir_def *r2 = nir_ubfe_imm(&b, color_y, 11, 4);
+ nir_def *g2 = nir_ubfe_imm(&b, color_y, 7, 4);
+ nir_def *b2 = nir_ubfe_imm(&b, color_y, 3, 4);
+ nir_def *da = nir_iand_imm(&b, color_y, 4);
+ nir_def *db = nir_iand_imm(&b, color_y, 1);
+ nir_def *d = nir_iadd(&b, da, nir_imul_imm(&b, db, 2));
+ nir_def *d0 = nir_iadd(&b, nir_ishl_imm(&b, r0, 16), nir_iadd(&b, nir_ishl_imm(&b, g0, 8), b0));
+ nir_def *d2 = nir_iadd(&b, nir_ishl_imm(&b, r2, 16), nir_iadd(&b, nir_ishl_imm(&b, g2, 8), b2));
+ d = nir_bcsel(&b, nir_uge(&b, d0, d2), nir_iadd_imm(&b, d, 1), d);
+ nir_def *dist = etc2_distance_lookup(&b, d);
+ nir_def *base = nir_bcsel(&b, nir_ine_imm(&b, msb, 0), nir_vec3(&b, r2, g2, b2), nir_vec3(&b, r0, g0, b0));
+ base = etc_extend(&b, base, 4);
+ base = nir_iadd(&b, base, nir_imul(&b, dist, nir_isub_imm(&b, 1, nir_imul_imm(&b, lsb, 2))));
+ nir_store_var(&b, rgb_result, base, 0x7);
+ nir_store_var(&b, punchthrough,
+ nir_iand(&b, nir_load_var(&b, punchthrough), nir_ieq_imm(&b, nir_iadd(&b, lsb, msb), 2)),
+ 0x1);
+ }
+ nir_push_else(&b, NULL);
+ nir_push_if(&b, nir_ugt_imm(&b, b1, 31));
+ {
+ nir_def *r0 = nir_ubfe_imm(&b, color_y, 25, 6);
+ nir_def *g0 =
+ nir_ior(&b, nir_ubfe_imm(&b, color_y, 17, 6), nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 18), 0x40));
+ nir_def *b0 = nir_ior(
+ &b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 11, 2), 3),
+ nir_ior(&b, nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 11), 0x20), nir_ubfe_imm(&b, color_y, 7, 3)));
+ nir_def *rh =
+ nir_ior(&b, nir_iand_imm(&b, color_y, 1), nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 2, 5), 1));
+ nir_def *rv = nir_ubfe_imm(&b, color_x, 13, 6);
+ nir_def *gh = nir_ubfe_imm(&b, color_x, 25, 7);
+ nir_def *gv = nir_ubfe_imm(&b, color_x, 6, 7);
+ nir_def *bh = nir_ubfe_imm(&b, color_x, 19, 6);
+ nir_def *bv = nir_ubfe_imm(&b, color_x, 0, 6);
+
+ r0 = etc_extend(&b, r0, 6);
+ g0 = etc_extend(&b, g0, 7);
+ b0 = etc_extend(&b, b0, 6);
+ rh = etc_extend(&b, rh, 6);
+ rv = etc_extend(&b, rv, 6);
+ gh = etc_extend(&b, gh, 7);
+ gv = etc_extend(&b, gv, 7);
+ bh = etc_extend(&b, bh, 6);
+ bv = etc_extend(&b, bv, 6);
+
+ nir_def *rgb = nir_vec3(&b, r0, g0, b0);
+ nir_def *dx = nir_imul(&b, nir_isub(&b, nir_vec3(&b, rh, gh, bh), rgb), nir_channel(&b, pixel_coord, 0));
+ nir_def *dy = nir_imul(&b, nir_isub(&b, nir_vec3(&b, rv, gv, bv), rgb), nir_channel(&b, pixel_coord, 1));
+ rgb = nir_iadd(&b, rgb, nir_ishr_imm(&b, nir_iadd_imm(&b, nir_iadd(&b, dx, dy), 2), 2));
+ nir_store_var(&b, rgb_result, rgb, 0x7);
+ nir_store_var(&b, punchthrough, nir_imm_false(&b), 0x1);
+ }
+ nir_push_else(&b, NULL);
+ {
+ nir_store_var(&b, etc1_compat, nir_imm_true(&b), 1);
+ nir_def *subblock_b = nir_ine_imm(&b, subblock, 0);
+ nir_def *tmp[] = {
+ nir_bcsel(&b, subblock_b, r1, rb),
+ nir_bcsel(&b, subblock_b, g1, gb),
+ nir_bcsel(&b, subblock_b, b1, bb),
+ };
+ nir_store_var(&b, base_rgb, etc_extend(&b, nir_vec(&b, tmp, 3), 5), 0x7);
+ }
+ nir_pop_if(&b, NULL);
+ nir_pop_if(&b, NULL);
+ nir_pop_if(&b, NULL);
+ }
+ nir_pop_if(&b, NULL);
+ nir_push_if(&b, nir_load_var(&b, etc1_compat));
+ {
+ nir_def *etc1_table_index =
+ nir_ubfe(&b, color_y, nir_isub_imm(&b, 5, nir_imul_imm(&b, subblock, 3)), nir_imm_int(&b, 3));
+ nir_def *sgn = nir_isub_imm(&b, 1, msb);
+ sgn = nir_bcsel(&b, nir_load_var(&b, punchthrough), nir_imul(&b, sgn, lsb), sgn);
+ nir_store_var(&b, punchthrough,
+ nir_iand(&b, nir_load_var(&b, punchthrough), nir_ieq_imm(&b, nir_iadd(&b, lsb, msb), 2)), 0x1);
+ nir_def *off = nir_imul(&b, etc1_color_modifier_lookup(&b, etc1_table_index, lsb), sgn);
+ nir_def *result = nir_iadd(&b, nir_load_var(&b, base_rgb), off);
+ nir_store_var(&b, rgb_result, result, 0x7);
+ }
+ nir_pop_if(&b, NULL);
+ nir_push_if(&b, nir_load_var(&b, punchthrough));
+ {
+ nir_store_var(&b, alpha_result, nir_imm_float(&b, 0), 0x1);
+ nir_store_var(&b, rgb_result, nir_imm_ivec3(&b, 0, 0, 0), 0x7);
+ }
+ nir_pop_if(&b, NULL);
+ nir_def *col[4];
+ for (unsigned i = 0; i < 3; ++i)
+ col[i] = nir_fdiv_imm(&b, nir_i2f32(&b, nir_channel(&b, nir_load_var(&b, rgb_result), i)), 255.0);
+ col[3] = nir_load_var(&b, alpha_result);
+ nir_store_var(&b, color, nir_vec(&b, col, 4), 0xf);
+ }
+ nir_push_else(&b, NULL);
+ { /* EAC */
+ nir_def *is_signed = nir_ior(&b, nir_ieq_imm(&b, format, VK_FORMAT_EAC_R11_SNORM_BLOCK),
+ nir_ieq_imm(&b, format, VK_FORMAT_EAC_R11G11_SNORM_BLOCK));
+ nir_def *val[4];
+ for (int i = 0; i < 2; ++i) {
+ val[i] = decode_etc2_alpha(&b, nir_channels(&b, payload, 3 << (2 * i)), linear_pixel, true, is_signed);
+ }
+ val[2] = nir_imm_float(&b, 0.0);
+ val[3] = nir_imm_float(&b, 1.0);
+ nir_store_var(&b, color, nir_vec(&b, val, 4), 0xf);
+ }
+ nir_pop_if(&b, NULL);
+
+ nir_def *outval = nir_load_var(&b, color);
+ nir_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), nir_channel(&b, coord, 2),
+ nir_undef(&b, 1, 32));
+
+ nir_push_if(&b, is_3d);
+ {
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->def, img_coord, nir_undef(&b, 1, 32), outval,
+ nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D);
+ }
+ nir_push_else(&b, NULL);
+ {
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->def, img_coord, nir_undef(&b, 1, 32), outval,
+ nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
+ }
+ nir_pop_if(&b, NULL);
+ return b.shader;
+}
+
+static VkResult
+etc2_init_pipeline(struct vk_device *device, struct vk_texcompress_etc2_state *etc2)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ nir_shader *cs = etc2_build_shader(device, etc2->nir_options);
+
+ const VkComputePipelineCreateInfo pipeline_create_info = {
+ .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ .stage =
+ (VkPipelineShaderStageCreateInfo){
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+ .module = vk_shader_module_handle_from_nir(cs),
+ .pName = "main",
+ },
+ .layout = etc2->pipeline_layout,
+ };
+
+ return disp->CreateComputePipelines(_device, etc2->pipeline_cache, 1, &pipeline_create_info, etc2->allocator,
+ &etc2->pipeline);
+}
+
+static VkResult
+etc2_init_pipeline_layout(struct vk_device *device, struct vk_texcompress_etc2_state *etc2)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ const VkPipelineLayoutCreateInfo pipeline_layout_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = 1,
+ .pSetLayouts = &etc2->ds_layout,
+ .pushConstantRangeCount = 1,
+ .pPushConstantRanges =
+ &(VkPushConstantRange){
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .size = 20,
+ },
+ };
+
+ return disp->CreatePipelineLayout(_device, &pipeline_layout_create_info, etc2->allocator, &etc2->pipeline_layout);
+}
+
+static VkResult
+etc2_init_ds_layout(struct vk_device *device, struct vk_texcompress_etc2_state *etc2)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ const VkDescriptorSetLayoutCreateInfo ds_layout_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
+ .bindingCount = 2,
+ .pBindings =
+ (VkDescriptorSetLayoutBinding[]){
+ {
+ .binding = 0,
+ .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ },
+ {
+ .binding = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ },
+ },
+ };
+
+ return disp->CreateDescriptorSetLayout(_device, &ds_layout_create_info, etc2->allocator, &etc2->ds_layout);
+}
+
+void
+vk_texcompress_etc2_init(struct vk_device *device, struct vk_texcompress_etc2_state *etc2)
+{
+ simple_mtx_init(&etc2->mutex, mtx_plain);
+}
+
+VkResult
+vk_texcompress_etc2_late_init(struct vk_device *device, struct vk_texcompress_etc2_state *etc2)
+{
+ VkResult result = VK_SUCCESS;
+
+ simple_mtx_lock(&etc2->mutex);
+
+ if (!etc2->pipeline) {
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ result = etc2_init_ds_layout(device, etc2);
+ if (result != VK_SUCCESS)
+ goto out;
+
+ result = etc2_init_pipeline_layout(device, etc2);
+ if (result != VK_SUCCESS) {
+ disp->DestroyDescriptorSetLayout(_device, etc2->ds_layout, etc2->allocator);
+ goto out;
+ }
+
+ result = etc2_init_pipeline(device, etc2);
+ if (result != VK_SUCCESS) {
+ disp->DestroyPipelineLayout(_device, etc2->pipeline_layout, etc2->allocator);
+ disp->DestroyDescriptorSetLayout(_device, etc2->ds_layout, etc2->allocator);
+ goto out;
+ }
+ }
+
+out:
+ simple_mtx_unlock(&etc2->mutex);
+ return result;
+}
+
+void
+vk_texcompress_etc2_finish(struct vk_device *device, struct vk_texcompress_etc2_state *etc2)
+{
+ const struct vk_device_dispatch_table *disp = &device->dispatch_table;
+ VkDevice _device = vk_device_to_handle(device);
+
+ if (etc2->pipeline != VK_NULL_HANDLE)
+ disp->DestroyPipeline(_device, etc2->pipeline, etc2->allocator);
+
+ if (etc2->pipeline_layout != VK_NULL_HANDLE)
+ disp->DestroyPipelineLayout(_device, etc2->pipeline_layout, etc2->allocator);
+ if (etc2->ds_layout != VK_NULL_HANDLE)
+ disp->DestroyDescriptorSetLayout(_device, etc2->ds_layout, etc2->allocator);
+
+ simple_mtx_destroy(&etc2->mutex);
+}
diff --git a/src/vulkan/runtime/vk_texcompress_etc2.h b/src/vulkan/runtime/vk_texcompress_etc2.h
new file mode 100644
index 00000000000..f8e6269ebec
--- /dev/null
+++ b/src/vulkan/runtime/vk_texcompress_etc2.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2023 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef VK_TEXCOMPRESS_ETC2_H
+#define VK_TEXCOMPRESS_ETC2_H
+
+#include "util/simple_mtx.h"
+
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct nir_shader_compiler_options;
+
+struct vk_texcompress_etc2_state {
+ /* these are specified by the driver */
+ const VkAllocationCallbacks *allocator;
+ const struct nir_shader_compiler_options *nir_options;
+ VkPipelineCache pipeline_cache;
+
+ /*
+ * The pipeline is a compute pipeline with
+ *
+ * - layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+ * - layout(set = 0, binding = 0) uniform utexture2DArray s_tex_2d;
+ * - layout(set = 0, binding = 0) uniform utexture3D s_tex_3d;
+ * - layout(set = 0, binding = 1) uniform image2DArray out_img_2d;
+ * - layout(set = 0, binding = 1) uniform image3D out_img_3d;
+ * - layout(push_constant) uniform Registers {
+ * ivec3 offset;
+ * int vk_format;
+ * int vk_image_type;
+ * } registers;
+ *
+ * There are other implications, such as
+ *
+ * - to make sure vkCmdCopyBufferToImage and vkCmdCopyImage are the only
+ * means to initialize the image data,
+ * - the format feature flags should not include flags that allow
+ * modifying the image data
+ * - the image tiling should be VK_IMAGE_TILING_OPTIMAL
+ * - the image usage flags should not include
+ * VK_IMAGE_USAGE_STORAGE_BIT, which can be made valid via
+ * VK_IMAGE_CREATE_EXTENDED_USAGE_BIT
+ * - the image create flags are assumed to include
+ * VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and
+ * VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT
+ * - the image usage flags are assumed to include
+ * VK_IMAGE_USAGE_SAMPLED_BIT (for src) or VK_IMAGE_USAGE_STORAGE_BIT
+ * (for dst)
+ */
+ simple_mtx_t mutex;
+ VkDescriptorSetLayout ds_layout;
+ VkPipelineLayout pipeline_layout;
+ VkPipeline pipeline;
+};
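
The "Registers" push-constant block described in the comment above has a straightforward C-side mirror; a sketch under std430 packing, where the two trailing scalars fit into the padding after the ivec3 (illustrative only, drivers building the shader may declare their own equivalent):

/* Hypothetical mirror of the push-constant block; member order follows the
 * GLSL declaration in the comment above. */
struct drv_etc2_push_const {
   int32_t offset[3];     /* ivec3 offset from the comment above */
   int32_t vk_format;     /* source ETC2/EAC VkFormat */
   int32_t vk_image_type; /* VK_IMAGE_TYPE_2D or VK_IMAGE_TYPE_3D */
};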
+
+void vk_texcompress_etc2_init(struct vk_device *device, struct vk_texcompress_etc2_state *etc2);
+
+VkResult vk_texcompress_etc2_late_init(struct vk_device *device, struct vk_texcompress_etc2_state *etc2);
+
+void vk_texcompress_etc2_finish(struct vk_device *device, struct vk_texcompress_etc2_state *etc2);
+
+static inline VkImageViewType
+vk_texcompress_etc2_image_view_type(VkImageType image_type)
+{
+ switch (image_type) {
+ case VK_IMAGE_TYPE_2D:
+ return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case VK_IMAGE_TYPE_3D:
+ return VK_IMAGE_VIEW_TYPE_3D;
+ default:
+ unreachable("bad image type");
+ }
+}
+
+static inline VkFormat
+vk_texcompress_etc2_emulation_format(VkFormat etc2_format)
+{
+ switch (etc2_format) {
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ return VK_FORMAT_R8G8B8A8_SRGB;
+ case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+ return VK_FORMAT_R16_UNORM;
+ case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+ return VK_FORMAT_R16_SNORM;
+ case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ return VK_FORMAT_R16G16_UNORM;
+ case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+ return VK_FORMAT_R16G16_SNORM;
+ default:
+ return VK_FORMAT_UNDEFINED;
+ }
+}
+
+static inline VkFormat
+vk_texcompress_etc2_load_format(VkFormat etc2_format)
+{
+ return vk_format_get_blocksize(etc2_format) == 16 ? VK_FORMAT_R32G32B32A32_UINT : VK_FORMAT_R32G32_UINT;
+}
+
+static inline VkFormat
+vk_texcompress_etc2_store_format(VkFormat etc2_format)
+{
+ VkFormat format = vk_texcompress_etc2_emulation_format(etc2_format);
+ if (format == VK_FORMAT_R8G8B8A8_SRGB)
+ format = VK_FORMAT_R8G8B8A8_UNORM;
+ return format;
+}
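
The format helpers above encode the ETC2/EAC block sizes: 8-byte blocks (ETC2 RGB8, RGB8A1, EAC R11) are fetched as two 32-bit words and 16-byte blocks (ETC2 RGBA8, EAC RG11) as four, while the sRGB emulation format is written through an UNORM storage view. A few worked examples as checks (illustrative only, not part of this header):

#include <assert.h>

static inline void
drv_check_etc2_format_mapping(void)
{
   /* 8-byte block -> two 32-bit words per fetch */
   assert(vk_texcompress_etc2_load_format(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK) ==
          VK_FORMAT_R32G32_UINT);
   /* 16-byte block -> four 32-bit words per fetch */
   assert(vk_texcompress_etc2_load_format(VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK) ==
          VK_FORMAT_R32G32B32A32_UINT);
   /* sRGB emulation format is stored through an UNORM view */
   assert(vk_texcompress_etc2_store_format(VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK) ==
          VK_FORMAT_R8G8B8A8_UNORM);
}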
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_TEXCOMPRESS_ETC2_H */
diff --git a/src/vulkan/runtime/vk_video.c b/src/vulkan/runtime/vk_video.c
new file mode 100644
index 00000000000..cf96c1e64de
--- /dev/null
+++ b/src/vulkan/runtime/vk_video.c
@@ -0,0 +1,2072 @@
+/*
+ * Copyright © 2021 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_video.h"
+#include "vk_util.h"
+#include "vk_log.h"
+#include "vk_alloc.h"
+#include "vk_device.h"
+#include "util/vl_rbsp.h"
+#include "util/vl_bitstream.h"
+
+VkResult
+vk_video_session_init(struct vk_device *device,
+ struct vk_video_session *vid,
+ const VkVideoSessionCreateInfoKHR *create_info)
+{
+ vk_object_base_init(device, &vid->base, VK_OBJECT_TYPE_VIDEO_SESSION_KHR);
+
+ vid->flags = create_info->flags;
+ vid->op = create_info->pVideoProfile->videoCodecOperation;
+ vid->max_coded = create_info->maxCodedExtent;
+ vid->picture_format = create_info->pictureFormat;
+ vid->ref_format = create_info->referencePictureFormat;
+ vid->max_dpb_slots = create_info->maxDpbSlots;
+ vid->max_active_ref_pics = create_info->maxActiveReferencePictures;
+
+ switch (vid->op) {
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR: {
+ const struct VkVideoDecodeH264ProfileInfoKHR *h264_profile =
+ vk_find_struct_const(create_info->pVideoProfile->pNext,
+ VIDEO_DECODE_H264_PROFILE_INFO_KHR);
+ vid->h264.profile_idc = h264_profile->stdProfileIdc;
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR: {
+ const struct VkVideoDecodeH265ProfileInfoKHR *h265_profile =
+ vk_find_struct_const(create_info->pVideoProfile->pNext,
+ VIDEO_DECODE_H265_PROFILE_INFO_KHR);
+ vid->h265.profile_idc = h265_profile->stdProfileIdc;
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_DECODE_AV1_BIT_KHR: {
+ const struct VkVideoDecodeAV1ProfileInfoKHR *av1_profile =
+ vk_find_struct_const(create_info->pVideoProfile->pNext,
+ VIDEO_DECODE_AV1_PROFILE_INFO_KHR);
+ vid->av1.profile = av1_profile->stdProfile;
+ vid->av1.film_grain_support = av1_profile->filmGrainSupport;
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR: {
+ const struct VkVideoEncodeH264ProfileInfoKHR *h264_profile =
+ vk_find_struct_const(create_info->pVideoProfile->pNext, VIDEO_ENCODE_H264_PROFILE_INFO_KHR);
+ vid->h264.profile_idc = h264_profile->stdProfileIdc;
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR: {
+ const struct VkVideoEncodeH265ProfileInfoKHR *h265_profile =
+ vk_find_struct_const(create_info->pVideoProfile->pNext, VIDEO_ENCODE_H265_PROFILE_INFO_KHR);
+ vid->h265.profile_idc = h265_profile->stdProfileIdc;
+ break;
+ }
+ default:
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ if (vid->op == VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR ||
+ vid->op == VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR) {
+ const struct VkVideoEncodeUsageInfoKHR *encode_usage_profile =
+ vk_find_struct_const(create_info->pVideoProfile->pNext, VIDEO_ENCODE_USAGE_INFO_KHR);
+ if (encode_usage_profile) {
+ vid->enc_usage.video_usage_hints = encode_usage_profile->videoUsageHints;
+ vid->enc_usage.video_content_hints = encode_usage_profile->videoContentHints;
+ vid->enc_usage.tuning_mode = encode_usage_profile->tuningMode;
+ } else {
+ vid->enc_usage.video_usage_hints = VK_VIDEO_ENCODE_USAGE_DEFAULT_KHR;
+ vid->enc_usage.video_content_hints = VK_VIDEO_ENCODE_CONTENT_DEFAULT_KHR;
+ vid->enc_usage.tuning_mode = VK_VIDEO_ENCODE_TUNING_MODE_DEFAULT_KHR;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static void
+vk_video_deep_copy_h264_sps(struct vk_video_h264_sps *dst,
+ const StdVideoH264SequenceParameterSet *src)
+{
+ memcpy(&dst->base, src, sizeof(StdVideoH264SequenceParameterSet));
+ if (src->num_ref_frames_in_pic_order_cnt_cycle && src->pOffsetForRefFrame) {
+ memcpy(dst->offsets_for_ref_frame, src->pOffsetForRefFrame, sizeof(int32_t) * src->num_ref_frames_in_pic_order_cnt_cycle);
+ dst->base.pOffsetForRefFrame = dst->offsets_for_ref_frame;
+ }
+ if (src->flags.seq_scaling_matrix_present_flag && src->pScalingLists) {
+ memcpy(&dst->scaling_lists, src->pScalingLists, sizeof(StdVideoH264ScalingLists));
+ dst->base.pScalingLists = &dst->scaling_lists;
+ }
+ if (src->flags.vui_parameters_present_flag && src->pSequenceParameterSetVui) {
+ memcpy(&dst->vui, src->pSequenceParameterSetVui, sizeof(StdVideoH264SequenceParameterSetVui));
+ dst->base.pSequenceParameterSetVui = &dst->vui;
+
+ if (src->pSequenceParameterSetVui->pHrdParameters) {
+ memcpy(&dst->vui_hrd_parameters, src->pSequenceParameterSetVui->pHrdParameters,
+ sizeof(StdVideoH264HrdParameters));
+ dst->vui.pHrdParameters = &dst->vui_hrd_parameters;
+ }
+ }
+}
+
+static void
+vk_video_deep_copy_h264_pps(struct vk_video_h264_pps *dst,
+ const StdVideoH264PictureParameterSet *src)
+{
+ memcpy(&dst->base, src, sizeof(StdVideoH264PictureParameterSet));
+ if (src->flags.pic_scaling_matrix_present_flag && src->pScalingLists) {
+ memcpy(&dst->scaling_lists, src->pScalingLists, sizeof(StdVideoH264ScalingLists));
+ dst->base.pScalingLists = &dst->scaling_lists;
+ }
+}
+
+static void
+vk_video_deep_copy_h265_vps(struct vk_video_h265_vps *dst,
+ const StdVideoH265VideoParameterSet *src)
+{
+ memcpy(&dst->base, src, sizeof(StdVideoH265VideoParameterSet));
+ if (src->pDecPicBufMgr) {
+ memcpy(&dst->dec_pic_buf_mgr, src->pDecPicBufMgr, sizeof(StdVideoH265DecPicBufMgr));
+ dst->base.pDecPicBufMgr = &dst->dec_pic_buf_mgr;
+ }
+ if (src->pHrdParameters) {
+ memcpy(&dst->hrd_parameters, src->pHrdParameters, sizeof(StdVideoH265HrdParameters));
+ dst->base.pHrdParameters = &dst->hrd_parameters;
+ if (src->pHrdParameters->pSubLayerHrdParametersNal) {
+ memcpy(&dst->hrd_parameters_nal, src->pHrdParameters->pSubLayerHrdParametersNal,
+ sizeof(StdVideoH265SubLayerHrdParameters));
+ dst->hrd_parameters.pSubLayerHrdParametersNal = &dst->hrd_parameters_nal;
+ }
+ if (src->pHrdParameters->pSubLayerHrdParametersVcl) {
+ memcpy(&dst->hrd_parameters_vcl, src->pHrdParameters->pSubLayerHrdParametersVcl,
+ sizeof(StdVideoH265SubLayerHrdParameters));
+ dst->hrd_parameters.pSubLayerHrdParametersVcl = &dst->hrd_parameters_vcl;
+ }
+ }
+
+ if (src->pProfileTierLevel) {
+ memcpy(&dst->tier_level, src->pProfileTierLevel, sizeof(StdVideoH265ProfileTierLevel));
+ dst->base.pProfileTierLevel = &dst->tier_level;
+ }
+}
+
+static void
+vk_video_deep_copy_h265_sps(struct vk_video_h265_sps *dst,
+ const StdVideoH265SequenceParameterSet *src)
+{
+ memcpy(&dst->base, src, sizeof(StdVideoH265SequenceParameterSet));
+ if (src->pProfileTierLevel) {
+ memcpy(&dst->tier_level, src->pProfileTierLevel, sizeof(StdVideoH265ProfileTierLevel));
+ dst->base.pProfileTierLevel = &dst->tier_level;
+ }
+ if (src->pDecPicBufMgr) {
+ memcpy(&dst->dec_pic_buf_mgr, src->pDecPicBufMgr, sizeof(StdVideoH265DecPicBufMgr));
+ dst->base.pDecPicBufMgr = &dst->dec_pic_buf_mgr;
+ }
+ if (src->flags.sps_scaling_list_data_present_flag && src->pScalingLists) {
+ memcpy(&dst->scaling_lists, src->pScalingLists, sizeof(StdVideoH265ScalingLists));
+ dst->base.pScalingLists = &dst->scaling_lists;
+ }
+
+ if (src->pShortTermRefPicSet) {
+ memcpy(&dst->short_term_ref_pic_set, src->pShortTermRefPicSet, sizeof(StdVideoH265ShortTermRefPicSet));
+ dst->base.pShortTermRefPicSet = &dst->short_term_ref_pic_set;
+ }
+
+ if (src->pLongTermRefPicsSps) {
+ memcpy(&dst->long_term_ref_pics_sps, src->pLongTermRefPicsSps, sizeof(StdVideoH265LongTermRefPicsSps));
+ dst->base.pLongTermRefPicsSps = &dst->long_term_ref_pics_sps;
+ }
+
+ if (src->pSequenceParameterSetVui) {
+ memcpy(&dst->vui, src->pSequenceParameterSetVui, sizeof(StdVideoH265SequenceParameterSetVui));
+ dst->base.pSequenceParameterSetVui = &dst->vui;
+
+ if (src->pSequenceParameterSetVui->pHrdParameters) {
+ memcpy(&dst->hrd_parameters, src->pSequenceParameterSetVui->pHrdParameters, sizeof(StdVideoH265HrdParameters));
+ dst->vui.pHrdParameters = &dst->hrd_parameters;
+ if (src->pSequenceParameterSetVui->pHrdParameters->pSubLayerHrdParametersNal) {
+ memcpy(&dst->hrd_parameters_nal, src->pSequenceParameterSetVui->pHrdParameters->pSubLayerHrdParametersNal,
+ sizeof(StdVideoH265SubLayerHrdParameters));
+ dst->hrd_parameters.pSubLayerHrdParametersNal = &dst->hrd_parameters_nal;
+ }
+ if (src->pSequenceParameterSetVui->pHrdParameters->pSubLayerHrdParametersVcl) {
+ memcpy(&dst->hrd_parameters_vcl, src->pSequenceParameterSetVui->pHrdParameters->pSubLayerHrdParametersVcl,
+ sizeof(StdVideoH265SubLayerHrdParameters));
+ dst->hrd_parameters.pSubLayerHrdParametersVcl = &dst->hrd_parameters_vcl;
+ }
+ }
+ }
+ if (src->flags.sps_palette_predictor_initializers_present_flag && src->pPredictorPaletteEntries) {
+ memcpy(&dst->palette_entries, src->pPredictorPaletteEntries, sizeof(StdVideoH265PredictorPaletteEntries));
+ dst->base.pPredictorPaletteEntries = &dst->palette_entries;
+ }
+}
+
+static void
+vk_video_deep_copy_h265_pps(struct vk_video_h265_pps *dst,
+ const StdVideoH265PictureParameterSet *src)
+{
+ memcpy(&dst->base, src, sizeof(StdVideoH265PictureParameterSet));
+ if (src->flags.pps_scaling_list_data_present_flag && src->pScalingLists) {
+ memcpy(&dst->scaling_lists, src->pScalingLists, sizeof(StdVideoH265ScalingLists));
+ dst->base.pScalingLists = &dst->scaling_lists;
+ }
+
+ if (src->flags.pps_palette_predictor_initializers_present_flag && src->pPredictorPaletteEntries) {
+ memcpy(&dst->palette_entries, src->pPredictorPaletteEntries, sizeof(StdVideoH265PredictorPaletteEntries));
+ dst->base.pPredictorPaletteEntries = &dst->palette_entries;
+ }
+}
+
+
+#define FIND(PARAMSET, SS, SET, ID) \
+ static struct vk_video_##SET *find_##SS##_##SET(const struct vk_video_session_parameters *params, uint32_t id) { \
+ for (unsigned i = 0; i < params->SS.SET##_count; i++) { \
+ if (params->SS.SET[i].base.ID == id) \
+ return &params->SS.SET[i]; \
+ } \
+ return NULL; \
+ } \
+ \
+ static void add_##SS##_##SET(struct vk_video_session_parameters *params, \
+ const PARAMSET *new_set, bool noreplace) { \
+ struct vk_video_##SET *set = find_##SS##_##SET(params, new_set->ID); \
+ if (set) { \
+ if (noreplace) \
+ return; \
+ vk_video_deep_copy_##SET(set, new_set); \
+ } else \
+ vk_video_deep_copy_##SET(&params->SS.SET[params->SS.SET##_count++], new_set); \
+ } \
+ \
+ static VkResult update_##SS##_##SET(struct vk_video_session_parameters *params, \
+ uint32_t count, const PARAMSET *updates) { \
+ if (params->SS.SET##_count + count >= params->SS.max_##SET##_count) \
+ return VK_ERROR_TOO_MANY_OBJECTS; \
+ for (unsigned _c = 0; _c < count; _c++) \
+ vk_video_deep_copy_##SET(&params->SS.SET[params->SS.SET##_count + _c], &updates[_c]); \
+ params->SS.SET##_count += count; \
+ return VK_SUCCESS; \
+ }
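
To make the generated helpers easier to follow, this is roughly what the add_* helper of the first instantiation below expands to (a hand-expanded sketch, not literal preprocessor output):

static void
add_h264_dec_h264_sps(struct vk_video_session_parameters *params,
                      const StdVideoH264SequenceParameterSet *new_set,
                      bool noreplace)
{
   struct vk_video_h264_sps *set =
      find_h264_dec_h264_sps(params, new_set->seq_parameter_set_id);
   if (set) {
      /* an existing entry is either kept (template merge) or replaced */
      if (noreplace)
         return;
      vk_video_deep_copy_h264_sps(set, new_set);
   } else {
      vk_video_deep_copy_h264_sps(
         &params->h264_dec.h264_sps[params->h264_dec.h264_sps_count++], new_set);
   }
}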
+
+FIND(StdVideoH264SequenceParameterSet, h264_dec, h264_sps, seq_parameter_set_id)
+FIND(StdVideoH264PictureParameterSet, h264_dec, h264_pps, pic_parameter_set_id)
+FIND(StdVideoH265VideoParameterSet, h265_dec, h265_vps, vps_video_parameter_set_id)
+FIND(StdVideoH265SequenceParameterSet, h265_dec, h265_sps, sps_seq_parameter_set_id)
+FIND(StdVideoH265PictureParameterSet, h265_dec, h265_pps, pps_pic_parameter_set_id)
+
+FIND(StdVideoH264SequenceParameterSet, h264_enc, h264_sps, seq_parameter_set_id)
+FIND(StdVideoH264PictureParameterSet, h264_enc, h264_pps, pic_parameter_set_id)
+
+FIND(StdVideoH265VideoParameterSet, h265_enc, h265_vps, vps_video_parameter_set_id)
+FIND(StdVideoH265SequenceParameterSet, h265_enc, h265_sps, sps_seq_parameter_set_id)
+FIND(StdVideoH265PictureParameterSet, h265_enc, h265_pps, pps_pic_parameter_set_id)
+
+static void
+init_add_h264_dec_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoDecodeH264SessionParametersAddInfoKHR *h264_add,
+ const struct vk_video_session_parameters *templ)
+{
+ unsigned i;
+
+ if (h264_add) {
+ for (i = 0; i < h264_add->stdSPSCount; i++) {
+ add_h264_dec_h264_sps(params, &h264_add->pStdSPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h264_dec.h264_sps_count; i++) {
+ add_h264_dec_h264_sps(params, &templ->h264_dec.h264_sps[i].base, true);
+ }
+ }
+
+ if (h264_add) {
+ for (i = 0; i < h264_add->stdPPSCount; i++) {
+ add_h264_dec_h264_pps(params, &h264_add->pStdPPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h264_dec.h264_pps_count; i++) {
+ add_h264_dec_h264_pps(params, &templ->h264_dec.h264_pps[i].base, true);
+ }
+ }
+}
+
+static void
+init_add_h264_enc_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoEncodeH264SessionParametersAddInfoKHR *h264_add,
+ const struct vk_video_session_parameters *templ)
+{
+ unsigned i;
+ if (h264_add) {
+ for (i = 0; i < h264_add->stdSPSCount; i++) {
+ add_h264_enc_h264_sps(params, &h264_add->pStdSPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h264_enc.h264_sps_count; i++) {
+ add_h264_enc_h264_sps(params, &templ->h264_enc.h264_sps[i].base, true);
+ }
+ }
+
+ if (h264_add) {
+ for (i = 0; i < h264_add->stdPPSCount; i++) {
+ add_h264_enc_h264_pps(params, &h264_add->pStdPPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h264_enc.h264_pps_count; i++) {
+ add_h264_enc_h264_pps(params, &templ->h264_enc.h264_pps[i].base, true);
+ }
+ }
+}
+
+static void
+init_add_h265_dec_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoDecodeH265SessionParametersAddInfoKHR *h265_add,
+ const struct vk_video_session_parameters *templ)
+{
+ unsigned i;
+
+ if (h265_add) {
+ for (i = 0; i < h265_add->stdVPSCount; i++) {
+ add_h265_dec_h265_vps(params, &h265_add->pStdVPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h265_dec.h265_vps_count; i++) {
+ add_h265_dec_h265_vps(params, &templ->h265_dec.h265_vps[i].base, true);
+ }
+ }
+ if (h265_add) {
+ for (i = 0; i < h265_add->stdSPSCount; i++) {
+ add_h265_dec_h265_sps(params, &h265_add->pStdSPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h265_dec.h265_sps_count; i++) {
+ add_h265_dec_h265_sps(params, &templ->h265_dec.h265_sps[i].base, true);
+ }
+ }
+
+ if (h265_add) {
+ for (i = 0; i < h265_add->stdPPSCount; i++) {
+ add_h265_dec_h265_pps(params, &h265_add->pStdPPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h265_dec.h265_pps_count; i++) {
+ add_h265_dec_h265_pps(params, &templ->h265_dec.h265_pps[i].base, true);
+ }
+ }
+}
+
+static void
+init_add_h265_enc_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoEncodeH265SessionParametersAddInfoKHR *h265_add,
+ const struct vk_video_session_parameters *templ)
+{
+ unsigned i;
+
+ if (h265_add) {
+ for (i = 0; i < h265_add->stdVPSCount; i++) {
+ add_h265_enc_h265_vps(params, &h265_add->pStdVPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h265_enc.h265_vps_count; i++) {
+ add_h265_enc_h265_vps(params, &templ->h265_enc.h265_vps[i].base, true);
+ }
+ }
+ if (h265_add) {
+ for (i = 0; i < h265_add->stdSPSCount; i++) {
+ add_h265_enc_h265_sps(params, &h265_add->pStdSPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h265_enc.h265_sps_count; i++) {
+ add_h265_enc_h265_sps(params, &templ->h265_enc.h265_sps[i].base, true);
+ }
+ }
+
+ if (h265_add) {
+ for (i = 0; i < h265_add->stdPPSCount; i++) {
+ add_h265_enc_h265_pps(params, &h265_add->pStdPPSs[i], false);
+ }
+ }
+ if (templ) {
+ for (i = 0; i < templ->h265_enc.h265_pps_count; i++) {
+ add_h265_enc_h265_pps(params, &templ->h265_enc.h265_pps[i].base, true);
+ }
+ }
+}
+
+static void
+vk_video_deep_copy_av1_seq_hdr(struct vk_video_av1_seq_hdr *dst,
+ const StdVideoAV1SequenceHeader *src)
+{
+ memcpy(&dst->base, src, sizeof(StdVideoAV1SequenceHeader));
+ if (src->pColorConfig) {
+ memcpy(&dst->color_config, src->pColorConfig, sizeof(StdVideoAV1ColorConfig));
+ dst->base.pColorConfig = &dst->color_config;
+ }
+ if (src->pTimingInfo) {
+ memcpy(&dst->timing_info, src->pTimingInfo, sizeof(StdVideoAV1TimingInfo));
+ dst->base.pTimingInfo = &dst->timing_info;
+ }
+}
+
+VkResult
+vk_video_session_parameters_init(struct vk_device *device,
+ struct vk_video_session_parameters *params,
+ const struct vk_video_session *vid,
+ const struct vk_video_session_parameters *templ,
+ const VkVideoSessionParametersCreateInfoKHR *create_info)
+{
+ memset(params, 0, sizeof(*params));
+ vk_object_base_init(device, &params->base, VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR);
+
+ params->op = vid->op;
+
+ switch (vid->op) {
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR: {
+ const struct VkVideoDecodeH264SessionParametersCreateInfoKHR *h264_create =
+ vk_find_struct_const(create_info->pNext, VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR);
+
+ params->h264_dec.max_h264_sps_count = h264_create->maxStdSPSCount;
+ params->h264_dec.max_h264_pps_count = h264_create->maxStdPPSCount;
+
+ uint32_t sps_size = params->h264_dec.max_h264_sps_count * sizeof(struct vk_video_h264_sps);
+ uint32_t pps_size = params->h264_dec.max_h264_pps_count * sizeof(struct vk_video_h264_pps);
+
+ params->h264_dec.h264_sps = vk_alloc(&device->alloc, sps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ params->h264_dec.h264_pps = vk_alloc(&device->alloc, pps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!params->h264_dec.h264_sps || !params->h264_dec.h264_pps) {
+ vk_free(&device->alloc, params->h264_dec.h264_sps);
+ vk_free(&device->alloc, params->h264_dec.h264_pps);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ init_add_h264_dec_session_parameters(params, h264_create->pParametersAddInfo, templ);
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR: {
+ const struct VkVideoDecodeH265SessionParametersCreateInfoKHR *h265_create =
+ vk_find_struct_const(create_info->pNext, VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR);
+
+ params->h265_dec.max_h265_vps_count = h265_create->maxStdVPSCount;
+ params->h265_dec.max_h265_sps_count = h265_create->maxStdSPSCount;
+ params->h265_dec.max_h265_pps_count = h265_create->maxStdPPSCount;
+
+ uint32_t vps_size = params->h265_dec.max_h265_vps_count * sizeof(struct vk_video_h265_vps);
+ uint32_t sps_size = params->h265_dec.max_h265_sps_count * sizeof(struct vk_video_h265_sps);
+ uint32_t pps_size = params->h265_dec.max_h265_pps_count * sizeof(struct vk_video_h265_pps);
+
+ params->h265_dec.h265_vps = vk_alloc(&device->alloc, vps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ params->h265_dec.h265_sps = vk_alloc(&device->alloc, sps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ params->h265_dec.h265_pps = vk_alloc(&device->alloc, pps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!params->h265_dec.h265_sps || !params->h265_dec.h265_pps || !params->h265_dec.h265_vps) {
+ vk_free(&device->alloc, params->h265_dec.h265_vps);
+ vk_free(&device->alloc, params->h265_dec.h265_sps);
+ vk_free(&device->alloc, params->h265_dec.h265_pps);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ init_add_h265_dec_session_parameters(params, h265_create->pParametersAddInfo, templ);
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_DECODE_AV1_BIT_KHR: {
+ const struct VkVideoDecodeAV1SessionParametersCreateInfoKHR *av1_create =
+ vk_find_struct_const(create_info->pNext, VIDEO_DECODE_AV1_SESSION_PARAMETERS_CREATE_INFO_KHR);
+ if (av1_create && av1_create->pStdSequenceHeader) {
+ vk_video_deep_copy_av1_seq_hdr(&params->av1_dec.seq_hdr,
+ av1_create->pStdSequenceHeader);
+ }
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR: {
+ const struct VkVideoEncodeH264SessionParametersCreateInfoKHR *h264_create =
+ vk_find_struct_const(create_info->pNext, VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR);
+
+ params->h264_enc.max_h264_sps_count = h264_create->maxStdSPSCount;
+ params->h264_enc.max_h264_pps_count = h264_create->maxStdPPSCount;
+
+ uint32_t sps_size = params->h264_enc.max_h264_sps_count * sizeof(struct vk_video_h264_sps);
+ uint32_t pps_size = params->h264_enc.max_h264_pps_count * sizeof(struct vk_video_h264_pps);
+
+ params->h264_enc.h264_sps = vk_alloc(&device->alloc, sps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ params->h264_enc.h264_pps = vk_alloc(&device->alloc, pps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!params->h264_enc.h264_sps || !params->h264_enc.h264_pps) {
+ vk_free(&device->alloc, params->h264_enc.h264_sps);
+ vk_free(&device->alloc, params->h264_enc.h264_pps);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ params->h264_enc.profile_idc = vid->h264.profile_idc;
+ init_add_h264_enc_session_parameters(params, h264_create->pParametersAddInfo, templ);
+ break;
+ }
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR: {
+ const struct VkVideoEncodeH265SessionParametersCreateInfoKHR *h265_create =
+ vk_find_struct_const(create_info->pNext, VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR);
+
+ params->h265_enc.max_h265_vps_count = h265_create->maxStdVPSCount;
+ params->h265_enc.max_h265_sps_count = h265_create->maxStdSPSCount;
+ params->h265_enc.max_h265_pps_count = h265_create->maxStdPPSCount;
+
+ uint32_t vps_size = params->h265_enc.max_h265_vps_count * sizeof(struct vk_video_h265_vps);
+ uint32_t sps_size = params->h265_enc.max_h265_sps_count * sizeof(struct vk_video_h265_sps);
+ uint32_t pps_size = params->h265_enc.max_h265_pps_count * sizeof(struct vk_video_h265_pps);
+
+ params->h265_enc.h265_vps = vk_alloc(&device->alloc, vps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ params->h265_enc.h265_sps = vk_alloc(&device->alloc, sps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ params->h265_enc.h265_pps = vk_alloc(&device->alloc, pps_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!params->h265_enc.h265_sps || !params->h265_enc.h265_pps || !params->h265_enc.h265_vps) {
+ vk_free(&device->alloc, params->h265_enc.h265_vps);
+ vk_free(&device->alloc, params->h265_enc.h265_sps);
+ vk_free(&device->alloc, params->h265_enc.h265_pps);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ init_add_h265_enc_session_parameters(params, h265_create->pParametersAddInfo, templ);
+ break;
+ }
+ default:
+ unreachable("Unsupported video codec operation");
+ break;
+ }
+ return VK_SUCCESS;
+}
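
A driver's vkCreateVideoSessionParametersKHR implementation typically resolves the videoSession and optional videoSessionParametersTemplate handles and then calls the helper above; a rough sketch, where the drv_*_from_handle() helpers stand in for whatever handle casts the driver uses (hypothetical, not part of this patch):

static VkResult
drv_create_video_session_parameters(struct vk_device *device,
                                    const VkVideoSessionParametersCreateInfoKHR *create_info,
                                    struct vk_video_session_parameters *params)
{
   const struct vk_video_session *vid =
      drv_video_session_from_handle(create_info->videoSession);
   const struct vk_video_session_parameters *templ =
      create_info->videoSessionParametersTemplate != VK_NULL_HANDLE ?
      drv_video_session_parameters_from_handle(create_info->videoSessionParametersTemplate) :
      NULL;

   return vk_video_session_parameters_init(device, params, vid, templ, create_info);
}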
+
+void
+vk_video_session_parameters_finish(struct vk_device *device,
+ struct vk_video_session_parameters *params)
+{
+ switch (params->op) {
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR:
+ vk_free(&device->alloc, params->h264_dec.h264_sps);
+ vk_free(&device->alloc, params->h264_dec.h264_pps);
+ break;
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR:
+ vk_free(&device->alloc, params->h265_dec.h265_vps);
+ vk_free(&device->alloc, params->h265_dec.h265_sps);
+ vk_free(&device->alloc, params->h265_dec.h265_pps);
+ break;
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR:
+ vk_free(&device->alloc, params->h264_enc.h264_sps);
+ vk_free(&device->alloc, params->h264_enc.h264_pps);
+ break;
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR:
+ vk_free(&device->alloc, params->h265_enc.h265_vps);
+ vk_free(&device->alloc, params->h265_enc.h265_sps);
+ vk_free(&device->alloc, params->h265_enc.h265_pps);
+ break;
+ default:
+ break;
+ }
+ vk_object_base_finish(&params->base);
+}
+
+static VkResult
+update_h264_dec_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoDecodeH264SessionParametersAddInfoKHR *h264_add)
+{
+ VkResult result = VK_SUCCESS;
+
+ result = update_h264_dec_h264_sps(params, h264_add->stdSPSCount, h264_add->pStdSPSs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = update_h264_dec_h264_pps(params, h264_add->stdPPSCount, h264_add->pStdPPSs);
+ return result;
+}
+
+static VkResult
+update_h264_enc_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoEncodeH264SessionParametersAddInfoKHR *h264_add)
+{
+ VkResult result = VK_SUCCESS;
+ result = update_h264_enc_h264_sps(params, h264_add->stdSPSCount, h264_add->pStdSPSs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = update_h264_enc_h264_pps(params, h264_add->stdPPSCount, h264_add->pStdPPSs);
+ return result;
+}
+
+static VkResult
+update_h265_enc_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoEncodeH265SessionParametersAddInfoKHR *h265_add)
+{
+ VkResult result = VK_SUCCESS;
+
+ result = update_h265_enc_h265_vps(params, h265_add->stdVPSCount, h265_add->pStdVPSs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = update_h265_enc_h265_sps(params, h265_add->stdSPSCount, h265_add->pStdSPSs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = update_h265_enc_h265_pps(params, h265_add->stdPPSCount, h265_add->pStdPPSs);
+ return result;
+}
+
+static VkResult
+update_h265_session_parameters(struct vk_video_session_parameters *params,
+ const struct VkVideoDecodeH265SessionParametersAddInfoKHR *h265_add)
+{
+ VkResult result = VK_SUCCESS;
+ result = update_h265_dec_h265_vps(params, h265_add->stdVPSCount, h265_add->pStdVPSs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = update_h265_dec_h265_sps(params, h265_add->stdSPSCount, h265_add->pStdSPSs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = update_h265_dec_h265_pps(params, h265_add->stdPPSCount, h265_add->pStdPPSs);
+ return result;
+}
+
+VkResult
+vk_video_session_parameters_update(struct vk_video_session_parameters *params,
+ const VkVideoSessionParametersUpdateInfoKHR *update)
+{
+ /* 39.6.5. Decoder Parameter Sets -
+ * "The provided H.264 SPS/PPS parameters must be within the limits specified during decoder
+ * creation for the decoder specified in VkVideoSessionParametersCreateInfoKHR."
+ */
+
+ /*
+ * There is no need to deduplicate here.
+ * videoSessionParameters must not already contain a StdVideoH264PictureParameterSet entry with
+ * both seq_parameter_set_id and pic_parameter_set_id matching any of the elements of
+ * VkVideoDecodeH264SessionParametersAddInfoKHR::pStdPPS
+ */
+ VkResult result = VK_SUCCESS;
+
+ switch (params->op) {
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR: {
+ const struct VkVideoDecodeH264SessionParametersAddInfoKHR *h264_add =
+ vk_find_struct_const(update->pNext, VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR);
+ return update_h264_dec_session_parameters(params, h264_add);
+ }
+ case VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR: {
+ const struct VkVideoDecodeH265SessionParametersAddInfoKHR *h265_add =
+ vk_find_struct_const(update->pNext, VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR);
+
+ return update_h265_session_parameters(params, h265_add);
+ }
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR: {
+ const struct VkVideoEncodeH264SessionParametersAddInfoKHR *h264_add =
+ vk_find_struct_const(update->pNext, VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR);
+ return update_h264_enc_session_parameters(params, h264_add);
+ }
+ case VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR: {
+ const struct VkVideoEncodeH265SessionParametersAddInfoKHR *h265_add =
+ vk_find_struct_const(update->pNext, VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR);
+ return update_h265_enc_session_parameters(params, h265_add);
+ }
+ default:
+ unreachable("Unknown codec\n");
+ }
+ return result;
+}
+
+const uint8_t h264_scaling_list_default_4x4_intra[] =
+{
+ /* Table 7-3 - Default_4x4_Intra */
+ 6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42
+};
+
+const uint8_t h264_scaling_list_default_4x4_inter[] =
+{
+ /* Table 7-3 - Default_4x4_Inter */
+ 10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34
+};
+
+const uint8_t h264_scaling_list_default_8x8_intra[] =
+{
+ /* Table 7-4 - Default_8x8_Intra */
+ 6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, 18, 18, 18, 23,
+ 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27,
+ 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42,
+};
+
+const uint8_t h264_scaling_list_default_8x8_inter[] =
+{
+ /* Table 7-4 - Default_8x8_Inter */
+ 9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, 19, 19, 19, 21,
+ 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27,
+ 27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35,
+};
+
+void
+vk_video_derive_h264_scaling_list(const StdVideoH264SequenceParameterSet *sps,
+ const StdVideoH264PictureParameterSet *pps,
+ StdVideoH264ScalingLists *list)
+{
+ StdVideoH264ScalingLists temp;
+
+ /* derive SPS scaling list first, because PPS may depend on it in fall-back
+ * rule B */
+ if (sps->flags.seq_scaling_matrix_present_flag)
+ {
+ for (int i = 0; i < STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS; i++)
+ {
+ if (sps->pScalingLists->scaling_list_present_mask & (1 << i))
+ memcpy(temp.ScalingList4x4[i],
+ sps->pScalingLists->ScalingList4x4[i],
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else /* fall-back rule A */
+ {
+ if (i == 0)
+ memcpy(temp.ScalingList4x4[i],
+ h264_scaling_list_default_4x4_intra,
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else if (i == 3)
+ memcpy(temp.ScalingList4x4[i],
+ h264_scaling_list_default_4x4_inter,
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else
+ memcpy(temp.ScalingList4x4[i],
+ temp.ScalingList4x4[i - 1],
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ }
+ }
+
+ for (int j = 0; j < STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS; j++)
+ {
+ int i = j + STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS;
+ if (sps->pScalingLists->scaling_list_present_mask & (1 << i))
+ memcpy(temp.ScalingList8x8[j], sps->pScalingLists->ScalingList8x8[j],
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else /* fall-back rule A */
+ {
+ if (i == 6)
+ memcpy(temp.ScalingList8x8[j],
+ h264_scaling_list_default_8x8_intra,
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else if (i == 7)
+ memcpy(temp.ScalingList8x8[j],
+ h264_scaling_list_default_8x8_inter,
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else
+ memcpy(temp.ScalingList8x8[j], temp.ScalingList8x8[j - 2],
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ }
+ }
+ }
+ else
+ {
+ memset(temp.ScalingList4x4, 0x10,
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS *
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ memset(temp.ScalingList8x8, 0x10,
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS *
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ }
+
+ if (pps->flags.pic_scaling_matrix_present_flag)
+ {
+ for (int i = 0; i < STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS; i++)
+ {
+ if (pps->pScalingLists->scaling_list_present_mask & (1 << i))
+ memcpy(list->ScalingList4x4[i], pps->pScalingLists->ScalingList4x4[i],
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else if (sps->flags.seq_scaling_matrix_present_flag) /* fall-back rule B */
+ {
+ if (i == 0 || i == 3)
+ memcpy(list->ScalingList4x4[i], temp.ScalingList4x4[i],
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else
+ memcpy(list->ScalingList4x4[i], list->ScalingList4x4[i - 1],
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ }
+ else /* fall-back rule A */
+ {
+ if (i == 0)
+ memcpy(list->ScalingList4x4[i],
+ h264_scaling_list_default_4x4_intra,
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else if (i == 3)
+ memcpy(list->ScalingList4x4[i],
+ h264_scaling_list_default_4x4_inter,
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ else
+ memcpy(list->ScalingList4x4[i],
+ list->ScalingList4x4[i - 1],
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ }
+ }
+
+ for (int j = 0; j < STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS; j++)
+ {
+ int i = j + STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS;
+ if (pps->pScalingLists->scaling_list_present_mask & (1 << i))
+ memcpy(list->ScalingList8x8[j], pps->pScalingLists->ScalingList8x8[j],
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else if (sps->flags.seq_scaling_matrix_present_flag) /* fall-back rule B */
+ {
+ if (i == 6 || i == 7)
+ memcpy(list->ScalingList8x8[j], temp.ScalingList8x8[j],
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else
+ memcpy(list->ScalingList8x8[j], list->ScalingList8x8[j - 2],
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ }
+ else /* fall-back rule A */
+ {
+ if (i == 6)
+ memcpy(list->ScalingList8x8[j],
+ h264_scaling_list_default_8x8_intra,
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else if (i == 7)
+ memcpy(list->ScalingList8x8[j],
+ h264_scaling_list_default_8x8_inter,
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ else
+ memcpy(list->ScalingList8x8[j], list->ScalingList8x8[j - 2],
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ }
+ }
+ }
+ else
+ {
+ memcpy(list->ScalingList4x4, temp.ScalingList4x4,
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS *
+ STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS);
+ memcpy(list->ScalingList8x8, temp.ScalingList8x8,
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS *
+ STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS);
+ }
+}
+
+const StdVideoH264SequenceParameterSet *
+vk_video_find_h264_dec_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h264_dec_h264_sps(params, id)->base;
+}
+
+const StdVideoH264PictureParameterSet *
+vk_video_find_h264_dec_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h264_dec_h264_pps(params, id)->base;
+}
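
Putting the two lookups above together with vk_video_derive_h264_scaling_list(), a decoder can derive the effective per-picture scaling lists like this (a minimal sketch; std_pic is the StdVideoDecodeH264PictureInfo supplied by the application):

static void
drv_h264_effective_scaling_lists(const struct vk_video_session_parameters *params,
                                 const StdVideoDecodeH264PictureInfo *std_pic,
                                 StdVideoH264ScalingLists *out)
{
   const StdVideoH264SequenceParameterSet *sps =
      vk_video_find_h264_dec_std_sps(params, std_pic->seq_parameter_set_id);
   const StdVideoH264PictureParameterSet *pps =
      vk_video_find_h264_dec_std_pps(params, std_pic->pic_parameter_set_id);

   /* applies fall-back rules A/B or the flat (all-16) default */
   vk_video_derive_h264_scaling_list(sps, pps, out);
}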
+
+const StdVideoH265VideoParameterSet *
+vk_video_find_h265_dec_std_vps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h265_dec_h265_vps(params, id)->base;
+}
+
+const StdVideoH265SequenceParameterSet *
+vk_video_find_h265_dec_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h265_dec_h265_sps(params, id)->base;
+}
+
+const StdVideoH265PictureParameterSet *
+vk_video_find_h265_dec_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h265_dec_h265_pps(params, id)->base;
+}
+
+int
+vk_video_h265_poc_by_slot(const struct VkVideoDecodeInfoKHR *frame_info, int slot)
+{
+ for (unsigned i = 0; i < frame_info->referenceSlotCount; i++) {
+ const VkVideoDecodeH265DpbSlotInfoKHR *dpb_slot_info =
+ vk_find_struct_const(frame_info->pReferenceSlots[i].pNext, VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR);
+ if (frame_info->pReferenceSlots[i].slotIndex == slot)
+ return dpb_slot_info->pStdReferenceInfo->PicOrderCntVal;
+ }
+
+ assert(0);
+
+ return 0;
+}
+
+void
+vk_fill_video_h265_reference_info(const VkVideoDecodeInfoKHR *frame_info,
+ const struct VkVideoDecodeH265PictureInfoKHR *pic,
+ const struct vk_video_h265_slice_params *slice_params,
+ struct vk_video_h265_reference ref_slots[][8])
+{
+ uint8_t list_cnt = slice_params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B ? 2 : 1;
+ uint8_t list_idx;
+ int i, j;
+
+ for (list_idx = 0; list_idx < list_cnt; list_idx++) {
+ /* The order is
+ * L0: Short term current before set - Short term current after set - long term current
+ * L1: Short term current after set - short term current before set - long term current
+ */
+ const uint8_t *rps[3] = {
+ list_idx ? pic->pStdPictureInfo->RefPicSetStCurrAfter : pic->pStdPictureInfo->RefPicSetStCurrBefore,
+ list_idx ? pic->pStdPictureInfo->RefPicSetStCurrBefore : pic->pStdPictureInfo->RefPicSetStCurrAfter,
+ pic->pStdPictureInfo->RefPicSetLtCurr
+ };
+
+ uint8_t ref_idx = 0;
+ for (i = 0; i < 3; i++) {
+ const uint8_t *cur_rps = rps[i];
+
+ for (j = 0; (cur_rps[j] != 0xff) && ((j + ref_idx) < 8); j++) {
+ ref_slots[list_idx][j + ref_idx].slot_index = cur_rps[j];
+ ref_slots[list_idx][j + ref_idx].pic_order_cnt = vk_video_h265_poc_by_slot(frame_info, cur_rps[j]);
+ }
+ ref_idx += j;
+ }
+
+ /* TODO: should handle cases where rpl_modification_flag is true. */
+ assert(!slice_params->rpl_modification_flag[0] && !slice_params->rpl_modification_flag[1]);
+ }
+}
+
+static void
+h265_pred_weight_table(struct vk_video_h265_slice_params *params,
+ struct vl_rbsp *rbsp,
+ const StdVideoH265SequenceParameterSet *sps,
+ StdVideoH265SliceType slice_type)
+{
+ unsigned chroma_array_type = sps->flags.separate_colour_plane_flag ? 0 : sps->chroma_format_idc;
+ unsigned i, j;
+
+ params->luma_log2_weight_denom = vl_rbsp_ue(rbsp);
+
+ assert(params->luma_log2_weight_denom >= 0 && params->luma_log2_weight_denom < 8);
+
+ if (chroma_array_type != 0) {
+ params->chroma_log2_weight_denom = params->luma_log2_weight_denom + vl_rbsp_se(rbsp);
+ assert(params->chroma_log2_weight_denom >= 0 && params->chroma_log2_weight_denom < 8);
+ }
+
+ for (i = 0; i < params->num_ref_idx_l0_active; ++i) {
+ params->luma_weight_l0_flag[i] = vl_rbsp_u(rbsp, 1);
+ if (!params->luma_weight_l0_flag[i]) {
+ params->luma_weight_l0[i] = 1 << params->luma_log2_weight_denom;
+ params->luma_offset_l0[i] = 0;
+ }
+ }
+
+ for (i = 0; i < params->num_ref_idx_l0_active; ++i) {
+ if (chroma_array_type == 0) {
+ params->chroma_weight_l0_flag[i] = 0;
+ } else {
+ params->chroma_weight_l0_flag[i] = vl_rbsp_u(rbsp, 1);
+ }
+ }
+
+ for (i = 0; i < params->num_ref_idx_l0_active; ++i) {
+ if (params->luma_weight_l0_flag[i]) {
+ params->delta_luma_weight_l0[i] = vl_rbsp_se(rbsp);
+ params->luma_weight_l0[i] = (1 << params->luma_log2_weight_denom) + params->delta_luma_weight_l0[i];
+ params->luma_offset_l0[i] = vl_rbsp_se(rbsp);
+ }
+
+ if (params->chroma_weight_l0_flag[i]) {
+ for (j = 0; j < 2; j++) {
+ params->delta_chroma_weight_l0[i][j] = vl_rbsp_se(rbsp);
+ params->delta_chroma_offset_l0[i][j] = vl_rbsp_se(rbsp);
+
+ params->chroma_weight_l0[i][j] =
+ (1 << params->chroma_log2_weight_denom) + params->delta_chroma_weight_l0[i][j];
+ params->chroma_offset_l0[i][j] = CLAMP(params->delta_chroma_offset_l0[i][j] -
+ ((128 * params->chroma_weight_l0[i][j]) >> params->chroma_log2_weight_denom) + 128, -128, 127);
+ }
+ } else {
+ for (j = 0; j < 2; j++) {
+ params->chroma_weight_l0[i][j] = 1 << params->chroma_log2_weight_denom;
+ params->chroma_offset_l0[i][j] = 0;
+ }
+ }
+ }
+
+ if (slice_type == STD_VIDEO_H265_SLICE_TYPE_B) {
+ for (i = 0; i < params->num_ref_idx_l1_active; ++i) {
+ params->luma_weight_l1_flag[i] = vl_rbsp_u(rbsp, 1);
+ if (!params->luma_weight_l1_flag[i]) {
+ params->luma_weight_l1[i] = 1 << params->luma_log2_weight_denom;
+ params->luma_offset_l1[i] = 0;
+ }
+ }
+
+ for (i = 0; i < params->num_ref_idx_l1_active; ++i) {
+ if (chroma_array_type == 0) {
+ params->chroma_weight_l1_flag[i] = 0;
+ } else {
+ params->chroma_weight_l1_flag[i] = vl_rbsp_u(rbsp, 1);
+ }
+ }
+
+ for (i = 0; i < params->num_ref_idx_l1_active; ++i) {
+ if (params->luma_weight_l1_flag[i]) {
+ params->delta_luma_weight_l1[i] = vl_rbsp_se(rbsp);
+ params->luma_weight_l1[i] =
+ (1 << params->luma_log2_weight_denom) + params->delta_luma_weight_l1[i];
+ params->luma_offset_l1[i] = vl_rbsp_se(rbsp);
+ }
+
+ if (params->chroma_weight_l1_flag[i]) {
+ for (j = 0; j < 2; j++) {
+ params->delta_chroma_weight_l1[i][j] = vl_rbsp_se(rbsp);
+ params->delta_chroma_offset_l1[i][j] = vl_rbsp_se(rbsp);
+
+ params->chroma_weight_l1[i][j] =
+ (1 << params->chroma_log2_weight_denom) + params->delta_chroma_weight_l1[i][j];
+ params->chroma_offset_l1[i][j] = CLAMP(params->delta_chroma_offset_l1[i][j] -
+ ((128 * params->chroma_weight_l1[i][j]) >> params->chroma_log2_weight_denom) + 128, -128, 127);
+ }
+ } else {
+ for (j = 0; j < 2; j++) {
+ params->chroma_weight_l1[i][j] = 1 << params->chroma_log2_weight_denom;
+ params->chroma_offset_l1[i][j] = 0;
+ }
+ }
+ }
+ }
+}
+
+void
+vk_video_parse_h265_slice_header(const struct VkVideoDecodeInfoKHR *frame_info,
+ const VkVideoDecodeH265PictureInfoKHR *pic_info,
+ const StdVideoH265SequenceParameterSet *sps,
+ const StdVideoH265PictureParameterSet *pps,
+ void *slice_data,
+ uint32_t slice_size,
+ struct vk_video_h265_slice_params *params)
+{
+ struct vl_vlc vlc;
+ const void *slice_headers[1] = { slice_data };
+ vl_vlc_init(&vlc, 1, slice_headers, &slice_size);
+
+ assert(vl_vlc_peekbits(&vlc, 24) == 0x000001);
+
+ vl_vlc_eatbits(&vlc, 24);
+
+ /* forbidden_zero_bit */
+ vl_vlc_eatbits(&vlc, 1);
+
+ if (vl_vlc_valid_bits(&vlc) < 15)
+ vl_vlc_fillbits(&vlc);
+
+ vl_vlc_get_uimsbf(&vlc, 6); /* nal_unit_type */
+ vl_vlc_get_uimsbf(&vlc, 6); /* nuh_layer_id */
+ vl_vlc_get_uimsbf(&vlc, 3); /* nuh_temporal_id_plus1 */
+
+ struct vl_rbsp rbsp;
+ vl_rbsp_init(&rbsp, &vlc, 128, /* emulation_bytes */ true);
+
+ memset(params, 0, sizeof(*params));
+
+ params->slice_size = slice_size;
+ params->first_slice_segment_in_pic_flag = vl_rbsp_u(&rbsp, 1);
+
+ /* no_output_of_prior_pics_flag */
+ if (pic_info->pStdPictureInfo->flags.IrapPicFlag)
+ vl_rbsp_u(&rbsp, 1);
+
+ /* pps id */
+ vl_rbsp_ue(&rbsp);
+
+ if (!params->first_slice_segment_in_pic_flag) {
+ int size, num;
+ int bits_slice_segment_address = 0;
+
+ if (pps->flags.dependent_slice_segments_enabled_flag)
+ params->dependent_slice_segment = vl_rbsp_u(&rbsp, 1);
+
+ size = 1 << (sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size);
+
+ num = ((sps->pic_width_in_luma_samples + size - 1) / size) *
+ ((sps->pic_height_in_luma_samples + size - 1) / size);
+
+ while (num > (1 << bits_slice_segment_address))
+ bits_slice_segment_address++;
+
+ /* slice_segment_address */
+ params->slice_segment_address = vl_rbsp_u(&rbsp, bits_slice_segment_address);
+ }
+
+ if (params->dependent_slice_segment)
+ return;
+
+ for (unsigned i = 0; i < pps->num_extra_slice_header_bits; ++i)
+ /* slice_reserved_flag */
+ vl_rbsp_u(&rbsp, 1);
+
+ /* slice_type */
+ params->slice_type = vl_rbsp_ue(&rbsp);
+
+ if (pps->flags.output_flag_present_flag)
+ /* pic output flag */
+ vl_rbsp_u(&rbsp, 1);
+
+ if (sps->flags.separate_colour_plane_flag)
+ /* colour_plane_id */
+ vl_rbsp_u(&rbsp, 2);
+
+ if (!pic_info->pStdPictureInfo->flags.IdrPicFlag) {
+ /* slice_pic_order_cnt_lsb */
+ params->pic_order_cnt_lsb =
+ vl_rbsp_u(&rbsp, sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+ /* short_term_ref_pic_set_sps_flag */
+ if (!vl_rbsp_u(&rbsp, 1)) {
+ uint8_t rps_predict = 0;
+
+ if (sps->num_short_term_ref_pic_sets)
+ rps_predict = vl_rbsp_u(&rbsp, 1);
+
+ if (rps_predict) {
+ /* delta_idx */
+ vl_rbsp_ue(&rbsp);
+ /* delta_rps_sign */
+ vl_rbsp_u(&rbsp, 1);
+ /* abs_delta_rps */
+ vl_rbsp_ue(&rbsp);
+
+ for (int i = 0; i <= pic_info->pStdPictureInfo->NumDeltaPocsOfRefRpsIdx; i++) {
+ uint8_t used = vl_rbsp_u(&rbsp, 1);
+ if (!used)
+ vl_rbsp_u(&rbsp, 1);
+ }
+ } else {
+ /* num_negative_pics */
+ unsigned num_neg_pics = vl_rbsp_ue(&rbsp);
+ /* num_positive_pics */
+ unsigned num_pos_pics = vl_rbsp_ue(&rbsp);
+
+ for (unsigned i = 0; i < num_neg_pics; ++i) {
+ /* delta_poc_s0_minus1 */
+ vl_rbsp_ue(&rbsp);
+ /* used_by_curr_pic_s0_flag */
+ vl_rbsp_u(&rbsp, 1);
+ }
+
+ for (unsigned i = 0; i < num_pos_pics; ++i) {
+ /* delta_poc_s1_minus1 */
+ vl_rbsp_ue(&rbsp);
+ /* used_by_curr_pic_s1_flag */
+ vl_rbsp_u(&rbsp, 1);
+ }
+ }
+
+ } else {
+ unsigned num_st_rps = sps->num_short_term_ref_pic_sets;
+
+ int numbits = util_logbase2_ceil(num_st_rps);
+ if (numbits > 0)
+ /* short_term_ref_pic_set_idx */
+ vl_rbsp_u(&rbsp, numbits);
+ }
+
+ if (sps->flags.long_term_ref_pics_present_flag) {
+ unsigned num_lt_sps = 0;
+
+ if (sps->num_long_term_ref_pics_sps > 0)
+ num_lt_sps = vl_rbsp_ue(&rbsp);
+
+ unsigned num_lt_pics = vl_rbsp_ue(&rbsp);
+ unsigned num_refs = num_lt_pics + num_lt_sps;
+
+ for (unsigned i = 0; i < num_refs; i++) {
+ if (i < num_lt_sps) {
+ if (sps->num_long_term_ref_pics_sps > 1)
+ /* lt_idx_sps */
+ vl_rbsp_u(&rbsp,
+ util_logbase2_ceil(sps->num_long_term_ref_pics_sps));
+ } else {
+ /* poc_lsb_lt */
+ vl_rbsp_u(&rbsp, sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+ /* used_by_curr_pic_lt_flag */
+ vl_rbsp_u(&rbsp, 1);
+ }
+
+ /* poc_msb_present */
+ if (vl_rbsp_u(&rbsp, 1)) {
+ /* delta_poc_msb_cycle_lt */
+ vl_rbsp_ue(&rbsp);
+ }
+ }
+ }
+
+ if (sps->flags.sps_temporal_mvp_enabled_flag)
+ params->temporal_mvp_enable = vl_rbsp_u(&rbsp, 1);
+ }
+
+ if (sps->flags.sample_adaptive_offset_enabled_flag) {
+ params->sao_luma_flag = vl_rbsp_u(&rbsp, 1);
+ if (sps->chroma_format_idc)
+ params->sao_chroma_flag = vl_rbsp_u(&rbsp, 1);
+ }
+
+ params->max_num_merge_cand = 5;
+
+ if (params->slice_type != STD_VIDEO_H265_SLICE_TYPE_I) {
+
+ params->num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active_minus1 + 1;
+
+ if (params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B)
+ params->num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active_minus1 + 1;
+ else
+ params->num_ref_idx_l1_active = 0;
+
+ /* num_ref_idx_active_override_flag */
+ if (vl_rbsp_u(&rbsp, 1)) {
+ params->num_ref_idx_l0_active = vl_rbsp_ue(&rbsp) + 1;
+ if (params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B)
+ params->num_ref_idx_l1_active = vl_rbsp_ue(&rbsp) + 1;
+ }
+
+ if (pps->flags.lists_modification_present_flag) {
+ params->rpl_modification_flag[0] = vl_rbsp_u(&rbsp, 1);
+ if (params->rpl_modification_flag[0]) {
+ for (int i = 0; i < params->num_ref_idx_l0_active; i++) {
+ /* list_entry_l0 */
+ vl_rbsp_u(&rbsp,
+ util_logbase2_ceil(params->num_ref_idx_l0_active + params->num_ref_idx_l1_active));
+ }
+ }
+
+ if (params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B) {
+ params->rpl_modification_flag[1] = vl_rbsp_u(&rbsp, 1);
+ if (params->rpl_modification_flag[1]) {
+ for (int i = 0; i < params->num_ref_idx_l1_active; i++) {
+ /* list_entry_l1 */
+ vl_rbsp_u(&rbsp,
+ util_logbase2_ceil(params->num_ref_idx_l0_active + params->num_ref_idx_l1_active));
+ }
+ }
+ }
+ }
+
+ if (params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B)
+ params->mvd_l1_zero_flag = vl_rbsp_u(&rbsp, 1);
+
+ if (pps->flags.cabac_init_present_flag)
+ /* cabac_init_flag */
+ params->cabac_init_idc = vl_rbsp_u(&rbsp, 1);
+
+ if (params->temporal_mvp_enable) {
+ if (params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B)
+ params->collocated_list = !vl_rbsp_u(&rbsp, 1);
+
+ if (params->collocated_list == 0) {
+ if (params->num_ref_idx_l0_active > 1)
+ params->collocated_ref_idx = vl_rbsp_ue(&rbsp);
+ } else if (params->collocated_list == 1) {
+ if (params->num_ref_idx_l1_active > 1)
+ params->collocated_ref_idx = vl_rbsp_ue(&rbsp);
+ }
+ }
+
+ if ((pps->flags.weighted_pred_flag && params->slice_type == STD_VIDEO_H265_SLICE_TYPE_P) ||
+ (pps->flags.weighted_bipred_flag && params->slice_type == STD_VIDEO_H265_SLICE_TYPE_B)) {
+ h265_pred_weight_table(params, &rbsp, sps, params->slice_type);
+ }
+
+ params->max_num_merge_cand -= vl_rbsp_ue(&rbsp);
+ }
+
+ params->slice_qp_delta = vl_rbsp_se(&rbsp);
+
+ if (pps->flags.pps_slice_chroma_qp_offsets_present_flag) {
+ params->slice_cb_qp_offset = vl_rbsp_se(&rbsp);
+ params->slice_cr_qp_offset = vl_rbsp_se(&rbsp);
+ }
+
+ if (pps->flags.chroma_qp_offset_list_enabled_flag)
+ /* cu_chroma_qp_offset_enabled_flag */
+ vl_rbsp_u(&rbsp, 1);
+
+ if (pps->flags.deblocking_filter_control_present_flag) {
+ if (pps->flags.deblocking_filter_override_enabled_flag) {
+ /* deblocking_filter_override_flag */
+ if (vl_rbsp_u(&rbsp, 1)) {
+ params->disable_deblocking_filter_idc = vl_rbsp_u(&rbsp, 1);
+
+ if (!params->disable_deblocking_filter_idc) {
+ params->beta_offset_div2 = vl_rbsp_se(&rbsp);
+ params->tc_offset_div2 = vl_rbsp_se(&rbsp);
+ }
+ } else {
+ params->disable_deblocking_filter_idc =
+ pps->flags.pps_deblocking_filter_disabled_flag;
+ }
+ }
+ }
+
+ if (pps->flags.pps_loop_filter_across_slices_enabled_flag &&
+ (params->sao_luma_flag || params->sao_chroma_flag ||
+ !params->disable_deblocking_filter_idc))
+ params->loop_filter_across_slices_enable = vl_rbsp_u(&rbsp, 1);
+
+ if (pps->flags.tiles_enabled_flag || pps->flags.entropy_coding_sync_enabled_flag) {
+ unsigned num_entry_points_offsets = vl_rbsp_ue(&rbsp);
+
+ if (num_entry_points_offsets > 0) {
+ unsigned offset_len = vl_rbsp_ue(&rbsp) + 1;
+ for (unsigned i = 0; i < num_entry_points_offsets; i++) {
+ /* entry_point_offset_minus1 */
+ vl_rbsp_u(&rbsp, offset_len);
+ }
+ }
+ }
+
+ if (pps->flags.pps_extension_present_flag) {
+ unsigned length = vl_rbsp_ue(&rbsp);
+ for (unsigned i = 0; i < length; i++)
+ /* slice_reserved_undetermined_flag */
+ vl_rbsp_u(&rbsp, 1);
+ }
+
+ unsigned header_bits =
+ (slice_size * 8 - 24 /* start code */) - vl_vlc_bits_left(&rbsp.nal) - rbsp.removed;
+ params->slice_data_bytes_offset = (header_bits + 8) / 8;
+}
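
A sketch of how a driver might walk the slice segments of a frame with the parser above at vkCmdDecodeVideoKHR time; "bitstream" is assumed to be a CPU mapping of srcBuffer at srcBufferOffset (illustrative only):

static void
drv_parse_h265_slices(const VkVideoDecodeInfoKHR *frame_info,
                      const VkVideoDecodeH265PictureInfoKHR *pic_info,
                      const StdVideoH265SequenceParameterSet *sps,
                      const StdVideoH265PictureParameterSet *pps,
                      uint8_t *bitstream)
{
   for (uint32_t s = 0; s < pic_info->sliceSegmentCount; s++) {
      uint32_t start = pic_info->pSliceSegmentOffsets[s];
      uint32_t end = s + 1 < pic_info->sliceSegmentCount ?
         pic_info->pSliceSegmentOffsets[s + 1] :
         (uint32_t)frame_info->srcBufferRange;
      struct vk_video_h265_slice_params slice_params;

      vk_video_parse_h265_slice_header(frame_info, pic_info, sps, pps,
                                       bitstream + start, end - start,
                                       &slice_params);
      /* ... program per-slice hardware state from slice_params ... */
   }
}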
+
+void
+vk_video_get_profile_alignments(const VkVideoProfileListInfoKHR *profile_list,
+ uint32_t *width_align_out, uint32_t *height_align_out)
+{
+ uint32_t width_align = 1, height_align = 1;
+ for (unsigned i = 0; i < profile_list->profileCount; i++) {
+ if (profile_list->pProfiles[i].videoCodecOperation == VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR ||
+ profile_list->pProfiles[i].videoCodecOperation == VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR
+ ) {
+ width_align = MAX2(width_align, VK_VIDEO_H264_MACROBLOCK_WIDTH);
+ height_align = MAX2(height_align, VK_VIDEO_H264_MACROBLOCK_HEIGHT);
+ }
+ if (profile_list->pProfiles[i].videoCodecOperation == VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR ||
+ profile_list->pProfiles[i].videoCodecOperation == VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR
+ ) {
+ width_align = MAX2(width_align, VK_VIDEO_H265_CTU_MAX_WIDTH);
+ height_align = MAX2(height_align, VK_VIDEO_H265_CTU_MAX_HEIGHT);
+ }
+ if (profile_list->pProfiles[i].videoCodecOperation == VK_VIDEO_CODEC_OPERATION_DECODE_AV1_BIT_KHR) {
+ width_align = MAX2(width_align, VK_VIDEO_AV1_BLOCK_WIDTH);
+ height_align = MAX2(height_align, VK_VIDEO_AV1_BLOCK_HEIGHT);
+ }
+ }
+ *width_align_out = width_align;
+ *height_align_out = height_align;
+}
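
The returned alignments are typically used to round the coded extent up when sizing decode output and DPB images; a small worked example (illustrative only):

static VkExtent2D
drv_align_coded_extent(const VkVideoProfileListInfoKHR *profiles, VkExtent2D extent)
{
   uint32_t width_align, height_align;

   vk_video_get_profile_alignments(profiles, &width_align, &height_align);
   /* e.g. with 16x16 H.264 macroblocks, 1920x1080 rounds up to 1920x1088 */
   extent.width = (extent.width + width_align - 1) / width_align * width_align;
   extent.height = (extent.height + height_align - 1) / height_align * height_align;
   return extent;
}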
+
+static const uint8_t vk_video_h264_levels[] = {10, 11, 12, 13, 20, 21, 22, 30, 31, 32, 40, 41, 42, 50, 51, 52, 60, 61, 62};
+uint8_t
+vk_video_get_h264_level(StdVideoH264LevelIdc level)
+{
+ assert(level <= STD_VIDEO_H264_LEVEL_IDC_6_2);
+ return vk_video_h264_levels[level];
+}
+
+const StdVideoH264SequenceParameterSet *
+vk_video_find_h264_enc_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h264_enc_h264_sps(params, id)->base;
+}
+
+const StdVideoH264PictureParameterSet *
+vk_video_find_h264_enc_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h264_enc_h264_pps(params, id)->base;
+}
+
+const StdVideoH265VideoParameterSet *
+vk_video_find_h265_enc_std_vps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h265_enc_h265_vps(params, id)->base;
+}
+
+const StdVideoH265SequenceParameterSet *
+vk_video_find_h265_enc_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h265_enc_h265_sps(params, id)->base;
+}
+
+const StdVideoH265PictureParameterSet *
+vk_video_find_h265_enc_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id)
+{
+ return &find_h265_enc_h265_pps(params, id)->base;
+}
+
+enum H264NALUType
+{
+ H264_NAL_UNSPECIFIED = 0,
+ H264_NAL_SLICE = 1,
+ H264_NAL_SLICEDATA_A = 2,
+ H264_NAL_SLICEDATA_B = 3,
+ H264_NAL_SLICEDATA_C = 4,
+ H264_NAL_IDR = 5,
+ H264_NAL_SEI = 6,
+ H264_NAL_SPS = 7,
+ H264_NAL_PPS = 8,
+ H264_NAL_ACCESS_UNIT_DELIMITER = 9,
+ H264_NAL_END_OF_SEQUENCE = 10,
+ H264_NAL_END_OF_STREAM = 11,
+ H264_NAL_FILLER_DATA = 12,
+ H264_NAL_SPS_EXTENSION = 13,
+ H264_NAL_PREFIX = 14,
+ /* 15...18 RESERVED */
+ H264_NAL_AUXILIARY_SLICE = 19,
+ /* 20...23 RESERVED */
+ /* 24...31 UNSPECIFIED */
+};
+
+enum HEVCNALUnitType {
+ HEVC_NAL_TRAIL_N = 0,
+ HEVC_NAL_TRAIL_R = 1,
+ HEVC_NAL_TSA_N = 2,
+ HEVC_NAL_TSA_R = 3,
+ HEVC_NAL_STSA_N = 4,
+ HEVC_NAL_STSA_R = 5,
+ HEVC_NAL_RADL_N = 6,
+ HEVC_NAL_RADL_R = 7,
+ HEVC_NAL_RASL_N = 8,
+ HEVC_NAL_RASL_R = 9,
+ HEVC_NAL_VCL_N10 = 10,
+ HEVC_NAL_VCL_R11 = 11,
+ HEVC_NAL_VCL_N12 = 12,
+ HEVC_NAL_VCL_R13 = 13,
+ HEVC_NAL_VCL_N14 = 14,
+ HEVC_NAL_VCL_R15 = 15,
+ HEVC_NAL_BLA_W_LP = 16,
+ HEVC_NAL_BLA_W_RADL = 17,
+ HEVC_NAL_BLA_N_LP = 18,
+ HEVC_NAL_IDR_W_RADL = 19,
+ HEVC_NAL_IDR_N_LP = 20,
+ HEVC_NAL_CRA_NUT = 21,
+ HEVC_NAL_VPS_NUT = 32,
+ HEVC_NAL_SPS_NUT = 33,
+ HEVC_NAL_PPS_NUT = 34,
+};
+
+unsigned
+vk_video_get_h265_nal_unit(const StdVideoEncodeH265PictureInfo *pic_info)
+{
+ switch (pic_info->pic_type) {
+ case STD_VIDEO_H265_PICTURE_TYPE_IDR:
+ return HEVC_NAL_IDR_W_RADL;
+ case STD_VIDEO_H265_PICTURE_TYPE_I:
+ return HEVC_NAL_CRA_NUT;
+ case STD_VIDEO_H265_PICTURE_TYPE_P:
+ return HEVC_NAL_TRAIL_R;
+ case STD_VIDEO_H265_PICTURE_TYPE_B:
+ if (pic_info->flags.IrapPicFlag) {
+ if (pic_info->flags.is_reference)
+ return HEVC_NAL_RASL_R;
+ else
+ return HEVC_NAL_RASL_N;
+ } else {
+ if (pic_info->flags.is_reference)
+ return HEVC_NAL_TRAIL_R;
+ else
+ return HEVC_NAL_TRAIL_N;
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ return 0;
+}
+
+static const uint8_t vk_video_h265_levels[] = {10, 20, 21, 30, 31, 40, 41, 50, 51, 52, 60, 61, 62};
+
+static uint8_t
+vk_video_get_h265_level(StdVideoH265LevelIdc level)
+{
+ assert(level <= STD_VIDEO_H265_LEVEL_IDC_6_2);
+ return vk_video_h265_levels[level];
+}
+
+static void
+emit_nalu_header(struct vl_bitstream_encoder *enc,
+ int nal_ref, int nal_unit)
+{
+ enc->prevent_start_code = false;
+
+ vl_bitstream_put_bits(enc, 24, 0);
+ vl_bitstream_put_bits(enc, 8, 1);
+ vl_bitstream_put_bits(enc, 1, 0);
+ vl_bitstream_put_bits(enc, 2, nal_ref); /* nal_ref_idc */
+ vl_bitstream_put_bits(enc, 5, nal_unit); /* nal_unit_type */
+ vl_bitstream_flush(enc);
+
+ enc->prevent_start_code = true;
+}
+
+static void
+encode_hrd_params(struct vl_bitstream_encoder *enc,
+ const StdVideoH264HrdParameters *hrd)
+{
+ vl_bitstream_exp_golomb_ue(enc, hrd->cpb_cnt_minus1);
+ vl_bitstream_put_bits(enc, 4, hrd->bit_rate_scale);
+ vl_bitstream_put_bits(enc, 4, hrd->cpb_size_scale);
+ for (int sched_sel_idx = 0; sched_sel_idx <= hrd->cpb_cnt_minus1; sched_sel_idx++) {
+ vl_bitstream_exp_golomb_ue(enc, hrd->bit_rate_value_minus1[sched_sel_idx]);
+ vl_bitstream_exp_golomb_ue(enc, hrd->cpb_size_value_minus1[sched_sel_idx]);
+ vl_bitstream_put_bits(enc, 1, hrd->cbr_flag[sched_sel_idx]);
+ }
+ vl_bitstream_put_bits(enc, 5, hrd->initial_cpb_removal_delay_length_minus1);
+ vl_bitstream_put_bits(enc, 5, hrd->cpb_removal_delay_length_minus1);
+ vl_bitstream_put_bits(enc, 5, hrd->dpb_output_delay_length_minus1);
+ vl_bitstream_put_bits(enc, 5, hrd->time_offset_length);
+}
+
+void
+vk_video_encode_h264_sps(const StdVideoH264SequenceParameterSet *sps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr)
+{
+ struct vl_bitstream_encoder enc;
+ uint32_t data_size = *data_size_ptr;
+
+ vl_bitstream_encoder_clear(&enc, data_ptr, data_size, size_limit);
+
+ emit_nalu_header(&enc, 3, H264_NAL_SPS);
+
+ vl_bitstream_put_bits(&enc, 8, sps->profile_idc);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.constraint_set0_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.constraint_set1_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.constraint_set2_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.constraint_set3_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.constraint_set4_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.constraint_set5_flag);
+ vl_bitstream_put_bits(&enc, 2, 0);
+ vl_bitstream_put_bits(&enc, 8, vk_video_get_h264_level(sps->level_idc));
+ vl_bitstream_exp_golomb_ue(&enc, sps->seq_parameter_set_id);
+
+ if (sps->profile_idc == STD_VIDEO_H264_PROFILE_IDC_HIGH /* high10 as well */) {
+ vl_bitstream_exp_golomb_ue(&enc, sps->chroma_format_idc);
+ vl_bitstream_exp_golomb_ue(&enc, sps->bit_depth_luma_minus8);
+ vl_bitstream_exp_golomb_ue(&enc, sps->bit_depth_chroma_minus8);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.qpprime_y_zero_transform_bypass_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.seq_scaling_matrix_present_flag);
+ }
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_max_frame_num_minus4);
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->pic_order_cnt_type);
+ if (sps->pic_order_cnt_type == 0)
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_max_pic_order_cnt_lsb_minus4);
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->max_num_ref_frames);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.gaps_in_frame_num_value_allowed_flag);
+ vl_bitstream_exp_golomb_ue(&enc, sps->pic_width_in_mbs_minus1);
+ vl_bitstream_exp_golomb_ue(&enc, sps->pic_height_in_map_units_minus1);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.frame_mbs_only_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.direct_8x8_inference_flag);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.frame_cropping_flag);
+ if (sps->flags.frame_cropping_flag) {
+ vl_bitstream_exp_golomb_ue(&enc, sps->frame_crop_left_offset);
+ vl_bitstream_exp_golomb_ue(&enc, sps->frame_crop_right_offset);
+ vl_bitstream_exp_golomb_ue(&enc, sps->frame_crop_top_offset);
+ vl_bitstream_exp_golomb_ue(&enc, sps->frame_crop_bottom_offset);
+ }
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.vui_parameters_present_flag); /* vui parameters present flag */
+ if (sps->flags.vui_parameters_present_flag) {
+ const StdVideoH264SequenceParameterSetVui *vui = sps->pSequenceParameterSetVui;
+ vl_bitstream_put_bits(&enc, 1, vui->flags.aspect_ratio_info_present_flag);
+
+ if (vui->flags.aspect_ratio_info_present_flag) {
+ vl_bitstream_put_bits(&enc, 8, vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == STD_VIDEO_H264_ASPECT_RATIO_IDC_EXTENDED_SAR) {
+ vl_bitstream_put_bits(&enc, 16, vui->sar_width);
+ vl_bitstream_put_bits(&enc, 16, vui->sar_height);
+ }
+ }
+
+ vl_bitstream_put_bits(&enc, 1, vui->flags.overscan_info_present_flag);
+ if (vui->flags.overscan_info_present_flag)
+ vl_bitstream_put_bits(&enc, 1, vui->flags.overscan_appropriate_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.video_signal_type_present_flag);
+ if (vui->flags.video_signal_type_present_flag) {
+ vl_bitstream_put_bits(&enc, 3, vui->video_format);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.video_full_range_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.color_description_present_flag);
+ if (vui->flags.color_description_present_flag) {
+ vl_bitstream_put_bits(&enc, 8, vui->colour_primaries);
+ vl_bitstream_put_bits(&enc, 8, vui->transfer_characteristics);
+ vl_bitstream_put_bits(&enc, 8, vui->matrix_coefficients);
+ }
+ }
+
+ vl_bitstream_put_bits(&enc, 1, vui->flags.chroma_loc_info_present_flag);
+ if (vui->flags.chroma_loc_info_present_flag) {
+ vl_bitstream_exp_golomb_ue(&enc, vui->chroma_sample_loc_type_top_field);
+ vl_bitstream_exp_golomb_ue(&enc, vui->chroma_sample_loc_type_bottom_field);
+ }
+ vl_bitstream_put_bits(&enc, 1, vui->flags.timing_info_present_flag);
+ if (vui->flags.timing_info_present_flag) {
+ vl_bitstream_put_bits(&enc, 32, vui->num_units_in_tick);
+ vl_bitstream_put_bits(&enc, 32, vui->time_scale);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.fixed_frame_rate_flag);
+ }
+ vl_bitstream_put_bits(&enc, 1, vui->flags.nal_hrd_parameters_present_flag);
+ if (vui->flags.nal_hrd_parameters_present_flag)
+ encode_hrd_params(&enc, vui->pHrdParameters);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.vcl_hrd_parameters_present_flag);
+ if (vui->flags.vcl_hrd_parameters_present_flag)
+ encode_hrd_params(&enc, vui->pHrdParameters);
+ if (vui->flags.nal_hrd_parameters_present_flag || vui->flags.vcl_hrd_parameters_present_flag)
+ vl_bitstream_put_bits(&enc, 1, 0); /* low_delay_hrd_flag */
+ vl_bitstream_put_bits(&enc, 1, 0); /* pic_struct_present_flag */
+ vl_bitstream_put_bits(&enc, 1, vui->flags.bitstream_restriction_flag);
+ if (vui->flags.bitstream_restriction_flag) {
+ vl_bitstream_put_bits(&enc, 1, 0); /* motion_vectors_over_pic_boundaries_flag */
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* max_bytes_per_pic_denom */
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* max_bits_per_mb_denom */
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* log2_max_mv_length_horizontal */
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* log2_max_mv_length_vertical */
+ vl_bitstream_exp_golomb_ue(&enc, vui->max_num_reorder_frames);
+ vl_bitstream_exp_golomb_ue(&enc, vui->max_dec_frame_buffering);
+ }
+ }
+
+ vl_bitstream_rbsp_trailing(&enc);
+
+ vl_bitstream_flush(&enc);
+ *data_size_ptr += vl_bitstream_get_byte_count(&enc);
+ vl_bitstream_encoder_free(&enc);
+}
+
+void
+vk_video_encode_h264_pps(const StdVideoH264PictureParameterSet *pps,
+ bool high_profile,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr)
+{
+ struct vl_bitstream_encoder enc;
+ uint32_t data_size = *data_size_ptr;
+
+ vl_bitstream_encoder_clear(&enc, data_ptr, data_size, size_limit);
+
+ emit_nalu_header(&enc, 3, H264_NAL_PPS);
+
+ vl_bitstream_exp_golomb_ue(&enc, pps->pic_parameter_set_id);
+ vl_bitstream_exp_golomb_ue(&enc, pps->seq_parameter_set_id);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.entropy_coding_mode_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.bottom_field_pic_order_in_frame_present_flag);
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* num_slice_groups_minus1 */
+
+ vl_bitstream_exp_golomb_ue(&enc, pps->num_ref_idx_l0_default_active_minus1);
+ vl_bitstream_exp_golomb_ue(&enc, pps->num_ref_idx_l1_default_active_minus1);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.weighted_pred_flag);
+ vl_bitstream_put_bits(&enc, 2, pps->weighted_bipred_idc);
+ vl_bitstream_exp_golomb_se(&enc, pps->pic_init_qp_minus26);
+ vl_bitstream_exp_golomb_se(&enc, pps->pic_init_qs_minus26);
+ vl_bitstream_exp_golomb_se(&enc, pps->chroma_qp_index_offset);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.deblocking_filter_control_present_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.constrained_intra_pred_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.redundant_pic_cnt_present_flag);
+
+ /* high profile */
+ if (high_profile) {
+ vl_bitstream_put_bits(&enc, 1, pps->flags.transform_8x8_mode_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.pic_scaling_matrix_present_flag);
+ vl_bitstream_exp_golomb_se(&enc, pps->second_chroma_qp_index_offset);
+ }
+ vl_bitstream_rbsp_trailing(&enc);
+
+ vl_bitstream_flush(&enc);
+ *data_size_ptr += vl_bitstream_get_byte_count(&enc);
+ vl_bitstream_encoder_free(&enc);
+}
+
+static void
+emit_nalu_h265_header(struct vl_bitstream_encoder *enc,
+ int nal_unit_type)
+{
+ enc->prevent_start_code = false;
+
+ /* Annex-B start code 0x00000001 */
+ vl_bitstream_put_bits(enc, 24, 0);
+ vl_bitstream_put_bits(enc, 8, 1);
+ vl_bitstream_put_bits(enc, 1, 0); /* forbidden_zero_bit */
+ vl_bitstream_put_bits(enc, 6, nal_unit_type); /* nal_unit_type */
+ vl_bitstream_put_bits(enc, 6, 0); /* nuh_layer_id */
+ vl_bitstream_put_bits(enc, 3, 1); /* nuh_temporal_id_plus1 */
+ vl_bitstream_flush(enc);
+
+ enc->prevent_start_code = true;
+}
+
+static void
+encode_h265_profile_tier_level(struct vl_bitstream_encoder *enc,
+ const StdVideoH265ProfileTierLevel *ptl)
+{
+ vl_bitstream_put_bits(enc, 2, 0); /* general_profile_space */
+ vl_bitstream_put_bits(enc, 1, ptl->flags.general_tier_flag);
+ vl_bitstream_put_bits(enc, 5, ptl->general_profile_idc);
+
+ for (int j = 0; j < 32; j++)
+ vl_bitstream_put_bits(enc, 1, j == ptl->general_profile_idc);
+
+ vl_bitstream_put_bits(enc, 1, ptl->flags.general_progressive_source_flag);
+ vl_bitstream_put_bits(enc, 1, ptl->flags.general_interlaced_source_flag);
+ vl_bitstream_put_bits(enc, 1, ptl->flags.general_non_packed_constraint_flag);
+ vl_bitstream_put_bits(enc, 1, ptl->flags.general_frame_only_constraint_flag);
+ vl_bitstream_put_bits(enc, 31, 0);
+ vl_bitstream_put_bits(enc, 13, 0); /* general_reserved_zero_44bits (31 + 13) */
+ vl_bitstream_put_bits(enc, 8, 3 * vk_video_get_h265_level(ptl->general_level_idc)); /* general_level_idc = 30 * level number */
+}
+
+void
+vk_video_encode_h265_vps(const StdVideoH265VideoParameterSet *vps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr)
+{
+ struct vl_bitstream_encoder enc;
+ uint32_t data_size = *data_size_ptr;
+
+ vl_bitstream_encoder_clear(&enc, data_ptr, data_size, size_limit);
+
+ emit_nalu_h265_header(&enc, HEVC_NAL_VPS_NUT);
+
+ vl_bitstream_put_bits(&enc, 4, vps->vps_video_parameter_set_id);
+ vl_bitstream_put_bits(&enc, 2, 3); /* vps_reserved_three_2bits */
+ vl_bitstream_put_bits(&enc, 6, 0); /* vps_max_layers_minus1 */
+ vl_bitstream_put_bits(&enc, 3, vps->vps_max_sub_layers_minus1);
+ vl_bitstream_put_bits(&enc, 1, vps->flags.vps_temporal_id_nesting_flag);
+ vl_bitstream_put_bits(&enc, 16, 0xffff); /* vps_reserved_0xffff_16bits */
+
+ encode_h265_profile_tier_level(&enc, vps->pProfileTierLevel);
+
+ vl_bitstream_put_bits(&enc, 1, vps->flags.vps_sub_layer_ordering_info_present_flag);
+
+ for (int i = 0; i <= vps->vps_max_sub_layers_minus1; i++) {
+ vl_bitstream_exp_golomb_ue(&enc, vps->pDecPicBufMgr->max_dec_pic_buffering_minus1[i]);
+ vl_bitstream_exp_golomb_ue(&enc, vps->pDecPicBufMgr->max_num_reorder_pics[i]);
+ vl_bitstream_exp_golomb_ue(&enc, vps->pDecPicBufMgr->max_latency_increase_plus1[i]);
+ }
+
+ vl_bitstream_put_bits(&enc, 6, 0); /* vps_max_layer_id */
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* vps_num_layer_sets_minus1 */
+ vl_bitstream_put_bits(&enc, 1, vps->flags.vps_timing_info_present_flag);
+
+ if (vps->flags.vps_timing_info_present_flag) {
+ vl_bitstream_put_bits(&enc, 32, vps->vps_num_units_in_tick);
+ vl_bitstream_put_bits(&enc, 32, vps->vps_time_scale);
+ vl_bitstream_put_bits(&enc, 1, vps->flags.vps_poc_proportional_to_timing_flag);
+ if (vps->flags.vps_poc_proportional_to_timing_flag)
+ vl_bitstream_exp_golomb_ue(&enc, vps->vps_num_ticks_poc_diff_one_minus1);
+ vl_bitstream_exp_golomb_ue(&enc, 0); /* vps_num_hrd_parameters */
+ }
+
+ vl_bitstream_put_bits(&enc, 1, 0); /* vps extension flag */
+ vl_bitstream_rbsp_trailing(&enc);
+
+ vl_bitstream_flush(&enc);
+ *data_size_ptr += vl_bitstream_get_byte_count(&enc);
+ vl_bitstream_encoder_free(&enc);
+}
+
+static void
+encode_rps(struct vl_bitstream_encoder *enc,
+ const StdVideoH265SequenceParameterSet *sps,
+ int st_rps_idx)
+{
+ const StdVideoH265ShortTermRefPicSet *rps = &sps->pShortTermRefPicSet[st_rps_idx];
+ if (st_rps_idx != 0)
+ vl_bitstream_put_bits(enc, 1, rps->flags.inter_ref_pic_set_prediction_flag);
+
+ if (rps->flags.inter_ref_pic_set_prediction_flag) {
+ int ref_rps_idx = st_rps_idx - (rps->delta_idx_minus1 + 1);
+ vl_bitstream_put_bits(enc, 1, rps->flags.delta_rps_sign);
+ vl_bitstream_exp_golomb_ue(enc, rps->abs_delta_rps_minus1);
+
+ const StdVideoH265ShortTermRefPicSet *rps_ref = &sps->pShortTermRefPicSet[ref_rps_idx];
+ int num_delta_pocs = rps_ref->num_negative_pics + rps_ref->num_positive_pics;
+
+ for (int j = 0; j < num_delta_pocs; j++) {
+ vl_bitstream_put_bits(enc, 1, !!(rps->used_by_curr_pic_flag & (1 << j)));
+ if (!(rps->used_by_curr_pic_flag & (1 << j))) {
+ vl_bitstream_put_bits(enc, 1, !!(rps->use_delta_flag & (1 << j)));
+ }
+ }
+ } else {
+ vl_bitstream_exp_golomb_ue(enc, rps->num_negative_pics);
+ vl_bitstream_exp_golomb_ue(enc, rps->num_positive_pics);
+
+ for (int i = 0; i < rps->num_negative_pics; i++) {
+ vl_bitstream_exp_golomb_ue(enc, rps->delta_poc_s0_minus1[i]);
+ vl_bitstream_put_bits(enc, 1, !!(rps->used_by_curr_pic_s0_flag & (1 << i)));
+ }
+ for (int i = 0; i < rps->num_positive_pics; i++) {
+ vl_bitstream_exp_golomb_ue(enc, rps->delta_poc_s1_minus1[i]);
+ vl_bitstream_put_bits(enc, 1, !!(rps->used_by_curr_pic_s1_flag & (1 << i)));
+ }
+ }
+}
+
+void
+vk_video_encode_h265_sps(const StdVideoH265SequenceParameterSet *sps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr)
+{
+ struct vl_bitstream_encoder enc;
+ uint32_t data_size = *data_size_ptr;
+
+ vl_bitstream_encoder_clear(&enc, data_ptr, data_size, size_limit);
+
+ emit_nalu_h265_header(&enc, HEVC_NAL_SPS_NUT);
+
+ vl_bitstream_put_bits(&enc, 4, sps->sps_video_parameter_set_id);
+ vl_bitstream_put_bits(&enc, 3, sps->sps_max_sub_layers_minus1);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.sps_temporal_id_nesting_flag);
+
+ encode_h265_profile_tier_level(&enc, sps->pProfileTierLevel);
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->sps_seq_parameter_set_id);
+ vl_bitstream_exp_golomb_ue(&enc, sps->chroma_format_idc);
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->pic_width_in_luma_samples);
+ vl_bitstream_exp_golomb_ue(&enc, sps->pic_height_in_luma_samples);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.conformance_window_flag);
+
+ if (sps->flags.conformance_window_flag) {
+ vl_bitstream_exp_golomb_ue(&enc, sps->conf_win_left_offset);
+ vl_bitstream_exp_golomb_ue(&enc, sps->conf_win_right_offset);
+ vl_bitstream_exp_golomb_ue(&enc, sps->conf_win_top_offset);
+ vl_bitstream_exp_golomb_ue(&enc, sps->conf_win_bottom_offset);
+ }
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->bit_depth_luma_minus8);
+ vl_bitstream_exp_golomb_ue(&enc, sps->bit_depth_chroma_minus8);
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_max_pic_order_cnt_lsb_minus4);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.sps_sub_layer_ordering_info_present_flag);
+
+ for (int i = 0; i <= sps->sps_max_sub_layers_minus1; i++) {
+ vl_bitstream_exp_golomb_ue(&enc, sps->pDecPicBufMgr->max_dec_pic_buffering_minus1[i]);
+ vl_bitstream_exp_golomb_ue(&enc, sps->pDecPicBufMgr->max_num_reorder_pics[i]);
+ vl_bitstream_exp_golomb_ue(&enc, sps->pDecPicBufMgr->max_latency_increase_plus1[i]);
+ }
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_min_luma_coding_block_size_minus3);
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_diff_max_min_luma_coding_block_size);
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_min_luma_transform_block_size_minus2);
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_diff_max_min_luma_transform_block_size);
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->max_transform_hierarchy_depth_inter);
+ vl_bitstream_exp_golomb_ue(&enc, sps->max_transform_hierarchy_depth_intra);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.scaling_list_enabled_flag);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.amp_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.sample_adaptive_offset_enabled_flag);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.pcm_enabled_flag);
+
+ if (sps->flags.pcm_enabled_flag) {
+ vl_bitstream_put_bits(&enc, 4, sps->bit_depth_luma_minus8 + 7);
+ vl_bitstream_put_bits(&enc, 4, sps->bit_depth_chroma_minus8 + 7);
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_min_luma_coding_block_size_minus3);
+ vl_bitstream_exp_golomb_ue(&enc, sps->log2_diff_max_min_luma_coding_block_size);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.pcm_loop_filter_disabled_flag);
+ }
+
+ vl_bitstream_exp_golomb_ue(&enc, sps->num_short_term_ref_pic_sets);
+ for (int i = 0; i < sps->num_short_term_ref_pic_sets; i++)
+ encode_rps(&enc, sps, i);
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.long_term_ref_pics_present_flag);
+ if (sps->flags.long_term_ref_pics_present_flag) {
+ vl_bitstream_exp_golomb_ue(&enc, sps->num_long_term_ref_pics_sps);
+ for (int i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
+ vl_bitstream_put_bits(&enc, sps->log2_max_pic_order_cnt_lsb_minus4 + 4, sps->pLongTermRefPicsSps->lt_ref_pic_poc_lsb_sps[i]);
+ vl_bitstream_put_bits(&enc, 1, !!(sps->pLongTermRefPicsSps->used_by_curr_pic_lt_sps_flag & (1 << i)));
+ }
+ }
+
+ vl_bitstream_put_bits(&enc, 1, sps->flags.sps_temporal_mvp_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.strong_intra_smoothing_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, sps->flags.vui_parameters_present_flag);
+
+ if (sps->flags.vui_parameters_present_flag) {
+ const StdVideoH265SequenceParameterSetVui *vui = sps->pSequenceParameterSetVui;
+ vl_bitstream_put_bits(&enc, 1, vui->flags.aspect_ratio_info_present_flag);
+ if (vui->flags.aspect_ratio_info_present_flag) {
+ vl_bitstream_put_bits(&enc, 8, vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == STD_VIDEO_H265_ASPECT_RATIO_IDC_EXTENDED_SAR) {
+ vl_bitstream_put_bits(&enc, 16, vui->sar_width);
+ vl_bitstream_put_bits(&enc, 16, vui->sar_height);
+ }
+ }
+ vl_bitstream_put_bits(&enc, 1, vui->flags.overscan_info_present_flag);
+ if (vui->flags.overscan_info_present_flag)
+ vl_bitstream_put_bits(&enc, 1, vui->flags.overscan_appropriate_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.video_signal_type_present_flag);
+ if (vui->flags.video_signal_type_present_flag) {
+ vl_bitstream_put_bits(&enc, 3, vui->video_format);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.video_full_range_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.colour_description_present_flag);
+ if (vui->flags.colour_description_present_flag) {
+ vl_bitstream_put_bits(&enc, 8, vui->colour_primaries);
+ vl_bitstream_put_bits(&enc, 8, vui->transfer_characteristics);
+ vl_bitstream_put_bits(&enc, 8, vui->matrix_coeffs);
+ }
+ }
+ vl_bitstream_put_bits(&enc, 1, vui->flags.chroma_loc_info_present_flag);
+ if (vui->flags.chroma_loc_info_present_flag) {
+ vl_bitstream_exp_golomb_ue(&enc, vui->chroma_sample_loc_type_top_field);
+ vl_bitstream_exp_golomb_ue(&enc, vui->chroma_sample_loc_type_bottom_field);
+ }
+ vl_bitstream_put_bits(&enc, 1, vui->flags.neutral_chroma_indication_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.field_seq_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.frame_field_info_present_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.default_display_window_flag);
+ if (vui->flags.default_display_window_flag) {
+ vl_bitstream_exp_golomb_ue(&enc, vui->def_disp_win_left_offset);
+ vl_bitstream_exp_golomb_ue(&enc, vui->def_disp_win_right_offset);
+ vl_bitstream_exp_golomb_ue(&enc, vui->def_disp_win_top_offset);
+ vl_bitstream_exp_golomb_ue(&enc, vui->def_disp_win_bottom_offset);
+ }
+ vl_bitstream_put_bits(&enc, 1, vui->flags.vui_timing_info_present_flag);
+ if (vui->flags.vui_timing_info_present_flag) {
+ vl_bitstream_put_bits(&enc, 32, vui->vui_num_units_in_tick);
+ vl_bitstream_put_bits(&enc, 32, vui->vui_time_scale);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.vui_poc_proportional_to_timing_flag);
+ if (vui->flags.vui_poc_proportional_to_timing_flag)
+ vl_bitstream_exp_golomb_ue(&enc, vui->vui_num_ticks_poc_diff_one_minus1);
+ vl_bitstream_put_bits(&enc, 1, 0); /* vui_hrd_parameters_present_flag (HRD params not emitted) */
+ }
+
+ vl_bitstream_put_bits(&enc, 1, vui->flags.bitstream_restriction_flag);
+ if (vui->flags.bitstream_restriction_flag) {
+ vl_bitstream_put_bits(&enc, 1, vui->flags.tiles_fixed_structure_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.motion_vectors_over_pic_boundaries_flag);
+ vl_bitstream_put_bits(&enc, 1, vui->flags.restricted_ref_pic_lists_flag);
+ vl_bitstream_exp_golomb_ue(&enc, vui->min_spatial_segmentation_idc);
+ vl_bitstream_exp_golomb_ue(&enc, vui->max_bytes_per_pic_denom);
+ vl_bitstream_exp_golomb_ue(&enc, vui->max_bits_per_min_cu_denom);
+ vl_bitstream_exp_golomb_ue(&enc, vui->log2_max_mv_length_horizontal);
+ vl_bitstream_exp_golomb_ue(&enc, vui->log2_max_mv_length_vertical);
+ }
+ }
+
+ vl_bitstream_put_bits(&enc, 1, 0); /* sps_extension_present_flag */
+ vl_bitstream_rbsp_trailing(&enc);
+
+ vl_bitstream_flush(&enc);
+ *data_size_ptr += vl_bitstream_get_byte_count(&enc);
+ vl_bitstream_encoder_free(&enc);
+}
+
+void
+vk_video_encode_h265_pps(const StdVideoH265PictureParameterSet *pps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr)
+{
+ struct vl_bitstream_encoder enc;
+ uint32_t data_size = *data_size_ptr;
+
+ vl_bitstream_encoder_clear(&enc, data_ptr, data_size, size_limit);
+
+ emit_nalu_h265_header(&enc, HEVC_NAL_PPS_NUT);
+ vl_bitstream_exp_golomb_ue(&enc, pps->pps_pic_parameter_set_id);
+ vl_bitstream_exp_golomb_ue(&enc, pps->pps_seq_parameter_set_id);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.dependent_slice_segments_enabled_flag);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.output_flag_present_flag);
+ vl_bitstream_put_bits(&enc, 3, pps->num_extra_slice_header_bits);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.sign_data_hiding_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.cabac_init_present_flag);
+
+ vl_bitstream_exp_golomb_ue(&enc, pps->num_ref_idx_l0_default_active_minus1);
+ vl_bitstream_exp_golomb_ue(&enc, pps->num_ref_idx_l1_default_active_minus1);
+
+ vl_bitstream_exp_golomb_se(&enc, pps->init_qp_minus26);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.constrained_intra_pred_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.transform_skip_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.cu_qp_delta_enabled_flag);
+
+ if (pps->flags.cu_qp_delta_enabled_flag)
+ vl_bitstream_exp_golomb_ue(&enc, pps->diff_cu_qp_delta_depth);
+
+ vl_bitstream_exp_golomb_se(&enc, pps->pps_cb_qp_offset);
+ vl_bitstream_exp_golomb_se(&enc, pps->pps_cr_qp_offset);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.pps_slice_chroma_qp_offsets_present_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.weighted_pred_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.weighted_bipred_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.transquant_bypass_enabled_flag);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.tiles_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.entropy_coding_sync_enabled_flag);
+
+ assert(!pps->flags.tiles_enabled_flag);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.pps_loop_filter_across_slices_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.deblocking_filter_control_present_flag);
+
+ if (pps->flags.deblocking_filter_control_present_flag) {
+ vl_bitstream_put_bits(&enc, 1, pps->flags.deblocking_filter_override_enabled_flag);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.pps_deblocking_filter_disabled_flag);
+ if (!pps->flags.pps_deblocking_filter_disabled_flag) {
+ vl_bitstream_exp_golomb_se(&enc, pps->pps_beta_offset_div2);
+ vl_bitstream_exp_golomb_se(&enc, pps->pps_tc_offset_div2);
+ }
+ }
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.pps_scaling_list_data_present_flag);
+ assert(!pps->flags.pps_scaling_list_data_present_flag);
+
+ vl_bitstream_put_bits(&enc, 1, pps->flags.lists_modification_present_flag);
+ vl_bitstream_exp_golomb_ue(&enc, pps->log2_parallel_merge_level_minus2);
+ vl_bitstream_put_bits(&enc, 1, pps->flags.slice_segment_header_extension_present_flag);
+
+ vl_bitstream_put_bits(&enc, 1, 0); /* pps extension flag */
+ vl_bitstream_rbsp_trailing(&enc);
+
+ vl_bitstream_flush(&enc);
+ *data_size_ptr += vl_bitstream_get_byte_count(&enc);
+ vl_bitstream_encoder_free(&enc);
+}
diff --git a/src/vulkan/runtime/vk_video.h b/src/vulkan/runtime/vk_video.h
new file mode 100644
index 00000000000..8eb8814a81c
--- /dev/null
+++ b/src/vulkan/runtime/vk_video.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright © 2021 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_VIDEO_H
+#define VK_VIDEO_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_video_h264_sps {
+ StdVideoH264SequenceParameterSet base;
+ int32_t offsets_for_ref_frame[256];
+ StdVideoH264ScalingLists scaling_lists;
+ StdVideoH264SequenceParameterSetVui vui;
+ StdVideoH264HrdParameters vui_hrd_parameters;
+};
+
+struct vk_video_h264_pps {
+ StdVideoH264PictureParameterSet base;
+ StdVideoH264ScalingLists scaling_lists;
+};
+
+struct vk_video_h265_vps {
+ StdVideoH265VideoParameterSet base;
+ StdVideoH265DecPicBufMgr dec_pic_buf_mgr;
+ StdVideoH265SubLayerHrdParameters hrd_parameters_nal;
+ StdVideoH265SubLayerHrdParameters hrd_parameters_vcl;
+ StdVideoH265HrdParameters hrd_parameters;
+ StdVideoH265ProfileTierLevel tier_level;
+};
+
+struct vk_video_h265_sps {
+ StdVideoH265SequenceParameterSet base;
+ StdVideoH265ProfileTierLevel tier_level;
+ StdVideoH265DecPicBufMgr dec_pic_buf_mgr;
+ StdVideoH265ScalingLists scaling_lists;
+ StdVideoH265ShortTermRefPicSet short_term_ref_pic_set;
+ StdVideoH265LongTermRefPicsSps long_term_ref_pics_sps;
+ StdVideoH265SubLayerHrdParameters hrd_parameters_nal;
+ StdVideoH265SubLayerHrdParameters hrd_parameters_vcl;
+ StdVideoH265HrdParameters hrd_parameters;
+ StdVideoH265SequenceParameterSetVui vui;
+ StdVideoH265PredictorPaletteEntries palette_entries;
+};
+
+struct vk_video_h265_pps {
+ StdVideoH265PictureParameterSet base;
+ StdVideoH265ScalingLists scaling_lists;
+ StdVideoH265PredictorPaletteEntries palette_entries;
+};
+
+struct vk_video_av1_seq_hdr {
+ StdVideoAV1SequenceHeader base;
+ StdVideoAV1ColorConfig color_config;
+ StdVideoAV1TimingInfo timing_info;
+};
+
+struct vk_video_session {
+ struct vk_object_base base;
+ VkVideoSessionCreateFlagsKHR flags;
+ VkVideoCodecOperationFlagsKHR op;
+ VkExtent2D max_coded;
+ VkFormat picture_format;
+ VkFormat ref_format;
+ uint32_t max_dpb_slots;
+ uint32_t max_active_ref_pics;
+
+ struct {
+ VkVideoEncodeUsageFlagsKHR video_usage_hints;
+ VkVideoEncodeContentFlagsKHR video_content_hints;
+ VkVideoEncodeTuningModeKHR tuning_mode;
+ } enc_usage;
+ union {
+ struct {
+ StdVideoH264ProfileIdc profile_idc;
+ } h264;
+ struct {
+ StdVideoH265ProfileIdc profile_idc;
+ } h265;
+ struct {
+ StdVideoAV1Profile profile;
+ int film_grain_support;
+ } av1;
+ };
+};
+
+struct vk_video_session_parameters {
+ struct vk_object_base base;
+ VkVideoCodecOperationFlagsKHR op;
+ union {
+ struct {
+ uint32_t max_h264_sps_count;
+ uint32_t max_h264_pps_count;
+
+ uint32_t h264_sps_count;
+ struct vk_video_h264_sps *h264_sps;
+ uint32_t h264_pps_count;
+ struct vk_video_h264_pps *h264_pps;
+ } h264_dec;
+
+ struct {
+ uint32_t max_h265_vps_count;
+ uint32_t max_h265_sps_count;
+ uint32_t max_h265_pps_count;
+
+ uint32_t h265_vps_count;
+ struct vk_video_h265_vps *h265_vps;
+ uint32_t h265_sps_count;
+ struct vk_video_h265_sps *h265_sps;
+ uint32_t h265_pps_count;
+ struct vk_video_h265_pps *h265_pps;
+ } h265_dec;
+
+ struct {
+ struct vk_video_av1_seq_hdr seq_hdr;
+ } av1_dec;
+
+ struct {
+ uint32_t max_h264_sps_count;
+ uint32_t max_h264_pps_count;
+
+ uint32_t h264_sps_count;
+ struct vk_video_h264_sps *h264_sps;
+ uint32_t h264_pps_count;
+ struct vk_video_h264_pps *h264_pps;
+ StdVideoH264ProfileIdc profile_idc;
+ } h264_enc;
+
+ struct {
+ uint32_t max_h265_vps_count;
+ uint32_t max_h265_sps_count;
+ uint32_t max_h265_pps_count;
+
+ uint32_t h265_vps_count;
+ struct vk_video_h265_vps *h265_vps;
+ uint32_t h265_sps_count;
+ struct vk_video_h265_sps *h265_sps;
+ uint32_t h265_pps_count;
+ struct vk_video_h265_pps *h265_pps;
+ } h265_enc;
+ };
+};
+
+VkResult vk_video_session_init(struct vk_device *device,
+ struct vk_video_session *vid,
+ const VkVideoSessionCreateInfoKHR *create_info);
+
+VkResult vk_video_session_parameters_init(struct vk_device *device,
+ struct vk_video_session_parameters *params,
+ const struct vk_video_session *vid,
+ const struct vk_video_session_parameters *templ,
+ const VkVideoSessionParametersCreateInfoKHR *create_info);
+
+VkResult vk_video_session_parameters_update(struct vk_video_session_parameters *params,
+ const VkVideoSessionParametersUpdateInfoKHR *update);
+
+void vk_video_session_parameters_finish(struct vk_device *device,
+ struct vk_video_session_parameters *params);
+
+void vk_video_derive_h264_scaling_list(const StdVideoH264SequenceParameterSet *sps,
+ const StdVideoH264PictureParameterSet *pps,
+ StdVideoH264ScalingLists *list);
+
+const StdVideoH264SequenceParameterSet *
+vk_video_find_h264_dec_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH264PictureParameterSet *
+vk_video_find_h264_dec_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH265VideoParameterSet *
+vk_video_find_h265_dec_std_vps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH265SequenceParameterSet *
+vk_video_find_h265_dec_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH265PictureParameterSet *
+vk_video_find_h265_dec_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+
+struct vk_video_h265_slice_params {
+ uint32_t slice_size;
+
+ uint8_t first_slice_segment_in_pic_flag;
+ StdVideoH265SliceType slice_type;
+ uint8_t dependent_slice_segment;
+ uint8_t temporal_mvp_enable;
+ uint8_t loop_filter_across_slices_enable;
+ int32_t pic_order_cnt_lsb;
+ uint8_t sao_luma_flag;
+ uint8_t sao_chroma_flag;
+ uint8_t collocated_list;
+ uint32_t collocated_ref_idx;
+ uint8_t mvd_l1_zero_flag;
+
+ uint8_t num_ref_idx_l0_active;
+ uint8_t num_ref_idx_l1_active;
+ uint8_t rpl_modification_flag[2];
+ uint8_t cabac_init_idc;
+ int8_t slice_qp_delta;
+ int8_t slice_cb_qp_offset;
+ int8_t slice_cr_qp_offset;
+ int8_t max_num_merge_cand;
+ uint32_t slice_data_bytes_offset;
+ uint8_t disable_deblocking_filter_idc;
+ int8_t tc_offset_div2;
+ int8_t beta_offset_div2;
+ uint32_t slice_segment_address;
+
+ uint8_t luma_log2_weight_denom;
+ uint8_t chroma_log2_weight_denom;
+ uint8_t luma_weight_l0_flag[16];
+ int16_t luma_weight_l0[16];
+ int16_t luma_offset_l0[16];
+ uint8_t chroma_weight_l0_flag[16];
+ int16_t chroma_weight_l0[16][2];
+ int16_t chroma_offset_l0[16][2];
+ uint8_t luma_weight_l1_flag[16];
+ int16_t luma_weight_l1[16];
+ int16_t luma_offset_l1[16];
+ uint8_t chroma_weight_l1_flag[16];
+ int16_t chroma_weight_l1[16][2];
+ int16_t chroma_offset_l1[16][2];
+
+ int8_t delta_luma_weight_l0[16];
+ int8_t delta_luma_weight_l1[16];
+ int8_t delta_chroma_weight_l0[16][2];
+ int8_t delta_chroma_weight_l1[16][2];
+ int16_t delta_chroma_offset_l0[16][2];
+ int16_t delta_chroma_offset_l1[16][2];
+};
+
+void
+vk_video_parse_h265_slice_header(const struct VkVideoDecodeInfoKHR *frame_info,
+ const VkVideoDecodeH265PictureInfoKHR *pic_info,
+ const StdVideoH265SequenceParameterSet *sps,
+ const StdVideoH265PictureParameterSet *pps,
+ void *slice_data,
+ uint32_t slice_size,
+ struct vk_video_h265_slice_params *params);
+
+
+struct vk_video_h265_reference {
+ const VkVideoPictureResourceInfoKHR *pPictureResource;
+ StdVideoDecodeH265ReferenceInfoFlags flags;
+ uint32_t slot_index;
+ int32_t pic_order_cnt;
+};
+
+int vk_video_h265_poc_by_slot(const struct VkVideoDecodeInfoKHR *frame_info, int slot);
+
+void vk_fill_video_h265_reference_info(const VkVideoDecodeInfoKHR *frame_info,
+ const struct VkVideoDecodeH265PictureInfoKHR *pic,
+ const struct vk_video_h265_slice_params *slice_params,
+ struct vk_video_h265_reference ref_slots[][8]);
+
+#define VK_VIDEO_H264_MACROBLOCK_WIDTH 16
+#define VK_VIDEO_H264_MACROBLOCK_HEIGHT 16
+
+#define VK_VIDEO_H265_CTU_MAX_WIDTH 64
+#define VK_VIDEO_H265_CTU_MAX_HEIGHT 64
+
+#define VK_VIDEO_AV1_BLOCK_WIDTH 128
+#define VK_VIDEO_AV1_BLOCK_HEIGHT 128
+
+void
+vk_video_get_profile_alignments(const VkVideoProfileListInfoKHR *profile_list,
+ uint32_t *width_align_out, uint32_t *height_align_out);
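+
+/* Usage sketch (illustrative only; profile_list, create_info and align() come
+ * from the caller, not from this header):
+ *
+ *    uint32_t walign = 1, halign = 1;
+ *    vk_video_get_profile_alignments(profile_list, &walign, &halign);
+ *    uint32_t coded_width = align(create_info->extent.width, walign);
+ *    uint32_t coded_height = align(create_info->extent.height, halign);
+ */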
+
+uint8_t
+vk_video_get_h264_level(StdVideoH264LevelIdc level);
+
+const StdVideoH264SequenceParameterSet *
+vk_video_find_h264_enc_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH264PictureParameterSet *
+vk_video_find_h264_enc_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+
+const StdVideoH265VideoParameterSet *
+vk_video_find_h265_enc_std_vps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH265SequenceParameterSet *
+vk_video_find_h265_enc_std_sps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+const StdVideoH265PictureParameterSet *
+vk_video_find_h265_enc_std_pps(const struct vk_video_session_parameters *params,
+ uint32_t id);
+
+void
+vk_video_encode_h264_sps(const StdVideoH264SequenceParameterSet *sps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr);
+
+void
+vk_video_encode_h264_pps(const StdVideoH264PictureParameterSet *pps,
+ bool high_profile,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr);
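+
+/* Usage sketch (an illustration under assumptions, not part of the API
+ * contract): drivers typically chain these calls with one running size, e.g.
+ *
+ *    uint8_t buf[512];
+ *    size_t size = 0;
+ *    vk_video_encode_h264_sps(sps, sizeof(buf), &size, buf);
+ *    vk_video_encode_h264_pps(pps, is_high_profile, sizeof(buf), &size, buf);
+ *
+ * after which size is the total byte count of Annex-B SPS + PPS data in buf.
+ */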
+
+unsigned
+vk_video_get_h265_nal_unit(const StdVideoEncodeH265PictureInfo *pic_info);
+
+void
+vk_video_encode_h265_vps(const StdVideoH265VideoParameterSet *vps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr);
+
+void
+vk_video_encode_h265_sps(const StdVideoH265SequenceParameterSet *sps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr);
+
+void
+vk_video_encode_h265_pps(const StdVideoH265PictureParameterSet *pps,
+ size_t size_limit,
+ size_t *data_size_ptr,
+ void *data_ptr);
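+
+/* As with the H.264 helpers above, these are typically called back to back
+ * with a shared running size to build a VPS + SPS + PPS header blob (sketch
+ * only; buffer management is the caller's responsibility):
+ *
+ *    size_t size = 0;
+ *    vk_video_encode_h265_vps(vps, size_limit, &size, buf);
+ *    vk_video_encode_h265_sps(sps, size_limit, &size, buf);
+ *    vk_video_encode_h265_pps(pps, size_limit, &size, buf);
+ */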
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/vulkan/runtime/vk_ycbcr_conversion.c b/src/vulkan/runtime/vk_ycbcr_conversion.c
new file mode 100644
index 00000000000..9c1da39c357
--- /dev/null
+++ b/src/vulkan/runtime/vk_ycbcr_conversion.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vk_ycbcr_conversion.h"
+
+#include <vulkan/vulkan_android.h>
+
+#include "vk_common_entrypoints.h"
+#include "vk_device.h"
+#include "vk_format.h"
+#include "vk_util.h"
+
+VKAPI_ATTR VkResult VKAPI_CALL
+vk_common_CreateSamplerYcbcrConversion(VkDevice _device,
+ const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSamplerYcbcrConversion *pYcbcrConversion)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_ycbcr_conversion *conversion;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO);
+
+ conversion = vk_object_zalloc(device, pAllocator, sizeof(*conversion),
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);
+ if (!conversion)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ struct vk_ycbcr_conversion_state *state = &conversion->state;
+
+ state->format = pCreateInfo->format;
+ state->ycbcr_model = pCreateInfo->ycbcrModel;
+ state->ycbcr_range = pCreateInfo->ycbcrRange;
+
+ /* Search for VkExternalFormatANDROID and resolve the format. */
+ const VkExternalFormatANDROID *android_ext_info =
+ vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_FORMAT_ANDROID);
+
+ /* We assume that Android externalFormat is just a VkFormat */
+ if (android_ext_info && android_ext_info->externalFormat) {
+ assert(pCreateInfo->format == VK_FORMAT_UNDEFINED);
+ state->format = android_ext_info->externalFormat;
+ } else {
+ /* The Vulkan 1.1.95 spec says:
+ *
+ * "When creating an external format conversion, the value of
+ * components is ignored."
+ */
+ state->mapping[0] = pCreateInfo->components.r;
+ state->mapping[1] = pCreateInfo->components.g;
+ state->mapping[2] = pCreateInfo->components.b;
+ state->mapping[3] = pCreateInfo->components.a;
+ }
+
+ state->chroma_offsets[0] = pCreateInfo->xChromaOffset;
+ state->chroma_offsets[1] = pCreateInfo->yChromaOffset;
+ state->chroma_filter = pCreateInfo->chromaFilter;
+
+ const struct vk_format_ycbcr_info *ycbcr_info =
+ vk_format_get_ycbcr_info(state->format);
+
+ bool has_chroma_subsampled = false;
+ if (ycbcr_info) {
+ for (uint32_t p = 0; p < ycbcr_info->n_planes; p++) {
+ if (ycbcr_info->planes[p].has_chroma &&
+ (ycbcr_info->planes[p].denominator_scales[0] > 1 ||
+ ycbcr_info->planes[p].denominator_scales[1] > 1))
+ has_chroma_subsampled = true;
+ }
+ }
+ state->chroma_reconstruction = has_chroma_subsampled &&
+ (state->chroma_offsets[0] == VK_CHROMA_LOCATION_COSITED_EVEN ||
+ state->chroma_offsets[1] == VK_CHROMA_LOCATION_COSITED_EVEN);
+
+ *pYcbcrConversion = vk_ycbcr_conversion_to_handle(conversion);
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_DestroySamplerYcbcrConversion(VkDevice _device,
+ VkSamplerYcbcrConversion YcbcrConversion,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ VK_FROM_HANDLE(vk_ycbcr_conversion, conversion, YcbcrConversion);
+
+ if (!conversion)
+ return;
+
+ vk_object_free(device, pAllocator, conversion);
+}
diff --git a/src/vulkan/runtime/vk_ycbcr_conversion.h b/src/vulkan/runtime/vk_ycbcr_conversion.h
new file mode 100644
index 00000000000..cc4ed3eb22b
--- /dev/null
+++ b/src/vulkan/runtime/vk_ycbcr_conversion.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef VK_YCBCR_CONVERSION_H
+#define VK_YCBCR_CONVERSION_H
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_ycbcr_conversion_state {
+ VkFormat format;
+ VkSamplerYcbcrModelConversion ycbcr_model;
+ VkSamplerYcbcrRange ycbcr_range;
+ VkComponentSwizzle mapping[4];
+ VkChromaLocation chroma_offsets[2];
+ VkFilter chroma_filter;
+ bool chroma_reconstruction;
+};
+
+struct vk_ycbcr_conversion {
+ struct vk_object_base base;
+ struct vk_ycbcr_conversion_state state;
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(vk_ycbcr_conversion, base,
+ VkSamplerYcbcrConversion,
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)
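+
+/* Usage sketch (illustrative; pCreateInfo here is a driver's
+ * VkSamplerCreateInfo, not something defined in this header):
+ *
+ *    const VkSamplerYcbcrConversionInfo *conv_info =
+ *       vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
+ *    if (conv_info != NULL) {
+ *       VK_FROM_HANDLE(vk_ycbcr_conversion, conversion, conv_info->conversion);
+ *       // use conversion->state.format, .mapping, .chroma_filter, etc.
+ *    }
+ */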
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_YCBCR_CONVERSION_H */
diff --git a/src/vulkan/util/gen_enum_to_str.py b/src/vulkan/util/gen_enum_to_str.py
index 600a79fd2d8..849364ace39 100644
--- a/src/vulkan/util/gen_enum_to_str.py
+++ b/src/vulkan/util/gen_enum_to_str.py
@@ -1,4 +1,3 @@
-# encoding=utf-8
# Copyright © 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -22,11 +21,14 @@
"""Create enum to string functions for vulkan using vk.xml."""
import argparse
+import functools
import os
+import re
import textwrap
import xml.etree.ElementTree as et
from mako.template import Template
+from vk_extensions import Extension, filter_api, get_all_required
COPYRIGHT = textwrap.dedent(u"""\
* Copyright © 2017 Intel Corporation
@@ -57,7 +59,7 @@ C_TEMPLATE = Template(textwrap.dedent(u"""\
*/
#include <string.h>
- #include <vulkan/vulkan.h>
+ #include <vulkan/vulkan_core.h>
#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_layer.h>
#include "util/macros.h"
@@ -76,8 +78,32 @@ C_TEMPLATE = Template(textwrap.dedent(u"""\
case ${v}:
return "${enum.values[v]}";
% endfor
+ case ${enum.max_enum_name}: return "${enum.max_enum_name}";
default:
- unreachable("Undefined enum value.");
+ return "Unknown ${enum.name} value.";
+ }
+ }
+
+ % if enum.guard:
+#endif
+ % endif
+ %endfor
+
+ % for enum in bitmasks:
+
+ % if enum.guard:
+#ifdef ${enum.guard}
+ % endif
+ const char *
+ vk_${enum.name[2:]}_to_str(${enum.name} input)
+ {
+ switch((int64_t)input) {
+ % for v in sorted(enum.values.keys()):
+ case ${v}:
+ return "${enum.values[v]}";
+ % endfor
+ default:
+ return "Unknown ${enum.name} value.";
}
}
@@ -104,6 +130,19 @@ C_TEMPLATE = Template(textwrap.dedent(u"""\
unreachable("Undefined struct type.");
}
}
+
+ const char *
+ vk_ObjectType_to_ObjectName(VkObjectType type)
+ {
+ switch((int)type) {
+ % for object_type in sorted(object_types[0].enum_to_name.keys()):
+ case ${object_type}:
+ return "${object_types[0].enum_to_name[object_type]}";
+ % endfor
+ default:
+ return "Unknown VkObjectType value.";
+ }
+ }
"""))
H_TEMPLATE = Template(textwrap.dedent(u"""\
@@ -123,11 +162,17 @@ H_TEMPLATE = Template(textwrap.dedent(u"""\
extern "C" {
#endif
- % for ext in extensions:
- #define _${ext.name}_number (${ext.number})
+ % for enum in enums:
+ % if enum.guard:
+#ifdef ${enum.guard}
+ % endif
+ const char * vk_${enum.name[2:]}_to_str(${enum.name} input);
+ % if enum.guard:
+#endif
+ % endif
% endfor
- % for enum in enums:
+ % for enum in bitmasks:
% if enum.guard:
#ifdef ${enum.guard}
% endif
@@ -139,6 +184,71 @@ H_TEMPLATE = Template(textwrap.dedent(u"""\
size_t vk_structure_type_size(const struct VkBaseInStructure *item);
+ const char * vk_ObjectType_to_ObjectName(VkObjectType type);
+
+ #ifdef __cplusplus
+ } /* extern "C" */
+ #endif
+
+ #endif"""))
+
+
+H_DEFINE_TEMPLATE = Template(textwrap.dedent(u"""\
+ /* Autogenerated file -- do not edit
+ * generated by ${file}
+ *
+ ${copyright}
+ */
+
+ #ifndef MESA_VK_ENUM_DEFINES_H
+ #define MESA_VK_ENUM_DEFINES_H
+
+ #include <vulkan/vulkan_core.h>
+ #include <vulkan/vk_android_native_buffer.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ % for ext in extensions:
+ #define _${ext.name}_number (${ext.number})
+ % endfor
+
+ % for enum in bitmasks:
+ % if enum.bitwidth > 32:
+ <% continue %>
+ % endif
+ % if enum.guard:
+#ifdef ${enum.guard}
+ % endif
+ #define ${enum.all_bits_name()} ${hex(enum.all_bits_value())}u
+ % if enum.guard:
+#endif
+ % endif
+ % endfor
+
+ % for enum in bitmasks:
+ % if enum.bitwidth < 64:
+ <% continue %>
+ % endif
+ /* Redefine bitmask values of ${enum.name} */
+ % if enum.guard:
+#ifdef ${enum.guard}
+ % endif
+ % for n, v in enum.name_to_value.items():
+ #define ${n} (${hex(v)}ULL)
+ % endfor
+ % if enum.guard:
+#endif
+ % endif
+ % endfor
+
+ static inline VkFormatFeatureFlags
+ vk_format_features2_to_features(VkFormatFeatureFlags2 features2)
+ {
+ return features2 & VK_ALL_FORMAT_FEATURE_FLAG_BITS;
+ }
+
#ifdef __cplusplus
} /* extern "C" */
#endif
@@ -173,11 +283,30 @@ class VkExtension(object):
self.define = define
+def CamelCase_to_SHOUT_CASE(s):
+ """Convert CamelCase to SHOUT_CASE, e.g. "VkImageLayout" -> "VK_IMAGE_LAYOUT"."""
+ return (s[:1] + re.sub(r'(?<![A-Z])([A-Z])', r'_\1', s[1:])).upper()
+
+def compute_max_enum_name(s):
+ if s == "VkSwapchainImageUsageFlagBitsANDROID":
+ return "VK_SWAPCHAIN_IMAGE_USAGE_FLAG_BITS_MAX_ENUM"
+ max_enum_name = CamelCase_to_SHOUT_CASE(s)
+ last_prefix = max_enum_name.rsplit('_', 1)[-1]
+ # Vendor suffixes must remain the final component, so MAX_ENUM is inserted before them
+ if last_prefix in ['AMD', 'EXT', 'INTEL', 'KHR', 'NV', 'LUNARG', 'QCOM', 'MSFT']:
+ max_enum_name = "_".join(max_enum_name.split('_')[:-1])
+ max_enum_name = max_enum_name + "_MAX_ENUM_" + last_prefix
+ else:
+ max_enum_name = max_enum_name + "_MAX_ENUM"
+
+ return max_enum_name
+
class VkEnum(object):
"""Simple struct-like class representing a single Vulkan Enum."""
- def __init__(self, name, values=None):
+ def __init__(self, name, bitwidth=32, values=None):
self.name = name
+ self.max_enum_name = compute_max_enum_name(name)
+ self.bitwidth = bitwidth
self.extension = None
# Maps numbers to names
self.values = values or dict()
@@ -185,6 +314,15 @@ class VkEnum(object):
self.guard = None
self.name_to_alias_list = {}
+ def all_bits_name(self):
+ assert self.name.startswith('Vk')
+ assert re.search(r'FlagBits[A-Z]*$', self.name)
+
+ return 'VK_ALL_' + CamelCase_to_SHOUT_CASE(self.name[2:])
+
+ def all_bits_value(self):
+ return functools.reduce(lambda a,b: a | b, self.values.keys(), 0)
+
def add_value(self, name, value=None,
extnum=None, offset=None, alias=None,
error=False):
@@ -193,7 +331,7 @@ class VkEnum(object):
if alias not in self.name_to_value:
# We don't have this alias yet. Just record the alias and
# we'll deal with it later.
- alias_list = self.name_to_alias_list.get(alias, [])
+ alias_list = self.name_to_alias_list.setdefault(alias, [])
alias_list.append(name);
return
@@ -215,7 +353,7 @@ class VkEnum(object):
# Now that the value has been fully added, resolve aliases, if any.
if name in self.name_to_alias_list:
for alias in self.name_to_alias_list[name]:
- add_value(alias, value)
+ self.add_value(alias, value)
del self.name_to_alias_list[name]
def add_value_from_xml(self, elem, extension=None):
@@ -223,6 +361,9 @@ class VkEnum(object):
if 'value' in elem.attrib:
self.add_value(elem.attrib['name'],
value=int(elem.attrib['value'], base=0))
+ elif 'bitpos' in elem.attrib:
+ self.add_value(elem.attrib['name'],
+ value=(1 << int(elem.attrib['bitpos'], base=0)))
elif 'alias' in elem.attrib:
self.add_value(elem.attrib['name'], alias=elem.attrib['alias'])
else:
@@ -240,15 +381,6 @@ class VkEnum(object):
self.guard = g
-class VkCommand(object):
- """Simple struct-like class representing a single Vulkan command"""
-
- def __init__(self, name, device_entrypoint=False):
- self.name = name
- self.device_entrypoint = device_entrypoint
- self.extension = None
-
-
class VkChainStruct(object):
"""Simple struct-like class representing a single Vulkan struct identified with a VkStructureType"""
def __init__(self, name, stype):
@@ -264,8 +396,15 @@ def struct_get_stype(xml_node):
return member.get('values')
return None
+class VkObjectType(object):
+ """Simple struct-like class representing a single Vulkan object type"""
+ def __init__(self, name):
+ self.name = name
+ self.enum_to_name = dict()
+
-def parse_xml(enum_factory, ext_factory, struct_factory, filename):
+def parse_xml(enum_factory, ext_factory, struct_factory, bitmask_factory,
+ obj_type_factory, filename, beta):
"""Parse the XML file. Accumulate results into the factories.
This parser is a memory efficient iterative XML parser that returns a list
@@ -273,19 +412,59 @@ def parse_xml(enum_factory, ext_factory, struct_factory, filename):
"""
xml = et.parse(filename)
+ api = 'vulkan'
+
+ required_types = get_all_required(xml, 'type', api, beta)
for enum_type in xml.findall('./enums[@type="enum"]'):
- enum = enum_factory(enum_type.attrib['name'])
+ if not filter_api(enum_type, api):
+ continue
+
+ type_name = enum_type.attrib['name']
+ if not type_name in required_types:
+ continue
+
+ enum = enum_factory(type_name)
+ for value in enum_type.findall('./enum'):
+ if filter_api(value, api):
+ enum.add_value_from_xml(value)
+
+ # Bitmask enums are collected in their own factory so they can be emitted separately.
+ for enum_type in xml.findall('./enums[@type="bitmask"]'):
+ if not filter_api(enum_type, api):
+ continue
+
+ type_name = enum_type.attrib['name']
+ if not type_name in required_types:
+ continue
+
+ bitwidth = int(enum_type.attrib.get('bitwidth', 32))
+ enum = bitmask_factory(type_name, bitwidth=bitwidth)
for value in enum_type.findall('./enum'):
- enum.add_value_from_xml(value)
+ if filter_api(value, api):
+ enum.add_value_from_xml(value)
- for value in xml.findall('./feature/require/enum[@extends]'):
- enum = enum_factory.get(value.attrib['extends'])
- if enum is not None:
- enum.add_value_from_xml(value)
+ for feature in xml.findall('./feature'):
+ if not api in feature.attrib['api'].split(','):
+ continue
+
+ for value in feature.findall('./require/enum[@extends]'):
+ extends = value.attrib['extends']
+ enum = enum_factory.get(extends)
+ if enum is not None:
+ enum.add_value_from_xml(value)
+ enum = bitmask_factory.get(extends)
+ if enum is not None:
+ enum.add_value_from_xml(value)
for struct_type in xml.findall('./types/type[@category="struct"]'):
+ if not filter_api(struct_type, api):
+ continue
+
name = struct_type.attrib['name']
+ if name not in required_types:
+ continue
+
stype = struct_get_stype(struct_type)
if stype is not None:
struct_factory(name, stype=stype)
@@ -296,32 +475,61 @@ def parse_xml(enum_factory, ext_factory, struct_factory, filename):
define = platform.attrib['protect']
platform_define[name] = define
- for ext_elem in xml.findall('./extensions/extension[@supported="vulkan"]'):
- define = None
- if "platform" in ext_elem.attrib:
- define = platform_define[ext_elem.attrib['platform']]
- extension = ext_factory(ext_elem.attrib['name'],
- number=int(ext_elem.attrib['number']),
- define=define)
+ for ext_elem in xml.findall('./extensions/extension'):
+ ext = Extension.from_xml(ext_elem)
+ if api not in ext.supported:
+ continue
- for value in ext_elem.findall('./require/enum[@extends]'):
- enum = enum_factory.get(value.attrib['extends'])
- if enum is not None:
- enum.add_value_from_xml(value, extension)
- for t in ext_elem.findall('./require/type'):
- struct = struct_factory.get(t.attrib['name'])
- if struct is not None:
- struct.extension = extension
+ define = platform_define.get(ext.platform, None)
+ extension = ext_factory(ext.name, number=ext.number, define=define)
+
+ for req_elem in ext_elem.findall('./require'):
+ if not filter_api(req_elem, api):
+ continue
+
+ for value in req_elem.findall('./enum[@extends]'):
+ extends = value.attrib['extends']
+ enum = enum_factory.get(extends)
+ if enum is not None:
+ enum.add_value_from_xml(value, extension)
+ enum = bitmask_factory.get(extends)
+ if enum is not None:
+ enum.add_value_from_xml(value, extension)
+
+ for t in req_elem.findall('./type'):
+ struct = struct_factory.get(t.attrib['name'])
+ if struct is not None:
+ struct.extension = extension
if define:
for value in ext_elem.findall('./require/type[@name]'):
enum = enum_factory.get(value.attrib['name'])
if enum is not None:
enum.set_guard(define)
+ enum = bitmask_factory.get(value.attrib['name'])
+ if enum is not None:
+ enum.set_guard(define)
+
+ obj_type_enum = enum_factory.get("VkObjectType")
+ obj_types = obj_type_factory("VkObjectType")
+ for object_type in xml.findall('./types/type[@category="handle"]'):
+ for object_name in object_type.findall('./name'):
+ # Convert to int to avoid undefined enums
+ enum = object_type.attrib['objtypeenum']
+
+ # Annoyingly, object types are hard to filter by API so just
+ # look for whether or not we can find the enum name in the
+ # VkObjectType enum.
+ if enum not in obj_type_enum.name_to_value:
+ continue
+
+ enum_val = obj_type_enum.name_to_value[enum]
+ obj_types.enum_to_name[enum_val] = object_name.text
def main():
parser = argparse.ArgumentParser()
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
parser.add_argument('--xml', required=True,
help='Vulkan API XML files',
action='append',
@@ -335,20 +543,29 @@ def main():
enum_factory = NamedFactory(VkEnum)
ext_factory = NamedFactory(VkExtension)
struct_factory = NamedFactory(VkChainStruct)
+ obj_type_factory = NamedFactory(VkObjectType)
+ bitmask_factory = NamedFactory(VkEnum)
+
for filename in args.xml_files:
- parse_xml(enum_factory, ext_factory, struct_factory, filename)
+ parse_xml(enum_factory, ext_factory, struct_factory, bitmask_factory,
+ obj_type_factory, filename, args.beta)
enums = sorted(enum_factory.registry.values(), key=lambda e: e.name)
extensions = sorted(ext_factory.registry.values(), key=lambda e: e.name)
structs = sorted(struct_factory.registry.values(), key=lambda e: e.name)
+ bitmasks = sorted(bitmask_factory.registry.values(), key=lambda e: e.name)
+ object_types = sorted(obj_type_factory.registry.values(), key=lambda e: e.name)
for template, file_ in [(C_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.c')),
- (H_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.h'))]:
- with open(file_, 'w') as f:
+ (H_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.h')),
+ (H_DEFINE_TEMPLATE, os.path.join(args.outdir, 'vk_enum_defines.h'))]:
+ with open(file_, 'w', encoding='utf-8') as f:
f.write(template.render(
file=os.path.basename(__file__),
enums=enums,
extensions=extensions,
structs=structs,
+ bitmasks=bitmasks,
+ object_types=object_types,
copyright=COPYRIGHT))
diff --git a/src/vulkan/util/meson.build b/src/vulkan/util/meson.build
index cd87a69cbf3..3861501efc1 100644
--- a/src/vulkan/util/meson.build
+++ b/src/vulkan/util/meson.build
@@ -22,69 +22,53 @@
# dependency tracking.
vk_extensions_depend_files = [
]
-vk_extensions_gen_depend_files = [
+vk_entrypoints_depend_files = [
files('vk_extensions.py'),
vk_extensions_depend_files,
]
-vk_dispatch_table_gen_depend_files = [
+vk_extensions_gen_depend_files = [
files('vk_extensions.py'),
vk_extensions_depend_files,
]
+vk_dispatch_table_gen_depend_files = [
+ files('vk_entrypoints.py'),
+ vk_entrypoints_depend_files,
+]
+vk_dispatch_trampolines_gen_depend_files = [
+ files('vk_entrypoints.py'),
+ vk_entrypoints_depend_files,
+]
vk_entrypoints_gen_depend_files = [
- files('vk_dispatch_table_gen.py'),
- vk_dispatch_table_gen_depend_files,
+ files('vk_entrypoints.py'),
+ vk_entrypoints_depend_files,
]
vk_cmd_queue_gen_depend_files = [
- files('vk_dispatch_table_gen.py'),
- vk_dispatch_table_gen_depend_files,
+ files('vk_entrypoints.py'),
+ vk_entrypoints_depend_files,
+]
+vk_physical_device_features_gen_depend_files = [
+ files('vk_extensions.py'),
+]
+vk_physical_device_properties_gen_depend_files = [
+ files('vk_extensions.py'),
]
-vk_commands_gen_depend_files = [
- files('vk_dispatch_table_gen.py'),
- vk_dispatch_table_gen_depend_files,
+vk_synchronization_helpers_gen_depend_files = [
+ files('vk_extensions.py'),
]
vk_entrypoints_gen = files('vk_entrypoints_gen.py')
vk_extensions_gen = files('vk_extensions_gen.py')
vk_icd_gen = files('vk_icd_gen.py')
-vk_commands_gen = files('vk_commands_gen.py')
+vk_cmd_queue_gen = files('vk_cmd_queue_gen.py')
+vk_dispatch_trampolines_gen = files('vk_dispatch_trampolines_gen.py')
+vk_physical_device_features_gen = files('vk_physical_device_features_gen.py')
+vk_physical_device_properties_gen = files('vk_physical_device_properties_gen.py')
+vk_synchronization_helpers_gen = files('vk_synchronization_helpers_gen.py')
files_vulkan_util = files(
'vk_alloc.c',
- 'vk_alloc.h',
- 'vk_cmd_copy.c',
- 'vk_debug_report.c',
- 'vk_debug_report.h',
- 'vk_deferred_operation.c',
- 'vk_deferred_operation.h',
- 'vk_descriptors.c',
- 'vk_descriptors.h',
- 'vk_device.c',
- 'vk_device.h',
'vk_format.c',
- 'vk_image.c',
- 'vk_image.h',
- 'vk_instance.c',
- 'vk_instance.h',
- 'vk_object.c',
- 'vk_object.h',
- 'vk_physical_device.c',
- 'vk_physical_device.h',
- 'vk_render_pass.c',
- 'vk_shader_module.c',
- 'vk_shader_module.h',
'vk_util.c',
- 'vk_util.h',
-)
-
-vk_common_entrypoints = custom_target(
- 'vk_common_entrypoints',
- input : [vk_entrypoints_gen, vk_api_xml],
- output : ['vk_common_entrypoints.h', 'vk_common_entrypoints.c'],
- command : [
- prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--proto', '--weak',
- '--out-h', '@OUTPUT0@', '--out-c', '@OUTPUT1@', '--prefix', 'vk_common',
- ],
- depend_files : vk_entrypoints_gen_depend_files,
)
vk_dispatch_table = custom_target(
@@ -93,7 +77,8 @@ vk_dispatch_table = custom_target(
output : ['vk_dispatch_table.c', 'vk_dispatch_table.h'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@',
- '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@'
+ '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@',
+ '--beta', with_vulkan_beta.to_string()
],
depend_files : vk_dispatch_table_gen_depend_files,
)
@@ -101,66 +86,54 @@ vk_dispatch_table = custom_target(
vk_enum_to_str = custom_target(
'vk_enum_to_str',
input : ['gen_enum_to_str.py', vk_api_xml],
- output : ['vk_enum_to_str.c', 'vk_enum_to_str.h'],
+ output : ['vk_enum_to_str.c', 'vk_enum_to_str.h', 'vk_enum_defines.h'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@',
- '--outdir', meson.current_build_dir()
+ '--outdir', meson.current_build_dir(),
+ '--beta', with_vulkan_beta.to_string()
],
)
-vk_extensions = custom_target(
- 'vk_extensions',
- input : ['vk_extensions_gen.py', vk_api_xml],
- output : ['vk_extensions.c', 'vk_extensions.h'],
+vk_struct_type_cast = custom_target(
+ 'vk_struct_type_cast',
+ input : ['vk_struct_type_cast_gen.py', vk_api_xml],
+ output : ['vk_struct_type_cast.h'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@',
- '--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@'
+ '--outdir', meson.current_build_dir(),
+ '--beta', with_vulkan_beta.to_string()
],
- depend_files : vk_extensions_gen_depend_files,
)
-vk_cmd_queue = custom_target(
- 'vk_cmd_queue',
- input : ['vk_cmd_queue_gen.py', vk_api_xml],
- output : ['vk_cmd_queue.c', 'vk_cmd_queue.h'],
+vk_extensions = custom_target(
+ 'vk_extensions',
+ input : ['vk_extensions_gen.py', vk_api_xml],
+ output : ['vk_extensions.c', 'vk_extensions.h'],
command : [
prog_python, '@INPUT0@', '--xml', '@INPUT1@',
'--out-c', '@OUTPUT0@', '--out-h', '@OUTPUT1@'
],
- depend_files : vk_cmd_queue_gen_depend_files,
+ depend_files : vk_extensions_gen_depend_files,
)
libvulkan_util = static_library(
'vulkan_util',
- [files_vulkan_util, vk_common_entrypoints, vk_dispatch_table,
- vk_enum_to_str, vk_extensions, vk_cmd_queue],
- include_directories : [inc_include, inc_src, inc_gallium],
+ [files_vulkan_util, vk_dispatch_table, vk_enum_to_str,
+ vk_struct_type_cast, vk_extensions],
+ include_directories : [inc_include, inc_src],
dependencies : [vulkan_wsi_deps, idep_mesautil, idep_nir_headers],
- # For glsl_type_singleton
- link_with : libcompiler,
- c_args : [vulkan_wsi_args],
+ c_args : [c_msvc_compat_args],
gnu_symbol_visibility : 'hidden',
build_by_default : false,
)
idep_vulkan_util_headers = declare_dependency(
- sources : [vk_dispatch_table[1], vk_enum_to_str[1], vk_extensions[1]],
+ sources : [vk_dispatch_table[1], vk_enum_to_str[1],
+ vk_struct_type_cast[0], vk_extensions[1]],
include_directories : include_directories('.')
)
-# This is likely a bug in the Meson VS backend, as MSVC with ninja works fine.
-# See this discussion here:
-# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10506
-if get_option('backend').startswith('vs')
- idep_vulkan_util = declare_dependency(
- link_with : libvulkan_util,
- dependencies : idep_vulkan_util_headers
- )
-else
- idep_vulkan_util = declare_dependency(
- # Instruct users of this library to link with --whole-archive. Otherwise,
- # our weak function overloads may not resolve properly.
- link_whole : libvulkan_util,
- dependencies : idep_vulkan_util_headers
- )
-endif
+idep_vulkan_util = declare_dependency(
+ link_with : libvulkan_util,
+ dependencies : idep_vulkan_util_headers
+)
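
The new `--beta with_vulkan_beta.to_string()` arguments simply forward the build's with_vulkan_beta setting to the Python generators as the string 'true' or 'false'; when enabled, the generated sources wrap beta-only items in VK_ENABLE_BETA_EXTENSIONS guards, as the templates further down in this patch do. A minimal hand-written sketch of that guard pattern (the declaration is a placeholder, not a real entry point):

    /* Sketch of the guard pattern emitted when --beta is true; the name
     * example_beta_only_entrypoint is a placeholder for illustration. */
    #include <vulkan/vulkan_core.h>
    #ifdef VK_ENABLE_BETA_EXTENSIONS
    #include <vulkan/vulkan_beta.h>
    #endif

    #ifdef VK_ENABLE_BETA_EXTENSIONS
    /* Only compiled when the build enables beta extensions, mirroring the
     * with_vulkan_beta option forwarded via --beta above. */
    VKAPI_ATTR void VKAPI_CALL example_beta_only_entrypoint(void);
    #endif
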
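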
diff --git a/src/vulkan/util/vk_alloc.c b/src/vulkan/util/vk_alloc.c
index c687f92ae30..8ab116656c2 100644
--- a/src/vulkan/util/vk_alloc.c
+++ b/src/vulkan/util/vk_alloc.c
@@ -7,7 +7,7 @@
#include <stdlib.h>
-#if __STDC_VERSION__ >= 201112L && !defined(_MSC_VER)
+#ifndef _MSC_VER
#include <stddef.h>
#define MAX_ALIGN alignof(max_align_t)
#else
diff --git a/src/vulkan/util/vk_alloc.h b/src/vulkan/util/vk_alloc.h
index 56d3d7744fb..0121e96ccfd 100644
--- a/src/vulkan/util/vk_alloc.h
+++ b/src/vulkan/util/vk_alloc.h
@@ -25,12 +25,17 @@
/* common allocation inlines for vulkan drivers */
+#include <stdio.h>
#include <string.h>
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
#include "util/u_math.h"
#include "util/macros.h"
+#include "util/u_printf.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
const VkAllocationCallbacks *
vk_default_allocator(void);
@@ -82,7 +87,7 @@ vk_strdup(const VkAllocationCallbacks *alloc, const char *s,
return NULL;
size_t size = strlen(s) + 1;
- char *copy = vk_alloc(alloc, size, 1, scope);
+ char *copy = (char *)vk_alloc(alloc, size, 1, scope);
if (copy == NULL)
return NULL;
@@ -91,6 +96,32 @@ vk_strdup(const VkAllocationCallbacks *alloc, const char *s,
return copy;
}
+static inline char *
+vk_vasprintf(const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ const char *fmt, va_list args)
+{
+ size_t size = u_printf_length(fmt, args) + 1;
+ char *ptr = (char *)vk_alloc(alloc, size, 1, scope);
+ if (ptr != NULL)
+ vsnprintf(ptr, size, fmt, args);
+
+ return ptr;
+}
+
+PRINTFLIKE(3, 4) static inline char *
+vk_asprintf(const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ char *ptr = vk_vasprintf(alloc, scope, fmt, args);
+ va_end(args);
+
+ return ptr;
+}
+
static inline void *
vk_alloc2(const VkAllocationCallbacks *parent_alloc,
const VkAllocationCallbacks *alloc,
@@ -150,20 +181,17 @@ struct vk_multialloc {
size_t align;
uint32_t ptr_count;
- void **ptrs[8];
+ void **ptrs[16];
};
-#define VK_MULTIALLOC_INIT \
- ((struct vk_multialloc) { 0, })
-
#define VK_MULTIALLOC(_name) \
- struct vk_multialloc _name = VK_MULTIALLOC_INIT
+ struct vk_multialloc _name = { .align = 1 }
static ALWAYS_INLINE void
vk_multialloc_add_size_align(struct vk_multialloc *ma,
void **ptr, size_t size, size_t align)
{
- assert(util_is_power_of_two_nonzero(align));
+ assert(util_is_power_of_two_nonzero_uintptr(align));
if (size == 0) {
*ptr = NULL;
return;
@@ -203,7 +231,7 @@ vk_multialloc_alloc(struct vk_multialloc *ma,
const VkAllocationCallbacks *alloc,
VkSystemAllocationScope scope)
{
- char *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
+ void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
if (!ptr)
return NULL;
@@ -216,10 +244,10 @@ vk_multialloc_alloc(struct vk_multialloc *ma,
* constant, GCC is incapable of figuring this out and unrolling the loop
* so we have to give it a little help.
*/
- STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
+ STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 16);
#define _VK_MULTIALLOC_UPDATE_POINTER(_i) \
if ((_i) < ma->ptr_count) \
- *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
+ *ma->ptrs[_i] = (char *)ptr + (uintptr_t)*ma->ptrs[_i]
_VK_MULTIALLOC_UPDATE_POINTER(0);
_VK_MULTIALLOC_UPDATE_POINTER(1);
_VK_MULTIALLOC_UPDATE_POINTER(2);
@@ -228,6 +256,14 @@ vk_multialloc_alloc(struct vk_multialloc *ma,
_VK_MULTIALLOC_UPDATE_POINTER(5);
_VK_MULTIALLOC_UPDATE_POINTER(6);
_VK_MULTIALLOC_UPDATE_POINTER(7);
+ _VK_MULTIALLOC_UPDATE_POINTER(8);
+ _VK_MULTIALLOC_UPDATE_POINTER(9);
+ _VK_MULTIALLOC_UPDATE_POINTER(10);
+ _VK_MULTIALLOC_UPDATE_POINTER(11);
+ _VK_MULTIALLOC_UPDATE_POINTER(12);
+ _VK_MULTIALLOC_UPDATE_POINTER(13);
+ _VK_MULTIALLOC_UPDATE_POINTER(14);
+ _VK_MULTIALLOC_UPDATE_POINTER(15);
#undef _VK_MULTIALLOC_UPDATE_POINTER
return ptr;
@@ -266,4 +302,8 @@ vk_multialloc_zalloc2(struct vk_multialloc *ma,
return vk_multialloc_zalloc(ma, alloc ? alloc : parent_alloc, scope);
}
+#ifdef __cplusplus
+}
+#endif
+
#endif
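
A minimal usage sketch of the allocation helpers touched above — vk_asprintf building a string through the Vulkan allocation callbacks, and vk_multialloc packing a struct plus a trailing array (it now tracks up to 16 sub-allocations) into a single vk_multialloc_zalloc call. Only the vk_* helpers and VK_MULTIALLOC come from vk_alloc.h; the example_obj type and everything else below is invented for illustration:

    /* Hand-written sketch, not part of the patch. */
    #include "vk_alloc.h"

    struct example_obj {
       uint32_t entry_count;
       uint64_t *entries;   /* points into the same allocation */
    };

    static struct example_obj *
    example_obj_create(const VkAllocationCallbacks *alloc, uint32_t entry_count)
    {
       struct example_obj *obj;
       uint64_t *entries;

       /* VK_MULTIALLOC() now starts with align = 1 instead of a zeroed struct. */
       VK_MULTIALLOC(ma);
       vk_multialloc_add_size_align(&ma, (void **)&obj, sizeof(*obj), 8);
       vk_multialloc_add_size_align(&ma, (void **)&entries,
                                    entry_count * sizeof(*entries), 8);

       if (vk_multialloc_zalloc(&ma, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT) == NULL)
          return NULL;

       obj->entry_count = entry_count;
       obj->entries = entries;

       /* vk_asprintf/vk_vasprintf allocate through the same callbacks; pair
        * them with vk_free. */
       char *label = vk_asprintf(alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
                                 "example_obj with %u entries", entry_count);
       vk_free(alloc, label);

       return obj;
    }
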
diff --git a/src/vulkan/util/vk_cmd_queue_gen.py b/src/vulkan/util/vk_cmd_queue_gen.py
index 539392a3101..6b4eb33403f 100644
--- a/src/vulkan/util/vk_cmd_queue_gen.py
+++ b/src/vulkan/util/vk_cmd_queue_gen.py
@@ -1,4 +1,3 @@
-# coding=utf-8
COPYRIGHT=u"""
/* Copyright © 2015-2021 Intel Corporation
* Copyright © 2021 Collabora, Ltd.
@@ -34,14 +33,38 @@ from mako.template import Template
# Mesa-local imports must be declared in meson variable
# '{file_without_suffix}_depend_files'.
-from vk_dispatch_table_gen import get_entrypoints_from_xml, EntrypointParam
-
-MANUAL_COMMANDS = ['CmdPushDescriptorSetKHR', # This script doesn't know how to copy arrays in structs in arrays
- 'CmdPushDescriptorSetWithTemplateKHR', # pData's size cannot be calculated from the xml
- 'CmdDrawMultiEXT', # The size of the elements is specified in a stride param
- 'CmdDrawMultiIndexedEXT', # The size of the elements is specified in a stride param
- 'CmdBindDescriptorSets', # The VkPipelineLayout object could be released before the command is executed
- ]
+from vk_entrypoints import EntrypointParam, get_entrypoints_from_xml
+from vk_extensions import filter_api, get_all_required
+
+# These have hand-typed implementations in vk_cmd_enqueue.c
+MANUAL_COMMANDS = [
+ # This script doesn't know how to copy arrays inside structs inside arrays
+ 'CmdPushDescriptorSetKHR',
+
+ # The size of the elements is specified in a stride param
+ 'CmdDrawMultiEXT',
+ 'CmdDrawMultiIndexedEXT',
+
+ # The VkPipelineLayout object could be released before the command is
+ # executed
+ 'CmdBindDescriptorSets',
+
+ # Incomplete struct copies which lead to a use-after-free.
+ 'CmdBuildAccelerationStructuresKHR',
+]
+
+NO_ENQUEUE_COMMANDS = [
+ # pData's size cannot be calculated from the xml
+ 'CmdPushConstants2KHR',
+ 'CmdPushDescriptorSet2KHR',
+ 'CmdPushDescriptorSetWithTemplate2KHR',
+ 'CmdPushDescriptorSetWithTemplateKHR',
+
+ # These don't return void
+ 'CmdSetPerformanceMarkerINTEL',
+ 'CmdSetPerformanceStreamMarkerINTEL',
+ 'CmdSetPerformanceOverrideINTEL',
+]
TEMPLATE_H = Template(COPYRIGHT + """\
/* This file generated from ${filename}, don't edit directly. */
@@ -51,14 +74,19 @@ TEMPLATE_H = Template(COPYRIGHT + """\
#include "util/list.h"
#define VK_PROTOTYPES
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+#include <vulkan/vulkan_beta.h>
+#endif
#ifdef __cplusplus
extern "C" {
#endif
+struct vk_device_dispatch_table;
+
struct vk_cmd_queue {
- VkAllocationCallbacks *alloc;
+ const VkAllocationCallbacks *alloc;
struct list_head cmds;
};
@@ -75,6 +103,7 @@ enum vk_cmd_type {
};
extern const char *vk_cmd_queue_type_names[];
+extern size_t vk_cmd_queue_type_sizes[];
% for c in commands:
% if len(c.params) <= 1: # Avoid "error C2016: C requires that a struct or union have at least one member"
@@ -93,9 +122,24 @@ struct ${to_struct_name(c.name)} {
% endif
% endfor
+struct vk_cmd_queue_entry;
+
+/* this ordering must match vk_cmd_queue_entry */
+struct vk_cmd_queue_entry_base {
+ struct list_head cmd_link;
+ enum vk_cmd_type type;
+ void *driver_data;
+ void (*driver_free_cb)(struct vk_cmd_queue *queue,
+ struct vk_cmd_queue_entry *cmd);
+};
+
+/* this ordering must match vk_cmd_queue_entry_base */
struct vk_cmd_queue_entry {
struct list_head cmd_link;
enum vk_cmd_type type;
+ void *driver_data;
+ void (*driver_free_cb)(struct vk_cmd_queue *queue,
+ struct vk_cmd_queue_entry *cmd);
union {
% for c in commands:
% if len(c.params) <= 1:
@@ -110,17 +154,16 @@ struct vk_cmd_queue_entry {
% endif
% endfor
} u;
- void *driver_data;
};
% for c in commands:
-% if c.name in manual_commands:
+% if c.name in manual_commands or c.name in no_enqueue_commands:
<% continue %>
% endif
% if c.guard is not None:
#ifdef ${c.guard}
% endif
- void vk_enqueue_${to_underscore(c.name)}(struct vk_cmd_queue *queue
+ VkResult vk_enqueue_${to_underscore(c.name)}(struct vk_cmd_queue *queue
% for p in c.params[1:]:
, ${p.decl}
% endfor
@@ -133,10 +176,35 @@ struct vk_cmd_queue_entry {
void vk_free_queue(struct vk_cmd_queue *queue);
+static inline void
+vk_cmd_queue_init(struct vk_cmd_queue *queue, VkAllocationCallbacks *alloc)
+{
+ queue->alloc = alloc;
+ list_inithead(&queue->cmds);
+}
+
+static inline void
+vk_cmd_queue_reset(struct vk_cmd_queue *queue)
+{
+ vk_free_queue(queue);
+ list_inithead(&queue->cmds);
+}
+
+static inline void
+vk_cmd_queue_finish(struct vk_cmd_queue *queue)
+{
+ vk_free_queue(queue);
+ list_inithead(&queue->cmds);
+}
+
+void vk_cmd_queue_execute(struct vk_cmd_queue *queue,
+ VkCommandBuffer commandBuffer,
+ const struct vk_device_dispatch_table *disp);
+
#ifdef __cplusplus
}
#endif
-""", output_encoding='utf-8')
+""")
TEMPLATE_C = Template(COPYRIGHT + """
/* This file generated from ${filename}, don't edit directly. */
@@ -144,9 +212,16 @@ TEMPLATE_C = Template(COPYRIGHT + """
#include "${header}"
#define VK_PROTOTYPES
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+#include <vulkan/vulkan_beta.h>
+#endif
#include "vk_alloc.h"
+#include "vk_cmd_enqueue_entrypoints.h"
+#include "vk_command_buffer.h"
+#include "vk_dispatch_table.h"
+#include "vk_device.h"
const char *vk_cmd_queue_type_names[] = {
% for c in commands:
@@ -160,45 +235,88 @@ const char *vk_cmd_queue_type_names[] = {
% endfor
};
+size_t vk_cmd_queue_type_sizes[] = {
% for c in commands:
-% if c.name in manual_commands:
-<% continue %>
+% if c.guard is not None:
+#ifdef ${c.guard}
% endif
+% if len(c.params) > 1:
+ sizeof(struct ${to_struct_name(c.name)}) +
+% endif
+ sizeof(struct vk_cmd_queue_entry_base),
+% if c.guard is not None:
+#endif // ${c.guard}
+% endif
+% endfor
+};
+
+% for c in commands:
% if c.guard is not None:
#ifdef ${c.guard}
% endif
-void vk_enqueue_${to_underscore(c.name)}(struct vk_cmd_queue *queue
+static void
+vk_free_${to_underscore(c.name)}(struct vk_cmd_queue *queue,
+${' ' * len('vk_free_' + to_underscore(c.name) + '(')}\\
+struct vk_cmd_queue_entry *cmd)
+{
+ if (cmd->driver_free_cb)
+ cmd->driver_free_cb(queue, cmd);
+ else
+ vk_free(queue->alloc, cmd->driver_data);
+% for p in c.params[1:]:
+% if p.len:
+ vk_free(queue->alloc, (${remove_suffix(p.decl.replace("const", ""), p.name)})cmd->u.${to_struct_field_name(c.name)}.${to_field_name(p.name)});
+% elif '*' in p.decl:
+ ${get_struct_free(c, p, types)}
+% endif
+% endfor
+ vk_free(queue->alloc, cmd);
+}
+
+% if c.name not in manual_commands and c.name not in no_enqueue_commands:
+VkResult vk_enqueue_${to_underscore(c.name)}(struct vk_cmd_queue *queue
% for p in c.params[1:]:
, ${p.decl}
% endfor
)
{
- struct vk_cmd_queue_entry *cmd = vk_zalloc(queue->alloc,
- sizeof(*cmd), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (!cmd)
- return;
+ struct vk_cmd_queue_entry *cmd = vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[${to_enum_name(c.name)}], 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!cmd) return VK_ERROR_OUT_OF_HOST_MEMORY;
cmd->type = ${to_enum_name(c.name)};
- list_addtail(&cmd->cmd_link, &queue->cmds);
-
+ \
+ <% need_error_handling = False %>
% for p in c.params[1:]:
% if p.len:
if (${p.name}) {
${get_array_copy(c, p)}
- }
+ }\
+ <% need_error_handling = True %>
% elif '[' in p.decl:
memcpy(cmd->u.${to_struct_field_name(c.name)}.${to_field_name(p.name)}, ${p.name},
sizeof(*${p.name}) * ${get_array_len(p)});
% elif p.type == "void":
cmd->u.${to_struct_field_name(c.name)}.${to_field_name(p.name)} = (${remove_suffix(p.decl.replace("const", ""), p.name)}) ${p.name};
% elif '*' in p.decl:
- ${get_struct_copy("cmd->u.%s.%s" % (to_struct_field_name(c.name), to_field_name(p.name)), p.name, p.type, 'sizeof(%s)' % p.type, types)}
+ ${get_struct_copy("cmd->u.%s.%s" % (to_struct_field_name(c.name), to_field_name(p.name)), p.name, p.type, 'sizeof(%s)' % p.type, types)}\
+ <% need_error_handling = True %>
% else:
cmd->u.${to_struct_field_name(c.name)}.${to_field_name(p.name)} = ${p.name};
% endif
% endfor
+
+ list_addtail(&cmd->cmd_link, &queue->cmds);
+ return VK_SUCCESS;
+
+% if need_error_handling:
+err:
+ if (cmd)
+ vk_free_${to_underscore(c.name)}(queue, cmd);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+% endif
}
+% endif
% if c.guard is not None:
#endif // ${c.guard}
% endif
@@ -216,24 +334,94 @@ vk_free_queue(struct vk_cmd_queue *queue)
#ifdef ${c.guard}
% endif
case ${to_enum_name(c.name)}:
-% for p in c.params[1:]:
-% if p.len:
- vk_free(queue->alloc, (${remove_suffix(p.decl.replace("const", ""), p.name)})cmd->u.${to_struct_field_name(c.name)}.${to_field_name(p.name)});
-% elif '*' in p.decl:
- ${get_struct_free(c, p, types)}
+ vk_free_${to_underscore(c.name)}(queue, cmd);
+ break;
+% if c.guard is not None:
+#endif // ${c.guard}
% endif
% endfor
- break;
+ }
+ }
+}
+
+void
+vk_cmd_queue_execute(struct vk_cmd_queue *queue,
+ VkCommandBuffer commandBuffer,
+ const struct vk_device_dispatch_table *disp)
+{
+ list_for_each_entry(struct vk_cmd_queue_entry, cmd, &queue->cmds, cmd_link) {
+ switch (cmd->type) {
+% for c in commands:
+% if c.guard is not None:
+#ifdef ${c.guard}
+% endif
+ case ${to_enum_name(c.name)}:
+ disp->${c.name}(commandBuffer
+% for p in c.params[1:]:
+ , cmd->u.${to_struct_field_name(c.name)}.${to_field_name(p.name)}\\
+% endfor
+ );
+ break;
% if c.guard is not None:
#endif // ${c.guard}
% endif
% endfor
+ default: unreachable("Unsupported command");
}
- vk_free(queue->alloc, cmd);
}
}
-""", output_encoding='utf-8')
+% for c in commands:
+% if c.name in no_enqueue_commands:
+/* TODO: Generate vk_cmd_enqueue_${c.name}() */
+<% continue %>
+% endif
+
+% if c.guard is not None:
+#ifdef ${c.guard}
+% endif
+<% assert c.return_type == 'void' %>
+
+% if c.name in manual_commands:
+/* vk_cmd_enqueue_${c.name}() is hand-typed in vk_cmd_enqueue.c */
+% else:
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_${c.name}(${c.decl_params()})
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ if (vk_command_buffer_has_error(cmd_buffer))
+ return;
+% if len(c.params) == 1:
+ VkResult result = vk_enqueue_${to_underscore(c.name)}(&cmd_buffer->cmd_queue);
+% else:
+ VkResult result = vk_enqueue_${to_underscore(c.name)}(&cmd_buffer->cmd_queue,
+ ${c.call_params(1)});
+% endif
+ if (unlikely(result != VK_SUCCESS))
+ vk_command_buffer_set_error(cmd_buffer, result);
+}
+% endif
+
+VKAPI_ATTR void VKAPI_CALL
+vk_cmd_enqueue_unless_primary_${c.name}(${c.decl_params()})
+{
+ VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
+
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ const struct vk_device_dispatch_table *disp =
+ cmd_buffer->base.device->command_dispatch_table;
+
+ disp->${c.name}(${c.call_params()});
+ } else {
+ vk_cmd_enqueue_${c.name}(${c.call_params()});
+ }
+}
+% if c.guard is not None:
+#endif // ${c.guard}
+% endif
+% endfor
+""")
def remove_prefix(text, prefix):
if text.startswith(prefix):
@@ -255,7 +443,10 @@ def to_field_name(name):
return remove_prefix(to_underscore(name).replace('cmd_', ''), 'p_')
def to_field_decl(decl):
- decl = decl.replace('const ', '')
+ if 'const*' in decl:
+ decl = decl.replace('const*', '*')
+ else:
+ decl = decl.replace('const ', '')
[decl, name] = decl.rsplit(' ', 1)
return decl + ' ' + to_field_name(name)
@@ -274,18 +465,19 @@ def get_array_copy(command, param):
field_size = "1"
else:
field_size = "sizeof(*%s)" % field_name
- allocation = "%s = vk_zalloc(queue->alloc, %s * %s, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);" % (field_name, field_size, param.len)
- const_cast = remove_suffix(param.decl.replace("const", ""), param.name)
- copy = "memcpy((%s)%s, %s, %s * %s);" % (const_cast, field_name, param.name, field_size, param.len)
+ allocation = "%s = vk_zalloc(queue->alloc, %s * (%s), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);\n if (%s == NULL) goto err;\n" % (field_name, field_size, param.len, field_name)
+ copy = "memcpy((void*)%s, %s, %s * (%s));" % (field_name, param.name, field_size, param.len)
return "%s\n %s" % (allocation, copy)
def get_array_member_copy(struct, src_name, member):
field_name = "%s->%s" % (struct, member.name)
- len_field_name = "%s->%s" % (struct, member.len)
- allocation = "%s = vk_zalloc(queue->alloc, sizeof(*%s) * %s, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);" % (field_name, field_name, len_field_name)
- const_cast = remove_suffix(member.decl.replace("const", ""), member.name)
- copy = "memcpy((%s)%s, %s->%s, sizeof(*%s) * %s);" % (const_cast, field_name, src_name, member.name, field_name, len_field_name)
- return "%s\n %s\n" % (allocation, copy)
+ if member.len == "struct-ptr":
+ field_size = "sizeof(*%s)" % (field_name)
+ else:
+ field_size = "sizeof(*%s) * %s->%s" % (field_name, struct, member.len)
+ allocation = "%s = vk_zalloc(queue->alloc, %s, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);\n if (%s == NULL) goto err;\n" % (field_name, field_size, field_name)
+ copy = "memcpy((void*)%s, %s->%s, %s);" % (field_name, src_name, member.name, field_size)
+ return "if (%s->%s) {\n %s\n %s\n}\n" % (src_name, member.name, allocation, copy)
def get_pnext_member_copy(struct, src_type, member, types, level):
if not types[src_type].extended_by:
@@ -294,11 +486,18 @@ def get_pnext_member_copy(struct, src_type, member, types, level):
pnext_decl = "const VkBaseInStructure *pnext = %s;" % field_name
case_stmts = ""
for type in types[src_type].extended_by:
+ guard_pre_stmt = ""
+ guard_post_stmt = ""
+ if type.guard is not None:
+ guard_pre_stmt = "#ifdef %s" % type.guard
+ guard_post_stmt = "#endif"
case_stmts += """
- case %s:
- %s
+%s
+ case %s:
+ %s
break;
- """ % (type.enum, get_struct_copy(field_name, "pnext", type.name, "sizeof(%s)" % type.name, types, level))
+%s
+ """ % (guard_pre_stmt, type.enum, get_struct_copy(field_name, "pnext", type.name, "sizeof(%s)" % type.name, types, level), guard_post_stmt)
return """
%s
if (pnext) {
@@ -312,7 +511,7 @@ def get_struct_copy(dst, src_name, src_type, size, types, level=0):
global tmp_dst_idx
global tmp_src_idx
- allocation = "%s = vk_zalloc(queue->alloc, %s, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);" % (dst, size)
+ allocation = "%s = vk_zalloc(queue->alloc, %s, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);\n if (%s == NULL) goto err;\n" % (dst, size, dst)
copy = "memcpy((void*)%s, %s, %s);" % (dst, src_name, size)
level += 1
@@ -329,12 +528,12 @@ def get_struct_copy(dst, src_name, src_type, size, types, level=0):
null_assignment = "%s = NULL;" % dst
if_stmt = "if (%s) {" % src_name
- return "%s\n %s\n %s\n %s\n %s \n %s } else {\n %s\n }" % (if_stmt, allocation, copy, tmp_dst, tmp_src, member_copies, null_assignment)
+ indent = " " * level
+ return "%s\n %s\n %s\n %s\n %s\n %s\n%s} else {\n %s\n%s}" % (if_stmt, allocation, copy, tmp_dst, tmp_src, member_copies, indent, null_assignment, indent)
def get_struct_free(command, param, types):
field_name = "cmd->u.%s.%s" % (to_struct_field_name(command.name), to_field_name(param.name))
const_cast = remove_suffix(param.decl.replace("const", ""), param.name)
- driver_data_free = "vk_free(queue->alloc, cmd->driver_data);\n"
struct_free = "vk_free(queue->alloc, (%s)%s);" % (const_cast, field_name)
member_frees = ""
if (param.type in types):
@@ -343,46 +542,89 @@ def get_struct_free(command, param, types):
member_name = "cmd->u.%s.%s->%s" % (to_struct_field_name(command.name), to_field_name(param.name), member.name)
const_cast = remove_suffix(member.decl.replace("const", ""), member.name)
member_frees += "vk_free(queue->alloc, (%s)%s);\n" % (const_cast, member_name)
- return "%s %s %s\n" % (member_frees, driver_data_free, struct_free)
+ return "%s %s\n" % (member_frees, struct_free)
+
+EntrypointType = namedtuple('EntrypointType', 'name enum members extended_by guard')
+
+def get_types_defines(doc):
+ """Maps types to extension defines."""
+ types_to_defines = {}
+
+ platform_define = {}
+ for platform in doc.findall('./platforms/platform'):
+ name = platform.attrib['name']
+ define = platform.attrib['protect']
+ platform_define[name] = define
+
+ for extension in doc.findall('./extensions/extension[@platform]'):
+ platform = extension.attrib['platform']
+ define = platform_define[platform]
-EntrypointType = namedtuple('EntrypointType', 'name enum members extended_by')
+ for types in extension.findall('./require/type'):
+ fullname = types.attrib['name']
+ types_to_defines[fullname] = define
-def get_types(doc):
+ return types_to_defines
+
+def get_types(doc, beta, api, types_to_defines):
"""Extract the types from the registry."""
types = {}
+ required = get_all_required(doc, 'type', api, beta)
+
for _type in doc.findall('./types/type'):
if _type.attrib.get('category') != 'struct':
continue
+ if not filter_api(_type, api):
+ continue
+ if _type.attrib['name'] not in required:
+ continue
+
members = []
type_enum = None
for p in _type.findall('./member'):
- member = EntrypointParam(type=p.find('./type').text,
- name=p.find('./name').text,
- decl=''.join(p.itertext()),
- len=p.attrib.get('len', None))
+ if not filter_api(p, api):
+ continue
+
+ mem_type = p.find('./type').text
+ mem_name = p.find('./name').text
+ mem_decl = ''.join(p.itertext())
+ mem_len = p.attrib.get('altlen', p.attrib.get('len', None))
+ if mem_len is None and '*' in mem_decl and mem_name != 'pNext':
+ mem_len = "struct-ptr"
+
+ member = EntrypointParam(type=mem_type,
+ name=mem_name,
+ decl=mem_decl,
+ len=mem_len)
members.append(member)
- if p.find('./name').text == 'sType':
+ if mem_name == 'sType':
type_enum = p.attrib.get('values')
- types[_type.attrib['name']] = EntrypointType(name=_type.attrib['name'], enum=type_enum, members=members, extended_by=[])
+ types[_type.attrib['name']] = EntrypointType(name=_type.attrib['name'], enum=type_enum, members=members, extended_by=[], guard=types_to_defines.get(_type.attrib['name']))
for _type in doc.findall('./types/type'):
if _type.attrib.get('category') != 'struct':
continue
+ if not filter_api(_type, api):
+ continue
+ if _type.attrib['name'] not in required:
+ continue
if _type.attrib.get('structextends') is None:
continue
for extended in _type.attrib.get('structextends').split(','):
+ if extended not in required:
+ continue
types[extended].extended_by.append(types[_type.attrib['name']])
return types
-def get_types_from_xml(xml_files):
+def get_types_from_xml(xml_files, beta, api='vulkan'):
types = {}
for filename in xml_files:
doc = et.parse(filename)
- types.update(get_types(doc))
+ types.update(get_types(doc, beta, api, get_types_defines(doc)))
return types
@@ -390,18 +632,19 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out-c', required=True, help='Output C file.')
parser.add_argument('--out-h', required=True, help='Output H file.')
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
parser.add_argument('--xml',
help='Vulkan API XML file.',
required=True, action='append', dest='xml_files')
args = parser.parse_args()
commands = []
- for e in get_entrypoints_from_xml(args.xml_files):
+ for e in get_entrypoints_from_xml(args.xml_files, args.beta):
if e.name.startswith('Cmd') and \
not e.alias:
commands.append(e)
- types = get_types_from_xml(args.xml_files)
+ types = get_types_from_xml(args.xml_files, args.beta)
assert os.path.dirname(args.out_c) == os.path.dirname(args.out_h)
@@ -421,26 +664,25 @@ def main():
'get_struct_free': get_struct_free,
'types': types,
'manual_commands': MANUAL_COMMANDS,
+ 'no_enqueue_commands': NO_ENQUEUE_COMMANDS,
'remove_suffix': remove_suffix,
}
try:
- with open(args.out_h, 'wb') as f:
+ with open(args.out_h, 'w', encoding='utf-8') as f:
guard = os.path.basename(args.out_h).replace('.', '_').upper()
f.write(TEMPLATE_H.render(guard=guard, **environment))
- with open(args.out_c, 'wb') as f:
+ with open(args.out_c, 'w', encoding='utf-8') as f:
f.write(TEMPLATE_C.render(**environment))
except Exception:
# In the event there's an error, this imports some helpers from mako
# to print a useful stack trace and prints it, then exits with
# status 1, if python is run with debug; otherwise it just raises
# the exception
- if __debug__:
- import sys
- from mako import exceptions
- sys.stderr.write(exceptions.text_error_template().render() + '\n')
- sys.exit(1)
- raise
+ import sys
+ from mako import exceptions
+ print(exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
if __name__ == '__main__':
main()
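
To make the template changes concrete, this is roughly what the updated script now emits for a trivial state command (vkCmdSetLineWidth chosen arbitrarily; the exact identifiers are approximations of what to_underscore/to_enum_name produce): the enqueue helper returns VkResult, sizes the entry from vk_cmd_queue_type_sizes[], and the vk_cmd_enqueue_* entry point records allocation failures on the command buffer instead of silently dropping the command:

    /* Hand-simplified sketch of generated output, not code from this patch;
     * the real functions are emitted into vk_cmd_queue.c by the template. */
    VkResult
    vk_enqueue_cmd_set_line_width(struct vk_cmd_queue *queue, float lineWidth)
    {
       struct vk_cmd_queue_entry *cmd =
          vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[VK_CMD_SET_LINE_WIDTH],
                    8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
       if (!cmd)
          return VK_ERROR_OUT_OF_HOST_MEMORY;

       cmd->type = VK_CMD_SET_LINE_WIDTH;
       cmd->u.set_line_width.line_width = lineWidth;

       list_addtail(&cmd->cmd_link, &queue->cmds);
       return VK_SUCCESS;
    }

    VKAPI_ATTR void VKAPI_CALL
    vk_cmd_enqueue_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
    {
       VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

       if (vk_command_buffer_has_error(cmd_buffer))
          return;

       VkResult result =
          vk_enqueue_cmd_set_line_width(&cmd_buffer->cmd_queue, lineWidth);
       if (unlikely(result != VK_SUCCESS))
          vk_command_buffer_set_error(cmd_buffer, result);
    }
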
diff --git a/src/vulkan/util/vk_commands_gen.py b/src/vulkan/util/vk_commands_gen.py
deleted file mode 100644
index 610c835ffc3..00000000000
--- a/src/vulkan/util/vk_commands_gen.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# coding=utf-8
-COPYRIGHT=u"""
-/* Copyright © 2015-2021 Intel Corporation
- * Copyright © 2021 Collabora, Ltd.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-"""
-
-import argparse
-import os
-import re
-import xml.etree.ElementTree as et
-
-from mako.template import Template
-
-# Mesa-local imports must be declared in meson variable
-# '{file_without_suffix}_depend_files'.
-from vk_dispatch_table_gen import get_entrypoints_from_xml, EntrypointParam
-
-MANUAL_COMMANDS = ['CmdPushDescriptorSetKHR', # This script doesn't know how to copy arrays in structs in arrays
- 'CmdPushDescriptorSetWithTemplateKHR', # pData's size cannot be calculated from the xml
- 'CmdDrawMultiEXT', # The size of the elements is specified in a stride param
- 'CmdDrawMultiIndexedEXT', # The size of the elements is specified in a stride param
- 'CmdBindDescriptorSets', # The VkPipelineLayout object could be released before the command is executed
- 'CmdCopyImageToBuffer', # There are wrappers that implement these in terms of the newer variants
- 'CmdCopyImage',
- 'CmdCopyBuffer',
- 'CmdCopyImage',
- 'CmdCopyBufferToImage',
- 'CmdCopyImageToBuffer',
- 'CmdBlitImage',
- 'CmdResolveImage',
- ]
-
-TEMPLATE_C = Template(COPYRIGHT + """
-/* This file generated from ${filename}, don't edit directly. */
-
-#define VK_PROTOTYPES
-#include <vulkan/vulkan.h>
-
-#include "lvp_private.h"
-#include "pipe/p_context.h"
-#include "vk_util.h"
-
-% for c in commands:
-% if c.name in manual_commands:
-<% continue %>
-% endif
-% if c.guard is not None:
-#ifdef ${c.guard}
-% endif
-VKAPI_ATTR ${c.return_type} VKAPI_CALL lvp_${c.name} (VkCommandBuffer commandBuffer
-% for p in c.params[1:]:
-, ${p.decl}
-% endfor
-)
-{
- LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
-
- vk_enqueue_${to_underscore(c.name)}(&cmd_buffer->queue
-% for p in c.params[1:]:
-, ${p.name}
-% endfor
- );
-
-% if c.return_type == 'VkResult':
- return VK_SUCCESS;
-% endif
-}
-% if c.guard is not None:
-#endif // ${c.guard}
-% endif
-% endfor
-
-""", output_encoding='utf-8')
-
-def remove_prefix(text, prefix):
- if text.startswith(prefix):
- return text[len(prefix):]
- return text
-
-def to_underscore(name):
- return remove_prefix(re.sub('([A-Z]+)', r'_\1', name).lower(), '_')
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--out-c', required=True, help='Output C file.')
- parser.add_argument('--xml',
- help='Vulkan API XML file.',
- required=True, action='append', dest='xml_files')
- parser.add_argument('--prefix',
- help='Prefix to use for all dispatch tables.',
- action='append', default=[], dest='prefixes')
- args = parser.parse_args()
-
- commands = []
- for e in get_entrypoints_from_xml(args.xml_files):
- if e.name.startswith('Cmd') and \
- not e.alias:
- commands.append(e)
-
- environment = {
- 'commands': commands,
- 'filename': os.path.basename(__file__),
- 'to_underscore': to_underscore,
- 'manual_commands': MANUAL_COMMANDS,
- }
-
- try:
- with open(args.out_c, 'wb') as f:
- f.write(TEMPLATE_C.render(**environment))
- except Exception:
- # In the event there's an error, this imports some helpers from mako
- # to print a useful stack trace and prints it, then exits with
- # status 1, if python is run with debug; otherwise it just raises
- # the exception
- if __debug__:
- import sys
- from mako import exceptions
- sys.stderr.write(exceptions.text_error_template().render() + '\n')
- sys.exit(1)
- raise
-
-if __name__ == '__main__':
- main()
diff --git a/src/vulkan/util/vk_device.c b/src/vulkan/util/vk_device.c
deleted file mode 100644
index 42f571c9a48..00000000000
--- a/src/vulkan/util/vk_device.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright © 2020 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "vk_device.h"
-
-#include "vk_common_entrypoints.h"
-#include "vk_instance.h"
-#include "vk_physical_device.h"
-#include "vk_util.h"
-#include "util/hash_table.h"
-#include "util/ralloc.h"
-
-VkResult
-vk_device_init(struct vk_device *device,
- struct vk_physical_device *physical_device,
- const struct vk_device_dispatch_table *dispatch_table,
- const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc)
-{
- memset(device, 0, sizeof(*device));
- vk_object_base_init(device, &device->base, VK_OBJECT_TYPE_DEVICE);
- if (alloc != NULL)
- device->alloc = *alloc;
- else
- device->alloc = physical_device->instance->alloc;
-
- device->physical = physical_device;
-
- device->dispatch_table = *dispatch_table;
-
- /* Add common entrypoints without overwriting driver-provided ones. */
- vk_device_dispatch_table_from_entrypoints(
- &device->dispatch_table, &vk_common_device_entrypoints, false);
-
- for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- int idx;
- for (idx = 0; idx < VK_DEVICE_EXTENSION_COUNT; idx++) {
- if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- vk_device_extensions[idx].extensionName) == 0)
- break;
- }
-
- if (idx >= VK_DEVICE_EXTENSION_COUNT)
- return VK_ERROR_EXTENSION_NOT_PRESENT;
-
- if (!physical_device->supported_extensions.extensions[idx])
- return VK_ERROR_EXTENSION_NOT_PRESENT;
-
-#ifdef ANDROID
- if (!vk_android_allowed_device_extensions.extensions[idx])
- return VK_ERROR_EXTENSION_NOT_PRESENT;
-#endif
-
- device->enabled_extensions.extensions[idx] = true;
- }
-
- p_atomic_set(&device->private_data_next_index, 0);
-
-#ifdef ANDROID
- mtx_init(&device->swapchain_private_mtx, mtx_plain);
- device->swapchain_private = NULL;
-#endif /* ANDROID */
-
- return VK_SUCCESS;
-}
-
-void
-vk_device_finish(UNUSED struct vk_device *device)
-{
-#ifdef ANDROID
- if (device->swapchain_private) {
- hash_table_foreach(device->swapchain_private, entry)
- util_sparse_array_finish(entry->data);
- ralloc_free(device->swapchain_private);
- }
-#endif /* ANDROID */
-
- vk_object_base_finish(&device->base);
-}
-
-PFN_vkVoidFunction
-vk_device_get_proc_addr(const struct vk_device *device,
- const char *name)
-{
- if (device == NULL || name == NULL)
- return NULL;
-
- struct vk_instance *instance = device->physical->instance;
- return vk_device_dispatch_table_get_if_supported(&device->dispatch_table,
- name,
- instance->app_info.api_version,
- &instance->enabled_extensions,
- &device->enabled_extensions);
-}
-
-VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
-vk_common_GetDeviceProcAddr(VkDevice _device,
- const char *pName)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
- return vk_device_get_proc_addr(device, pName);
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_GetDeviceQueue(VkDevice _device,
- uint32_t queueFamilyIndex,
- uint32_t queueIndex,
- VkQueue *pQueue)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- const VkDeviceQueueInfo2 info = {
- .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
- .pNext = NULL,
- /* flags = 0 because (Vulkan spec 1.2.170 - vkGetDeviceQueue):
- *
- * "vkGetDeviceQueue must only be used to get queues that were
- * created with the flags parameter of VkDeviceQueueCreateInfo set
- * to zero. To get queues that were created with a non-zero flags
- * parameter use vkGetDeviceQueue2."
- */
- .flags = 0,
- .queueFamilyIndex = queueFamilyIndex,
- .queueIndex = queueIndex,
- };
-
- device->dispatch_table.GetDeviceQueue2(_device, &info, pQueue);
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_GetBufferMemoryRequirements(VkDevice _device,
- VkBuffer buffer,
- VkMemoryRequirements *pMemoryRequirements)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- VkBufferMemoryRequirementsInfo2 info = {
- .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
- .buffer = buffer,
- };
- VkMemoryRequirements2 reqs = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
- };
- device->dispatch_table.GetBufferMemoryRequirements2(_device, &info, &reqs);
-
- *pMemoryRequirements = reqs.memoryRequirements;
-}
-
-VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_BindBufferMemory(VkDevice _device,
- VkBuffer buffer,
- VkDeviceMemory memory,
- VkDeviceSize memoryOffset)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- VkBindBufferMemoryInfo bind = {
- .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
- .buffer = buffer,
- .memory = memory,
- .memoryOffset = memoryOffset,
- };
-
- return device->dispatch_table.BindBufferMemory2(_device, 1, &bind);
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_GetImageMemoryRequirements(VkDevice _device,
- VkImage image,
- VkMemoryRequirements *pMemoryRequirements)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- VkImageMemoryRequirementsInfo2 info = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
- .image = image,
- };
- VkMemoryRequirements2 reqs = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
- };
- device->dispatch_table.GetImageMemoryRequirements2(_device, &info, &reqs);
-
- *pMemoryRequirements = reqs.memoryRequirements;
-}
-
-VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_BindImageMemory(VkDevice _device,
- VkImage image,
- VkDeviceMemory memory,
- VkDeviceSize memoryOffset)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- VkBindImageMemoryInfo bind = {
- .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
- .image = image,
- .memory = memory,
- .memoryOffset = memoryOffset,
- };
-
- return device->dispatch_table.BindImageMemory2(_device, 1, &bind);
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_GetImageSparseMemoryRequirements(VkDevice _device,
- VkImage image,
- uint32_t *pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- VkImageSparseMemoryRequirementsInfo2 info = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
- .image = image,
- };
-
- if (!pSparseMemoryRequirements) {
- device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
- &info,
- pSparseMemoryRequirementCount,
- NULL);
- return;
- }
-
- STACK_ARRAY(VkSparseImageMemoryRequirements2, mem_reqs2, *pSparseMemoryRequirementCount);
-
- for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i) {
- mem_reqs2[i].sType = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2;
- mem_reqs2[i].pNext = NULL;
- }
-
- device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
- &info,
- pSparseMemoryRequirementCount,
- mem_reqs2);
-
- for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i)
- pSparseMemoryRequirements[i] = mem_reqs2[i].memoryRequirements;
-
- STACK_ARRAY_FINISH(mem_reqs2);
-}
diff --git a/src/vulkan/util/vk_dispatch_table_gen.py b/src/vulkan/util/vk_dispatch_table_gen.py
index 7eafa8ecb3c..7a3b459b371 100644
--- a/src/vulkan/util/vk_dispatch_table_gen.py
+++ b/src/vulkan/util/vk_dispatch_table_gen.py
@@ -1,4 +1,3 @@
-# coding=utf-8
COPYRIGHT = """\
/*
* Copyright 2020 Intel Corporation
@@ -28,14 +27,12 @@ COPYRIGHT = """\
import argparse
import math
import os
-import xml.etree.ElementTree as et
-from collections import OrderedDict, namedtuple
from mako.template import Template
# Mesa-local imports must be declared in meson variable
# '{file_without_suffix}_depend_files'.
-from vk_extensions import *
+from vk_entrypoints import get_entrypoints_from_xml
# We generate a static hash table for entry point lookup
# (vkGetProcAddress). We use a linear congruential generator for our hash
@@ -68,6 +65,10 @@ TEMPLATE_H = Template(COPYRIGHT + """\
extern "C" {
#endif
+#ifdef _MSC_VER
+VKAPI_ATTR void VKAPI_CALL vk_entrypoint_stub(void);
+#endif
+
<%def name="dispatch_table(entrypoints)">
% for e in entrypoints:
% if e.alias:
@@ -151,6 +152,62 @@ ${entrypoint_table('instance', instance_entrypoints)}
${entrypoint_table('physical_device', physical_device_entrypoints)}
${entrypoint_table('device', device_entrypoints)}
+<%def name="uncompacted_dispatch_table(entrypoints)">
+% for e in entrypoints:
+ % if e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ PFN_vk${e.name} ${e.name};
+ % if e.aliases:
+ % for a in e.aliases:
+ PFN_vk${a.name} ${a.name};
+ % endfor
+ % endif
+ % if e.guard is not None:
+#else
+ PFN_vkVoidFunction ${e.name};
+ % if e.aliases:
+ % for a in e.aliases:
+ PFN_vkVoidFunction ${a.name};
+ % endfor
+ % endif
+#endif
+ % endif
+% endfor
+</%def>
+
+
+struct vk_instance_uncompacted_dispatch_table {
+ ${uncompacted_dispatch_table(instance_entrypoints)}
+};
+
+struct vk_physical_device_uncompacted_dispatch_table {
+ ${uncompacted_dispatch_table(physical_device_entrypoints)}
+};
+
+struct vk_device_uncompacted_dispatch_table {
+ ${uncompacted_dispatch_table(device_entrypoints)}
+};
+
+struct vk_uncompacted_dispatch_table {
+ union {
+ struct {
+ struct vk_instance_uncompacted_dispatch_table instance;
+ struct vk_physical_device_uncompacted_dispatch_table physical_device;
+ struct vk_device_uncompacted_dispatch_table device;
+ };
+
+ struct {
+ ${uncompacted_dispatch_table(instance_entrypoints)}
+ ${uncompacted_dispatch_table(physical_device_entrypoints)}
+ ${uncompacted_dispatch_table(device_entrypoints)}
+ };
+ };
+};
+
void
vk_instance_dispatch_table_load(struct vk_instance_dispatch_table *table,
PFN_vkGetInstanceProcAddr gpa,
@@ -164,6 +221,19 @@ vk_device_dispatch_table_load(struct vk_device_dispatch_table *table,
PFN_vkGetDeviceProcAddr gpa,
VkDevice device);
+void
+vk_instance_uncompacted_dispatch_table_load(struct vk_instance_uncompacted_dispatch_table *table,
+ PFN_vkGetInstanceProcAddr gpa,
+ VkInstance instance);
+void
+vk_physical_device_uncompacted_dispatch_table_load(struct vk_physical_device_uncompacted_dispatch_table *table,
+ PFN_vkGetInstanceProcAddr gpa,
+ VkInstance instance);
+void
+vk_device_uncompacted_dispatch_table_load(struct vk_device_uncompacted_dispatch_table *table,
+ PFN_vkGetDeviceProcAddr gpa,
+ VkDevice device);
+
void vk_instance_dispatch_table_from_entrypoints(
struct vk_instance_dispatch_table *dispatch_table,
const struct vk_instance_entrypoint_table *entrypoint_table,
@@ -213,9 +283,6 @@ vk_device_dispatch_table_get_if_supported(
const struct vk_instance_extension_table *instance_exts,
const struct vk_device_extension_table *device_exts);
-extern struct vk_physical_device_dispatch_table vk_physical_device_trampolines;
-extern struct vk_device_dispatch_table vk_device_trampolines;
-
#ifdef __cplusplus
}
#endif
@@ -226,11 +293,7 @@ extern struct vk_device_dispatch_table vk_device_trampolines;
TEMPLATE_C = Template(COPYRIGHT + """\
/* This file generated from ${filename}, don't edit directly. */
-#include "vk_device.h"
#include "vk_dispatch_table.h"
-#include "vk_instance.h"
-#include "vk_object.h"
-#include "vk_physical_device.h"
#include "util/macros.h"
#include "string.h"
@@ -273,6 +336,46 @@ ${load_dispatch_table('physical_device', 'VkInstance', 'GetInstanceProcAddr',
${load_dispatch_table('device', 'VkDevice', 'GetDeviceProcAddr',
device_entrypoints)}
+<%def name="load_uncompacted_dispatch_table(type, VkType, ProcAddr, entrypoints)">
+void
+vk_${type}_uncompacted_dispatch_table_load(struct vk_${type}_uncompacted_dispatch_table *table,
+ PFN_vk${ProcAddr} gpa,
+ ${VkType} obj)
+{
+% if type != 'physical_device':
+ table->${ProcAddr} = gpa;
+% endif
+% for e in entrypoints:
+ % if e.alias or e.name == '${ProcAddr}':
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ table->${e.name} = (PFN_vk${e.name}) gpa(obj, "vk${e.name}");
+ % for a in e.aliases:
+ table->${a.name} = (PFN_vk${a.name}) gpa(obj, "vk${a.name}");
+ if (table->${e.name} && !table->${a.name})
+ table->${a.name} = (PFN_vk${a.name}) table->${e.name};
+ if (!table->${e.name})
+ table->${e.name} = (PFN_vk${e.name}) table->${a.name};
+ % endfor
+ % if e.guard is not None:
+#endif
+ % endif
+% endfor
+}
+</%def>
+
+${load_uncompacted_dispatch_table('instance', 'VkInstance', 'GetInstanceProcAddr',
+ instance_entrypoints)}
+
+${load_uncompacted_dispatch_table('physical_device', 'VkInstance', 'GetInstanceProcAddr',
+ physical_device_entrypoints)}
+
+${load_uncompacted_dispatch_table('device', 'VkDevice', 'GetDeviceProcAddr',
+ device_entrypoints)}
+
struct string_map_entry {
uint32_t name;
@@ -464,6 +567,36 @@ vk_device_entrypoint_is_enabled(int index, uint32_t core_version,
}
}
+#ifdef _MSC_VER
+VKAPI_ATTR void VKAPI_CALL vk_entrypoint_stub(void)
+{
+ unreachable("Entrypoint not implemented");
+}
+
+static const void *get_function_target(const void *func)
+{
+ const uint8_t *address = func;
+#ifdef _M_X64
+ /* Incremental linking may indirect through relative jump */
+ if (*address == 0xE9)
+ {
+ /* Compute JMP target if the first byte is opcode 0xE9 */
+ uint32_t offset;
+ memcpy(&offset, address + 1, 4);
+ address += offset + 5;
+ }
+#else
+ /* Add other platforms here if necessary */
+#endif
+ return address;
+}
+
+static bool vk_function_is_stub(PFN_vkVoidFunction func)
+{
+ return (func == vk_entrypoint_stub) || (get_function_target(func) == get_function_target(vk_entrypoint_stub));
+}
+#endif
+
<%def name="dispatch_table_from_entrypoints(type)">
void vk_${type}_dispatch_table_from_entrypoints(
struct vk_${type}_dispatch_table *dispatch_table,
@@ -477,8 +610,8 @@ void vk_${type}_dispatch_table_from_entrypoints(
memset(dispatch_table, 0, sizeof(*dispatch_table));
for (unsigned i = 0; i < ARRAY_SIZE(${type}_compaction_table); i++) {
#ifdef _MSC_VER
- const uintptr_t zero = 0;
- if (entry[i] == NULL || memcmp(entry[i], &zero, sizeof(zero)) == 0)
+ assert(entry[i] != NULL);
+ if (vk_function_is_stub(entry[i]))
#else
if (entry[i] == NULL)
#endif
@@ -490,7 +623,12 @@ void vk_${type}_dispatch_table_from_entrypoints(
} else {
for (unsigned i = 0; i < ARRAY_SIZE(${type}_compaction_table); i++) {
unsigned disp_index = ${type}_compaction_table[i];
+#ifdef _MSC_VER
+ assert(entry[i] != NULL);
+ if (disp[disp_index] == NULL && !vk_function_is_stub(entry[i]))
+#else
if (disp[disp_index] == NULL)
+#endif
disp[disp_index] = entry[i];
}
}
@@ -581,92 +719,6 @@ vk_device_dispatch_table_get_if_supported(
return vk_device_dispatch_table_get_for_entry_index(table, entry_index);
}
-
-% for e in physical_device_entrypoints:
- % if e.alias:
- <% continue %>
- % endif
- % if e.guard is not None:
-#ifdef ${e.guard}
- % endif
-static VKAPI_ATTR ${e.return_type} VKAPI_CALL
-${e.prefixed_name('vk_tramp')}(${e.decl_params()})
-{
- <% assert e.params[0].type == 'VkPhysicalDevice' %>
- VK_FROM_HANDLE(vk_physical_device, vk_physical_device, ${e.params[0].name});
- % if e.return_type == 'void':
- vk_physical_device->dispatch_table.${e.name}(${e.call_params()});
- % else:
- return vk_physical_device->dispatch_table.${e.name}(${e.call_params()});
- % endif
-}
- % if e.guard is not None:
-#endif
- % endif
-% endfor
-
-struct vk_physical_device_dispatch_table vk_physical_device_trampolines = {
-% for e in physical_device_entrypoints:
- % if e.alias:
- <% continue %>
- % endif
- % if e.guard is not None:
-#ifdef ${e.guard}
- % endif
- .${e.name} = ${e.prefixed_name('vk_tramp')},
- % if e.guard is not None:
-#endif
- % endif
-% endfor
-};
-
-% for e in device_entrypoints:
- % if e.alias:
- <% continue %>
- % endif
- % if e.guard is not None:
-#ifdef ${e.guard}
- % endif
-static VKAPI_ATTR ${e.return_type} VKAPI_CALL
-${e.prefixed_name('vk_tramp')}(${e.decl_params()})
-{
- % if e.params[0].type == 'VkDevice':
- VK_FROM_HANDLE(vk_device, vk_device, ${e.params[0].name});
- % if e.return_type == 'void':
- vk_device->dispatch_table.${e.name}(${e.call_params()});
- % else:
- return vk_device->dispatch_table.${e.name}(${e.call_params()});
- % endif
- % elif e.params[0].type in ('VkCommandBuffer', 'VkQueue'):
- struct vk_object_base *vk_object = (struct vk_object_base *)${e.params[0].name};
- % if e.return_type == 'void':
- vk_object->device->dispatch_table.${e.name}(${e.call_params()});
- % else:
- return vk_object->device->dispatch_table.${e.name}(${e.call_params()});
- % endif
- % else:
- assert(!"Unhandled device child trampoline case: ${e.params[0].type}");
- % endif
-}
- % if e.guard is not None:
-#endif
- % endif
-% endfor
-
-struct vk_device_dispatch_table vk_device_trampolines = {
-% for e in device_entrypoints:
- % if e.alias:
- <% continue %>
- % endif
- % if e.guard is not None:
-#ifdef ${e.guard}
- % endif
- .${e.name} = ${e.prefixed_name('vk_tramp')},
- % if e.guard is not None:
-#endif
- % endif
-% endfor
-};
""")
U32_MASK = 2**32 - 1
@@ -674,7 +726,7 @@ U32_MASK = 2**32 - 1
PRIME_FACTOR = 5024183
PRIME_STEP = 19
-class StringIntMapEntry(object):
+class StringIntMapEntry:
def __init__(self, string, num):
self.string = string
self.num = num
@@ -690,10 +742,10 @@ class StringIntMapEntry(object):
def round_to_pow2(x):
return 2**int(math.ceil(math.log(x, 2)))
-class StringIntMap(object):
+class StringIntMap:
def __init__(self):
self.baked = False
- self.strings = dict()
+ self.strings = {}
def add_string(self, string, num):
assert not self.baked
@@ -726,157 +778,11 @@ class StringIntMap(object):
self.collisions[min(level, 9)] += 1
self.mapping[h & self.hash_mask] = idx
-EntrypointParam = namedtuple('EntrypointParam', 'type name decl len')
-
-class EntrypointBase(object):
- def __init__(self, name):
- assert name.startswith('vk')
- self.name = name[2:]
- self.alias = None
- self.guard = None
- self.entry_table_index = None
- # Extensions which require this entrypoint
- self.core_version = None
- self.extensions = []
-
- def prefixed_name(self, prefix):
- return prefix + '_' + self.name
-
-class Entrypoint(EntrypointBase):
- def __init__(self, name, return_type, params, guard=None):
- super(Entrypoint, self).__init__(name)
- self.return_type = return_type
- self.params = params
- self.guard = guard
- self.aliases = []
- self.disp_table_index = None
-
- def is_physical_device_entrypoint(self):
- return self.params[0].type in ('VkPhysicalDevice', )
-
- def is_device_entrypoint(self):
- return self.params[0].type in ('VkDevice', 'VkCommandBuffer', 'VkQueue')
-
- def decl_params(self):
- return ', '.join(p.decl for p in self.params)
-
- def call_params(self):
- return ', '.join(p.name for p in self.params)
-
-class EntrypointAlias(EntrypointBase):
- def __init__(self, name, entrypoint):
- super(EntrypointAlias, self).__init__(name)
- self.alias = entrypoint
- entrypoint.aliases.append(self)
-
- def is_physical_device_entrypoint(self):
- return self.alias.is_physical_device_entrypoint()
-
- def is_device_entrypoint(self):
- return self.alias.is_device_entrypoint()
-
- def prefixed_name(self, prefix):
- return self.alias.prefixed_name(prefix)
-
- @property
- def params(self):
- return self.alias.params
-
- @property
- def return_type(self):
- return self.alias.return_type
-
- @property
- def disp_table_index(self):
- return self.alias.disp_table_index
-
- def decl_params(self):
- return self.alias.decl_params()
-
- def call_params(self):
- return self.alias.call_params()
-
-def get_entrypoints(doc, entrypoints_to_defines):
- """Extract the entry points from the registry."""
- entrypoints = OrderedDict()
-
- for command in doc.findall('./commands/command'):
- if 'alias' in command.attrib:
- alias = command.attrib['name']
- target = command.attrib['alias']
- entrypoints[alias] = EntrypointAlias(alias, entrypoints[target])
- else:
- name = command.find('./proto/name').text
- ret_type = command.find('./proto/type').text
- params = [EntrypointParam(
- type=p.find('./type').text,
- name=p.find('./name').text,
- decl=''.join(p.itertext()),
- len=p.attrib.get('len', None)
- ) for p in command.findall('./param')]
- guard = entrypoints_to_defines.get(name)
- # They really need to be unique
- assert name not in entrypoints
- entrypoints[name] = Entrypoint(name, ret_type, params, guard)
-
- for feature in doc.findall('./feature'):
- assert feature.attrib['api'] == 'vulkan'
- version = VkVersion(feature.attrib['number'])
- for command in feature.findall('./require/command'):
- e = entrypoints[command.attrib['name']]
- assert e.core_version is None
- e.core_version = version
-
- for extension in doc.findall('.extensions/extension'):
- if extension.attrib['supported'] != 'vulkan':
- continue
-
- ext_name = extension.attrib['name']
-
- ext = Extension(ext_name, 1, True)
- ext.type = extension.attrib['type']
-
- for command in extension.findall('./require/command'):
- e = entrypoints[command.attrib['name']]
- assert e.core_version is None
- e.extensions.append(ext)
-
- return entrypoints.values()
-
-
-def get_entrypoints_defines(doc):
- """Maps entry points to extension defines."""
- entrypoints_to_defines = {}
-
- platform_define = {}
- for platform in doc.findall('./platforms/platform'):
- name = platform.attrib['name']
- define = platform.attrib['protect']
- platform_define[name] = define
-
- for extension in doc.findall('./extensions/extension[@platform]'):
- platform = extension.attrib['platform']
- define = platform_define[platform]
-
- for entrypoint in extension.findall('./require/command'):
- fullname = entrypoint.attrib['name']
- entrypoints_to_defines[fullname] = define
-
- return entrypoints_to_defines
-
-def get_entrypoints_from_xml(xml_files):
- entrypoints = []
-
- for filename in xml_files:
- doc = et.parse(filename)
- entrypoints += get_entrypoints(doc, get_entrypoints_defines(doc))
-
- return entrypoints
-
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out-c', help='Output C file.')
parser.add_argument('--out-h', help='Output H file.')
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
parser.add_argument('--xml',
help='Vulkan API XML file.',
required=True,
@@ -884,7 +790,7 @@ def main():
dest='xml_files')
args = parser.parse_args()
- entrypoints = get_entrypoints_from_xml(args.xml_files)
+ entrypoints = get_entrypoints_from_xml(args.xml_files, args.beta)
device_entrypoints = []
physical_device_entrypoints = []
@@ -928,13 +834,13 @@ def main():
# per entry point.
try:
if args.out_h:
- with open(args.out_h, 'w') as f:
+ with open(args.out_h, 'w', encoding='utf-8') as f:
f.write(TEMPLATE_H.render(instance_entrypoints=instance_entrypoints,
physical_device_entrypoints=physical_device_entrypoints,
device_entrypoints=device_entrypoints,
filename=os.path.basename(__file__)))
if args.out_c:
- with open(args.out_c, 'w') as f:
+ with open(args.out_c, 'w', encoding='utf-8') as f:
f.write(TEMPLATE_C.render(instance_entrypoints=instance_entrypoints,
physical_device_entrypoints=physical_device_entrypoints,
device_entrypoints=device_entrypoints,
@@ -947,12 +853,10 @@ def main():
# to print a useful stack trace and prints it, then exits with
# status 1, if python is run with debug; otherwise it just raises
# the exception
- if __debug__:
- import sys
- from mako import exceptions
- sys.stderr.write(exceptions.text_error_template().render() + '\n')
- sys.exit(1)
- raise
+ import sys
+ from mako import exceptions
+ print(exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
if __name__ == '__main__':
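
The new *_uncompacted dispatch tables declared above keep one PFN member per entry point (aliases included) rather than sharing compacted slots, and the matching *_load() helpers fill them straight from GetInstanceProcAddr/GetDeviceProcAddr. A minimal hand-written sketch of loading the instance-level tables, assuming a valid VkInstance and its vkGetInstanceProcAddr; the function name here is invented for illustration:

    /* Sketch only; assumes `instance` and `gipa` were obtained from the
     * Vulkan loader. Member and function names follow the template above. */
    #include "vk_dispatch_table.h"

    static void
    example_load_uncompacted(VkInstance instance, PFN_vkGetInstanceProcAddr gipa)
    {
       struct vk_instance_uncompacted_dispatch_table instance_table;
       struct vk_physical_device_uncompacted_dispatch_table phys_table;

       vk_instance_uncompacted_dispatch_table_load(&instance_table, gipa,
                                                    instance);
       vk_physical_device_uncompacted_dispatch_table_load(&phys_table, gipa,
                                                           instance);

       /* Every entry point gets its own slot; aliased names fall back to each
        * other when only one of them resolves. */
       if (instance_table.EnumeratePhysicalDevices != NULL) {
          /* ... use the raw PFNs directly ... */
       }
    }
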
diff --git a/src/vulkan/util/vk_dispatch_trampolines_gen.py b/src/vulkan/util/vk_dispatch_trampolines_gen.py
new file mode 100644
index 00000000000..ccedf7f82cb
--- /dev/null
+++ b/src/vulkan/util/vk_dispatch_trampolines_gen.py
@@ -0,0 +1,193 @@
+# coding=utf-8
+COPYRIGHT = """\
+/*
+ * Copyright 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+import os
+
+from mako.template import Template
+
+# Mesa-local imports must be declared in meson variable
+# '{file_without_suffix}_depend_files'.
+from vk_entrypoints import get_entrypoints_from_xml
+
+TEMPLATE_H = Template(COPYRIGHT + """\
+/* This file generated from ${filename}, don't edit directly. */
+
+#ifndef VK_DISPATCH_TRAMPOLINES_H
+#define VK_DISPATCH_TRAMPOLINES_H
+
+#include "vk_dispatch_table.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern struct vk_physical_device_dispatch_table vk_physical_device_trampolines;
+extern struct vk_device_dispatch_table vk_device_trampolines;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VK_DISPATCH_TRAMPOLINES_H */
+""")
+
+TEMPLATE_C = Template(COPYRIGHT + """\
+/* This file generated from ${filename}, don't edit directly. */
+
+#include "vk_device.h"
+#include "vk_dispatch_trampolines.h"
+#include "vk_object.h"
+#include "vk_physical_device.h"
+
+% for e in entrypoints:
+ % if not e.is_physical_device_entrypoint() or e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+static VKAPI_ATTR ${e.return_type} VKAPI_CALL
+${e.prefixed_name('vk_tramp')}(${e.decl_params()})
+{
+ <% assert e.params[0].type == 'VkPhysicalDevice' %>
+ VK_FROM_HANDLE(vk_physical_device, vk_physical_device, ${e.params[0].name});
+ % if e.return_type == 'void':
+ vk_physical_device->dispatch_table.${e.name}(${e.call_params()});
+ % else:
+ return vk_physical_device->dispatch_table.${e.name}(${e.call_params()});
+ % endif
+}
+ % if e.guard is not None:
+#endif
+ % endif
+% endfor
+
+struct vk_physical_device_dispatch_table vk_physical_device_trampolines = {
+% for e in entrypoints:
+ % if not e.is_physical_device_entrypoint() or e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ .${e.name} = ${e.prefixed_name('vk_tramp')},
+ % if e.guard is not None:
+#endif
+ % endif
+% endfor
+};
+
+% for e in entrypoints:
+ % if not e.is_device_entrypoint() or e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+static VKAPI_ATTR ${e.return_type} VKAPI_CALL
+${e.prefixed_name('vk_tramp')}(${e.decl_params()})
+{
+ % if e.params[0].type == 'VkDevice':
+ VK_FROM_HANDLE(vk_device, vk_device, ${e.params[0].name});
+ % if e.return_type == 'void':
+ vk_device->dispatch_table.${e.name}(${e.call_params()});
+ % else:
+ return vk_device->dispatch_table.${e.name}(${e.call_params()});
+ % endif
+ % elif e.params[0].type in ('VkCommandBuffer', 'VkQueue'):
+ struct vk_object_base *vk_object = (struct vk_object_base *)${e.params[0].name};
+ % if e.return_type == 'void':
+ vk_object->device->dispatch_table.${e.name}(${e.call_params()});
+ % else:
+ return vk_object->device->dispatch_table.${e.name}(${e.call_params()});
+ % endif
+ % else:
+ assert(!"Unhandled device child trampoline case: ${e.params[0].type}");
+ % endif
+}
+ % if e.guard is not None:
+#endif
+ % endif
+% endfor
+
+struct vk_device_dispatch_table vk_device_trampolines = {
+% for e in entrypoints:
+ % if not e.is_device_entrypoint() or e.alias:
+ <% continue %>
+ % endif
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ .${e.name} = ${e.prefixed_name('vk_tramp')},
+ % if e.guard is not None:
+#endif
+ % endif
+% endfor
+};
+""")
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--out-c', help='Output C file.')
+ parser.add_argument('--out-h', help='Output H file.')
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
+ parser.add_argument('--xml',
+ help='Vulkan API XML file.',
+ required=True,
+ action='append',
+ dest='xml_files')
+ args = parser.parse_args()
+
+ entrypoints = get_entrypoints_from_xml(args.xml_files, args.beta)
+
+    # Render the trampoline header and source, emitting one wrapper per
+    # entry point.
+ try:
+ if args.out_h:
+ with open(args.out_h, 'w', encoding='utf-8') as f:
+ f.write(TEMPLATE_H.render(entrypoints=entrypoints,
+ filename=os.path.basename(__file__)))
+ if args.out_c:
+ with open(args.out_c, 'w', encoding='utf-8') as f:
+ f.write(TEMPLATE_C.render(entrypoints=entrypoints,
+ filename=os.path.basename(__file__)))
+ except Exception:
+ # In the event there's an error, this imports some helpers from mako
+ # to print a useful stack trace and prints it, then exits with
+ # status 1, if python is run with debug; otherwise it just raises
+ # the exception
+ if __debug__:
+ import sys
+ from mako import exceptions
+ sys.stderr.write(exceptions.text_error_template().render() + '\n')
+ sys.exit(1)
+ raise
+
+
+if __name__ == '__main__':
+ main()
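The trampolines generated above all follow one pattern: recover the owning vk_device (or vk_physical_device) from the entry point's first handle argument and re-dispatch through that object's table. A rough Python sketch of that idea, with hypothetical handle and table types (this is not the generated C code), could look like:

```python
# Minimal sketch of the trampoline pattern; the classes and table layout
# here are hypothetical stand-ins for vk_device and its dispatch table.
class Device:
    def __init__(self, dispatch_table):
        self.dispatch_table = dispatch_table   # entry point name -> callable

class CommandBuffer:
    def __init__(self, device):
        self.device = device                   # child objects point back to their device

def make_trampoline(name):
    def trampoline(handle, *args):
        # Dispatch through whichever table the owning device carries.
        device = handle if isinstance(handle, Device) else handle.device
        return device.dispatch_table[name](handle, *args)
    return trampoline

dev = Device({"CmdDraw": lambda cmd, *a: print("draw", a)})
cmd = CommandBuffer(dev)
make_trampoline("CmdDraw")(cmd, 3, 1, 0, 0)   # prints: draw (3, 1, 0, 0)
```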
diff --git a/src/vulkan/util/vk_entrypoints.py b/src/vulkan/util/vk_entrypoints.py
new file mode 100644
index 00000000000..a8280bae2ff
--- /dev/null
+++ b/src/vulkan/util/vk_entrypoints.py
@@ -0,0 +1,147 @@
+# Copyright 2020 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import xml.etree.ElementTree as et
+
+from collections import OrderedDict, namedtuple
+
+# Mesa-local imports must be declared in meson variable
+# '{file_without_suffix}_depend_files'.
+from vk_extensions import get_all_required, filter_api
+
+EntrypointParam = namedtuple('EntrypointParam', 'type name decl len')
+
+class EntrypointBase:
+ def __init__(self, name):
+ assert name.startswith('vk')
+ self.name = name[2:]
+ self.alias = None
+ self.guard = None
+ self.entry_table_index = None
+ # Extensions which require this entrypoint
+ self.core_version = None
+ self.extensions = []
+
+ def prefixed_name(self, prefix):
+ return prefix + '_' + self.name
+
+class Entrypoint(EntrypointBase):
+ def __init__(self, name, return_type, params):
+ super(Entrypoint, self).__init__(name)
+ self.return_type = return_type
+ self.params = params
+ self.guard = None
+ self.aliases = []
+ self.disp_table_index = None
+
+ def is_physical_device_entrypoint(self):
+ return self.params[0].type in ('VkPhysicalDevice', )
+
+ def is_device_entrypoint(self):
+ return self.params[0].type in ('VkDevice', 'VkCommandBuffer', 'VkQueue')
+
+ def decl_params(self, start=0):
+ return ', '.join(p.decl for p in self.params[start:])
+
+ def call_params(self, start=0):
+ return ', '.join(p.name for p in self.params[start:])
+
+class EntrypointAlias(EntrypointBase):
+ def __init__(self, name, entrypoint):
+ super(EntrypointAlias, self).__init__(name)
+ self.alias = entrypoint
+ entrypoint.aliases.append(self)
+
+ def is_physical_device_entrypoint(self):
+ return self.alias.is_physical_device_entrypoint()
+
+ def is_device_entrypoint(self):
+ return self.alias.is_device_entrypoint()
+
+ def prefixed_name(self, prefix):
+ return self.alias.prefixed_name(prefix)
+
+ @property
+ def params(self):
+ return self.alias.params
+
+ @property
+ def return_type(self):
+ return self.alias.return_type
+
+ @property
+ def disp_table_index(self):
+ return self.alias.disp_table_index
+
+ def decl_params(self):
+ return self.alias.decl_params()
+
+ def call_params(self):
+ return self.alias.call_params()
+
+def get_entrypoints(doc, api, beta):
+ """Extract the entry points from the registry."""
+ entrypoints = OrderedDict()
+
+ required = get_all_required(doc, 'command', api, beta)
+
+ for command in doc.findall('./commands/command'):
+ if not filter_api(command, api):
+ continue
+
+ if 'alias' in command.attrib:
+ name = command.attrib['name']
+ target = command.attrib['alias']
+ e = EntrypointAlias(name, entrypoints[target])
+ else:
+ name = command.find('./proto/name').text
+ ret_type = command.find('./proto/type').text
+ params = [EntrypointParam(
+ type=p.find('./type').text,
+ name=p.find('./name').text,
+ decl=''.join(p.itertext()),
+ len=p.attrib.get('altlen', p.attrib.get('len', None))
+ ) for p in command.findall('./param') if filter_api(p, api)]
+ # They really need to be unique
+ e = Entrypoint(name, ret_type, params)
+
+ if name not in required:
+ continue
+
+ r = required[name]
+ e.core_version = r.core_version
+ e.extensions = r.extensions
+ e.guard = r.guard
+
+ assert name not in entrypoints, name
+ entrypoints[name] = e
+
+ return entrypoints.values()
+
+def get_entrypoints_from_xml(xml_files, beta, api='vulkan'):
+ entrypoints = []
+
+ for filename in xml_files:
+ doc = et.parse(filename)
+ entrypoints += get_entrypoints(doc, api, beta)
+
+ return entrypoints
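For reference, a consumer of this module parses the registry once and then buckets the entry points by dispatch level, much like the generators in this series do. A hedged usage sketch (the vk.xml path and the printing are illustrative only):

```python
from vk_entrypoints import get_entrypoints_from_xml

# beta is the meson 'true'/'false' string controlling provisional extensions.
entrypoints = get_entrypoints_from_xml(['vk.xml'], beta='false')

device_eps   = [e for e in entrypoints if e.is_device_entrypoint()]
physical_eps = [e for e in entrypoints if e.is_physical_device_entrypoint()]
instance_eps = [e for e in entrypoints
                if not e.is_device_entrypoint()
                and not e.is_physical_device_entrypoint()]

for e in instance_eps[:5]:
    # e.name has the 'vk' prefix stripped; e.guard is a platform #define or None.
    print(e.name, e.core_version, e.guard)
```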
diff --git a/src/vulkan/util/vk_entrypoints_gen.py b/src/vulkan/util/vk_entrypoints_gen.py
index a8f50c71c24..bbfb4e80158 100644
--- a/src/vulkan/util/vk_entrypoints_gen.py
+++ b/src/vulkan/util/vk_entrypoints_gen.py
@@ -1,4 +1,3 @@
-# coding=utf-8
COPYRIGHT=u"""
/* Copyright © 2015-2021 Intel Corporation
*
@@ -30,19 +29,32 @@ from mako.template import Template
# Mesa-local imports must be declared in meson variable
# '{file_without_suffix}_depend_files'.
-from vk_dispatch_table_gen import get_entrypoints_from_xml
+from vk_entrypoints import get_entrypoints_from_xml
TEMPLATE_H = Template(COPYRIGHT + """\
/* This file generated from ${filename}, don't edit directly. */
#include "vk_dispatch_table.h"
+% for i in includes:
+#include "${i}"
+% endfor
+
#ifndef ${guard}
#define ${guard}
+% if not tmpl_prefix:
#ifdef __cplusplus
extern "C" {
#endif
+% endif
+
+/* clang wants function declarations in the header to have the weak attribute */
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#define ATTR_WEAK __attribute__ ((weak))
+#else
+#define ATTR_WEAK
+#endif
% for p in instance_prefixes:
extern const struct vk_instance_entrypoint_table ${p}_instance_entrypoints;
@@ -56,6 +68,10 @@ extern const struct vk_physical_device_entrypoint_table ${p}_physical_device_ent
extern const struct vk_device_entrypoint_table ${p}_device_entrypoints;
% endfor
+% for v in tmpl_variants_sanitized:
+extern const struct vk_device_entrypoint_table ${tmpl_prefix}_device_entrypoints_${v};
+% endfor
+
% if gen_proto:
% for e in instance_entrypoints:
% if e.guard is not None:
@@ -86,17 +102,28 @@ extern const struct vk_device_entrypoint_table ${p}_device_entrypoints;
#ifdef ${e.guard}
% endif
% for p in device_prefixes:
- VKAPI_ATTR ${e.return_type} VKAPI_CALL ${p}_${e.name}(${e.decl_params()});
+ VKAPI_ATTR ${e.return_type} VKAPI_CALL ${p}_${e.name}(${e.decl_params()}) ATTR_WEAK;
% endfor
+
+ % if tmpl_prefix:
+ template <${tmpl_param}>
+ VKAPI_ATTR ${e.return_type} VKAPI_CALL ${tmpl_prefix}_${e.name}(${e.decl_params()});
+
+ #define ${tmpl_prefix}_${e.name}_GENS(X) \
+ template VKAPI_ATTR ${e.return_type} VKAPI_CALL ${tmpl_prefix}_${e.name}<X>(${e.decl_params()});
+ % endif
+
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
% endif
+% if not tmpl_prefix:
#ifdef __cplusplus
}
#endif
+% endif
#endif /* ${guard} */
""")
@@ -123,18 +150,28 @@ TEMPLATE_C = Template(COPYRIGHT + """
% endif
% for p in prefixes:
#ifdef _MSC_VER
- ${e.return_type} (*${p}_${e.name}_Null)(${e.decl_params()}) = 0;
#ifdef _M_IX86
% for args_size in [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 60, 104]:
- #pragma comment(linker, "/alternatename:_${p}_${e.name}@${args_size}=_${p}_${e.name}_Null")
+ #pragma comment(linker, "/alternatename:_${p}_${e.name}@${args_size}=_vk_entrypoint_stub@0")
% endfor
#else
- #pragma comment(linker, "/alternatename:${p}_${e.name}=${p}_${e.name}_Null")
+ #pragma comment(linker, "/alternatename:${p}_${e.name}=vk_entrypoint_stub")
+#if defined(_M_ARM64EC)
+ #pragma comment(linker, "/alternatename:#${p}_${e.name}=#vk_entrypoint_stub")
+#endif
#endif
#else
VKAPI_ATTR ${e.return_type} VKAPI_CALL ${p}_${e.name}(${e.decl_params()}) __attribute__ ((weak));
+
+ % if entrypoints == device_entrypoints:
+ % for v in tmpl_variants:
+ extern template
+ VKAPI_ATTR __attribute__ ((weak)) ${e.return_type} VKAPI_CALL ${tmpl_prefix}_${e.name}${v}(${e.decl_params()});
+ % endfor
+ % endif
#endif
% endfor
+
% if e.guard is not None:
#endif // ${e.guard}
% endif
@@ -149,11 +186,31 @@ const struct vk_${type}_entrypoint_table ${p}_${type}_entrypoints = {
% endif
.${e.name} = ${p}_${e.name},
% if e.guard is not None:
+#elif defined(_MSC_VER)
+ .${e.name} = (PFN_vkVoidFunction)vk_entrypoint_stub,
#endif // ${e.guard}
% endif
% endfor
};
% endfor
+
+% if entrypoints == device_entrypoints:
+% for v, entrypoint_v in zip(tmpl_variants, tmpl_variants_sanitized):
+const struct vk_${type}_entrypoint_table ${tmpl_prefix}_${type}_entrypoints_${entrypoint_v} = {
+ % for e in entrypoints:
+ % if e.guard is not None:
+#ifdef ${e.guard}
+ % endif
+ .${e.name} = ${tmpl_prefix}_${e.name}${v},
+ % if e.guard is not None:
+#elif defined(_MSC_VER)
+ .${e.name} = (PFN_vkVoidFunction)vk_entrypoint_stub,
+#endif // ${e.guard}
+ % endif
+ % endfor
+};
+% endfor
+% endif
</%def>
${entrypoint_table('instance', instance_entrypoints, instance_prefixes)}
@@ -161,31 +218,12 @@ ${entrypoint_table('physical_device', physical_device_entrypoints, physical_devi
${entrypoint_table('device', device_entrypoints, device_prefixes)}
""")
-def get_entrypoints_defines(doc):
- """Maps entry points to extension defines."""
- entrypoints_to_defines = {}
-
- platform_define = {}
- for platform in doc.findall('./platforms/platform'):
- name = platform.attrib['name']
- define = platform.attrib['protect']
- platform_define[name] = define
-
- for extension in doc.findall('./extensions/extension[@platform]'):
- platform = extension.attrib['platform']
- define = platform_define[platform]
-
- for entrypoint in extension.findall('./require/command'):
- fullname = entrypoint.attrib['name']
- entrypoints_to_defines[fullname] = define
-
- return entrypoints_to_defines
-
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out-c', required=True, help='Output C file.')
parser.add_argument('--out-h', required=True, help='Output H file.')
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
parser.add_argument('--xml',
help='Vulkan API XML file.',
required=True, action='append', dest='xml_files')
@@ -199,13 +237,28 @@ def main():
parser.add_argument('--device-prefix',
help='Prefix to use for device dispatch tables.',
action='append', default=[], dest='device_prefixes')
+ parser.add_argument('--include',
+ help='Includes to add to the H file.',
+ action='append', default=[], dest='includes')
+ parser.add_argument('--tmpl-prefix',
+ help='Prefix to use for templated device dispatch tables.',
+ dest='tmpl_prefix')
+ parser.add_argument('--tmpl-param',
+ help='Param to use for templated device dispatch tables.',
+ dest='tmpl_param')
+ parser.add_argument('--tmpl-variants',
+ help='All template specializations.',
+ nargs='+', default=[], dest='tmpl_variants')
args = parser.parse_args()
instance_prefixes = args.prefixes
physical_device_prefixes = args.prefixes
device_prefixes = args.prefixes + args.device_prefixes
- entrypoints = get_entrypoints_from_xml(args.xml_files)
+ tmpl_variants_sanitized = [
+ ''.join(filter(str.isalnum, v)).lower() for v in args.tmpl_variants]
+
+ entrypoints = get_entrypoints_from_xml(args.xml_files, args.beta)
device_entrypoints = []
physical_device_entrypoints = []
@@ -230,16 +283,21 @@ def main():
'physical_device_prefixes': physical_device_prefixes,
'device_entrypoints': device_entrypoints,
'device_prefixes': device_prefixes,
+ 'includes': args.includes,
+ 'tmpl_prefix': args.tmpl_prefix,
+ 'tmpl_param': args.tmpl_param,
+ 'tmpl_variants': args.tmpl_variants,
+ 'tmpl_variants_sanitized': tmpl_variants_sanitized,
'filename': os.path.basename(__file__),
}
# For outputting entrypoints.h we generate a anv_EntryPoint() prototype
# per entry point.
try:
- with open(args.out_h, 'w') as f:
+ with open(args.out_h, 'w', encoding='utf-8') as f:
guard = os.path.basename(args.out_h).replace('.', '_').upper()
f.write(TEMPLATE_H.render(guard=guard, **environment))
- with open(args.out_c, 'w') as f:
+ with open(args.out_c, 'w', encoding='utf-8') as f:
f.write(TEMPLATE_C.render(**environment))
except Exception:
@@ -247,12 +305,10 @@ def main():
# to print a useful stack trace and prints it, then exits with
# status 1, if python is run with debug; otherwise it just raises
# the exception
- if __debug__:
- import sys
- from mako import exceptions
- sys.stderr.write(exceptions.text_error_template().render() + '\n')
- sys.exit(1)
- raise
+ import sys
+ from mako import exceptions
+ print(exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
if __name__ == '__main__':
main()
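One detail worth calling out from the hunk above: the --tmpl-variants values are sanitized into table suffixes by stripping non-alphanumerics and lowercasing, so each C++ specialization gets a distinct vk_device_entrypoint_table name. A tiny illustration (the variant strings here are made up):

```python
variants = ['<GFX9>', '<GFX11, true>']          # hypothetical --tmpl-variants values
sanitized = [''.join(filter(str.isalnum, v)).lower() for v in variants]
print(sanitized)                                 # ['gfx9', 'gfx11true']
```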
diff --git a/src/vulkan/util/vk_extensions.py b/src/vulkan/util/vk_extensions.py
index 852951f05b6..5372edadb47 100644
--- a/src/vulkan/util/vk_extensions.py
+++ b/src/vulkan/util/vk_extensions.py
@@ -1,20 +1,53 @@
-import argparse
import copy
import re
import xml.etree.ElementTree as et
-def _bool_to_c_expr(b):
- if b is True:
- return 'true'
- if b is False:
- return 'false'
- return b
+def get_api_list(s):
+ apis = []
+ for a in s.split(','):
+ if a == 'disabled':
+ continue
+ assert a in ('vulkan', 'vulkansc')
+ apis.append(a)
+ return apis
class Extension:
- def __init__(self, name, ext_version, enable):
+ def __init__(self, name, number, ext_version):
self.name = name
+ self.type = None
+ self.number = number
+ self.platform = None
+ self.provisional = False
self.ext_version = int(ext_version)
- self.enable = _bool_to_c_expr(enable)
+ self.supported = []
+
+ def from_xml(ext_elem):
+ name = ext_elem.attrib['name']
+ number = int(ext_elem.attrib['number'])
+ supported = get_api_list(ext_elem.attrib['supported'])
+ if name == 'VK_ANDROID_native_buffer':
+ assert not supported
+ supported = ['vulkan']
+
+ if not supported:
+ return Extension(name, number, 0)
+
+ version = None
+ for enum_elem in ext_elem.findall('.require/enum'):
+ if enum_elem.attrib['name'].endswith('_SPEC_VERSION'):
+ # Skip alias SPEC_VERSIONs
+ if 'value' in enum_elem.attrib:
+ assert version is None
+ version = int(enum_elem.attrib['value'])
+
+ assert version is not None
+ ext = Extension(name, number, version)
+ ext.type = ext_elem.attrib['type']
+ ext.platform = ext_elem.attrib.get('platform', None)
+ ext.provisional = ext_elem.attrib.get('provisional', False)
+ ext.supported = supported
+
+ return ext
def c_android_condition(self):
# if it's an EXT or vendor extension, it's allowed
@@ -28,9 +61,8 @@ class Extension:
return 'ANDROID_API_LEVEL >= %d' % (allowed_version)
class ApiVersion:
- def __init__(self, version, enable):
+ def __init__(self, version):
self.version = version
- self.enable = _bool_to_c_expr(enable)
class VkVersion:
def __init__(self, string):
@@ -47,7 +79,7 @@ class VkVersion:
# VK_MAKE_VERSION macro
assert self.major < 1024 and self.minor < 1024
assert self.patch is None or self.patch < 4096
- assert(str(self) == string)
+ assert str(self) == string
def __str__(self):
ver_list = [str(self.major), str(self.minor)]
@@ -56,14 +88,12 @@ class VkVersion:
return '.'.join(ver_list)
def c_vk_version(self):
- patch = self.patch if self.patch is not None else 0
- ver_list = [str(self.major), str(self.minor), str(patch)]
+ ver_list = [str(self.major), str(self.minor), str(self.patch or 0)]
return 'VK_MAKE_VERSION(' + ', '.join(ver_list) + ')'
def __int_ver(self):
# This is just an expansion of VK_VERSION
- patch = self.patch if self.patch is not None else 0
- return (self.major << 22) | (self.minor << 12) | patch
+ return (self.major << 22) | (self.minor << 12) | (self.patch or 0)
def __gt__(self, other):
# If only one of them has a patch version, "ignore" it by making
@@ -90,24 +120,16 @@ def extension_order(ext):
order.append(substring)
return order
-def get_all_exts_from_xml(xml):
+def get_all_exts_from_xml(xml, api='vulkan'):
""" Get a list of all Vulkan extensions. """
xml = et.parse(xml)
extensions = []
for ext_elem in xml.findall('.extensions/extension'):
- supported = ext_elem.attrib['supported'] == 'vulkan'
- name = ext_elem.attrib['name']
- if not supported and name != 'VK_ANDROID_native_buffer':
- continue
- version = None
- for enum_elem in ext_elem.findall('.require/enum'):
- if enum_elem.attrib['name'].endswith('_SPEC_VERSION'):
- assert version is None
- version = int(enum_elem.attrib['value'])
- ext = Extension(name, version, True)
- extensions.append(Extension(name, version, True))
+ ext = Extension.from_xml(ext_elem)
+ if api in ext.supported:
+ extensions.append(ext)
return sorted(extensions, key=extension_order)
@@ -133,10 +155,78 @@ def init_exts_from_xml(xml, extensions, platform_defines):
ext = ext_name_map[ext_name]
ext.type = ext_elem.attrib['type']
+class Requirements:
+ def __init__(self, core_version=None):
+ self.core_version = core_version
+ self.extensions = []
+ self.guard = None
+
+ def add_extension(self, ext):
+ for e in self.extensions:
+ if e == ext:
+                return
+ assert e.name != ext.name
+
+ self.extensions.append(ext)
+
+def filter_api(elem, api):
+ if 'api' not in elem.attrib:
+ return True
+
+ return api in elem.attrib['api'].split(',')
+
+def get_all_required(xml, thing, api, beta):
+ things = {}
+ for feature in xml.findall('./feature'):
+ if not filter_api(feature, api):
+ continue
+
+ version = VkVersion(feature.attrib['number'])
+ for t in feature.findall('./require/' + thing):
+ name = t.attrib['name']
+ assert name not in things
+ things[name] = Requirements(core_version=version)
+
+ for extension in xml.findall('.extensions/extension'):
+ ext = Extension.from_xml(extension)
+ if api not in ext.supported:
+ continue
+
+ if beta != 'true' and ext.provisional:
+ continue
+
+ for require in extension.findall('./require'):
+ if not filter_api(require, api):
+ continue
+
+ for t in require.findall('./' + thing):
+ name = t.attrib['name']
+ r = things.setdefault(name, Requirements())
+ r.add_extension(ext)
+
+ platform_defines = {}
+ for platform in xml.findall('./platforms/platform'):
+ name = platform.attrib['name']
+ define = platform.attrib['protect']
+ platform_defines[name] = define
+
+ for req in things.values():
+ if req.core_version is not None:
+ continue
+
+ for ext in req.extensions:
+ if ext.platform in platform_defines:
+ req.guard = platform_defines[ext.platform]
+ break
+
+ return things
+
# Mapping between extension name and the android version in which the extension
-# was whitelisted in Android CTS.
+# was whitelisted in Android CTS's dEQP-VK.info.device_extensions and
+# dEQP-VK.api.info.android.no_unknown_extensions, excluding those blocked by
+# android.graphics.cts.VulkanFeaturesTest#testVulkanBlockedExtensions.
ALLOWED_ANDROID_VERSION = {
- # Allowed Instance KHR Extensions
+ # checkInstanceExtensions on oreo-cts-release
"VK_KHR_surface": 26,
"VK_KHR_display": 26,
"VK_KHR_android_surface": 26,
@@ -147,61 +237,113 @@ ALLOWED_ANDROID_VERSION = {
"VK_KHR_xlib_surface": 26,
"VK_KHR_get_physical_device_properties2": 26,
"VK_KHR_get_surface_capabilities2": 26,
- "VK_KHR_external_memory_capabilities": 28,
- "VK_KHR_external_semaphore_capabilities": 28,
- "VK_KHR_external_fence_capabilities": 28,
+ "VK_KHR_external_memory_capabilities": 26,
+ "VK_KHR_external_semaphore_capabilities": 26,
+ "VK_KHR_external_fence_capabilities": 26,
+ # on pie-cts-release
"VK_KHR_device_group_creation": 28,
- "VK_KHR_get_display_properties2": 29,
+ "VK_KHR_get_display_properties2": 28,
+ # on android10-tests-release
"VK_KHR_surface_protected_capabilities": 29,
+ # on android13-tests-release
+ "VK_KHR_portability_enumeration": 33,
- # Allowed Device KHR Extensions
+ # checkDeviceExtensions on oreo-cts-release
"VK_KHR_swapchain": 26,
"VK_KHR_display_swapchain": 26,
"VK_KHR_sampler_mirror_clamp_to_edge": 26,
"VK_KHR_shader_draw_parameters": 26,
- "VK_KHR_shader_float_controls": 29,
- "VK_KHR_shader_float16_int8": 29,
"VK_KHR_maintenance1": 26,
"VK_KHR_push_descriptor": 26,
"VK_KHR_descriptor_update_template": 26,
"VK_KHR_incremental_present": 26,
"VK_KHR_shared_presentable_image": 26,
- "VK_KHR_storage_buffer_storage_class": 28,
- "VK_KHR_8bit_storage": 29,
- "VK_KHR_16bit_storage": 28,
- "VK_KHR_get_memory_requirements2": 28,
- "VK_KHR_external_memory": 28,
- "VK_KHR_external_memory_fd": 28,
- "VK_KHR_external_memory_win32": 28,
- "VK_KHR_external_semaphore": 28,
- "VK_KHR_external_semaphore_fd": 28,
- "VK_KHR_external_semaphore_win32": 28,
- "VK_KHR_external_fence": 28,
- "VK_KHR_external_fence_fd": 28,
- "VK_KHR_external_fence_win32": 28,
- "VK_KHR_win32_keyed_mutex": 28,
- "VK_KHR_dedicated_allocation": 28,
- "VK_KHR_variable_pointers": 28,
- "VK_KHR_relaxed_block_layout": 28,
- "VK_KHR_bind_memory2": 28,
- "VK_KHR_maintenance2": 28,
- "VK_KHR_image_format_list": 28,
- "VK_KHR_sampler_ycbcr_conversion": 28,
+ "VK_KHR_storage_buffer_storage_class": 26,
+ "VK_KHR_16bit_storage": 26,
+ "VK_KHR_get_memory_requirements2": 26,
+ "VK_KHR_external_memory": 26,
+ "VK_KHR_external_memory_fd": 26,
+ "VK_KHR_external_memory_win32": 26,
+ "VK_KHR_external_semaphore": 26,
+ "VK_KHR_external_semaphore_fd": 26,
+ "VK_KHR_external_semaphore_win32": 26,
+ "VK_KHR_external_fence": 26,
+ "VK_KHR_external_fence_fd": 26,
+ "VK_KHR_external_fence_win32": 26,
+ "VK_KHR_win32_keyed_mutex": 26,
+ "VK_KHR_dedicated_allocation": 26,
+ "VK_KHR_variable_pointers": 26,
+ "VK_KHR_relaxed_block_layout": 26,
+ "VK_KHR_bind_memory2": 26,
+ "VK_KHR_maintenance2": 26,
+ "VK_KHR_image_format_list": 26,
+ "VK_KHR_sampler_ycbcr_conversion": 26,
+ # on oreo-mr1-cts-release
+ "VK_KHR_draw_indirect_count": 27,
+ # on pie-cts-release
"VK_KHR_device_group": 28,
"VK_KHR_multiview": 28,
"VK_KHR_maintenance3": 28,
- "VK_KHR_draw_indirect_count": 28,
"VK_KHR_create_renderpass2": 28,
- "VK_KHR_depth_stencil_resolve": 29,
"VK_KHR_driver_properties": 28,
+ # on android10-tests-release
+ "VK_KHR_shader_float_controls": 29,
+ "VK_KHR_shader_float16_int8": 29,
+ "VK_KHR_8bit_storage": 29,
+ "VK_KHR_depth_stencil_resolve": 29,
"VK_KHR_swapchain_mutable_format": 29,
"VK_KHR_shader_atomic_int64": 29,
"VK_KHR_vulkan_memory_model": 29,
- "VK_KHR_performance_query": 30,
+ "VK_KHR_swapchain_mutable_format": 29,
+ "VK_KHR_uniform_buffer_standard_layout": 29,
+ # on android11-tests-release
+ "VK_KHR_imageless_framebuffer": 30,
+ "VK_KHR_shader_subgroup_extended_types": 30,
+ "VK_KHR_buffer_device_address": 30,
+ "VK_KHR_separate_depth_stencil_layouts": 30,
+ "VK_KHR_timeline_semaphore": 30,
+ "VK_KHR_spirv_1_4": 30,
+ "VK_KHR_pipeline_executable_properties": 30,
+ "VK_KHR_shader_clock": 30,
+ # blocked by testVulkanBlockedExtensions
+ # "VK_KHR_performance_query": 30,
+ "VK_KHR_shader_non_semantic_info": 30,
+ "VK_KHR_copy_commands2": 30,
+ # on android12-tests-release
+ "VK_KHR_shader_terminate_invocation": 31,
+ "VK_KHR_ray_tracing_pipeline": 31,
+ "VK_KHR_ray_query": 31,
+ "VK_KHR_acceleration_structure": 31,
+ "VK_KHR_pipeline_library": 31,
+ "VK_KHR_deferred_host_operations": 31,
+ "VK_KHR_fragment_shading_rate": 31,
+ "VK_KHR_zero_initialize_workgroup_memory": 31,
+ "VK_KHR_workgroup_memory_explicit_layout": 31,
+ "VK_KHR_synchronization2": 31,
+ "VK_KHR_shader_integer_dot_product": 31,
+ # on android13-tests-release
+ "VK_KHR_dynamic_rendering": 33,
+ "VK_KHR_format_feature_flags2": 33,
+ "VK_KHR_global_priority": 33,
+ "VK_KHR_maintenance4": 33,
+ "VK_KHR_portability_subset": 33,
+ "VK_KHR_present_id": 33,
+ "VK_KHR_present_wait": 33,
+ "VK_KHR_shader_subgroup_uniform_control_flow": 33,
+ # testNoUnknownExtensions on oreo-cts-release
"VK_GOOGLE_display_timing": 26,
- "VK_ANDROID_native_buffer": 26,
+ # on pie-cts-release
"VK_ANDROID_external_memory_android_hardware_buffer": 28,
+ # on android11-tests-release
+ "VK_GOOGLE_decorate_string": 30,
+ "VK_GOOGLE_hlsl_functionality1": 30,
+ # on android13-tests-release
+ "VK_GOOGLE_surfaceless_query": 33,
+
+ # this HAL extension is always allowed and will be filtered out by the
+ # loader
+ "VK_ANDROID_native_buffer": 26,
}
# Extensions with these prefixes are checked in Android CTS, and thus must be
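The new get_all_required() is the piece the entry point and feature generators consume: it records, per required name, the core version that promoted it, the extensions that require it, and a platform guard when the name is extension-only. A hedged sketch of how that output is meant to be read (the command picked here is just an example from vk.xml):

```python
import xml.etree.ElementTree as et
from vk_extensions import get_all_required

doc = et.parse('vk.xml')
required = get_all_required(doc, 'command', 'vulkan', 'false')

# A WSI command that only exists through a platform extension:
r = required['vkCreateWaylandSurfaceKHR']
print(r.core_version)                      # None: never promoted to core
print([e.name for e in r.extensions])      # e.g. ['VK_KHR_wayland_surface']
print(r.guard)                             # e.g. 'VK_USE_PLATFORM_WAYLAND_KHR'
```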
diff --git a/src/vulkan/util/vk_extensions_gen.py b/src/vulkan/util/vk_extensions_gen.py
index 4f8f66c40e5..556e8fcb9e0 100644
--- a/src/vulkan/util/vk_extensions_gen.py
+++ b/src/vulkan/util/vk_extensions_gen.py
@@ -24,218 +24,114 @@ COPYRIGHT = """\
*/
"""
-import xml.etree.ElementTree as et
+import argparse
from mako.template import Template
# Mesa-local imports must be declared in meson variable
# '{file_without_suffix}_depend_files'.
-from vk_extensions import *
+from vk_extensions import get_all_exts_from_xml, init_exts_from_xml
_TEMPLATE_H = Template(COPYRIGHT + """
-#ifndef ${driver.upper()}_EXTENSIONS_H
-#define ${driver.upper()}_EXTENSIONS_H
+#ifndef VK_EXTENSIONS_H
+#define VK_EXTENSIONS_H
#include <stdbool.h>
-%for include in includes:
-#include "${include}"
-%endfor
-
-%if driver == 'vk':
-#define VK_INSTANCE_EXTENSION_COUNT ${len(instance_extensions)}
+<%def name="extension_table(type, extensions)">
+#define VK_${type.upper()}_EXTENSION_COUNT ${len(extensions)}
-extern const VkExtensionProperties vk_instance_extensions[];
+extern const VkExtensionProperties vk_${type}_extensions[];
-struct vk_instance_extension_table {
+struct vk_${type}_extension_table {
union {
- bool extensions[VK_INSTANCE_EXTENSION_COUNT];
+ bool extensions[VK_${type.upper()}_EXTENSION_COUNT];
struct {
-%for ext in instance_extensions:
+%for ext in extensions:
bool ${ext.name[3:]};
%endfor
};
- };
-};
-
-#define VK_DEVICE_EXTENSION_COUNT ${len(device_extensions)}
-
-extern const VkExtensionProperties vk_device_extensions[];
-
-struct vk_device_extension_table {
- union {
- bool extensions[VK_DEVICE_EXTENSION_COUNT];
+ /* Workaround for "error: too many initializers for vk_${type}_extension_table" */
struct {
-%for ext in device_extensions:
- bool ${ext.name[3:]};
+%for ext in extensions:
+ bool ${ext.name[3:]};
%endfor
- };
+ } table;
};
};
-%else:
-#include "vk_extensions.h"
-%endif
+</%def>
-struct ${driver}_physical_device;
+${extension_table('instance', instance_extensions)}
+${extension_table('device', device_extensions)}
-%if driver == 'vk':
-#ifdef ANDROID
+struct vk_physical_device;
+
+#ifdef ANDROID_STRICT
extern const struct vk_instance_extension_table vk_android_allowed_instance_extensions;
extern const struct vk_device_extension_table vk_android_allowed_device_extensions;
#endif
-%else:
-extern const struct vk_instance_extension_table ${driver}_instance_extensions_supported;
-
-void
-${driver}_physical_device_get_supported_extensions(const struct ${driver}_physical_device *device,
- struct vk_device_extension_table *extensions);
-%endif
-#endif /* ${driver.upper()}_EXTENSIONS_H */
+#endif /* VK_EXTENSIONS_H */
""")
_TEMPLATE_C = Template(COPYRIGHT + """
-%if driver == 'vk':
-#include "vk_object.h"
-%else:
-#include "${driver}_private.h"
-%endif
+#include "vulkan/vulkan_core.h"
-#include "${driver}_extensions.h"
+#include "vk_extensions.h"
-%if driver == 'vk':
-const VkExtensionProperties ${driver}_instance_extensions[${driver.upper()}_INSTANCE_EXTENSION_COUNT] = {
+const VkExtensionProperties vk_instance_extensions[VK_INSTANCE_EXTENSION_COUNT] = {
%for ext in instance_extensions:
{"${ext.name}", ${ext.ext_version}},
%endfor
};
-const VkExtensionProperties ${driver}_device_extensions[${driver.upper()}_DEVICE_EXTENSION_COUNT] = {
+const VkExtensionProperties vk_device_extensions[VK_DEVICE_EXTENSION_COUNT] = {
%for ext in device_extensions:
{"${ext.name}", ${ext.ext_version}},
%endfor
};
-#ifdef ANDROID
+#ifdef ANDROID_STRICT
const struct vk_instance_extension_table vk_android_allowed_instance_extensions = {
%for ext in instance_extensions:
.${ext.name[3:]} = ${ext.c_android_condition()},
%endfor
};
-extern const struct vk_device_extension_table vk_android_allowed_device_extensions = {
+const struct vk_device_extension_table vk_android_allowed_device_extensions = {
%for ext in device_extensions:
.${ext.name[3:]} = ${ext.c_android_condition()},
%endfor
};
#endif
-%endif
-
-%if driver != 'vk':
-#include "vk_util.h"
-
-/* Convert the VK_USE_PLATFORM_* defines to booleans */
-%for platform_define in platform_defines:
-#ifdef ${platform_define}
-# undef ${platform_define}
-# define ${platform_define} true
-#else
-# define ${platform_define} false
-#endif
-%endfor
-
-/* And ANDROID too */
-#ifdef ANDROID
-# undef ANDROID
-# define ANDROID true
-#else
-# define ANDROID false
-# define ANDROID_API_LEVEL 0
-#endif
-
-#define ${driver.upper()}_HAS_SURFACE (VK_USE_PLATFORM_WIN32_KHR || \\
- VK_USE_PLATFORM_WAYLAND_KHR || \\
- VK_USE_PLATFORM_XCB_KHR || \\
- VK_USE_PLATFORM_XLIB_KHR || \\
- VK_USE_PLATFORM_DISPLAY_KHR)
-
-static const uint32_t MAX_API_VERSION = ${MAX_API_VERSION.c_vk_version()};
-
-VKAPI_ATTR VkResult VKAPI_CALL ${driver}_EnumerateInstanceVersion(
- uint32_t* pApiVersion)
-{
- *pApiVersion = MAX_API_VERSION;
- return VK_SUCCESS;
-}
-
-const struct vk_instance_extension_table ${driver}_instance_extensions_supported = {
-%for ext in instance_extensions:
- .${ext.name[3:]} = ${ext.enable},
-%endfor
-};
-
-uint32_t
-${driver}_physical_device_api_version(struct ${driver}_physical_device *device)
-{
- uint32_t version = 0;
-
- uint32_t override = vk_get_version_override();
- if (override)
- return MIN2(override, MAX_API_VERSION);
-
-%for version in API_VERSIONS:
- if (!(${version.enable}))
- return version;
- version = ${version.version.c_vk_version()};
-
-%endfor
- return version;
-}
-
-void
-${driver}_physical_device_get_supported_extensions(const struct ${driver}_physical_device *device,
- struct vk_device_extension_table *extensions)
-{
- *extensions = (struct vk_device_extension_table) {
-%for ext in device_extensions:
- .${ext.name[3:]} = ${ext.enable},
-%endfor
- };
-}
-%endif
""")
-def gen_extensions(driver, xml_files, api_versions, max_api_version,
- extensions, out_c, out_h, includes = []):
+def gen_extensions(xml_files, extensions, out_c, out_h):
platform_defines = []
for filename in xml_files:
init_exts_from_xml(filename, extensions, platform_defines)
for ext in extensions:
- assert ext.type == 'instance' or ext.type == 'device'
+ assert ext.type in {'instance', 'device'}
template_env = {
- 'driver': driver,
- 'API_VERSIONS': api_versions,
- 'MAX_API_VERSION': max_api_version,
'instance_extensions': [e for e in extensions if e.type == 'instance'],
'device_extensions': [e for e in extensions if e.type == 'device'],
'platform_defines': platform_defines,
- 'includes': includes,
}
if out_h:
- with open(out_h, 'w') as f:
+ with open(out_h, 'w', encoding='utf-8') as f:
f.write(_TEMPLATE_H.render(**template_env))
if out_c:
- with open(out_c, 'w') as f:
+ with open(out_c, 'w', encoding='utf-8') as f:
f.write(_TEMPLATE_C.render(**template_env))
-if __name__ == '__main__':
+def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out-c', help='Output C file.')
parser.add_argument('--out-h', help='Output H file.')
@@ -250,5 +146,7 @@ if __name__ == '__main__':
for filename in args.xml_files:
extensions += get_all_exts_from_xml(filename)
- gen_extensions('vk', args.xml_files, None, None,
- extensions, args.out_c, args.out_h, [])
+ gen_extensions(args.xml_files, extensions, args.out_c, args.out_h)
+
+if __name__ == '__main__':
+ main()
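With the driver-specific templating gone, the whole generator reduces to two calls, which also makes it easy to drive outside of meson. A small sketch mirroring main() above (paths are placeholders):

```python
from vk_extensions import get_all_exts_from_xml
from vk_extensions_gen import gen_extensions

xml_files = ['vk.xml']                         # placeholder path to the registry
extensions = []
for filename in xml_files:
    extensions += get_all_exts_from_xml(filename)

gen_extensions(xml_files, extensions, 'vk_extensions.c', 'vk_extensions.h')
```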
diff --git a/src/vulkan/util/vk_format.c b/src/vulkan/util/vk_format.c
index f6e6bcec7cf..db05d32ce5c 100644
--- a/src/vulkan/util/vk_format.c
+++ b/src/vulkan/util/vk_format.c
@@ -24,6 +24,10 @@
#include "vk_format.h"
+#include "vk_enum_defines.h"
+#include "vk_enum_to_str.h"
+#include "vk_util.h"
+
/* Note that for packed formats, VK_FORMAT_ lists channels from high to low
* bits occupied by the channel, while MESA_FORMAT_* and PIPE_FORMAT_* are
* low-to-high.
@@ -31,7 +35,7 @@
* Also, missing entries are zero-filled, which happens to be
* PIPE_FORMAT_NONE.
*/
-static const enum pipe_format vk_format_map[] = {
+const enum pipe_format vk_format_map[] = {
/* Missing R4G4 */
[VK_FORMAT_R4G4B4A4_UNORM_PACK16] = PIPE_FORMAT_A4B4G4R4_UNORM,
[VK_FORMAT_B4G4R4A4_UNORM_PACK16] = PIPE_FORMAT_A4R4G4B4_UNORM,
@@ -161,10 +165,18 @@ static const enum pipe_format vk_format_map[] = {
[VK_FORMAT_R64_UINT] = PIPE_FORMAT_R64_UINT,
[VK_FORMAT_R64_SINT] = PIPE_FORMAT_R64_SINT,
- /* Missing rest of 64-bit uint/sint formats */
[VK_FORMAT_R64_SFLOAT] = PIPE_FORMAT_R64_FLOAT,
+
+ [VK_FORMAT_R64G64_UINT] = PIPE_FORMAT_R64G64_UINT,
+ [VK_FORMAT_R64G64_SINT] = PIPE_FORMAT_R64G64_SINT,
[VK_FORMAT_R64G64_SFLOAT] = PIPE_FORMAT_R64G64_FLOAT,
+
+ [VK_FORMAT_R64G64B64_UINT] = PIPE_FORMAT_R64G64B64_UINT,
+ [VK_FORMAT_R64G64B64_SINT] = PIPE_FORMAT_R64G64B64_SINT,
[VK_FORMAT_R64G64B64_SFLOAT] = PIPE_FORMAT_R64G64B64_FLOAT,
+
+ [VK_FORMAT_R64G64B64A64_UINT] = PIPE_FORMAT_R64G64B64A64_UINT,
+ [VK_FORMAT_R64G64B64A64_SINT] = PIPE_FORMAT_R64G64B64A64_SINT,
[VK_FORMAT_R64G64B64A64_SFLOAT] = PIPE_FORMAT_R64G64B64A64_FLOAT,
[VK_FORMAT_B10G11R11_UFLOAT_PACK32] = PIPE_FORMAT_R11G11B10_FLOAT,
@@ -249,10 +261,14 @@ vk_format_to_pipe_format(enum VkFormat vkformat)
{
if (vkformat >= ARRAY_SIZE(vk_format_map)) {
switch (vkformat) {
+ case VK_FORMAT_R10X6_UNORM_PACK16:
+ return PIPE_FORMAT_R16_UNORM;
+ case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
+ return PIPE_FORMAT_R16G16_UNORM;
case VK_FORMAT_G8B8G8R8_422_UNORM:
- return PIPE_FORMAT_YUYV;
+ return PIPE_FORMAT_G8B8_G8R8_UNORM;
case VK_FORMAT_B8G8R8G8_422_UNORM:
- return PIPE_FORMAT_UYVY;
+ return PIPE_FORMAT_B8G8_R8G8_UNORM;
case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
return PIPE_FORMAT_IYUV;
case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
@@ -273,10 +289,16 @@ vk_format_to_pipe_format(enum VkFormat vkformat)
return PIPE_FORMAT_Y16_U16V16_422_UNORM;
case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
return PIPE_FORMAT_Y16_U16_V16_444_UNORM;
- case VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT:
+ case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+ return PIPE_FORMAT_P010;
+ case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
return PIPE_FORMAT_B4G4R4A4_UNORM;
- case VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT:
+ case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
return PIPE_FORMAT_R4G4B4A4_UNORM;
+ case VK_FORMAT_A8_UNORM_KHR:
+ return PIPE_FORMAT_A8_UNORM;
+ case VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR:
+ return PIPE_FORMAT_R5G5B5A1_UNORM;
default:
return PIPE_FORMAT_NONE;
}
@@ -288,6 +310,200 @@ vk_format_to_pipe_format(enum VkFormat vkformat)
return vk_format_map[vkformat];
}
+static const VkFormat formats[PIPE_FORMAT_COUNT] = {
+#define MAP_FORMAT_NORM(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _UNORM] = VK_FORMAT_ ## FMT ## _UNORM, \
+ [PIPE_FORMAT_ ## FMT ## _SNORM] = VK_FORMAT_ ## FMT ## _SNORM,
+
+#define MAP_FORMAT_SCALED(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _USCALED] = VK_FORMAT_ ## FMT ## _USCALED, \
+ [PIPE_FORMAT_ ## FMT ## _SSCALED] = VK_FORMAT_ ## FMT ## _SSCALED,
+
+#define MAP_FORMAT_INT(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _UINT] = VK_FORMAT_ ## FMT ## _UINT, \
+ [PIPE_FORMAT_ ## FMT ## _SINT] = VK_FORMAT_ ## FMT ## _SINT,
+
+#define MAP_FORMAT_SRGB(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _SRGB] = VK_FORMAT_ ## FMT ## _SRGB,
+
+#define MAP_FORMAT_FLOAT(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _FLOAT] = VK_FORMAT_ ## FMT ## _SFLOAT,
+
+ // one component
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8)
+ MAP_FORMAT_SCALED(R8)
+ MAP_FORMAT_INT(R8)
+ MAP_FORMAT_SRGB(R8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16)
+ MAP_FORMAT_SCALED(R16)
+ MAP_FORMAT_INT(R16)
+ MAP_FORMAT_FLOAT(R16)
+ // 32-bits
+ MAP_FORMAT_INT(R32)
+ MAP_FORMAT_FLOAT(R32)
+
+ // two components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8)
+ MAP_FORMAT_SCALED(R8G8)
+ MAP_FORMAT_INT(R8G8)
+ MAP_FORMAT_SRGB(R8G8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16)
+ MAP_FORMAT_SCALED(R16G16)
+ MAP_FORMAT_INT(R16G16)
+ MAP_FORMAT_FLOAT(R16G16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32)
+ MAP_FORMAT_FLOAT(R32G32)
+
+ // three components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8B8)
+ MAP_FORMAT_SCALED(R8G8B8)
+ MAP_FORMAT_INT(R8G8B8)
+ MAP_FORMAT_SRGB(R8G8B8)
+ MAP_FORMAT_NORM(B8G8R8)
+ MAP_FORMAT_SCALED(B8G8R8)
+ MAP_FORMAT_INT(B8G8R8)
+ MAP_FORMAT_SRGB(B8G8R8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16B16)
+ MAP_FORMAT_SCALED(R16G16B16)
+ MAP_FORMAT_INT(R16G16B16)
+ MAP_FORMAT_FLOAT(R16G16B16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32B32)
+ MAP_FORMAT_FLOAT(R32G32B32)
+
+ // four components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8B8A8)
+ MAP_FORMAT_SCALED(R8G8B8A8)
+ MAP_FORMAT_INT(R8G8B8A8)
+ MAP_FORMAT_NORM(B8G8R8A8)
+ MAP_FORMAT_SCALED(B8G8R8A8)
+ MAP_FORMAT_INT(B8G8R8A8)
+ MAP_FORMAT_SRGB(B8G8R8A8)
+ [PIPE_FORMAT_RGBA8888_SRGB] = VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16B16A16)
+ MAP_FORMAT_SCALED(R16G16B16A16)
+ MAP_FORMAT_INT(R16G16B16A16)
+ MAP_FORMAT_FLOAT(R16G16B16A16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32B32A32)
+ MAP_FORMAT_FLOAT(R32G32B32A32)
+
+ // other color formats
+ [PIPE_FORMAT_A4B4G4R4_UNORM] = VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ [PIPE_FORMAT_A4R4G4B4_UNORM] = VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ [PIPE_FORMAT_B4G4R4A4_UNORM] = VK_FORMAT_A4R4G4B4_UNORM_PACK16,
+ [PIPE_FORMAT_R4G4B4A4_UNORM] = VK_FORMAT_A4B4G4R4_UNORM_PACK16,
+ [PIPE_FORMAT_B5G6R5_UNORM] = VK_FORMAT_R5G6B5_UNORM_PACK16,
+ [PIPE_FORMAT_R5G6B5_UNORM] = VK_FORMAT_B5G6R5_UNORM_PACK16,
+
+ [PIPE_FORMAT_A1B5G5R5_UNORM] = VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+ [PIPE_FORMAT_A1R5G5B5_UNORM] = VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ [PIPE_FORMAT_B5G5R5A1_UNORM] = VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+
+ [PIPE_FORMAT_R11G11B10_FLOAT] = VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ [PIPE_FORMAT_R9G9B9E5_FLOAT] = VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ /* ARB_vertex_type_2_10_10_10 */
+ [PIPE_FORMAT_R10G10B10A2_UNORM] = VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_SNORM] = VK_FORMAT_A2B10G10R10_SNORM_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_UNORM] = VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_SNORM] = VK_FORMAT_A2R10G10B10_SNORM_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_USCALED] = VK_FORMAT_A2B10G10R10_USCALED_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_SSCALED] = VK_FORMAT_A2B10G10R10_SSCALED_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_USCALED] = VK_FORMAT_A2R10G10B10_USCALED_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_SSCALED] = VK_FORMAT_A2R10G10B10_SSCALED_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_UINT] = VK_FORMAT_A2B10G10R10_UINT_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_UINT] = VK_FORMAT_A2R10G10B10_UINT_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_SINT] = VK_FORMAT_A2R10G10B10_SINT_PACK32,
+
+ // depth/stencil formats
+ [PIPE_FORMAT_Z32_FLOAT] = VK_FORMAT_D32_SFLOAT,
+ [PIPE_FORMAT_Z32_FLOAT_S8X24_UINT] = VK_FORMAT_D32_SFLOAT_S8_UINT,
+ [PIPE_FORMAT_Z16_UNORM] = VK_FORMAT_D16_UNORM,
+ [PIPE_FORMAT_Z16_UNORM_S8_UINT] = VK_FORMAT_D16_UNORM_S8_UINT,
+ [PIPE_FORMAT_Z24X8_UNORM] = VK_FORMAT_X8_D24_UNORM_PACK32,
+ [PIPE_FORMAT_Z24_UNORM_S8_UINT] = VK_FORMAT_D24_UNORM_S8_UINT,
+ [PIPE_FORMAT_S8_UINT] = VK_FORMAT_S8_UINT,
+
+ // compressed formats
+ [PIPE_FORMAT_DXT1_RGB] = VK_FORMAT_BC1_RGB_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT1_RGBA] = VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT3_RGBA] = VK_FORMAT_BC2_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT5_RGBA] = VK_FORMAT_BC3_UNORM_BLOCK,
+ [PIPE_FORMAT_DXT1_SRGB] = VK_FORMAT_BC1_RGB_SRGB_BLOCK,
+ [PIPE_FORMAT_DXT1_SRGBA] = VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ [PIPE_FORMAT_DXT3_SRGBA] = VK_FORMAT_BC2_SRGB_BLOCK,
+ [PIPE_FORMAT_DXT5_SRGBA] = VK_FORMAT_BC3_SRGB_BLOCK,
+
+ [PIPE_FORMAT_RGTC1_UNORM] = VK_FORMAT_BC4_UNORM_BLOCK,
+ [PIPE_FORMAT_RGTC1_SNORM] = VK_FORMAT_BC4_SNORM_BLOCK,
+ [PIPE_FORMAT_RGTC2_UNORM] = VK_FORMAT_BC5_UNORM_BLOCK,
+ [PIPE_FORMAT_RGTC2_SNORM] = VK_FORMAT_BC5_SNORM_BLOCK,
+ [PIPE_FORMAT_BPTC_RGBA_UNORM] = VK_FORMAT_BC7_UNORM_BLOCK,
+ [PIPE_FORMAT_BPTC_SRGBA] = VK_FORMAT_BC7_SRGB_BLOCK,
+ [PIPE_FORMAT_BPTC_RGB_FLOAT] = VK_FORMAT_BC6H_SFLOAT_BLOCK,
+ [PIPE_FORMAT_BPTC_RGB_UFLOAT] = VK_FORMAT_BC6H_UFLOAT_BLOCK,
+
+ [PIPE_FORMAT_ETC1_RGB8] = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_RGB8] = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_SRGB8] = VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,
+ [PIPE_FORMAT_ETC2_RGB8A1] = VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_SRGB8A1] = VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK,
+ [PIPE_FORMAT_ETC2_RGBA8] = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_SRGBA8] = VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK,
+ [PIPE_FORMAT_ETC2_R11_UNORM] = VK_FORMAT_EAC_R11_UNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_R11_SNORM] = VK_FORMAT_EAC_R11_SNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_RG11_UNORM] = VK_FORMAT_EAC_R11G11_UNORM_BLOCK,
+ [PIPE_FORMAT_ETC2_RG11_SNORM] = VK_FORMAT_EAC_R11G11_SNORM_BLOCK,
+
+ [PIPE_FORMAT_ASTC_4x4] = VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_4x4_SRGB] = VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_5x4] = VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_5x4_SRGB] = VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_5x5] = VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_5x5_SRGB] = VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_6x5] = VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_6x5_SRGB] = VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_6x6] = VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_6x6_SRGB] = VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_8x5] = VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_8x5_SRGB] = VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_8x6] = VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_8x6_SRGB] = VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_8x8] = VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_8x8_SRGB] = VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_10x5] = VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_10x5_SRGB] = VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_10x6] = VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_10x6_SRGB] = VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_10x8] = VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_10x8_SRGB] = VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_10x10] = VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_10x10_SRGB] = VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_12x10] = VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_12x10_SRGB] = VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
+ [PIPE_FORMAT_ASTC_12x12] = VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ [PIPE_FORMAT_ASTC_12x12_SRGB] = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+};
+
+VkFormat
+vk_format_from_pipe_format(enum pipe_format format)
+{
+ return formats[format];
+}
+
VkImageAspectFlags
vk_format_aspects(VkFormat format)
{
@@ -332,6 +548,10 @@ vk_format_aspects(VkFormat format)
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+ case VK_FORMAT_G8_B8R8_2PLANE_444_UNORM:
+ case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16:
+ case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16:
+ case VK_FORMAT_G16_B16R16_2PLANE_444_UNORM:
return (VK_IMAGE_ASPECT_PLANE_0_BIT |
VK_IMAGE_ASPECT_PLANE_1_BIT);
@@ -339,3 +559,291 @@ vk_format_aspects(VkFormat format)
return VK_IMAGE_ASPECT_COLOR_BIT;
}
}
+
+VkFormat
+vk_format_get_plane_format(VkFormat format, unsigned plane_id)
+{
+ assert(plane_id < vk_format_get_plane_count(format));
+ const struct vk_format_ycbcr_info *ycbcr_info =
+ vk_format_get_ycbcr_info(format);
+ if (ycbcr_info && ycbcr_info->n_planes > 1) {
+ const struct vk_format_ycbcr_plane *plane_info = &ycbcr_info->planes[plane_id];
+ return plane_info->format;
+ } else {
+ assert(vk_format_get_plane_count(format) == 1);
+ return format;
+ }
+}
+
+VkFormat
+vk_format_get_aspect_format(VkFormat format, const VkImageAspectFlags aspect)
+{
+ assert(util_bitcount(aspect) == 1);
+ assert(aspect & vk_format_aspects(format));
+
+ switch (aspect) {
+ case VK_IMAGE_ASPECT_COLOR_BIT:
+ return format;
+ case VK_IMAGE_ASPECT_DEPTH_BIT:
+ return vk_format_depth_only(format);
+ case VK_IMAGE_ASPECT_STENCIL_BIT:
+ return vk_format_stencil_only(format);
+ case VK_IMAGE_ASPECT_PLANE_0_BIT:
+ return vk_format_get_plane_format(format, 0);
+ case VK_IMAGE_ASPECT_PLANE_1_BIT:
+ return vk_format_get_plane_format(format, 1);
+ case VK_IMAGE_ASPECT_PLANE_2_BIT:
+ return vk_format_get_plane_format(format, 2);
+ default:
+ unreachable("Cannot translate format aspect");
+ }
+}
+
+void
+vk_component_mapping_to_pipe_swizzle(VkComponentMapping mapping,
+ unsigned char out_swizzle[4])
+{
+ VkComponentSwizzle swizzle[4] = { mapping.r, mapping.g, mapping.b, mapping.a };
+ for (unsigned i = 0; i < 4; i++) {
+ switch (swizzle[i]) {
+ case VK_COMPONENT_SWIZZLE_R:
+ out_swizzle[i] = PIPE_SWIZZLE_X;
+ break;
+ case VK_COMPONENT_SWIZZLE_G:
+ out_swizzle[i] = PIPE_SWIZZLE_Y;
+ break;
+ case VK_COMPONENT_SWIZZLE_B:
+ out_swizzle[i] = PIPE_SWIZZLE_Z;
+ break;
+ case VK_COMPONENT_SWIZZLE_A:
+ out_swizzle[i] = PIPE_SWIZZLE_W;
+ break;
+ case VK_COMPONENT_SWIZZLE_IDENTITY:
+ out_swizzle[i] = PIPE_SWIZZLE_X + i;
+ break;
+ case VK_COMPONENT_SWIZZLE_ZERO:
+ out_swizzle[i] = PIPE_SWIZZLE_0;
+ break;
+ case VK_COMPONENT_SWIZZLE_ONE:
+ out_swizzle[i] = PIPE_SWIZZLE_1;
+ break;
+ default:
+ unreachable("unknown swizzle");
+ }
+ }
+}
+
+#define fmt_unsupported(__vk_fmt) \
+ [VK_ENUM_OFFSET(__vk_fmt)] = { \
+ .n_planes = 0, \
+ }
+
+#define y_plane(__plane_fmt, __ycbcr_swizzle, dhs, dvs) \
+ { .format = __plane_fmt, \
+ .has_chroma = false, \
+ .denominator_scales = { dhs, dvs, }, \
+ .ycbcr_swizzle = __ycbcr_swizzle, \
+ }
+
+#define c_plane(__plane_fmt, __ycbcr_swizzle, dhs, dvs) \
+ { .format = __plane_fmt, \
+ .has_chroma = true, \
+ .denominator_scales = { dhs, dvs, }, \
+ .ycbcr_swizzle = __ycbcr_swizzle, \
+ }
+
+#define ycbcr_fmt(__vk_fmt, __n_planes, ...) \
+ [VK_ENUM_OFFSET(__vk_fmt)] = { \
+ .n_planes = __n_planes, \
+ .planes = { \
+ __VA_ARGS__, \
+ }, \
+ }
+
+#define YCBCR_SWIZ(x, y, z, w) { \
+ VK_COMPONENT_SWIZZLE_##x, \
+ VK_COMPONENT_SWIZZLE_##y, \
+ VK_COMPONENT_SWIZZLE_##z, \
+ VK_COMPONENT_SWIZZLE_##w, \
+ }
+
+static const struct vk_format_ycbcr_info ycbcr_infos[] = {
+ ycbcr_fmt(VK_FORMAT_G8B8G8R8_422_UNORM, 1,
+ y_plane(VK_FORMAT_G8B8G8R8_422_UNORM, YCBCR_SWIZ(R, G, B, ZERO), 1, 1)),
+ ycbcr_fmt(VK_FORMAT_B8G8R8G8_422_UNORM, 1,
+ y_plane(VK_FORMAT_B8G8R8G8_422_UNORM, YCBCR_SWIZ(R, G, B, ZERO), 1, 1)),
+ ycbcr_fmt(VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, 3,
+ y_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 2),
+ c_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, 2,
+ y_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8G8_UNORM, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, 3,
+ y_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 1),
+ c_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, 2,
+ y_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8G8_UNORM, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, 3,
+ y_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 1, 1)),
+
+ fmt_unsupported(VK_FORMAT_R10X6_UNORM_PACK16),
+ fmt_unsupported(VK_FORMAT_R10X6G10X6_UNORM_2PACK16),
+ fmt_unsupported(VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16),
+
+ ycbcr_fmt(VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, 1,
+ y_plane(VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, YCBCR_SWIZ(B, G, R, ZERO), 1, 1)),
+ ycbcr_fmt(VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, 1,
+ y_plane(VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, YCBCR_SWIZ(B, G, R, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, 3,
+ y_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 2),
+ c_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, 2,
+ y_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6G10X6_UNORM_2PACK16, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, 3,
+ y_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 1),
+ c_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, 2,
+ y_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6G10X6_UNORM_2PACK16, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, 3,
+ y_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 1, 1)),
+
+ fmt_unsupported(VK_FORMAT_R12X4_UNORM_PACK16),
+ fmt_unsupported(VK_FORMAT_R12X4G12X4_UNORM_2PACK16),
+ fmt_unsupported(VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16),
+
+ ycbcr_fmt(VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, 1,
+ y_plane(VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, YCBCR_SWIZ(B, G, R, ZERO), 1, 1)),
+ ycbcr_fmt(VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, 1,
+ y_plane(VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, YCBCR_SWIZ(B, G, R, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, 3,
+ y_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 2),
+ c_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, 2,
+ y_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4G12X4_UNORM_2PACK16, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, 3,
+ y_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 1),
+ c_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, 2,
+ y_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4G12X4_UNORM_2PACK16, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, 3,
+ y_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G16B16G16R16_422_UNORM, 1,
+ y_plane(VK_FORMAT_G16B16G16R16_422_UNORM, YCBCR_SWIZ(B, G, R, ZERO), 1, 1)),
+ ycbcr_fmt(VK_FORMAT_B16G16R16G16_422_UNORM, 1,
+ y_plane(VK_FORMAT_B16G16R16G16_422_UNORM, YCBCR_SWIZ(B, G, R, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, 3,
+ y_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 2),
+ c_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, 2,
+ y_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16G16_UNORM, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 2)),
+ ycbcr_fmt(VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, 3,
+ y_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 2, 1),
+ c_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, 2,
+ y_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16G16_UNORM, YCBCR_SWIZ(B, R, ZERO, ZERO), 2, 1)),
+ ycbcr_fmt(VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, 3,
+ y_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(B, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(R, ZERO, ZERO, ZERO), 1, 1)),
+};
+
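+/* Formats from VK_EXT_ycbcr_2plane_444_formats, in enum-offset order so they
+ * can be looked up directly in vk_format_get_ycbcr_info(). */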
+static const struct vk_format_ycbcr_info ycbcr_2plane_444_infos[] = {
+ ycbcr_fmt(VK_FORMAT_G8_B8R8_2PLANE_444_UNORM, 2,
+ y_plane(VK_FORMAT_R8_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R8G8_UNORM, YCBCR_SWIZ(B, R, ZERO, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16, 2,
+ y_plane(VK_FORMAT_R10X6_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R10X6G10X6_UNORM_2PACK16, YCBCR_SWIZ(B, R, ZERO, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16, 2,
+ y_plane(VK_FORMAT_R12X4_UNORM_PACK16, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R12X4G12X4_UNORM_2PACK16, YCBCR_SWIZ(B, R, ZERO, ZERO), 1, 1)),
+
+ ycbcr_fmt(VK_FORMAT_G16_B16R16_2PLANE_444_UNORM, 2,
+ y_plane(VK_FORMAT_R16_UNORM, YCBCR_SWIZ(G, ZERO, ZERO, ZERO), 1, 1),
+ c_plane(VK_FORMAT_R16G16_UNORM, YCBCR_SWIZ(B, R, ZERO, ZERO), 1, 1)),
+};
+
+const struct vk_format_ycbcr_info *
+vk_format_get_ycbcr_info(VkFormat format)
+{
+ uint32_t enum_offset = VK_ENUM_OFFSET(format);
+ uint32_t ext_number = VK_ENUM_EXTENSION(format);
+ const struct vk_format_ycbcr_info *info;
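+ /* YCbCr formats come from two extensions: the extension number selects the
+  * table and the enum offset indexes into it. */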
+ switch (ext_number) {
+ case _VK_KHR_sampler_ycbcr_conversion_number:
+ if (enum_offset >= ARRAY_SIZE(ycbcr_infos))
+ return NULL;
+ info = &ycbcr_infos[enum_offset];
+ break;
+
+ case _VK_EXT_ycbcr_2plane_444_formats_number:
+ if (enum_offset >= ARRAY_SIZE(ycbcr_2plane_444_infos))
+ return NULL;
+ info = &ycbcr_2plane_444_infos[enum_offset];
+ break;
+
+ default:
+ return NULL;
+ }
+
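+ /* Entries added with fmt_unsupported() are placeholders with zero planes. */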
+ if (info->n_planes == 0)
+ return NULL;
+
+ return info;
+}
+
+static uint32_t
+swizzled_color_component(const VkClearColorValue *color,
+ VkComponentSwizzle swizzle,
+ uint32_t comp, bool is_int)
+{
+ switch (swizzle) {
+ case VK_COMPONENT_SWIZZLE_IDENTITY: return color->uint32[comp];
+ case VK_COMPONENT_SWIZZLE_ZERO: return 0;
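+ /* For non-integer formats, ONE is the bit pattern of 1.0f (0x3f800000). */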
+ case VK_COMPONENT_SWIZZLE_ONE: return is_int ? 1 : 0x3f800000;
+ case VK_COMPONENT_SWIZZLE_R: return color->uint32[0];
+ case VK_COMPONENT_SWIZZLE_G: return color->uint32[1];
+ case VK_COMPONENT_SWIZZLE_B: return color->uint32[2];
+ case VK_COMPONENT_SWIZZLE_A: return color->uint32[3];
+ default: unreachable("Invalid component swizzle");
+ }
+}
+
+VkClearColorValue
+vk_swizzle_color_value(VkClearColorValue color,
+ VkComponentMapping swizzle, bool is_int)
+{
+ return (VkClearColorValue) { .uint32 = {
+ swizzled_color_component(&color, swizzle.r, 0, is_int),
+ swizzled_color_component(&color, swizzle.g, 1, is_int),
+ swizzled_color_component(&color, swizzle.b, 2, is_int),
+ swizzled_color_component(&color, swizzle.a, 3, is_int),
+ }};
+}
diff --git a/src/vulkan/util/vk_format.h b/src/vulkan/util/vk_format.h
index f03a35e2ad7..e8bb2d56190 100644
--- a/src/vulkan/util/vk_format.h
+++ b/src/vulkan/util/vk_format.h
@@ -27,17 +27,29 @@
#include <vulkan/vulkan_core.h>
#include "util/format/u_format.h"
+#include "util/u_math.h"
#ifdef __cplusplus
extern "C" {
#endif
+extern const enum pipe_format vk_format_map[];
+
enum pipe_format
vk_format_to_pipe_format(enum VkFormat vkformat);
+VkFormat
+vk_format_from_pipe_format(enum pipe_format format);
+
VkImageAspectFlags
vk_format_aspects(VkFormat format);
+static inline const struct util_format_description *
+vk_format_description(VkFormat format)
+{
+ return util_format_description(vk_format_to_pipe_format(format));
+}
+
static inline bool
vk_format_is_color(VkFormat format)
{
@@ -88,6 +100,170 @@ vk_format_stencil_only(VkFormat format)
return VK_FORMAT_S8_UINT;
}
+void vk_component_mapping_to_pipe_swizzle(VkComponentMapping mapping,
+ unsigned char out_swizzle[4]);
+
+static inline bool
+vk_format_is_int(VkFormat format)
+{
+ return util_format_is_pure_integer(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_sint(VkFormat format)
+{
+ return util_format_is_pure_sint(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_uint(VkFormat format)
+{
+ return util_format_is_pure_uint(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_unorm(VkFormat format)
+{
+ return util_format_is_unorm(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_snorm(VkFormat format)
+{
+ return util_format_is_snorm(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_float(VkFormat format)
+{
+ return util_format_is_float(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_srgb(VkFormat format)
+{
+ return util_format_is_srgb(vk_format_to_pipe_format(format));
+}
+
+static inline bool vk_format_is_alpha(VkFormat format)
+{
+ return util_format_is_alpha(vk_format_to_pipe_format(format));
+}
+
+static inline bool vk_format_is_alpha_on_msb(VkFormat vk_format)
+{
+ const struct util_format_description *desc =
+ vk_format_description(vk_format);
+
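+ /* Alpha sits in the most significant bits when it maps to the W component on
+  * little-endian or the X component on big-endian. */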
+ return (desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
+ desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) &&
+#if UTIL_ARCH_BIG_ENDIAN
+ desc->swizzle[3] == PIPE_SWIZZLE_X;
+#else
+ desc->swizzle[3] == PIPE_SWIZZLE_W;
+#endif
+}
+
+static inline unsigned
+vk_format_get_blocksize(VkFormat format)
+{
+ return util_format_get_blocksize(vk_format_to_pipe_format(format));
+}
+
+static inline unsigned
+vk_format_get_blockwidth(VkFormat format)
+{
+ return util_format_get_blockwidth(vk_format_to_pipe_format(format));
+}
+
+static inline unsigned
+vk_format_get_blockheight(VkFormat format)
+{
+ return util_format_get_blockheight(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_is_compressed(VkFormat format)
+{
+ /* This includes 4:2:2 formats, which count as compressed formats in Vulkan. */
+ return vk_format_get_blockwidth(format) > 1;
+}
+
+static inline bool
+vk_format_is_block_compressed(VkFormat format)
+{
+ return util_format_is_compressed(vk_format_to_pipe_format(format));
+}
+
+static inline unsigned
+vk_format_get_component_bits(VkFormat format, enum util_format_colorspace colorspace,
+ unsigned component)
+{
+ return util_format_get_component_bits(vk_format_to_pipe_format(format),
+ colorspace,
+ component);
+}
+
+static inline unsigned
+vk_format_get_nr_components(VkFormat format)
+{
+ return util_format_get_nr_components(vk_format_to_pipe_format(format));
+}
+
+static inline bool
+vk_format_has_alpha(VkFormat format)
+{
+ return util_format_has_alpha(vk_format_to_pipe_format(format));
+}
+
+static inline unsigned
+vk_format_get_blocksizebits(VkFormat format)
+{
+ return util_format_get_blocksizebits(vk_format_to_pipe_format(format));
+}
+
+VkFormat
+vk_format_get_plane_format(VkFormat format, unsigned plane_id);
+
+VkFormat
+vk_format_get_aspect_format(VkFormat format, const VkImageAspectFlags aspect);
+
+struct vk_format_ycbcr_plane {
+ /* RGBA format for this plane */
+ VkFormat format;
+
+ /* Whether this plane contains chroma channels */
+ bool has_chroma;
+
+ /* Downscaling denominators (width, height) for this plane, e.g. 2, 2 for 4:2:0 chroma */
+ uint8_t denominator_scales[2];
+
+ /* How to map sampled ycbcr planes to a single 4 component element.
+ *
+ * We use uint8_t for compactness but it's actually VkComponentSwizzle.
+ */
+ uint8_t ycbcr_swizzle[4];
+};
+
+struct vk_format_ycbcr_info {
+ uint8_t n_planes;
+ struct vk_format_ycbcr_plane planes[3];
+};
+
+const struct vk_format_ycbcr_info *vk_format_get_ycbcr_info(VkFormat format);
+
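+/* Number of planes for multi-planar formats, 1 for everything else. */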
+static inline unsigned
+vk_format_get_plane_count(VkFormat format)
+{
+ const struct vk_format_ycbcr_info *ycbcr_info =
+ vk_format_get_ycbcr_info(format);
+ return ycbcr_info ? ycbcr_info->n_planes : 1;
+}
+
+VkClearColorValue
+vk_swizzle_color_value(VkClearColorValue color,
+ VkComponentMapping swizzle, bool is_int);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/vulkan/util/vk_icd_gen.py b/src/vulkan/util/vk_icd_gen.py
index d5401e80156..5cdf6728205 100644
--- a/src/vulkan/util/vk_icd_gen.py
+++ b/src/vulkan/util/vk_icd_gen.py
@@ -22,10 +22,10 @@
import argparse
import json
-import os.path
import re
import xml.etree.ElementTree as et
+
def get_xml_patch_version(xml_file):
xml = et.parse(xml_file)
for d in xml.findall('.types/type'):
@@ -34,9 +34,11 @@ def get_xml_patch_version(xml_file):
name = d.find('.name')
if name.text != 'VK_HEADER_VERSION':
- continue;
+ continue
return name.tail.strip()
+ assert False, f"Failed to find VK_HEADER_VERSION in {xml_file}"
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
@@ -48,6 +50,8 @@ if __name__ == '__main__':
help='Path to installed library')
parser.add_argument('--out', required=False,
help='Output json file.')
+ parser.add_argument('--use-backslash', action='store_true',
+ help='Use backslash (Windows).')
args = parser.parse_args()
version = args.api_version
@@ -57,10 +61,14 @@ if __name__ == '__main__':
else:
re.match(r'\d+\.\d+\.\d+', version)
+ lib_path = args.lib_path
+ if args.use_backslash:
+ lib_path = lib_path.replace('/', '\\')
+
json_data = {
'file_format_version': '1.0.0',
'ICD': {
- 'library_path': args.lib_path,
+ 'library_path': lib_path,
'api_version': version,
},
}
@@ -72,7 +80,7 @@ if __name__ == '__main__':
}
if args.out:
- with open(args.out, 'w') as f:
+ with open(args.out, 'w', encoding='utf-8') as f:
json.dump(json_data, f, **json_params)
else:
print(json.dumps(json_data, **json_params))
diff --git a/src/vulkan/util/vk_image.c b/src/vulkan/util/vk_image.c
deleted file mode 100644
index 85e15803b8d..00000000000
--- a/src/vulkan/util/vk_image.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Copyright © 2021 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "vk_image.h"
-
-#include <vulkan/vulkan_android.h>
-
-#ifndef _WIN32
-#include <drm-uapi/drm_fourcc.h>
-#endif
-
-#include "vk_alloc.h"
-#include "vk_common_entrypoints.h"
-#include "vk_device.h"
-#include "vk_format.h"
-#include "vk_util.h"
-#include "vulkan/wsi/wsi_common.h"
-
-static VkExtent3D
-sanitize_image_extent(const VkImageType imageType,
- const VkExtent3D imageExtent)
-{
- switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkExtent3D) { imageExtent.width, 1, 1 };
- case VK_IMAGE_TYPE_2D:
- return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
- case VK_IMAGE_TYPE_3D:
- return imageExtent;
- default:
- unreachable("invalid image type");
- }
-}
-
-void
-vk_image_init(struct vk_device *device,
- struct vk_image *image,
- const VkImageCreateInfo *pCreateInfo)
-{
- vk_object_base_init(device, &image->base, VK_OBJECT_TYPE_IMAGE);
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
- assert(pCreateInfo->mipLevels > 0);
- assert(pCreateInfo->arrayLayers > 0);
- assert(pCreateInfo->samples > 0);
- assert(pCreateInfo->extent.width > 0);
- assert(pCreateInfo->extent.height > 0);
- assert(pCreateInfo->extent.depth > 0);
-
- if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
- assert(pCreateInfo->imageType == VK_IMAGE_TYPE_2D);
- if (pCreateInfo->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)
- assert(pCreateInfo->imageType == VK_IMAGE_TYPE_3D);
-
- image->create_flags = pCreateInfo->flags;
- image->image_type = pCreateInfo->imageType;
- vk_image_set_format(image, pCreateInfo->format);
- image->extent = sanitize_image_extent(pCreateInfo->imageType,
- pCreateInfo->extent);
- image->mip_levels = pCreateInfo->mipLevels;
- image->array_layers = pCreateInfo->arrayLayers;
- image->samples = pCreateInfo->samples;
- image->tiling = pCreateInfo->tiling;
- image->usage = pCreateInfo->usage;
-
- if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
- const VkImageStencilUsageCreateInfoEXT *stencil_usage_info =
- vk_find_struct_const(pCreateInfo->pNext,
- IMAGE_STENCIL_USAGE_CREATE_INFO_EXT);
- image->stencil_usage =
- stencil_usage_info ? stencil_usage_info->stencilUsage :
- pCreateInfo->usage;
- } else {
- image->stencil_usage = 0;
- }
-
- const VkExternalMemoryImageCreateInfo *ext_mem_info =
- vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
- if (ext_mem_info)
- image->external_handle_types = ext_mem_info->handleTypes;
- else
- image->external_handle_types = 0;
-
- const struct wsi_image_create_info *wsi_info =
- vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
- image->wsi_legacy_scanout = wsi_info && wsi_info->scanout;
-
-#ifndef _WIN32
- image->drm_format_mod = ((1ULL << 56) - 1) /* DRM_FORMAT_MOD_INVALID */;
-#endif
-
-#ifdef ANDROID
- const VkExternalFormatANDROID *ext_format =
- vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_FORMAT_ANDROID);
- if (ext_format && ext_format->externalFormat != 0) {
- assert(image->format == VK_FORMAT_UNDEFINED);
- assert(image->external_handle_types &
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID);
- image->android_external_format = ext_format->externalFormat;
- } else {
- image->android_external_format = 0;
- }
-#endif
-}
-
-void *
-vk_image_create(struct vk_device *device,
- const VkImageCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc,
- size_t size)
-{
- struct vk_image *image =
- vk_zalloc2(&device->alloc, alloc, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (image == NULL)
- return NULL;
-
- vk_image_init(device, image, pCreateInfo);
-
- return image;
-}
-
-void
-vk_image_finish(struct vk_image *image)
-{
- vk_object_base_finish(&image->base);
-}
-
-void
-vk_image_destroy(struct vk_device *device,
- const VkAllocationCallbacks *alloc,
- struct vk_image *image)
-{
- vk_object_free(device, alloc, image);
-}
-
-#ifndef _WIN32
-VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_GetImageDrmFormatModifierPropertiesEXT(UNUSED VkDevice device,
- VkImage _image,
- VkImageDrmFormatModifierPropertiesEXT *pProperties)
-{
- VK_FROM_HANDLE(vk_image, image, _image);
-
- assert(pProperties->sType ==
- VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT);
-
- assert(image->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
- pProperties->drmFormatModifier = image->drm_format_mod;
-
- return VK_SUCCESS;
-}
-#endif
-
-void
-vk_image_set_format(struct vk_image *image, VkFormat format)
-{
- image->format = format;
- image->aspects = vk_format_aspects(format);
-}
-
-VkImageUsageFlags
-vk_image_usage(const struct vk_image *image,
- VkImageAspectFlags aspect_mask)
-{
- assert(!(aspect_mask & ~image->aspects));
-
- /* From the Vulkan 1.2.131 spec:
- *
- *    "If the image has a depth-stencil format and was created with
- * a VkImageStencilUsageCreateInfo structure included in the pNext
- * chain of VkImageCreateInfo, the usage is calculated based on the
- * subresource.aspectMask provided:
- *
- * - If aspectMask includes only VK_IMAGE_ASPECT_STENCIL_BIT, the
- * implicit usage is equal to
- * VkImageStencilUsageCreateInfo::stencilUsage.
- *
- * - If aspectMask includes only VK_IMAGE_ASPECT_DEPTH_BIT, the
- * implicit usage is equal to VkImageCreateInfo::usage.
- *
- * - If both aspects are included in aspectMask, the implicit usage
- * is equal to the intersection of VkImageCreateInfo::usage and
- * VkImageStencilUsageCreateInfo::stencilUsage.
- */
- if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
- return image->stencil_usage;
- } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT)) {
- return image->usage & image->stencil_usage;
- } else {
- /* This also handles the color case */
- return image->usage;
- }
-}
-
-#define VK_IMAGE_ASPECT_ANY_COLOR_MASK_MESA ( \
- VK_IMAGE_ASPECT_COLOR_BIT | \
- VK_IMAGE_ASPECT_PLANE_0_BIT | \
- VK_IMAGE_ASPECT_PLANE_1_BIT | \
- VK_IMAGE_ASPECT_PLANE_2_BIT)
-
-/** Expands the given aspect mask relative to the image
- *
- * If the image has color plane aspects and VK_IMAGE_ASPECT_COLOR_BIT has been
- * requested, this returns the aspects of the underlying image.
- *
- * For example,
- *
- * VK_IMAGE_ASPECT_COLOR_BIT
- *
- * will be converted to
- *
- * VK_IMAGE_ASPECT_PLANE_0_BIT |
- * VK_IMAGE_ASPECT_PLANE_1_BIT |
- * VK_IMAGE_ASPECT_PLANE_2_BIT
- *
- * for an image of format VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM.
- */
-VkImageAspectFlags
-vk_image_expand_aspect_mask(const struct vk_image *image,
- VkImageAspectFlags aspect_mask)
-{
- if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
- assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_MASK_MESA);
- return image->aspects;
- } else {
- assert(aspect_mask && !(aspect_mask & ~image->aspects));
- return aspect_mask;
- }
-}
-
-static VkComponentSwizzle
-remap_swizzle(VkComponentSwizzle swizzle, VkComponentSwizzle component)
-{
- return swizzle == VK_COMPONENT_SWIZZLE_IDENTITY ? component : swizzle;
-}
-
-void
-vk_image_view_init(struct vk_device *device,
- struct vk_image_view *image_view,
- const VkImageViewCreateInfo *pCreateInfo)
-{
- vk_object_base_init(device, &image_view->base, VK_OBJECT_TYPE_IMAGE_VIEW);
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
- VK_FROM_HANDLE(vk_image, image, pCreateInfo->image);
-
- image_view->create_flags = pCreateInfo->flags;
- image_view->image = image;
- image_view->view_type = pCreateInfo->viewType;
-
- switch (image_view->view_type) {
- case VK_IMAGE_VIEW_TYPE_1D:
- case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
- assert(image->image_type == VK_IMAGE_TYPE_1D);
- break;
- case VK_IMAGE_VIEW_TYPE_2D:
- case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
- if (image->create_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)
- assert(image->image_type == VK_IMAGE_TYPE_3D);
- else
- assert(image->image_type == VK_IMAGE_TYPE_2D);
- break;
- case VK_IMAGE_VIEW_TYPE_3D:
- assert(image->image_type == VK_IMAGE_TYPE_3D);
- break;
- case VK_IMAGE_VIEW_TYPE_CUBE:
- case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
- assert(image->image_type == VK_IMAGE_TYPE_2D);
- assert(image->create_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT);
- break;
- default:
- unreachable("Invalid image view type");
- }
-
- const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
-
- /* Some drivers may want to create color views of depth/stencil images
- * to implement certain operations, which is not strictly allowed by the
- * Vulkan spec, so handle this case separately.
- */
- bool is_color_view_of_depth_stencil =
- vk_format_is_depth_or_stencil(image->format) &&
- vk_format_is_color(pCreateInfo->format);
- if (is_color_view_of_depth_stencil) {
- assert(range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
- assert(util_format_get_blocksize(vk_format_to_pipe_format(image->format)) ==
- util_format_get_blocksize(vk_format_to_pipe_format(pCreateInfo->format)));
- image_view->aspects = range->aspectMask;
- } else {
- image_view->aspects =
- vk_image_expand_aspect_mask(image, range->aspectMask);
-
- /* From the Vulkan 1.2.184 spec:
- *
- * "If the image has a multi-planar format and
- * subresourceRange.aspectMask is VK_IMAGE_ASPECT_COLOR_BIT, and image
- * has been created with a usage value not containing any of the
- * VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR,
- * VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR,
- * VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR,
- * VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR,
- * VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR, and
- * VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR flags, then the format must
- * be identical to the image format, and the sampler to be used with the
- * image view must enable sampler Y′CBCR conversion."
- *
- * Since no one implements video yet, we can ignore the bits about video
- * create flags and assume YCbCr formats match.
- */
- if ((image->aspects & VK_IMAGE_ASPECT_PLANE_1_BIT) &&
- (range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT))
- assert(pCreateInfo->format == image->format);
-
- /* From the Vulkan 1.2.184 spec:
- *
- * "Each depth/stencil format is only compatible with itself."
- */
- if (image_view->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT))
- assert(pCreateInfo->format == image->format);
-
- if (!(image->create_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT))
- assert(pCreateInfo->format == image->format);
- }
-
- /* Restrict the format to only the planes chosen.
- *
- * For combined depth and stencil images, this means the depth-only or
- * stencil-only format if only one aspect is chosen and the full combined
- * format if both aspects are chosen.
- *
- * For single-plane color images, we just take the format as-is. For
- * multi-plane views of multi-plane images, this means we want the full
- * multi-plane format. For single-plane views of multi-plane images, we
- * want a format compatible with the one plane. Fortunately, this is
- * already what the client gives us. The Vulkan 1.2.184 spec says:
- *
- * "If image was created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and
- * the image has a multi-planar format, and if
- * subresourceRange.aspectMask is VK_IMAGE_ASPECT_PLANE_0_BIT,
- * VK_IMAGE_ASPECT_PLANE_1_BIT, or VK_IMAGE_ASPECT_PLANE_2_BIT, format
- * must be compatible with the corresponding plane of the image, and the
- * sampler to be used with the image view must not enable sampler Y′CBCR
- * conversion."
- */
- if (image_view->aspects == VK_IMAGE_ASPECT_STENCIL_BIT) {
- image_view->format = vk_format_stencil_only(pCreateInfo->format);
- } else if (image_view->aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
- image_view->format = vk_format_depth_only(pCreateInfo->format);
- } else {
- image_view->format = pCreateInfo->format;
- }
-
- image_view->swizzle = (VkComponentMapping) {
- .r = remap_swizzle(pCreateInfo->components.r, VK_COMPONENT_SWIZZLE_R),
- .g = remap_swizzle(pCreateInfo->components.g, VK_COMPONENT_SWIZZLE_G),
- .b = remap_swizzle(pCreateInfo->components.b, VK_COMPONENT_SWIZZLE_B),
- .a = remap_swizzle(pCreateInfo->components.a, VK_COMPONENT_SWIZZLE_A),
- };
-
- assert(range->layerCount > 0);
- assert(range->baseMipLevel < image->mip_levels);
-
- image_view->base_mip_level = range->baseMipLevel;
- image_view->level_count = vk_image_subresource_level_count(image, range);
- image_view->base_array_layer = range->baseArrayLayer;
- image_view->layer_count = vk_image_subresource_layer_count(image, range);
-
- image_view->extent =
- vk_image_mip_level_extent(image, image_view->base_mip_level);
-
- assert(image_view->base_mip_level + image_view->level_count
- <= image->mip_levels);
- switch (image->image_type) {
- default:
- unreachable("bad VkImageType");
- case VK_IMAGE_TYPE_1D:
- case VK_IMAGE_TYPE_2D:
- assert(image_view->base_array_layer + image_view->layer_count
- <= image->array_layers);
- break;
- case VK_IMAGE_TYPE_3D:
- assert(image_view->base_array_layer + image_view->layer_count
- <= image_view->extent.depth);
- break;
- }
-
- /* If we are creating a color view from a depth/stencil image we compute
- * usage from the underlying depth/stencil aspects.
- */
- const VkImageUsageFlags image_usage = is_color_view_of_depth_stencil ?
- vk_image_usage(image, image->aspects) :
- vk_image_usage(image, image_view->aspects);
- const VkImageViewUsageCreateInfo *usage_info =
- vk_find_struct_const(pCreateInfo, IMAGE_VIEW_USAGE_CREATE_INFO);
- image_view->usage = usage_info ? usage_info->usage : image_usage;
- assert(!(image_view->usage & ~image_usage));
-}
-
-void
-vk_image_view_finish(struct vk_image_view *image_view)
-{
- vk_object_base_finish(&image_view->base);
-}
-
-void *
-vk_image_view_create(struct vk_device *device,
- const VkImageViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc,
- size_t size)
-{
- struct vk_image_view *image_view =
- vk_zalloc2(&device->alloc, alloc, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (image_view == NULL)
- return NULL;
-
- vk_image_view_init(device, image_view, pCreateInfo);
-
- return image_view;
-}
-
-void
-vk_image_view_destroy(struct vk_device *device,
- const VkAllocationCallbacks *alloc,
- struct vk_image_view *image_view)
-{
- vk_object_free(device, alloc, image_view);
-}
-
-bool
-vk_image_layout_is_read_only(VkImageLayout layout,
- VkImageAspectFlagBits aspect)
-{
- assert(util_bitcount(aspect) == 1);
-
- switch (layout) {
- case VK_IMAGE_LAYOUT_UNDEFINED:
- case VK_IMAGE_LAYOUT_PREINITIALIZED:
- return true; /* These are only used for layout transitions */
-
- case VK_IMAGE_LAYOUT_GENERAL:
- case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
- case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
- case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
- case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
- case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
- return false;
-
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
- case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
- case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
- case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
- case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
- case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
- case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
- case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
- return true;
-
- case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
- return aspect == VK_IMAGE_ASPECT_DEPTH_BIT;
-
- case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
- return aspect == VK_IMAGE_ASPECT_STENCIL_BIT;
-
- case VK_IMAGE_LAYOUT_MAX_ENUM:
- case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR:
- case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
- unreachable("Invalid image layout.");
- }
-
- unreachable("Invalid image layout.");
-}
-
-VkImageUsageFlags
-vk_image_layout_to_usage_flags(VkImageLayout layout,
- VkImageAspectFlagBits aspect)
-{
- assert(util_bitcount(aspect) == 1);
-
- switch (layout) {
- case VK_IMAGE_LAYOUT_UNDEFINED:
- case VK_IMAGE_LAYOUT_PREINITIALIZED:
- return 0u;
-
- case VK_IMAGE_LAYOUT_GENERAL:
- return ~0u;
-
- case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
- assert(aspect & VK_IMAGE_ASPECT_ANY_COLOR_MASK_MESA);
- return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
- assert(aspect & (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT));
- return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
-
- case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
- assert(aspect & VK_IMAGE_ASPECT_DEPTH_BIT);
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
-
- case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
- assert(aspect & VK_IMAGE_ASPECT_STENCIL_BIT);
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
-
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
- assert(aspect & (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT));
- return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
-
- case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
- assert(aspect & VK_IMAGE_ASPECT_DEPTH_BIT);
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
-
- case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
- assert(aspect & VK_IMAGE_ASPECT_STENCIL_BIT);
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
-
- case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
- return VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
-
- case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
- return VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
-
- case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
- return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
- case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
- if (aspect == VK_IMAGE_ASPECT_DEPTH_BIT) {
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
- } else if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
- } else {
- assert(!"Must be a depth/stencil aspect");
- return 0;
- }
-
- case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
- if (aspect == VK_IMAGE_ASPECT_DEPTH_BIT) {
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, aspect);
- } else if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
- return vk_image_layout_to_usage_flags(
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, aspect);
- } else {
- assert(!"Must be a depth/stencil aspect");
- return 0;
- }
-
- case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
- assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
- /* This needs to be handled specially by the caller */
- return 0;
-
- case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
- assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
- return vk_image_layout_to_usage_flags(VK_IMAGE_LAYOUT_GENERAL, aspect);
-
- case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
- assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
- return VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV;
-
- case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
- assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
- return VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT;
-
- case VK_IMAGE_LAYOUT_MAX_ENUM:
- case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR:
- case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR:
- unreachable("Invalid image layout.");
- }
-
- unreachable("Invalid image layout.");
-}
diff --git a/src/vulkan/util/vk_instance.c b/src/vulkan/util/vk_instance.c
deleted file mode 100644
index 5787be170df..00000000000
--- a/src/vulkan/util/vk_instance.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright © 2021 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "vk_instance.h"
-
-#include "vk_alloc.h"
-#include "vk_common_entrypoints.h"
-#include "vk_util.h"
-
-#include "compiler/glsl_types.h"
-
-VkResult
-vk_instance_init(struct vk_instance *instance,
- const struct vk_instance_extension_table *supported_extensions,
- const struct vk_instance_dispatch_table *dispatch_table,
- const VkInstanceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc)
-{
- memset(instance, 0, sizeof(*instance));
- vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
- instance->alloc = *alloc;
-
- instance->app_info = (struct vk_app_info) { .api_version = 0 };
- if (pCreateInfo->pApplicationInfo) {
- const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;
-
- instance->app_info.app_name =
- vk_strdup(&instance->alloc, app->pApplicationName,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- instance->app_info.app_version = app->applicationVersion;
-
- instance->app_info.engine_name =
- vk_strdup(&instance->alloc, app->pEngineName,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- instance->app_info.engine_version = app->engineVersion;
-
- instance->app_info.api_version = app->apiVersion;
- }
-
- if (instance->app_info.api_version == 0)
- instance->app_info.api_version = VK_API_VERSION_1_0;
-
- for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- int idx;
- for (idx = 0; idx < VK_INSTANCE_EXTENSION_COUNT; idx++) {
- if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- vk_instance_extensions[idx].extensionName) == 0)
- break;
- }
-
- if (idx >= VK_INSTANCE_EXTENSION_COUNT)
- return VK_ERROR_EXTENSION_NOT_PRESENT;
-
- if (!supported_extensions->extensions[idx])
- return VK_ERROR_EXTENSION_NOT_PRESENT;
-
-#ifdef ANDROID
- if (!vk_android_allowed_instance_extensions.extensions[idx])
- return VK_ERROR_EXTENSION_NOT_PRESENT;
-#endif
-
- instance->enabled_extensions.extensions[idx] = true;
- }
-
- instance->dispatch_table = *dispatch_table;
-
- /* Add common entrypoints without overwriting driver-provided ones. */
- vk_instance_dispatch_table_from_entrypoints(
- &instance->dispatch_table, &vk_common_instance_entrypoints, false);
-
- if (mtx_init(&instance->debug_report.callbacks_mutex, mtx_plain) != 0)
- return VK_ERROR_INITIALIZATION_FAILED;
-
- list_inithead(&instance->debug_report.callbacks);
-
- glsl_type_singleton_init_or_ref();
-
- return VK_SUCCESS;
-}
-
-void
-vk_instance_finish(struct vk_instance *instance)
-{
- glsl_type_singleton_decref();
- mtx_destroy(&instance->debug_report.callbacks_mutex);
- vk_free(&instance->alloc, (char *)instance->app_info.app_name);
- vk_free(&instance->alloc, (char *)instance->app_info.engine_name);
- vk_object_base_finish(&instance->base);
-}
-
-VkResult
-vk_enumerate_instance_extension_properties(
- const struct vk_instance_extension_table *supported_extensions,
- uint32_t *pPropertyCount,
- VkExtensionProperties *pProperties)
-{
- VK_OUTARRAY_MAKE_TYPED(VkExtensionProperties, out, pProperties, pPropertyCount);
-
- for (int i = 0; i < VK_INSTANCE_EXTENSION_COUNT; i++) {
- if (!supported_extensions->extensions[i])
- continue;
-
-#ifdef ANDROID
- if (!vk_android_allowed_instance_extensions.extensions[i])
- continue;
-#endif
-
- vk_outarray_append_typed(VkExtensionProperties, &out, prop) {
- *prop = vk_instance_extensions[i];
- }
- }
-
- return vk_outarray_status(&out);
-}
-
-PFN_vkVoidFunction
-vk_instance_get_proc_addr(const struct vk_instance *instance,
- const struct vk_instance_entrypoint_table *entrypoints,
- const char *name)
-{
- PFN_vkVoidFunction func;
-
- /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
- * when we have to return valid function pointers, NULL, or it's left
- * undefined. See the table for exact details.
- */
- if (name == NULL)
- return NULL;
-
-#define LOOKUP_VK_ENTRYPOINT(entrypoint) \
- if (strcmp(name, "vk" #entrypoint) == 0) \
- return (PFN_vkVoidFunction)entrypoints->entrypoint
-
- LOOKUP_VK_ENTRYPOINT(EnumerateInstanceExtensionProperties);
- LOOKUP_VK_ENTRYPOINT(EnumerateInstanceLayerProperties);
- LOOKUP_VK_ENTRYPOINT(EnumerateInstanceVersion);
- LOOKUP_VK_ENTRYPOINT(CreateInstance);
-
- /* GetInstanceProcAddr() can also be called with a NULL instance.
- * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
- */
- LOOKUP_VK_ENTRYPOINT(GetInstanceProcAddr);
-
-#undef LOOKUP_VK_ENTRYPOINT
-
- if (instance == NULL)
- return NULL;
-
- func = vk_instance_dispatch_table_get_if_supported(&instance->dispatch_table,
- name,
- instance->app_info.api_version,
- &instance->enabled_extensions);
- if (func != NULL)
- return func;
-
- func = vk_physical_device_dispatch_table_get_if_supported(&vk_physical_device_trampolines,
- name,
- instance->app_info.api_version,
- &instance->enabled_extensions);
- if (func != NULL)
- return func;
-
- func = vk_device_dispatch_table_get_if_supported(&vk_device_trampolines,
- name,
- instance->app_info.api_version,
- &instance->enabled_extensions,
- NULL);
- if (func != NULL)
- return func;
-
- return NULL;
-}
-
-PFN_vkVoidFunction
-vk_instance_get_proc_addr_unchecked(const struct vk_instance *instance,
- const char *name)
-{
- PFN_vkVoidFunction func;
-
- if (instance == NULL || name == NULL)
- return NULL;
-
- func = vk_instance_dispatch_table_get(&instance->dispatch_table, name);
- if (func != NULL)
- return func;
-
- func = vk_physical_device_dispatch_table_get(
- &vk_physical_device_trampolines, name);
- if (func != NULL)
- return func;
-
- func = vk_device_dispatch_table_get(&vk_device_trampolines, name);
- if (func != NULL)
- return func;
-
- return NULL;
-}
-
-PFN_vkVoidFunction
-vk_instance_get_physical_device_proc_addr(const struct vk_instance *instance,
- const char *name)
-{
- if (instance == NULL || name == NULL)
- return NULL;
-
- return vk_physical_device_dispatch_table_get_if_supported(&vk_physical_device_trampolines,
- name,
- instance->app_info.api_version,
- &instance->enabled_extensions);
-}
diff --git a/src/vulkan/util/vk_instance.h b/src/vulkan/util/vk_instance.h
deleted file mode 100644
index 5f195ca0d8e..00000000000
--- a/src/vulkan/util/vk_instance.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright © 2021 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-#ifndef VK_INSTANCE_H
-#define VK_INSTANCE_H
-
-#include "vk_dispatch_table.h"
-#include "vk_extensions.h"
-#include "vk_object.h"
-
-#include "c11/threads.h"
-#include "util/list.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct vk_app_info {
- const char* app_name;
- uint32_t app_version;
- const char* engine_name;
- uint32_t engine_version;
- uint32_t api_version;
-};
-
-struct vk_instance {
- struct vk_object_base base;
- VkAllocationCallbacks alloc;
-
- struct vk_app_info app_info;
- struct vk_instance_extension_table enabled_extensions;
-
- struct vk_instance_dispatch_table dispatch_table;
-
- /* VK_EXT_debug_report debug callbacks */
- struct {
- mtx_t callbacks_mutex;
- struct list_head callbacks;
- } debug_report;
-};
-
-VK_DEFINE_HANDLE_CASTS(vk_instance, base, VkInstance,
- VK_OBJECT_TYPE_INSTANCE)
-
-VkResult MUST_CHECK
-vk_instance_init(struct vk_instance *instance,
- const struct vk_instance_extension_table *supported_extensions,
- const struct vk_instance_dispatch_table *dispatch_table,
- const VkInstanceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc);
-
-void
-vk_instance_finish(struct vk_instance *instance);
-
-VkResult
-vk_enumerate_instance_extension_properties(
- const struct vk_instance_extension_table *supported_extensions,
- uint32_t *pPropertyCount,
- VkExtensionProperties *pProperties);
-
-PFN_vkVoidFunction
-vk_instance_get_proc_addr(const struct vk_instance *instance,
- const struct vk_instance_entrypoint_table *entrypoints,
- const char *name);
-
-PFN_vkVoidFunction
-vk_instance_get_proc_addr_unchecked(const struct vk_instance *instance,
- const char *name);
-
-PFN_vkVoidFunction
-vk_instance_get_physical_device_proc_addr(const struct vk_instance *instance,
- const char *name);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* VK_INSTANCE_H */
diff --git a/src/vulkan/util/vk_physical_device_features_gen.py b/src/vulkan/util/vk_physical_device_features_gen.py
new file mode 100644
index 00000000000..27673ca3c0c
--- /dev/null
+++ b/src/vulkan/util/vk_physical_device_features_gen.py
@@ -0,0 +1,473 @@
+COPYRIGHT=u"""
+/* Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+from collections import OrderedDict
+from dataclasses import dataclass
+import os
+import sys
+import typing
+import xml.etree.ElementTree as et
+
+import mako
+from mako.template import Template
+from vk_extensions import get_all_required, filter_api
+
+def str_removeprefix(s, prefix):
+ if s.startswith(prefix):
+ return s[len(prefix):]
+ return s
+
+RENAMED_FEATURES = {
+ # See https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17272#note_1446477 for details
+ ('BufferDeviceAddressFeaturesEXT', 'bufferDeviceAddressCaptureReplay'): 'bufferDeviceAddressCaptureReplayEXT',
+
+ ('MeshShaderFeaturesNV', 'taskShader'): 'taskShaderNV',
+ ('MeshShaderFeaturesNV', 'meshShader'): 'meshShaderNV',
+
+ ('CooperativeMatrixFeaturesNV', 'cooperativeMatrix'): 'cooperativeMatrixNV',
+ ('CooperativeMatrixFeaturesNV', 'cooperativeMatrixRobustBufferAccess'): 'cooperativeMatrixRobustBufferAccessNV',
+}
+
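+# Each group lists feature structs (a promoted core struct plus the extension
+# struct(s) it absorbed) that expose the same flags; the flags collapse to a
+# single member of struct vk_features.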
+KNOWN_ALIASES = [
+ (['Vulkan11Features', '16BitStorageFeatures'], ['storageBuffer16BitAccess', 'uniformAndStorageBuffer16BitAccess', 'storagePushConstant16', 'storageInputOutput16']),
+ (['Vulkan11Features', 'MultiviewFeatures'], ['multiview', 'multiviewGeometryShader', 'multiviewTessellationShader']),
+ (['Vulkan11Features', 'VariablePointersFeatures'], ['variablePointersStorageBuffer', 'variablePointers']),
+ (['Vulkan11Features', 'ProtectedMemoryFeatures'], ['protectedMemory']),
+ (['Vulkan11Features', 'SamplerYcbcrConversionFeatures'], ['samplerYcbcrConversion']),
+ (['Vulkan11Features', 'ShaderDrawParametersFeatures'], ['shaderDrawParameters']),
+
+ (['Vulkan12Features', '8BitStorageFeatures'], ['storageBuffer8BitAccess', 'uniformAndStorageBuffer8BitAccess', 'storagePushConstant8']),
+ (['Vulkan12Features', 'ShaderAtomicInt64Features'], ['shaderBufferInt64Atomics', 'shaderSharedInt64Atomics']),
+ (['Vulkan12Features', 'ShaderFloat16Int8Features'], ['shaderFloat16', 'shaderInt8']),
+ (
+ ['Vulkan12Features', 'DescriptorIndexingFeatures'],
+ [
+ 'shaderInputAttachmentArrayDynamicIndexing',
+ 'shaderUniformTexelBufferArrayDynamicIndexing',
+ 'shaderStorageTexelBufferArrayDynamicIndexing',
+ 'shaderUniformBufferArrayNonUniformIndexing',
+ 'shaderSampledImageArrayNonUniformIndexing',
+ 'shaderStorageBufferArrayNonUniformIndexing',
+ 'shaderStorageImageArrayNonUniformIndexing',
+ 'shaderInputAttachmentArrayNonUniformIndexing',
+ 'shaderUniformTexelBufferArrayNonUniformIndexing',
+ 'shaderStorageTexelBufferArrayNonUniformIndexing',
+ 'descriptorBindingUniformBufferUpdateAfterBind',
+ 'descriptorBindingSampledImageUpdateAfterBind',
+ 'descriptorBindingStorageImageUpdateAfterBind',
+ 'descriptorBindingStorageBufferUpdateAfterBind',
+ 'descriptorBindingUniformTexelBufferUpdateAfterBind',
+ 'descriptorBindingStorageTexelBufferUpdateAfterBind',
+ 'descriptorBindingUpdateUnusedWhilePending',
+ 'descriptorBindingPartiallyBound',
+ 'descriptorBindingVariableDescriptorCount',
+ 'runtimeDescriptorArray',
+ ],
+ ),
+ (['Vulkan12Features', 'ScalarBlockLayoutFeatures'], ['scalarBlockLayout']),
+ (['Vulkan12Features', 'ImagelessFramebufferFeatures'], ['imagelessFramebuffer']),
+ (['Vulkan12Features', 'UniformBufferStandardLayoutFeatures'], ['uniformBufferStandardLayout']),
+ (['Vulkan12Features', 'ShaderSubgroupExtendedTypesFeatures'], ['shaderSubgroupExtendedTypes']),
+ (['Vulkan12Features', 'SeparateDepthStencilLayoutsFeatures'], ['separateDepthStencilLayouts']),
+ (['Vulkan12Features', 'HostQueryResetFeatures'], ['hostQueryReset']),
+ (['Vulkan12Features', 'TimelineSemaphoreFeatures'], ['timelineSemaphore']),
+ (['Vulkan12Features', 'BufferDeviceAddressFeatures', 'BufferDeviceAddressFeaturesEXT'], ['bufferDeviceAddress', 'bufferDeviceAddressMultiDevice']),
+ (['Vulkan12Features', 'BufferDeviceAddressFeatures'], ['bufferDeviceAddressCaptureReplay']),
+ (['Vulkan12Features', 'VulkanMemoryModelFeatures'], ['vulkanMemoryModel', 'vulkanMemoryModelDeviceScope', 'vulkanMemoryModelAvailabilityVisibilityChains']),
+
+ (['Vulkan13Features', 'ImageRobustnessFeatures'], ['robustImageAccess']),
+ (['Vulkan13Features', 'InlineUniformBlockFeatures'], ['inlineUniformBlock', 'descriptorBindingInlineUniformBlockUpdateAfterBind']),
+ (['Vulkan13Features', 'PipelineCreationCacheControlFeatures'], ['pipelineCreationCacheControl']),
+ (['Vulkan13Features', 'PrivateDataFeatures'], ['privateData']),
+ (['Vulkan13Features', 'ShaderDemoteToHelperInvocationFeatures'], ['shaderDemoteToHelperInvocation']),
+ (['Vulkan13Features', 'ShaderTerminateInvocationFeatures'], ['shaderTerminateInvocation']),
+ (['Vulkan13Features', 'SubgroupSizeControlFeatures'], ['subgroupSizeControl', 'computeFullSubgroups']),
+ (['Vulkan13Features', 'Synchronization2Features'], ['synchronization2']),
+ (['Vulkan13Features', 'TextureCompressionASTCHDRFeatures'], ['textureCompressionASTC_HDR']),
+ (['Vulkan13Features', 'ZeroInitializeWorkgroupMemoryFeatures'], ['shaderZeroInitializeWorkgroupMemory']),
+ (['Vulkan13Features', 'DynamicRenderingFeatures'], ['dynamicRendering']),
+ (['Vulkan13Features', 'ShaderIntegerDotProductFeatures'], ['shaderIntegerDotProduct']),
+ (['Vulkan13Features', 'Maintenance4Features'], ['maintenance4']),
+]
+
+for (feature_structs, features) in KNOWN_ALIASES:
+ for flag in features:
+ for f in feature_structs:
+ rename = (f, flag)
+ assert rename not in RENAMED_FEATURES, f"{rename} already exists in RENAMED_FEATURES"
+ RENAMED_FEATURES[rename] = flag
+
+def get_renamed_feature(c_type, feature):
+ return RENAMED_FEATURES.get((str_removeprefix(c_type, 'VkPhysicalDevice'), feature), feature)
+
+@dataclass
+class FeatureStruct:
+ c_type: str
+ s_type: str
+ features: typing.List[str]
+
+TEMPLATE_H = Template(COPYRIGHT + """
+/* This file is generated from ${filename}, don't edit directly. */
+#ifndef VK_FEATURES_H
+#define VK_FEATURES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_features {
+% for flag in all_flags:
+ bool ${flag};
+% endfor
+};
+
+void
+vk_set_physical_device_features(struct vk_features *all_features,
+ const VkPhysicalDeviceFeatures2 *pFeatures);
+
+void
+vk_set_physical_device_features_1_0(struct vk_features *all_features,
+ const VkPhysicalDeviceFeatures *pFeatures);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+""")
+
+TEMPLATE_C = Template(COPYRIGHT + """
+/* This file is generated from ${filename}, don't edit directly. */
+
+#include "vk_common_entrypoints.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_physical_device_features.h"
+#include "vk_util.h"
+
+static VkResult
+check_physical_device_features(struct vk_physical_device *physical_device,
+ const VkPhysicalDeviceFeatures *supported,
+ const VkPhysicalDeviceFeatures *enabled,
+ const char *struct_name)
+{
+% for flag in pdev_features:
+ if (enabled->${flag} && !supported->${flag})
+ return vk_errorf(physical_device, VK_ERROR_FEATURE_NOT_PRESENT,
+ "%s.%s not supported", struct_name, "${flag}");
+% endfor
+
+ return VK_SUCCESS;
+}
+
+VkResult
+vk_physical_device_check_device_features(struct vk_physical_device *physical_device,
+ const VkDeviceCreateInfo *pCreateInfo)
+{
+ VkPhysicalDevice vk_physical_device =
+ vk_physical_device_to_handle(physical_device);
+
+ /* Query the device what kind of features are supported. */
+ VkPhysicalDeviceFeatures2 supported_features2 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+ };
+
+% for f in feature_structs:
+ ${f.c_type} supported_${f.c_type} = { .pNext = NULL };
+% endfor
+
+ vk_foreach_struct_const(features, pCreateInfo->pNext) {
+ VkBaseOutStructure *supported = NULL;
+ switch (features->sType) {
+% for f in feature_structs:
+ case ${f.s_type}:
+ supported = (VkBaseOutStructure *) &supported_${f.c_type};
+ break;
+% endfor
+ default:
+ break;
+ }
+
+ /* Not a feature struct. */
+ if (!supported)
+ continue;
+
+ /* Check for cycles in the list */
+ if (supported->pNext != NULL || supported->sType != 0)
+ return VK_ERROR_UNKNOWN;
+
+ supported->sType = features->sType;
+ __vk_append_struct(&supported_features2, supported);
+ }
+
+ physical_device->dispatch_table.GetPhysicalDeviceFeatures2(
+ vk_physical_device, &supported_features2);
+
+ if (pCreateInfo->pEnabledFeatures) {
+ VkResult result =
+ check_physical_device_features(physical_device,
+ &supported_features2.features,
+ pCreateInfo->pEnabledFeatures,
+ "VkPhysicalDeviceFeatures");
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ /* Iterate through additional feature structs */
+ vk_foreach_struct_const(features, pCreateInfo->pNext) {
+ /* Check each feature boolean for given structure. */
+ switch (features->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
+ const VkPhysicalDeviceFeatures2 *features2 = (const void *)features;
+ VkResult result =
+ check_physical_device_features(physical_device,
+ &supported_features2.features,
+ &features2->features,
+ "VkPhysicalDeviceFeatures2.features");
+ if (result != VK_SUCCESS)
+ return result;
+ break;
+ }
+% for f in feature_structs:
+ case ${f.s_type}: {
+ const ${f.c_type} *a = &supported_${f.c_type};
+ const ${f.c_type} *b = (const void *) features;
+% for flag in f.features:
+ if (b->${flag} && !a->${flag})
+ return vk_errorf(physical_device, VK_ERROR_FEATURE_NOT_PRESENT,
+ "%s.%s not supported", "${f.c_type}", "${flag}");
+% endfor
+ break;
+ }
+% endfor
+ default:
+ break;
+ }
+ } // for each extension structure
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures2 *pFeatures)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+
+% for flag in pdev_features:
+ pFeatures->features.${flag} = pdevice->supported_features.${flag};
+% endfor
+
+ vk_foreach_struct(ext, pFeatures) {
+ switch (ext->sType) {
+% for f in feature_structs:
+ case ${f.s_type}: {
+ ${f.c_type} *features = (void *) ext;
+% for flag in f.features:
+ features->${flag} = pdevice->supported_features.${get_renamed_feature(f.c_type, flag)};
+% endfor
+ break;
+ }
+
+% endfor
+ default:
+ break;
+ }
+ }
+}
+
+void
+vk_set_physical_device_features(struct vk_features *all_features,
+ const VkPhysicalDeviceFeatures2 *pFeatures)
+{
+ vk_foreach_struct_const(ext, pFeatures) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
+ const VkPhysicalDeviceFeatures2 *features = (const void *) ext;
+ vk_set_physical_device_features_1_0(all_features, &features->features);
+ break;
+ }
+
+% for f in feature_structs:
+ case ${f.s_type}: {
+ const ${f.c_type} *features = (const void *) ext;
+% for flag in f.features:
+ if (features->${flag})
+ all_features->${get_renamed_feature(f.c_type, flag)} = true;
+% endfor
+ break;
+ }
+
+% endfor
+ default:
+ break;
+ }
+ }
+}
+
+void
+vk_set_physical_device_features_1_0(struct vk_features *all_features,
+ const VkPhysicalDeviceFeatures *pFeatures)
+{
+% for flag in pdev_features:
+ if (pFeatures->${flag})
+ all_features->${flag} = true;
+% endfor
+}
+""")
+
+def get_pdev_features(doc):
+ _type = doc.find(".types/type[@name='VkPhysicalDeviceFeatures']")
+ if _type is not None:
+ flags = []
+ for p in _type.findall('./member'):
+ assert p.find('./type').text == 'VkBool32'
+ flags.append(p.find('./name').text)
+ return flags
+ return None
+
+def filter_api(elem, api):
+ if 'api' not in elem.attrib:
+ return True
+
+ return api in elem.attrib['api'].split(',')
+
+def get_feature_structs(doc, api, beta):
+ feature_structs = OrderedDict()
+
+ required = get_all_required(doc, 'type', api, beta)
+
+ # parse all struct types whose structextends is 'VkPhysicalDeviceFeatures2,VkDeviceCreateInfo'
+ for _type in doc.findall('./types/type[@category="struct"]'):
+ if _type.attrib.get('structextends') != 'VkPhysicalDeviceFeatures2,VkDeviceCreateInfo':
+ continue
+ if _type.attrib['name'] not in required:
+ continue
+
+ # Skip extensions with a define for now
+ guard = required[_type.attrib['name']].guard
+ if guard is not None and (guard != "VK_ENABLE_BETA_EXTENSIONS" or beta != "true"):
+ continue
+
+ # find Vulkan structure type
+ for elem in _type:
+ if "STRUCTURE_TYPE" in str(elem.attrib):
+ s_type = elem.attrib.get('values')
+
+ # collect a list of feature flags
+ flags = []
+
+ for p in _type.findall('./member'):
+ if not filter_api(p, api):
+ continue
+
+ m_name = p.find('./name').text
+ if m_name == 'pNext':
+ pass
+ elif m_name == 'sType':
+ s_type = p.attrib.get('values')
+ else:
+ assert p.find('./type').text == 'VkBool32'
+ flags.append(m_name)
+
+ feature_struct = FeatureStruct(c_type=_type.attrib.get('name'), s_type=s_type, features=flags)
+ feature_structs[feature_struct.c_type] = feature_struct
+
+ return feature_structs.values()
+
+def get_feature_structs_from_xml(xml_files, beta, api='vulkan'):
+ diagnostics = []
+
+ pdev_features = None
+ feature_structs = []
+
+ for filename in xml_files:
+ doc = et.parse(filename)
+ feature_structs += get_feature_structs(doc, api, beta)
+ if not pdev_features:
+ pdev_features = get_pdev_features(doc)
+
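+ # Copy RENAMED_FEATURES and pop entries as they are consumed so any unused
+ # renames are reported below.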
+ unused_renames = {**RENAMED_FEATURES}
+
+ features = OrderedDict()
+
+ for flag in pdev_features:
+ features[flag] = 'VkPhysicalDeviceFeatures'
+
+ for f in feature_structs:
+ for flag in f.features:
+ renamed_flag = get_renamed_feature(f.c_type, flag)
+ if renamed_flag not in features:
+ features[renamed_flag] = f.c_type
+ else:
+ a = str_removeprefix(features[renamed_flag], 'VkPhysicalDevice')
+ b = str_removeprefix(f.c_type, 'VkPhysicalDevice')
+ if (a, flag) not in RENAMED_FEATURES or (b, flag) not in RENAMED_FEATURES:
+ diagnostics.append(f'{a} and {b} both define {flag}')
+
+ unused_renames.pop((str_removeprefix(f.c_type, 'VkPhysicalDevice'), flag), None)
+
+ for rename in unused_renames:
+ diagnostics.append(f'unused rename {rename}')
+
+ assert len(diagnostics) == 0, '\n'.join(diagnostics)
+
+ return pdev_features, feature_structs, features
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--out-c', required=True, help='Output C file.')
+ parser.add_argument('--out-h', required=True, help='Output H file.')
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
+ parser.add_argument('--xml',
+ help='Vulkan API XML file.',
+ required=True, action='append', dest='xml_files')
+ args = parser.parse_args()
+
+ pdev_features, feature_structs, all_flags = get_feature_structs_from_xml(args.xml_files, args.beta)
+
+ environment = {
+ 'filename': os.path.basename(__file__),
+ 'pdev_features': pdev_features,
+ 'feature_structs': feature_structs,
+ 'all_flags': all_flags,
+ 'get_renamed_feature': get_renamed_feature,
+ }
+
+ try:
+ with open(args.out_c, 'w', encoding='utf-8') as f:
+ f.write(TEMPLATE_C.render(**environment))
+ with open(args.out_h, 'w', encoding='utf-8') as f:
+ f.write(TEMPLATE_H.render(**environment))
+ except Exception:
+ # If the template fails to render, use mako's helpers to print a
+ # readable stack trace that points at the template source, then exit
+ # with status 1 instead of letting the bare exception propagate.
+ print(mako.exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
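
The generator above flattens every VkBool32 member of VkPhysicalDeviceFeatures and of each feature extension struct into a single struct vk_features, using a rename table so that flags stay unique when two structs expose the same member name. A rough, self-contained sketch of that lookup follows; the shape of RENAMED_FEATURES and get_renamed_feature is assumed (they are defined earlier in the script), and the rename entry shown is hypothetical, not taken from the real table.

# Hypothetical rename table: (struct name without the VkPhysicalDevice
# prefix, member name) -> flattened name used in struct vk_features.
RENAMED_FEATURES = {
    ("WidgetFeaturesFOO", "robustAccess"): "widgetRobustAccess",  # hypothetical entry
}

def get_renamed_feature(c_type, flag):
    # Assumed shape of the helper the template calls above.
    key = (c_type[len("VkPhysicalDevice"):], flag)
    return RENAMED_FEATURES.get(key, flag)

# Renamed flags get a unique slot; everything else keeps its own name.
assert get_renamed_feature("VkPhysicalDeviceWidgetFeaturesFOO", "robustAccess") == "widgetRobustAccess"
assert get_renamed_feature("VkPhysicalDeviceVulkan12Features", "drawIndirectCount") == "drawIndirectCount"
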
diff --git a/src/vulkan/util/vk_physical_device_properties_gen.py b/src/vulkan/util/vk_physical_device_properties_gen.py
new file mode 100644
index 00000000000..b690aa210ea
--- /dev/null
+++ b/src/vulkan/util/vk_physical_device_properties_gen.py
@@ -0,0 +1,332 @@
+COPYRIGHT=u"""
+/* Copyright © 2021 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+from collections import OrderedDict
+from dataclasses import dataclass
+import os
+import sys
+import typing
+import xml.etree.ElementTree as et
+import re
+
+import mako
+from mako.template import Template
+
+from vk_extensions import get_all_required, filter_api
+
+def str_removeprefix(s, prefix):
+ if s.startswith(prefix):
+ return s[len(prefix):]
+ return s
+
+RENAMED_PROPERTIES = {
+ ("DrmPropertiesEXT", "hasPrimary"): "drmHasPrimary",
+ ("DrmPropertiesEXT", "primaryMajor"): "drmPrimaryMajor",
+ ("DrmPropertiesEXT", "primaryMinor"): "drmPrimaryMinor",
+ ("DrmPropertiesEXT", "hasRender"): "drmHasRender",
+ ("DrmPropertiesEXT", "renderMajor"): "drmRenderMajor",
+ ("DrmPropertiesEXT", "renderMinor"): "drmRenderMinor",
+ ("SparseProperties", "residencyStandard2DBlockShape"): "sparseResidencyStandard2DBlockShape",
+ ("SparseProperties", "residencyStandard2DMultisampleBlockShape"): "sparseResidencyStandard2DMultisampleBlockShape",
+ ("SparseProperties", "residencyStandard3DBlockShape"): "sparseResidencyStandard3DBlockShape",
+ ("SparseProperties", "residencyAlignedMipSize"): "sparseResidencyAlignedMipSize",
+ ("SparseProperties", "residencyNonResidentStrict"): "sparseResidencyNonResidentStrict",
+ ("SubgroupProperties", "supportedStages"): "subgroupSupportedStages",
+ ("SubgroupProperties", "supportedOperations"): "subgroupSupportedOperations",
+ ("SubgroupProperties", "quadOperationsInAllStages"): "subgroupQuadOperationsInAllStages",
+}
+
+SPECIALIZED_PROPERTY_STRUCTS = [
+ "HostImageCopyPropertiesEXT",
+]
+
+@dataclass
+class Property:
+ decl: str
+ name: str
+ actual_name: str
+ length: str
+
+ def __init__(self, p, property_struct_name):
+ self.decl = ""
+ for element in p:
+ if element.tag != "comment":
+ self.decl += "".join(element.itertext())
+ if element.tail:
+ self.decl += re.sub(" +", " ", element.tail)
+
+ self.name = p.find("./name").text
+ self.actual_name = RENAMED_PROPERTIES.get((property_struct_name, self.name), self.name)
+
+ length = p.attrib.get("len", "1")
+ self.length = RENAMED_PROPERTIES.get((property_struct_name, length), length)
+
+ self.decl = self.decl.replace(self.name, self.actual_name)
+
+@dataclass
+class PropertyStruct:
+ c_type: str
+ s_type: str
+ name: str
+ properties: typing.List[Property]
+
+def copy_property(dst, src, decl, length="1"):
+ assert "*" not in decl
+
+ if "[" in decl:
+ return "memcpy(%s, %s, sizeof(%s));" % (dst, src, dst)
+ else:
+ return "%s = %s;" % (dst, src)
+
+TEMPLATE_H = Template(COPYRIGHT + """
+/* This file is generated from ${filename}; don't edit directly. */
+#ifndef VK_PROPERTIES_H
+#define VK_PROPERTIES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct vk_properties {
+% for prop in all_properties:
+ ${prop.decl};
+% endfor
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+""")
+
+TEMPLATE_C = Template(COPYRIGHT + """
+/* This file is generated from ${filename}; don't edit directly. */
+
+#include "vk_common_entrypoints.h"
+#include "vk_log.h"
+#include "vk_physical_device.h"
+#include "vk_physical_device_properties.h"
+#include "vk_util.h"
+
+VKAPI_ATTR void VKAPI_CALL
+vk_common_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties2 *pProperties)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+
+% for prop in pdev_properties:
+ ${copy_property("pProperties->properties." + prop.name, "pdevice->properties." + prop.actual_name, prop.decl)}
+% endfor
+
+ vk_foreach_struct(ext, pProperties->pNext) {
+ switch (ext->sType) {
+% for property_struct in property_structs:
+% if property_struct.name not in SPECIALIZED_PROPERTY_STRUCTS:
+ case ${property_struct.s_type}: {
+ ${property_struct.c_type} *properties = (void *)ext;
+% for prop in property_struct.properties:
+ ${copy_property("properties->" + prop.name, "pdevice->properties." + prop.actual_name, prop.decl, "pdevice->properties." + prop.length)}
+% endfor
+ break;
+ }
+% endif
+% endfor
+
+ /* Specialized property handling defined in vk_physical_device_properties_gen.py */
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT: {
+ VkPhysicalDeviceHostImageCopyPropertiesEXT *properties = (void *)ext;
+
+ if (properties->pCopySrcLayouts) {
+ uint32_t written_layout_count = MIN2(properties->copySrcLayoutCount,
+ pdevice->properties.copySrcLayoutCount);
+ memcpy(properties->pCopySrcLayouts, pdevice->properties.pCopySrcLayouts,
+ sizeof(VkImageLayout) * written_layout_count);
+ properties->copySrcLayoutCount = written_layout_count;
+ } else {
+ properties->copySrcLayoutCount = pdevice->properties.copySrcLayoutCount;
+ }
+
+ if (properties->pCopyDstLayouts) {
+ uint32_t written_layout_count = MIN2(properties->copyDstLayoutCount,
+ pdevice->properties.copyDstLayoutCount);
+ memcpy(properties->pCopyDstLayouts, pdevice->properties.pCopyDstLayouts,
+ sizeof(VkImageLayout) * written_layout_count);
+ properties->copyDstLayoutCount = written_layout_count;
+ } else {
+ properties->copyDstLayoutCount = pdevice->properties.copyDstLayoutCount;
+ }
+
+ memcpy(properties->optimalTilingLayoutUUID, pdevice->properties.optimalTilingLayoutUUID, VK_UUID_SIZE);
+ properties->identicalMemoryTypeRequirements = pdevice->properties.identicalMemoryTypeRequirements;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+}
+""")
+
+def get_pdev_properties(doc, struct_name):
+ _type = doc.find(".types/type[@name=\"VkPhysicalDevice%s\"]" % struct_name)
+ if _type is not None:
+ properties = []
+ for p in _type.findall("./member"):
+ properties.append(Property(p, struct_name))
+ return properties
+ return None
+
+def filter_api(elem, api):
+ if "api" not in elem.attrib:
+ return True
+
+ return api in elem.attrib["api"].split(",")
+
+def get_property_structs(doc, api, beta):
+ property_structs = OrderedDict()
+
+ required = get_all_required(doc, "type", api, beta)
+
+ # parse all struct types where structextends VkPhysicalDeviceProperties2
+ for _type in doc.findall("./types/type[@category=\"struct\"]"):
+ if _type.attrib.get("structextends") != "VkPhysicalDeviceProperties2":
+ continue
+
+ full_name = _type.attrib["name"]
+ if full_name not in required:
+ continue
+
+ # Skip extensions with a define for now
+ guard = required[full_name].guard
+ if guard is not None and (guard != "VK_ENABLE_BETA_EXTENSIONS" or beta != "true"):
+ continue
+
+ # find Vulkan structure type
+ for elem in _type:
+ if "STRUCTURE_TYPE" in str(elem.attrib):
+ s_type = elem.attrib.get("values")
+
+ name = str_removeprefix(full_name, "VkPhysicalDevice")
+
+ # collect a list of properties
+ properties = []
+
+ for p in _type.findall("./member"):
+ if not filter_api(p, api):
+ continue
+
+ m_name = p.find("./name").text
+ if m_name == "pNext":
+ pass
+ elif m_name == "sType":
+ s_type = p.attrib.get("values")
+ else:
+ properties.append(Property(p, name))
+
+ property_struct = PropertyStruct(c_type=full_name, s_type=s_type, name=name, properties=properties)
+ property_structs[property_struct.c_type] = property_struct
+
+ return property_structs.values()
+
+def get_property_structs_from_xml(xml_files, beta, api="vulkan"):
+ diagnostics = []
+
+ pdev_properties = None
+ property_structs = []
+
+ for filename in xml_files:
+ doc = et.parse(filename)
+ property_structs += get_property_structs(doc, api, beta)
+ if not pdev_properties:
+ pdev_properties = get_pdev_properties(doc, "Properties")
+ pdev_properties = [prop for prop in pdev_properties if prop.name != "limits" and prop.name != "sparseProperties"]
+
+ limits = get_pdev_properties(doc, "Limits")
+ for limit in limits:
+ limit.name = "limits." + limit.name
+ pdev_properties += limits
+
+ sparse_properties = get_pdev_properties(doc, "SparseProperties")
+ for prop in sparse_properties:
+ prop.name = "sparseProperties." + prop.name
+ pdev_properties += sparse_properties
+
+ # Gather all properties, make sure that aliased declarations match up.
+ property_names = OrderedDict()
+ all_properties = []
+ for prop in pdev_properties:
+ property_names[prop.actual_name] = prop
+ all_properties.append(prop)
+
+ for property_struct in property_structs:
+ for prop in property_struct.properties:
+ if prop.actual_name not in property_names:
+ property_names[prop.actual_name] = prop
+ all_properties.append(prop)
+ elif prop.decl != property_names[prop.actual_name].decl:
+ diagnostics.append("Declaration mismatch ('%s' vs. '%s')" % (prop.decl, property_names[prop.actual_name].decl))
+
+ assert len(diagnostics) == 0, "\n".join(diagnostics)
+
+ return pdev_properties, property_structs, all_properties
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--out-c", required=True, help="Output C file.")
+ parser.add_argument("--out-h", required=True, help="Output H file.")
+ parser.add_argument("--beta", required=True, help="Enable beta extensions.")
+ parser.add_argument("--xml",
+ help="Vulkan API XML file.",
+ required=True, action="append", dest="xml_files")
+ args = parser.parse_args()
+
+ pdev_properties, property_structs, all_properties = get_property_structs_from_xml(args.xml_files, args.beta)
+
+ environment = {
+ "filename": os.path.basename(__file__),
+ "pdev_properties": pdev_properties,
+ "property_structs": property_structs,
+ "all_properties": all_properties,
+ "copy_property": copy_property,
+ "SPECIALIZED_PROPERTY_STRUCTS": SPECIALIZED_PROPERTY_STRUCTS,
+ }
+
+ try:
+ with open(args.out_c, "w", encoding='utf-8') as f:
+ f.write(TEMPLATE_C.render(**environment))
+ with open(args.out_h, "w", encoding='utf-8') as f:
+ f.write(TEMPLATE_H.render(**environment))
+ except Exception:
+ # If the template fails to render, use mako's helpers to print a
+ # readable stack trace that points at the template source, then exit
+ # with status 1 instead of letting the bare exception propagate.
+ print(mako.exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
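
For reference, copy_property() in the script above only distinguishes scalar members from fixed-size arrays. A tiny standalone reproduction shows the two C statements it emits; the member names are real VkPhysicalDeviceProperties / VkPhysicalDeviceIDProperties fields, used here only as examples.

def copy_property(dst, src, decl, length="1"):
    # Same logic as the generator above: arrays get a memcpy sized from the
    # destination, everything else a plain assignment.
    assert "*" not in decl
    if "[" in decl:
        return "memcpy(%s, %s, sizeof(%s));" % (dst, src, dst)
    return "%s = %s;" % (dst, src)

print(copy_property("pProperties->properties.apiVersion",
                    "pdevice->properties.apiVersion",
                    "uint32_t apiVersion"))
# pProperties->properties.apiVersion = pdevice->properties.apiVersion;

print(copy_property("properties->deviceUUID",
                    "pdevice->properties.deviceUUID",
                    "uint8_t deviceUUID[VK_UUID_SIZE]"))
# memcpy(properties->deviceUUID, pdevice->properties.deviceUUID, sizeof(properties->deviceUUID));
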
diff --git a/src/vulkan/util/vk_render_pass.c b/src/vulkan/util/vk_render_pass.c
deleted file mode 100644
index fa736ec9ccc..00000000000
--- a/src/vulkan/util/vk_render_pass.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright © 2020 Valve Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "vk_alloc.h"
-#include "vk_common_entrypoints.h"
-#include "vk_device.h"
-#include "vk_format.h"
-#include "vk_util.h"
-
-#include "util/log.h"
-
-static void
-translate_references(VkAttachmentReference2 **reference_ptr,
- uint32_t reference_count,
- const VkAttachmentReference *reference,
- const VkRenderPassCreateInfo *pass_info,
- bool is_input_attachment)
-{
- VkAttachmentReference2 *reference2 = *reference_ptr;
- *reference_ptr += reference_count;
- for (uint32_t i = 0; i < reference_count; i++) {
- reference2[i] = (VkAttachmentReference2) {
- .sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
- .pNext = NULL,
- .attachment = reference[i].attachment,
- .layout = reference[i].layout,
- };
-
- if (is_input_attachment &&
- reference2[i].attachment != VK_ATTACHMENT_UNUSED) {
- assert(reference2[i].attachment < pass_info->attachmentCount);
- const VkAttachmentDescription *att =
- &pass_info->pAttachments[reference2[i].attachment];
- reference2[i].aspectMask = vk_format_aspects(att->format);
- }
- }
-}
-
-VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_CreateRenderPass(VkDevice _device,
- const VkRenderPassCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkRenderPass *pRenderPass)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
-
- uint32_t reference_count = 0;
- for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
- reference_count += pCreateInfo->pSubpasses[i].inputAttachmentCount;
- reference_count += pCreateInfo->pSubpasses[i].colorAttachmentCount;
- if (pCreateInfo->pSubpasses[i].pResolveAttachments)
- reference_count += pCreateInfo->pSubpasses[i].colorAttachmentCount;
- if (pCreateInfo->pSubpasses[i].pDepthStencilAttachment)
- reference_count += 1;
- }
-
- VK_MULTIALLOC(ma);
- VK_MULTIALLOC_DECL(&ma, VkRenderPassCreateInfo2, create_info, 1);
- VK_MULTIALLOC_DECL(&ma, VkSubpassDescription2, subpasses,
- pCreateInfo->subpassCount);
- VK_MULTIALLOC_DECL(&ma, VkAttachmentDescription2, attachments,
- pCreateInfo->attachmentCount);
- VK_MULTIALLOC_DECL(&ma, VkSubpassDependency2, dependencies,
- pCreateInfo->dependencyCount);
- VK_MULTIALLOC_DECL(&ma, VkAttachmentReference2, references,
- reference_count);
- if (!vk_multialloc_alloc2(&ma, &device->alloc, pAllocator,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- VkAttachmentReference2 *reference_ptr = references;
-
- const VkRenderPassMultiviewCreateInfo *multiview_info = NULL;
- const VkRenderPassInputAttachmentAspectCreateInfo *aspect_info = NULL;
- vk_foreach_struct(ext, pCreateInfo->pNext) {
- switch (ext->sType) {
- case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
- aspect_info = (const VkRenderPassInputAttachmentAspectCreateInfo *)ext;
- /* We don't care about this information */
- break;
-
- case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
- multiview_info = (const VkRenderPassMultiviewCreateInfo*) ext;
- break;
-
- default:
- mesa_logd("%s: ignored VkStructureType %u\n", __func__, ext->sType);
- break;
- }
- }
-
- for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
- attachments[i] = (VkAttachmentDescription2) {
- .sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
- .pNext = NULL,
- .flags = pCreateInfo->pAttachments[i].flags,
- .format = pCreateInfo->pAttachments[i].format,
- .samples = pCreateInfo->pAttachments[i].samples,
- .loadOp = pCreateInfo->pAttachments[i].loadOp,
- .storeOp = pCreateInfo->pAttachments[i].storeOp,
- .stencilLoadOp = pCreateInfo->pAttachments[i].stencilLoadOp,
- .stencilStoreOp = pCreateInfo->pAttachments[i].stencilStoreOp,
- .initialLayout = pCreateInfo->pAttachments[i].initialLayout,
- .finalLayout = pCreateInfo->pAttachments[i].finalLayout,
- };
- }
-
- for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
- subpasses[i] = (VkSubpassDescription2) {
- .sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
- .pNext = NULL,
- .flags = pCreateInfo->pSubpasses[i].flags,
- .pipelineBindPoint = pCreateInfo->pSubpasses[i].pipelineBindPoint,
- .viewMask = 0,
- .inputAttachmentCount = pCreateInfo->pSubpasses[i].inputAttachmentCount,
- .colorAttachmentCount = pCreateInfo->pSubpasses[i].colorAttachmentCount,
- .preserveAttachmentCount = pCreateInfo->pSubpasses[i].preserveAttachmentCount,
- .pPreserveAttachments = pCreateInfo->pSubpasses[i].pPreserveAttachments,
- };
-
- if (multiview_info && multiview_info->subpassCount) {
- assert(multiview_info->subpassCount == pCreateInfo->subpassCount);
- subpasses[i].viewMask = multiview_info->pViewMasks[i];
- }
-
- subpasses[i].pInputAttachments = reference_ptr;
- translate_references(&reference_ptr,
- subpasses[i].inputAttachmentCount,
- pCreateInfo->pSubpasses[i].pInputAttachments,
- pCreateInfo, true);
- subpasses[i].pColorAttachments = reference_ptr;
- translate_references(&reference_ptr,
- subpasses[i].colorAttachmentCount,
- pCreateInfo->pSubpasses[i].pColorAttachments,
- pCreateInfo, false);
- subpasses[i].pResolveAttachments = NULL;
- if (pCreateInfo->pSubpasses[i].pResolveAttachments) {
- subpasses[i].pResolveAttachments = reference_ptr;
- translate_references(&reference_ptr,
- subpasses[i].colorAttachmentCount,
- pCreateInfo->pSubpasses[i].pResolveAttachments,
- pCreateInfo, false);
- }
- subpasses[i].pDepthStencilAttachment = NULL;
- if (pCreateInfo->pSubpasses[i].pDepthStencilAttachment) {
- subpasses[i].pDepthStencilAttachment = reference_ptr;
- translate_references(&reference_ptr, 1,
- pCreateInfo->pSubpasses[i].pDepthStencilAttachment,
- pCreateInfo, false);
- }
- }
-
- assert(reference_ptr == references + reference_count);
-
- if (aspect_info != NULL) {
- for (uint32_t i = 0; i < aspect_info->aspectReferenceCount; i++) {
- const VkInputAttachmentAspectReference *ref =
- &aspect_info->pAspectReferences[i];
-
- assert(ref->subpass < pCreateInfo->subpassCount);
- VkSubpassDescription2 *subpass = &subpasses[ref->subpass];
-
- assert(ref->inputAttachmentIndex < subpass->inputAttachmentCount);
- VkAttachmentReference2 *att = (VkAttachmentReference2 *)
- &subpass->pInputAttachments[ref->inputAttachmentIndex];
-
- att->aspectMask = ref->aspectMask;
- }
- }
-
- for (uint32_t i = 0; i < pCreateInfo->dependencyCount; i++) {
- dependencies[i] = (VkSubpassDependency2) {
- .sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
- .pNext = NULL,
- .srcSubpass = pCreateInfo->pDependencies[i].srcSubpass,
- .dstSubpass = pCreateInfo->pDependencies[i].dstSubpass,
- .srcStageMask = pCreateInfo->pDependencies[i].srcStageMask,
- .dstStageMask = pCreateInfo->pDependencies[i].dstStageMask,
- .srcAccessMask = pCreateInfo->pDependencies[i].srcAccessMask,
- .dstAccessMask = pCreateInfo->pDependencies[i].dstAccessMask,
- .dependencyFlags = pCreateInfo->pDependencies[i].dependencyFlags,
- .viewOffset = 0,
- };
-
- if (multiview_info && multiview_info->dependencyCount) {
- assert(multiview_info->dependencyCount == pCreateInfo->dependencyCount);
- dependencies[i].viewOffset = multiview_info->pViewOffsets[i];
- }
- }
-
- *create_info = (VkRenderPassCreateInfo2) {
- .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
- .pNext = pCreateInfo->pNext,
- .flags = pCreateInfo->flags,
- .attachmentCount = pCreateInfo->attachmentCount,
- .pAttachments = attachments,
- .subpassCount = pCreateInfo->subpassCount,
- .pSubpasses = subpasses,
- .dependencyCount = pCreateInfo->dependencyCount,
- .pDependencies = dependencies,
- };
-
- if (multiview_info && multiview_info->correlationMaskCount > 0) {
- create_info->correlatedViewMaskCount = multiview_info->correlationMaskCount;
- create_info->pCorrelatedViewMasks = multiview_info->pCorrelationMasks;
- }
-
- VkResult result =
- device->dispatch_table.CreateRenderPass2(_device, create_info,
- pAllocator, pRenderPass);
-
- vk_free2(&device->alloc, pAllocator, create_info);
-
- return result;
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents)
-{
- /* We don't have a vk_command_buffer object but we can assume, since we're
- * using common dispatch, that it's a vk_object of some sort.
- */
- struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
-
- VkSubpassBeginInfo info = {
- .sType = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,
- .contents = contents,
- };
-
- disp->device->dispatch_table.CmdBeginRenderPass2(commandBuffer,
- pRenderPassBegin, &info);
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_CmdEndRenderPass(VkCommandBuffer commandBuffer)
-{
- /* We don't have a vk_command_buffer object but we can assume, since we're
- * using common dispatch, that it's a vk_object of some sort.
- */
- struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
-
- VkSubpassEndInfo info = {
- .sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
- };
-
- disp->device->dispatch_table.CmdEndRenderPass2(commandBuffer, &info);
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_CmdNextSubpass(VkCommandBuffer commandBuffer,
- VkSubpassContents contents)
-{
- /* We don't have a vk_command_buffer object but we can assume, since we're
- * using common dispatch, that it's a vk_object of some sort.
- */
- struct vk_object_base *disp = (struct vk_object_base *)commandBuffer;
-
- VkSubpassBeginInfo begin_info = {
- .sType = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,
- .contents = contents,
- };
-
- VkSubpassEndInfo end_info = {
- .sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
- };
-
- disp->device->dispatch_table.CmdNextSubpass2(commandBuffer, &begin_info,
- &end_info);
-}
diff --git a/src/vulkan/util/vk_shader_module.c b/src/vulkan/util/vk_shader_module.c
deleted file mode 100644
index fd3671744fb..00000000000
--- a/src/vulkan/util/vk_shader_module.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "vk_shader_module.h"
-#include "util/mesa-sha1.h"
-#include "vk_common_entrypoints.h"
-#include "vk_device.h"
-
-VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_CreateShaderModule(VkDevice _device,
- const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkShaderModule *pShaderModule)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
- struct vk_shader_module *module;
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
- assert(pCreateInfo->flags == 0);
-
- module = vk_object_alloc(device, pAllocator,
- sizeof(*module) + pCreateInfo->codeSize,
- VK_OBJECT_TYPE_SHADER_MODULE);
- if (module == NULL)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- module->size = pCreateInfo->codeSize;
- module->nir = NULL;
- memcpy(module->data, pCreateInfo->pCode, module->size);
-
- _mesa_sha1_compute(module->data, module->size, module->sha1);
-
- *pShaderModule = vk_shader_module_to_handle(module);
-
- return VK_SUCCESS;
-}
-
-VKAPI_ATTR void VKAPI_CALL
-vk_common_DestroyShaderModule(VkDevice _device,
- VkShaderModule _module,
- const VkAllocationCallbacks *pAllocator)
-{
- VK_FROM_HANDLE(vk_device, device, _device);
- VK_FROM_HANDLE(vk_shader_module, module, _module);
-
- if (!module)
- return;
-
- /* NIR modules (which are only created internally by the driver) are not
- * dynamically allocated so we should never call this for them.
- * Instead the driver is responsible for freeing the NIR code when it is
- * no longer needed.
- */
- assert(module->nir == NULL);
-
- vk_object_free(device, pAllocator, module);
-}
diff --git a/src/vulkan/util/vk_struct_type_cast_gen.py b/src/vulkan/util/vk_struct_type_cast_gen.py
new file mode 100644
index 00000000000..eb77372ecf8
--- /dev/null
+++ b/src/vulkan/util/vk_struct_type_cast_gen.py
@@ -0,0 +1,116 @@
+# Copyright © 2023 Igalia S.L.
+# SPDX-License-Identifier: MIT
+
+"""Create shortcuts for casting Vulkan structs when knowing their stype."""
+
+import argparse
+import functools
+import os
+import re
+import textwrap
+import xml.etree.ElementTree as et
+
+from mako.template import Template
+from vk_extensions import Extension, filter_api, get_all_required
+
+COPYRIGHT = textwrap.dedent(u"""\
+ * Copyright © 2023 Igalia S.L.
+ * SPDX-License-Identifier: MIT
+ """)
+
+H_TEMPLATE = Template(textwrap.dedent(u"""\
+ /* Autogenerated file -- do not edit
+ * generated by ${file}
+ *
+ ${copyright}
+ */
+
+ #ifndef MESA_VK_STRUCT_CASTS_H
+ #define MESA_VK_STRUCT_CASTS_H
+
+ #include <vulkan/vulkan.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ % for s in structs:
+ #define ${s.stype}_cast ${s.name}
+ % endfor
+
+ #ifdef __cplusplus
+ } /* extern "C" */
+ #endif
+
+ #endif"""))
+
+
+class VkStruct(object):
+ """Simple struct-like class representing a single Vulkan struct identified with a VkStructureType"""
+ def __init__(self, name, stype):
+ self.name = name
+ self.stype = stype
+
+
+def struct_get_stype(xml_node):
+ for member in xml_node.findall('./member'):
+ name = member.findall('./name')
+ if len(name) > 0 and name[0].text == "sType":
+ return member.get('values')
+ return None
+
+
+def parse_xml(filename, structs, beta):
+ xml = et.parse(filename)
+ api = 'vulkan'
+
+ required_types = get_all_required(xml, 'type', api, beta)
+
+ for struct_type in xml.findall('./types/type[@category="struct"]'):
+ if not filter_api(struct_type, api):
+ continue
+
+ name = struct_type.attrib['name']
+ if name not in required_types:
+ continue
+
+ stype = struct_get_stype(struct_type)
+ if stype is not None:
+ structs.append(VkStruct(name, stype))
+
+ for struct_type in xml.findall('.//enum[@alias][@extends=\'VkStructureType\']'):
+ name = struct_type.attrib['name']
+ alias = struct_type.attrib['alias']
+ structs.append(VkStruct(alias + "_cast", name))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--xml', required=True,
+ help='Vulkan API XML files',
+ action='append',
+ dest='xml_files')
+ parser.add_argument('--outdir',
+ help='Directory to put the generated files in',
+ required=True)
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
+
+ args = parser.parse_args()
+
+ structs = []
+
+ for filename in args.xml_files:
+ parse_xml(filename, structs, args.beta)
+
+ structs = sorted(structs, key=lambda s: s.name)
+
+ for template, file_ in [(H_TEMPLATE, os.path.join(args.outdir, 'vk_struct_type_cast.h'))]:
+ with open(file_, 'w', encoding='utf-8') as f:
+ f.write(template.render(
+ file=os.path.basename(__file__),
+ structs=structs,
+ copyright=COPYRIGHT))
+
+
+if __name__ == '__main__':
+ main()
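
The header produced by this script gives every VkStructureType value a _cast alias naming its struct type, which the reworked vk_find_struct()/vk_find_struct_const() macros in vk_util.h use to return correctly typed pointers. A small, self-contained sketch of the sType extraction on a trimmed-down vk.xml fragment:

import xml.etree.ElementTree as et

fragment = """
<type category="struct" name="VkPhysicalDeviceFeatures2">
  <member values="VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2"><type>VkStructureType</type> <name>sType</name></member>
  <member><type>void</type>* <name>pNext</name></member>
</type>
"""

node = et.fromstring(fragment)
for member in node.findall('./member'):
    name = member.findall('./name')
    if name and name[0].text == "sType":
        print(member.get('values'))
# -> VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2
# The generated header would then contain:
#   #define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_cast VkPhysicalDeviceFeatures2
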
diff --git a/src/vulkan/util/vk_synchronization_helpers_gen.py b/src/vulkan/util/vk_synchronization_helpers_gen.py
new file mode 100644
index 00000000000..bc256752ba2
--- /dev/null
+++ b/src/vulkan/util/vk_synchronization_helpers_gen.py
@@ -0,0 +1,225 @@
+COPYRIGHT=u"""
+/* Copyright © 2023 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+"""
+
+import argparse
+import os
+import textwrap
+import xml.etree.ElementTree as et
+
+from mako.template import Template
+from vk_extensions import get_api_list
+
+TEMPLATE_C = Template(COPYRIGHT + """\
+#include "vk_synchronization.h"
+
+VkPipelineStageFlags2
+vk_expand_pipeline_stage_flags2(VkPipelineStageFlags2 stages)
+{
+% for (group_stage, stages) in group_stages.items():
+ if (stages & ${group_stage})
+ stages |= ${' |\\n '.join(stages)};
+
+% endfor
+ if (stages & VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT) {
+% for (guard, stage) in all_commands_stages:
+% if guard is not None:
+#ifdef ${guard}
+% endif
+ stages |= ${stage};
+% if guard is not None:
+#endif
+% endif
+% endfor
+ }
+
+ return stages;
+}
+
+VkAccessFlags2
+vk_read_access2_for_pipeline_stage_flags2(VkPipelineStageFlags2 stages)
+{
+ VkAccessFlags2 access = 0;
+
+% for ((guard, stages), access) in stages_read_access.items():
+% if guard is not None:
+#ifdef ${guard}
+% endif
+ if (stages & (${' |\\n '.join(stages)}))
+ access |= ${' |\\n '.join(access)};
+% if guard is not None:
+#endif
+% endif
+
+% endfor
+ return access;
+}
+
+VkAccessFlags2
+vk_write_access2_for_pipeline_stage_flags2(VkPipelineStageFlags2 stages)
+{
+ VkAccessFlags2 access = 0;
+
+% for ((guard, stages), access) in stages_write_access.items():
+% if guard is not None:
+#ifdef ${guard}
+% endif
+ if (stages & (${' |\\n '.join(stages)}))
+ access |= ${' |\\n '.join(access)};
+% if guard is not None:
+#endif
+% endif
+
+% endfor
+ return access;
+}
+""")
+
+def get_guards(xml, api):
+ guards = {}
+ for ext_elem in xml.findall('./extensions/extension'):
+ supported = get_api_list(ext_elem.attrib['supported'])
+ if api not in supported:
+ continue
+
+ for enum in ext_elem.findall('./require/enum[@extends]'):
+ if enum.attrib['extends'] not in ('VkPipelineStageFlagBits2',
+ 'VkAccessFlagBits2'):
+ continue
+
+ if 'protect' not in enum.attrib:
+ continue
+
+ name = enum.attrib['name']
+ guard = enum.attrib['protect']
+ guards[name] = guard
+
+ return guards
+
+def get_all_commands_stages(xml, guards):
+ stages = []
+ for stage in xml.findall('./sync/syncstage'):
+ stage_name = stage.attrib['name']
+
+ exclude = [
+ # This isn't a real stage
+ 'VK_PIPELINE_STAGE_2_NONE',
+
+ # These are real stages but they're a bit weird to include in
+ # ALL_COMMANDS because they're context-dependent, depending on
+ # whether they're part of srcStagesMask or dstStagesMask.
+ #
+ # We could avoid all grouped stages but then if someone adds
+ # another group later, the behavior of this function may change in
+ # a backwards-incompatible way. Also, the other ones aren't really
+ # hurting anything if we add them in.
+ 'VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT',
+ 'VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT',
+
+ # This is all COMMANDS, not host.
+ 'VK_PIPELINE_STAGE_2_HOST_BIT',
+ ]
+ if stage_name in exclude:
+ continue
+
+ guard = guards.get(stage_name, None)
+ stages.append((guard, stage_name))
+
+ return stages
+
+def get_group_stages(xml):
+ group_stages = {}
+ for stage in xml.findall('./sync/syncstage'):
+ name = stage.attrib['name']
+ equiv = stage.find('./syncequivalent')
+ if equiv is not None:
+ stages = equiv.attrib['stage'].split(',')
+ group_stages[name] = stages
+
+ return group_stages
+
+def access_is_read(name):
+ if 'READ' in name:
+ assert 'WRITE' not in name
+ return True
+ elif 'WRITE' in name:
+ return False
+ else:
+ assert False, f"Invalid access bit name: {name}"
+
+def get_stages_access(xml, read, guards):
+ stages_access = {}
+ for access in xml.findall('./sync/syncaccess'):
+ access_name = access.attrib['name']
+ if access_name == 'VK_ACCESS_2_NONE':
+ continue
+
+ if access_is_read(access_name) != read:
+ continue
+
+ guard = guards.get(access_name, None)
+ support = access.find('./syncsupport')
+ if support is not None:
+ stages = support.attrib['stage'].split(',')
+ stages.sort()
+ key = (guard, tuple(stages))
+ if key in stages_access:
+ stages_access[key].append(access_name)
+ else:
+ stages_access[key] = [access_name]
+
+ return stages_access
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--beta', required=True, help='Enable beta extensions.')
+ parser.add_argument('--xml', required=True, help='Vulkan API XML file')
+ parser.add_argument('--out-c', required=True, help='Output C file.')
+ args = parser.parse_args()
+
+ xml = et.parse(args.xml)
+
+ guards = get_guards(xml, 'vulkan')
+ environment = {
+ 'all_commands_stages': get_all_commands_stages(xml, guards),
+ 'group_stages': get_group_stages(xml),
+ 'stages_read_access': get_stages_access(xml, True, guards),
+ 'stages_write_access': get_stages_access(xml, False, guards),
+ }
+
+ try:
+ with open(args.out_c, 'w', encoding='utf-8') as f:
+ f.write(TEMPLATE_C.render(**environment))
+ except Exception:
+ # If the template fails to render, import mako's helpers to print a
+ # readable stack trace that points at the template source, then exit
+ # with status 1 instead of letting the bare exception propagate.
+ import sys
+ from mako import exceptions
+ print(exceptions.text_error_template().render(), file=sys.stderr)
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
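
vk_expand_pipeline_stage_flags2() relies on the <sync> section that newer vk.xml revisions carry: each grouped stage lists its equivalent stages in a <syncequivalent> element. Below is a self-contained sketch of the parsing done by get_group_stages(), on a heavily simplified fragment modelled on the registry (only the VERTEX_INPUT grouping is shown).

import xml.etree.ElementTree as et

fragment = """
<registry>
  <sync>
    <syncstage name="VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT">
      <syncequivalent stage="VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT,VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT"/>
    </syncstage>
    <syncstage name="VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT"/>
  </sync>
</registry>
"""

xml = et.ElementTree(et.fromstring(fragment))
group_stages = {}
for stage in xml.findall('./sync/syncstage'):
    equiv = stage.find('./syncequivalent')
    if equiv is not None:
        group_stages[stage.attrib['name']] = equiv.attrib['stage'].split(',')

print(group_stages)
# {'VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT':
#  ['VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT', 'VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT']}
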
diff --git a/src/vulkan/util/vk_util.c b/src/vulkan/util/vk_util.c
index 68d444e1607..841212034ec 100644
--- a/src/vulkan/util/vk_util.c
+++ b/src/vulkan/util/vk_util.c
@@ -27,7 +27,7 @@
#include <stdlib.h>
#include <string.h>
#include "vk_util.h"
-#include "util/debug.h"
+#include "util/u_debug.h"
#include "compiler/spirv/nir_spirv.h"
@@ -63,7 +63,7 @@ uint32_t vk_get_version_override(void)
int major = atoi(str);
int minor = minor_str ? atoi(minor_str + 1) : 0;
- int patch = patch_str ? atoi(patch_str + 1) : 0;
+ int patch = patch_str ? atoi(patch_str + 1) : VK_HEADER_VERSION;
/* Do some basic version sanity checking */
if (major < 1 || minor < 0 || patch < 0 || minor > 1023 || patch > 4095)
@@ -75,7 +75,7 @@ uint32_t vk_get_version_override(void)
void
vk_warn_non_conformant_implementation(const char *driver_name)
{
- if (env_var_as_boolean("MESA_VK_IGNORE_CONFORMANCE_WARNING", false))
+ if (debug_get_bool_option("MESA_VK_IGNORE_CONFORMANCE_WARNING", false))
return;
fprintf(stderr, "WARNING: %s is not a conformant Vulkan implementation, "
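
The vk_util.c change to vk_get_version_override() above alters the fallback for MESA_VK_VERSION_OVERRIDE: a missing patch component now defaults to VK_HEADER_VERSION rather than 0, so an override like "1.3" still reports the current header patch level. A rough Python illustration of the resulting parse; VK_HEADER_VERSION is a stand-in value here, the real one comes from vulkan_core.h.

VK_HEADER_VERSION = 261  # placeholder value for illustration only

def parse_override(s):
    # Mimics the C parsing: minor falls back to 0, patch to VK_HEADER_VERSION.
    major, _, rest = s.partition('.')
    minor, _, patch = rest.partition('.')
    return (int(major),
            int(minor) if minor else 0,
            int(patch) if patch else VK_HEADER_VERSION)

print(parse_override("1.3"))      # (1, 3, 261)
print(parse_override("1.3.250"))  # (1, 3, 250)
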
diff --git a/src/vulkan/util/vk_util.h b/src/vulkan/util/vk_util.h
index 0e98b71cf68..d29db67a4d0 100644
--- a/src/vulkan/util/vk_util.h
+++ b/src/vulkan/util/vk_util.h
@@ -26,23 +26,96 @@
#include "util/bitscan.h"
#include "util/macros.h"
#include "compiler/shader_enums.h"
+#include <stdlib.h>
#include <string.h>
+#include "vk_struct_type_cast.h"
+
#ifdef __cplusplus
extern "C" {
#endif
/* common inlines and macros for vulkan drivers */
-#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+struct vk_pnext_iterator {
+ VkBaseOutStructure *pos;
+#ifndef NDEBUG
+ VkBaseOutStructure *half_pos;
+ unsigned idx;
+#endif
+ bool done;
+};
+
+static inline struct vk_pnext_iterator
+vk_pnext_iterator_init(void *start)
+{
+ struct vk_pnext_iterator iter;
+
+ iter.pos = (VkBaseOutStructure *)start;
+#ifndef NDEBUG
+ iter.half_pos = (VkBaseOutStructure *)start;
+ iter.idx = 0;
+#endif
+ iter.done = false;
+
+ return iter;
+}
+
+static inline struct vk_pnext_iterator
+vk_pnext_iterator_init_const(const void *start)
+{
+ return vk_pnext_iterator_init((void *)start);
+}
+
+static inline VkBaseOutStructure *
+vk_pnext_iterator_next(struct vk_pnext_iterator *iter)
+{
+ iter->pos = iter->pos->pNext;
+
+#ifndef NDEBUG
+ if (iter->idx++ & 1) {
+ /** This is the "tortoise and the hare" algorithm: half_pos advances
+ * every other time pos advances. Because pos moves through the list
+ * twice as fast as half_pos, the distance between them grows by one
+ * each time we get here. If the chain contains a loop, both pointers
+ * eventually end up inside it, and once that distance is an integer
+ * multiple of the loop length the two pointers become equal.
+ */
+ iter->half_pos = iter->half_pos->pNext;
+ if (iter->half_pos == iter->pos)
+ assert(!"Vulkan input pNext chain has a loop!");
+ }
+#endif
+
+ return iter->pos;
+}
-#define vk_foreach_struct(__iter, __start) \
- for (struct VkBaseOutStructure *__iter = (struct VkBaseOutStructure *)(__start); \
- __iter; __iter = __iter->pNext)
+/* Because the outer loop only executes once, independently of what happens in
+ * the inner loop, breaks and continues should work exactly the same as if
+ * there were only one for loop.
+ */
+#define vk_foreach_struct(__e, __start) \
+ for (struct vk_pnext_iterator __iter = vk_pnext_iterator_init(__start); \
+ !__iter.done; __iter.done = true) \
+ for (VkBaseOutStructure *__e = __iter.pos; \
+ __e; __e = vk_pnext_iterator_next(&__iter))
+
+#define vk_foreach_struct_const(__e, __start) \
+ for (struct vk_pnext_iterator __iter = \
+ vk_pnext_iterator_init_const(__start); \
+ !__iter.done; __iter.done = true) \
+ for (const VkBaseInStructure *__e = (VkBaseInStructure *)__iter.pos; \
+ __e; __e = (VkBaseInStructure *)vk_pnext_iterator_next(&__iter))
-#define vk_foreach_struct_const(__iter, __start) \
- for (const struct VkBaseInStructure *__iter = (const struct VkBaseInStructure *)(__start); \
- __iter; __iter = __iter->pNext)
+static inline void
+vk_copy_struct_guts(VkBaseOutStructure *dst, VkBaseInStructure *src, size_t struct_size)
+{
+ STATIC_ASSERT(sizeof(*dst) == sizeof(*src));
+ memcpy(dst + 1, src + 1, struct_size - sizeof(VkBaseOutStructure));
+}
/**
* A wrapper for a Vulkan output array. A Vulkan output array is one that
@@ -57,15 +130,16 @@ extern "C" {
* uint32_t* pQueueFamilyPropertyCount,
* VkQueueFamilyProperties* pQueueFamilyProperties)
* {
- * VK_OUTARRAY_MAKE(props, pQueueFamilyProperties,
- * pQueueFamilyPropertyCount);
+ * VK_OUTARRAY_MAKE_TYPED(VkQueueFamilyProperties, props,
+ * pQueueFamilyProperties,
+ * pQueueFamilyPropertyCount);
*
- * vk_outarray_append(&props, p) {
+ * vk_outarray_append_typed(VkQueueFamilyProperties, &props, p) {
* p->queueFlags = ...;
* p->queueCount = ...;
* }
*
- * vk_outarray_append(&props, p) {
+ * vk_outarray_append_typed(VkQueueFamilyProperties, &props, p) {
* p->queueFlags = ...;
* p->queueCount = ...;
* }
@@ -150,8 +224,6 @@ __vk_outarray_next(struct __vk_outarray *a, size_t elem_size)
#define vk_outarray_init(a, data, len) \
__vk_outarray_init(&(a)->base, (data), (len))
-#define VK_OUTARRAY_MAKE(name, data, len) \
- VK_OUTARRAY_MAKE_TYPED(__typeof__((data)[0]), name, data, len)
#define VK_OUTARRAY_MAKE_TYPED(type, name, data, len) \
vk_outarray(type) name; \
vk_outarray_init(&name, (data), (len))
@@ -170,13 +242,13 @@ __vk_outarray_next(struct __vk_outarray *a, size_t elem_size)
*
* This is a block-based macro. For example:
*
- * vk_outarray_append(&a, elem) {
+ * vk_outarray_append_typed(T, &a, elem) {
* elem->foo = ...;
* elem->bar = ...;
* }
*
* The array `a` has type `vk_outarray(elem_t) *`. It is usually declared with
- * VK_OUTARRAY_MAKE(). The variable `elem` is block-scoped and has type
+ * VK_OUTARRAY_MAKE_TYPED(). The variable `elem` is block-scoped and has type
* `elem_t *`.
*
* The macro unconditionally increments the array's `wanted_len`. If the array
@@ -184,8 +256,6 @@ __vk_outarray_next(struct __vk_outarray *a, size_t elem_size)
* executes the block. When the block is executed, `elem` is non-null and
* points to the newly appended element.
*/
-#define vk_outarray_append(a, elem) \
- vk_outarray_append_typed(vk_outarray_typeof_elem(a), a, elem)
#define vk_outarray_append_typed(type, a, elem) \
for (type *elem = vk_outarray_next_typed(type, a); \
elem != NULL; elem = NULL)
@@ -201,11 +271,13 @@ __vk_find_struct(void *start, VkStructureType sType)
return NULL;
}
-#define vk_find_struct(__start, __sType) \
- __vk_find_struct((__start), VK_STRUCTURE_TYPE_##__sType)
+#define vk_find_struct(__start, __sType) \
+ (VK_STRUCTURE_TYPE_##__sType##_cast *)__vk_find_struct( \
+ (__start), VK_STRUCTURE_TYPE_##__sType)
-#define vk_find_struct_const(__start, __sType) \
- (const void *)__vk_find_struct((void *)(__start), VK_STRUCTURE_TYPE_##__sType)
+#define vk_find_struct_const(__start, __sType) \
+ (const VK_STRUCTURE_TYPE_##__sType##_cast *)__vk_find_struct( \
+ (void *)(__start), VK_STRUCTURE_TYPE_##__sType)
static inline void
__vk_append_struct(void *start, void *element)
@@ -260,14 +332,14 @@ mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
/* iterate over a sequence of indexed multidraws for VK_EXT_multi_draw extension */
/* 'i' must be explicitly declared */
#define vk_foreach_multi_draw_indexed(_draw, _i, _pDrawInfo, _num_draws, _stride) \
- for (const VkMultiDrawIndexedInfoEXT *_draw = (const void*)(_pDrawInfo); \
+ for (const VkMultiDrawIndexedInfoEXT *_draw = (const VkMultiDrawIndexedInfoEXT*)(_pDrawInfo); \
(_i) < (_num_draws); \
(_i)++, (_draw) = (const VkMultiDrawIndexedInfoEXT*)((const uint8_t*)(_draw) + (_stride)))
/* iterate over a sequence of multidraws for VK_EXT_multi_draw extension */
/* 'i' must be explicitly declared */
#define vk_foreach_multi_draw(_draw, _i, _pDrawInfo, _num_draws, _stride) \
- for (const VkMultiDrawInfoEXT *_draw = (const void*)(_pDrawInfo); \
+ for (const VkMultiDrawInfoEXT *_draw = (const VkMultiDrawInfoEXT*)(_pDrawInfo); \
(_i) < (_num_draws); \
(_i)++, (_draw) = (const VkMultiDrawInfoEXT*)((const uint8_t*)(_draw) + (_stride)))
@@ -280,13 +352,43 @@ vk_spec_info_to_nir_spirv(const VkSpecializationInfo *spec_info,
#define STACK_ARRAY_SIZE 8
+/* gcc sometimes reports -Wmaybe-uninitialized for the stack array in places
+ * where it cannot prove that, when size is 0, nothing down the call chain
+ * reads the array. Please don't try to fix that by zero-initializing the
+ * array here, since this macro is used in a lot of different places. An
+ * "if (size == 0) return;" at the call site may work for you.
+ */
#define STACK_ARRAY(type, name, size) \
- type _stack_##name[STACK_ARRAY_SIZE], *const name = \
- (size) <= STACK_ARRAY_SIZE ? _stack_##name : malloc((size) * sizeof(type))
+ type _stack_##name[STACK_ARRAY_SIZE]; \
+ type *const name = \
+ ((size) <= STACK_ARRAY_SIZE ? _stack_##name : (type *)malloc((size) * sizeof(type)))
#define STACK_ARRAY_FINISH(name) \
if (name != _stack_##name) free(name)
+static inline uint8_t
+vk_index_type_to_bytes(enum VkIndexType type)
+{
+ switch (type) {
+ case VK_INDEX_TYPE_NONE_KHR: return 0;
+ case VK_INDEX_TYPE_UINT8_KHR: return 1;
+ case VK_INDEX_TYPE_UINT16: return 2;
+ case VK_INDEX_TYPE_UINT32: return 4;
+ default: unreachable("Invalid index type");
+ }
+}
+
+static inline uint32_t
+vk_index_to_restart(enum VkIndexType type)
+{
+ switch (type) {
+ case VK_INDEX_TYPE_UINT8_KHR: return 0xff;
+ case VK_INDEX_TYPE_UINT16: return 0xffff;
+ case VK_INDEX_TYPE_UINT32: return 0xffffffff;
+ default: unreachable("unexpected index type");
+ }
+}
+
#ifdef __cplusplus
}
#endif
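
The rewritten vk_foreach_struct()/vk_foreach_struct_const() macros above route every step through vk_pnext_iterator_next(), whose debug-only check is the classic two-pointer cycle detector. A short Python illustration of the same idea, with dicts standing in for the chained Vulkan structs:

def chain_has_loop(start):
    # pos advances every step, half advances every other step; they can only
    # meet again if the pNext chain loops back on itself.
    pos = half = start
    idx = 0
    while pos is not None:
        pos = pos.get("pNext")
        if idx & 1:
            half = half.get("pNext")
            if half is pos and pos is not None:
                return True
        idx += 1
    return False

a = {"sType": 1}
b = {"sType": 2, "pNext": None}
a["pNext"] = b
assert not chain_has_loop(a)
b["pNext"] = a            # create a loop
assert chain_has_loop(a)
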
diff --git a/src/vulkan/vulkan-android.sym b/src/vulkan/vulkan-android.sym
new file mode 100644
index 00000000000..7148e0920c6
--- /dev/null
+++ b/src/vulkan/vulkan-android.sym
@@ -0,0 +1,12 @@
+{
+ global:
+ # Android looks for this global in HAL modules. In the source it occurs
+ # as HAL_MODULE_INFO_SYM (which is just a #define for HMI) and it's an
+ # instance of struct hwvulkan_module_t.
+ HMI;
+
+ local:
+ # When static linking LLVM, all its symbols are public API.
+ # That may cause symbol collision, so explicitly demote everything.
+ *;
+};
diff --git a/src/vulkan/vulkan-icd-android-symbols.txt b/src/vulkan/vulkan-icd-android-symbols.txt
new file mode 100644
index 00000000000..7e547aa5f8a
--- /dev/null
+++ b/src/vulkan/vulkan-icd-android-symbols.txt
@@ -0,0 +1,4 @@
+# Android looks for this global in HAL modules. In the source it occurs
+# as HAL_MODULE_INFO_SYM (which is just a #define for HMI) and it's an
+# instance of struct hwvulkan_module_t.
+HMI
diff --git a/src/vulkan/vulkan-icd-symbols.txt b/src/vulkan/vulkan-icd-symbols.txt
index b463ca83ae6..feca419c0ca 100644
--- a/src/vulkan/vulkan-icd-symbols.txt
+++ b/src/vulkan/vulkan-icd-symbols.txt
@@ -12,8 +12,3 @@ vk_icdNegotiateLoaderICDInterfaceVersion
# Version 4 - Add unknown physical device extension querying via
# vk_icdGetPhysicalDeviceProcAddr.
(optional) vk_icdGetPhysicalDeviceProcAddr
-
-# Andoid looks for this global in HAL modules. In the source it occurs
-# as HAL_MODULE_INFO_SYM (which is just a #define for HMI) and it's an
-# instance of struct hwvulkan_module_t.
-(optional) HMI
diff --git a/src/vulkan/vulkan.sym b/src/vulkan/vulkan.sym
new file mode 100644
index 00000000000..c85a22e905a
--- /dev/null
+++ b/src/vulkan/vulkan.sym
@@ -0,0 +1,11 @@
+{
+ global:
+ vk_icdGetInstanceProcAddr;
+ vk_icdGetPhysicalDeviceProcAddr;
+ vk_icdNegotiateLoaderICDInterfaceVersion;
+
+ local:
+ # When static linking LLVM, all its symbols are public API.
+ # That may cause symbol collision, so explicitly demote everything.
+ *;
+};
diff --git a/src/vulkan/vulkan_api.def.in b/src/vulkan/vulkan_api.def.in
new file mode 100644
index 00000000000..678e4d99278
--- /dev/null
+++ b/src/vulkan/vulkan_api.def.in
@@ -0,0 +1,4 @@
+; the stdcall calling convention adds an @number suffix on 32-bit architectures with gcc
+vk_icdNegotiateLoaderICDInterfaceVersion@4
+vk_icdGetInstanceProcAddr@8
+vk_icdGetPhysicalDeviceProcAddr@8
diff --git a/src/vulkan/wsi/meson.build b/src/vulkan/wsi/meson.build
index 6900b0d971a..67c3691619b 100644
--- a/src/vulkan/wsi/meson.build
+++ b/src/vulkan/wsi/meson.build
@@ -19,6 +19,8 @@
# SOFTWARE.
files_vulkan_wsi = files('wsi_common.c')
+links_vulkan_wsi = []
+platform_deps = []
if dep_libdrm.found()
files_vulkan_wsi += files('wsi_common_drm.c')
@@ -30,26 +32,71 @@ endif
if with_platform_wayland
files_vulkan_wsi += files('wsi_common_wayland.c')
- files_vulkan_wsi += [
- linux_dmabuf_unstable_v1_client_protocol_h,
- linux_dmabuf_unstable_v1_protocol_c,
- ]
+ files_vulkan_wsi += wp_files['linux-dmabuf-unstable-v1']
+ files_vulkan_wsi += wp_files['presentation-time']
+ files_vulkan_wsi += wp_files['tearing-control-v1']
+ links_vulkan_wsi += libloader_wayland_helper
+ files_vulkan_wsi += wp_files['linux-drm-syncobj-v1']
endif
if with_platform_windows
- files_vulkan_wsi += files('wsi_common_win32.c')
+ files_vulkan_wsi += files('wsi_common_win32.cpp')
+ platform_deps += dep_dxheaders
+else
+ files_vulkan_wsi += files('wsi_common_headless.c')
endif
if system_has_kms_drm and not with_platform_android
files_vulkan_wsi += files('wsi_common_display.c')
endif
+wsi_entrypoints = custom_target(
+ 'wsi_entrypoints',
+ input : [vk_entrypoints_gen, vk_api_xml],
+ output : ['wsi_common_entrypoints.h', 'wsi_common_entrypoints.c'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--proto', '--weak',
+ '--out-h', '@OUTPUT0@', '--out-c', '@OUTPUT1@', '--prefix', 'wsi',
+ '--beta', with_vulkan_beta.to_string()
+ ],
+ depend_files : vk_entrypoints_gen_depend_files,
+)
+
libvulkan_wsi = static_library(
'vulkan_wsi',
- files_vulkan_wsi,
+ [files_vulkan_wsi, wsi_entrypoints],
include_directories : [inc_include, inc_src],
- dependencies : [vulkan_wsi_deps, dep_libdrm, idep_vulkan_util, idep_xmlconfig],
- c_args : [vulkan_wsi_args],
+ dependencies : [
+ vulkan_wsi_deps, dep_libdrm, dep_libudev, idep_vulkan_util_headers,
+ idep_vulkan_runtime_headers, idep_xmlconfig, idep_mesautil, platform_deps,
+ idep_blake3
+ ],
+ link_with: links_vulkan_wsi,
gnu_symbol_visibility : 'hidden',
build_by_default : false,
)
+
+idep_vulkan_wsi_headers = declare_dependency(
+ sources : wsi_entrypoints[0],
+ dependencies : idep_vulkan_wsi_defines,
+ include_directories : include_directories('.')
+)
+
+# link_whole does not work with the Meson VS backend; this is likely a Meson
+# bug, since MSVC with ninja works fine. See the discussion here:
+# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10506
+if get_option('backend').startswith('vs')
+ idep_vulkan_wsi = declare_dependency(
+ link_with : libvulkan_wsi,
+ dependencies : idep_vulkan_wsi_headers
+ )
+else
+ idep_vulkan_wsi = declare_dependency(
+ # Instruct users of this library to link with --whole-archive. Otherwise,
+ # our weak function overloads may not resolve properly.
+ link_whole : libvulkan_wsi,
+ dependencies : [
+ idep_vulkan_wsi_headers, dep_libudev
+ ]
+ )
+endif
diff --git a/src/vulkan/wsi/wsi_common.c b/src/vulkan/wsi/wsi_common.c
index 292bb976da8..2b7d8717e19 100644
--- a/src/vulkan/wsi/wsi_common.c
+++ b/src/vulkan/wsi/wsi_common.c
@@ -22,16 +22,42 @@
*/
#include "wsi_common_private.h"
+#include "wsi_common_entrypoints.h"
+#include "util/u_debug.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/xmlconfig.h"
+#include "vk_device.h"
+#include "vk_fence.h"
+#include "vk_format.h"
+#include "vk_instance.h"
+#include "vk_physical_device.h"
+#include "vk_queue.h"
+#include "vk_semaphore.h"
+#include "vk_sync.h"
+#include "vk_sync_dummy.h"
#include "vk_util.h"
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+uint64_t WSI_DEBUG;
+
+static const struct debug_control debug_control[] = {
+ { "buffer", WSI_DEBUG_BUFFER },
+ { "sw", WSI_DEBUG_SW },
+ { "noshm", WSI_DEBUG_NOSHM },
+ { "linear", WSI_DEBUG_LINEAR },
+ { "dxgi", WSI_DEBUG_DXGI },
+ { NULL, },
+};
+
VkResult
wsi_device_init(struct wsi_device *wsi,
VkPhysicalDevice pdevice,
@@ -39,25 +65,36 @@ wsi_device_init(struct wsi_device *wsi,
const VkAllocationCallbacks *alloc,
int display_fd,
const struct driOptionCache *dri_options,
- bool sw_device)
+ const struct wsi_device_options *device_options)
{
const char *present_mode;
UNUSED VkResult result;
+ WSI_DEBUG = parse_debug_string(getenv("MESA_VK_WSI_DEBUG"), debug_control);
+
+ util_cpu_trace_init();
+
memset(wsi, 0, sizeof(*wsi));
wsi->instance_alloc = *alloc;
wsi->pdevice = pdevice;
- wsi->sw = sw_device;
+ wsi->supports_scanout = true;
+ wsi->sw = device_options->sw_device || (WSI_DEBUG & WSI_DEBUG_SW);
+ wsi->wants_linear = (WSI_DEBUG & WSI_DEBUG_LINEAR) != 0;
+ wsi->x11.extra_xwayland_image = device_options->extra_xwayland_image;
#define WSI_GET_CB(func) \
PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
+ WSI_GET_CB(GetPhysicalDeviceExternalSemaphoreProperties);
WSI_GET_CB(GetPhysicalDeviceProperties2);
WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB
+ wsi->drm_info.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT;
wsi->pci_bus_info.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
+ wsi->pci_bus_info.pNext = &wsi->drm_info;
VkPhysicalDeviceProperties2 pdp2 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
.pNext = &wsi->pci_bus_info,
@@ -65,11 +102,67 @@ wsi_device_init(struct wsi_device *wsi,
GetPhysicalDeviceProperties2(pdevice, &pdp2);
wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
+ assert(pdp2.properties.limits.optimalBufferCopyRowPitchAlignment <= UINT32_MAX);
+ wsi->optimalBufferCopyRowPitchAlignment =
+ pdp2.properties.limits.optimalBufferCopyRowPitchAlignment;
wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;
GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);
+ assert(wsi->queue_family_count <= 64);
+ VkQueueFamilyProperties queue_properties[64];
+ GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, queue_properties);
+
+ for (unsigned i = 0; i < wsi->queue_family_count; i++) {
+ VkFlags req_flags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
+ if (queue_properties[i].queueFlags & req_flags)
+ wsi->queue_supports_blit |= BITFIELD64_BIT(i);
+ }
+
+ for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
+ handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+ handle_type <<= 1) {
+ VkPhysicalDeviceExternalSemaphoreInfo esi = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
+ .handleType = handle_type,
+ };
+ VkExternalSemaphoreProperties esp = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
+ };
+ GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);
+
+ if (esp.externalSemaphoreFeatures &
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
+ wsi->semaphore_export_handle_types |= handle_type;
+
+ VkSemaphoreTypeCreateInfo timeline_tci = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+ .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
+ };
+ esi.pNext = &timeline_tci;
+ GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);
+
+ if (esp.externalSemaphoreFeatures &
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
+ wsi->timeline_semaphore_export_handle_types |= handle_type;
+ }
+
+ const struct vk_device_extension_table *supported_extensions =
+ &vk_physical_device_from_handle(pdevice)->supported_extensions;
+ wsi->has_import_memory_host =
+ supported_extensions->EXT_external_memory_host;
+ wsi->khr_present_wait =
+ supported_extensions->KHR_present_id &&
+ supported_extensions->KHR_present_wait;
+ wsi->has_timeline_semaphore =
+ supported_extensions->KHR_timeline_semaphore;
+
+ /* We cannot expose KHR_present_wait without timeline semaphores. */
+ assert(!wsi->khr_present_wait || supported_extensions->KHR_timeline_semaphore);
+
+ list_inithead(&wsi->hotplug_fences);
+
#define WSI_GET_CB(func) \
wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
WSI_GET_CB(AllocateMemory);
@@ -77,32 +170,40 @@ wsi_device_init(struct wsi_device *wsi,
WSI_GET_CB(BindBufferMemory);
WSI_GET_CB(BindImageMemory);
WSI_GET_CB(BeginCommandBuffer);
+ WSI_GET_CB(CmdPipelineBarrier);
+ WSI_GET_CB(CmdCopyImage);
WSI_GET_CB(CmdCopyImageToBuffer);
WSI_GET_CB(CreateBuffer);
WSI_GET_CB(CreateCommandPool);
WSI_GET_CB(CreateFence);
WSI_GET_CB(CreateImage);
+ WSI_GET_CB(CreateSemaphore);
WSI_GET_CB(DestroyBuffer);
WSI_GET_CB(DestroyCommandPool);
WSI_GET_CB(DestroyFence);
WSI_GET_CB(DestroyImage);
+ WSI_GET_CB(DestroySemaphore);
WSI_GET_CB(EndCommandBuffer);
WSI_GET_CB(FreeMemory);
WSI_GET_CB(FreeCommandBuffers);
WSI_GET_CB(GetBufferMemoryRequirements);
+ WSI_GET_CB(GetFenceStatus);
WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
WSI_GET_CB(GetImageMemoryRequirements);
WSI_GET_CB(GetImageSubresourceLayout);
if (!wsi->sw)
WSI_GET_CB(GetMemoryFdKHR);
WSI_GET_CB(GetPhysicalDeviceFormatProperties);
- WSI_GET_CB(GetPhysicalDeviceFormatProperties2KHR);
+ WSI_GET_CB(GetPhysicalDeviceFormatProperties2);
WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
+ WSI_GET_CB(GetSemaphoreFdKHR);
WSI_GET_CB(ResetFences);
WSI_GET_CB(QueueSubmit);
WSI_GET_CB(WaitForFences);
WSI_GET_CB(MapMemory);
WSI_GET_CB(UnmapMemory);
+ if (wsi->khr_present_wait)
+ WSI_GET_CB(WaitSemaphores);
#undef WSI_GET_CB
#ifdef VK_USE_PLATFORM_XCB_KHR
@@ -129,6 +230,12 @@ wsi_device_init(struct wsi_device *wsi,
goto fail;
#endif
+#ifndef VK_USE_PLATFORM_WIN32_KHR
+ result = wsi_headless_init_wsi(wsi, alloc, pdevice);
+ if (result != VK_SUCCESS)
+ goto fail;
+#endif
+
present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
if (present_mode) {
if (!strcmp(present_mode, "fifo")) {
@@ -144,6 +251,9 @@ wsi_device_init(struct wsi_device *wsi,
}
}
+ wsi->force_headless_swapchain =
+ debug_get_bool_option("MESA_VK_WSI_HEADLESS_SWAPCHAIN", false);
+
if (dri_options) {
if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
@@ -153,23 +263,26 @@ wsi_device_init(struct wsi_device *wsi,
wsi->force_bgra8_unorm_first =
driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
}
+
+ if (driCheckOption(dri_options, "vk_wsi_force_swapchain_to_current_extent", DRI_BOOL)) {
+ wsi->force_swapchain_to_currentExtent =
+ driQueryOptionb(dri_options, "vk_wsi_force_swapchain_to_current_extent");
+ }
}
return VK_SUCCESS;
-#if defined(VK_USE_PLATFORM_XCB_KHR) || \
- defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
- defined(VK_USE_PLATFORM_WIN32_KHR) || \
- defined(VK_USE_PLATFORM_DISPLAY_KHR)
fail:
wsi_device_finish(wsi, alloc);
return result;
-#endif
}
void
wsi_device_finish(struct wsi_device *wsi,
const VkAllocationCallbacks *alloc)
{
+#ifndef VK_USE_PLATFORM_WIN32_KHR
+ wsi_headless_finish_wsi(wsi, alloc);
+#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
wsi_display_finish_wsi(wsi, alloc);
#endif
@@ -184,43 +297,195 @@ wsi_device_finish(struct wsi_device *wsi,
#endif
}
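+/* Destroy a surface created by any platform backend. Wayland and Win32
+ * surfaces own extra state and are torn down by their platform helpers;
+ * every other surface is a plain ICD allocation freed directly here.
+ */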
+VKAPI_ATTR void VKAPI_CALL
+wsi_DestroySurfaceKHR(VkInstance _instance,
+ VkSurfaceKHR _surface,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+
+ if (!surface)
+ return;
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ if (surface->platform == VK_ICD_WSI_PLATFORM_WAYLAND) {
+ wsi_wl_surface_destroy(surface, _instance, pAllocator);
+ return;
+ }
+#endif
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ if (surface->platform == VK_ICD_WSI_PLATFORM_WIN32) {
+ wsi_win32_surface_destroy(surface, _instance, pAllocator);
+ return;
+ }
+#endif
+
+ vk_free2(&instance->alloc, pAllocator, surface);
+}
+
+void
+wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
+ int fd)
+{
+#ifdef VK_USE_PLATFORM_DISPLAY_KHR
+ wsi_display_setup_syncobj_fd(wsi_device, fd);
+#endif
+}
+
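+/* Decide whether images of the given type can be presented directly
+ * (WSI_SWAPCHAIN_NO_BLIT) or must first be copied into an intermediate
+ * buffer or image before being handed to the window system.
+ */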
+static enum wsi_swapchain_blit_type
+get_blit_type(const struct wsi_device *wsi,
+ const struct wsi_base_image_params *params,
+ VkDevice device)
+{
+ switch (params->image_type) {
+ case WSI_IMAGE_TYPE_CPU: {
+ const struct wsi_cpu_image_params *cpu_params =
+ container_of(params, const struct wsi_cpu_image_params, base);
+ return wsi_cpu_image_needs_buffer_blit(wsi, cpu_params) ?
+ WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
+ }
+#ifdef HAVE_LIBDRM
+ case WSI_IMAGE_TYPE_DRM: {
+ const struct wsi_drm_image_params *drm_params =
+ container_of(params, const struct wsi_drm_image_params, base);
+ return wsi_drm_image_needs_buffer_blit(wsi, drm_params) ?
+ WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
+ }
+#endif
+#ifdef _WIN32
+ case WSI_IMAGE_TYPE_DXGI: {
+ const struct wsi_dxgi_image_params *dxgi_params =
+ container_of(params, const struct wsi_dxgi_image_params, base);
+ return wsi_dxgi_image_needs_blit(wsi, dxgi_params, device);
+ }
+#endif
+ default:
+ unreachable("Invalid image type");
+ }
+}
+
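+/* Fill out the swapchain's wsi_image_info according to the image type,
+ * delegating to the CPU, DRM, or DXGI specific configuration helper.
+ */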
+static VkResult
+configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_base_image_params *params,
+ struct wsi_image_info *info)
+{
+ info->image_type = params->image_type;
+ switch (params->image_type) {
+ case WSI_IMAGE_TYPE_CPU: {
+ const struct wsi_cpu_image_params *cpu_params =
+ container_of(params, const struct wsi_cpu_image_params, base);
+ return wsi_configure_cpu_image(chain, pCreateInfo, cpu_params, info);
+ }
+#ifdef HAVE_LIBDRM
+ case WSI_IMAGE_TYPE_DRM: {
+ const struct wsi_drm_image_params *drm_params =
+ container_of(params, const struct wsi_drm_image_params, base);
+ return wsi_drm_configure_image(chain, pCreateInfo, drm_params, info);
+ }
+#endif
+#ifdef _WIN32
+ case WSI_IMAGE_TYPE_DXGI: {
+ const struct wsi_dxgi_image_params *dxgi_params =
+ container_of(params, const struct wsi_dxgi_image_params, base);
+ return wsi_dxgi_configure_image(chain, pCreateInfo, dxgi_params, info);
+ }
+#endif
+ default:
+ unreachable("Invalid image type");
+ }
+}
+
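+/* Initialize a condition variable that uses CLOCK_MONOTONIC so that timed
+ * waits are not affected by wall-clock adjustments.
+ */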
+#if defined(HAVE_PTHREAD) && !defined(_WIN32)
+bool
+wsi_init_pthread_cond_monotonic(pthread_cond_t *cond)
+{
+ pthread_condattr_t condattr;
+ bool ret = false;
+
+ if (pthread_condattr_init(&condattr) != 0)
+ goto fail_attr_init;
+
+ if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0)
+ goto fail_attr_set;
+
+ if (pthread_cond_init(cond, &condattr) != 0)
+ goto fail_cond_init;
+
+ ret = true;
+
+fail_cond_init:
+fail_attr_set:
+ pthread_condattr_destroy(&condattr);
+fail_attr_init:
+ return ret;
+}
+#endif
+
VkResult
wsi_swapchain_init(const struct wsi_device *wsi,
struct wsi_swapchain *chain,
- VkDevice device,
+ VkDevice _device,
const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_base_image_params *image_params,
const VkAllocationCallbacks *pAllocator)
{
+ VK_FROM_HANDLE(vk_device, device, _device);
VkResult result;
memset(chain, 0, sizeof(*chain));
- vk_object_base_init(NULL, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);
+ vk_object_base_init(device, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);
chain->wsi = wsi;
- chain->device = device;
+ chain->device = _device;
chain->alloc = *pAllocator;
- chain->use_prime_blit = false;
+ chain->blit.type = get_blit_type(wsi, image_params, _device);
+
+ chain->blit.queue = VK_NULL_HANDLE;
+ if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT && wsi->get_blit_queue)
+ chain->blit.queue = wsi->get_blit_queue(_device);
+
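+   /* With a dedicated blit queue, a single command pool for that queue's
+    * family is enough; otherwise allocate one pool per queue family so the
+    * blit can be recorded for whichever family ends up presenting.
+    */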
+ int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
chain->cmd_pools =
- vk_zalloc(pAllocator, sizeof(VkCommandPool) * wsi->queue_family_count, 8,
+ vk_zalloc(pAllocator, sizeof(VkCommandPool) * cmd_pools_count, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!chain->cmd_pools)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
+ for (uint32_t i = 0; i < cmd_pools_count; i++) {
+ int queue_family_index = i;
+
+ if (chain->blit.queue != VK_NULL_HANDLE) {
+ VK_FROM_HANDLE(vk_queue, queue, chain->blit.queue);
+ queue_family_index = queue->queue_family_index;
+ } else {
+         /* Queues returned by get_blit_queue() might not be listed in
+          * GetPhysicalDeviceQueueFamilyProperties, so queue_supports_blit is
+          * only checked for regular queue families, not for the dedicated
+          * blit queue handled above.
+          */
+ if (!(wsi->queue_supports_blit & BITFIELD64_BIT(queue_family_index)))
+ continue;
+ }
+
const VkCommandPoolCreateInfo cmd_pool_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = NULL,
.flags = 0,
- .queueFamilyIndex = i,
+ .queueFamilyIndex = queue_family_index,
};
- result = wsi->CreateCommandPool(device, &cmd_pool_info, &chain->alloc,
+ result = wsi->CreateCommandPool(_device, &cmd_pool_info, &chain->alloc,
&chain->cmd_pools[i]);
if (result != VK_SUCCESS)
goto fail;
}
+ result = configure_image(chain, pCreateInfo, image_params,
+ &chain->image_info);
+ if (result != VK_SUCCESS)
+ goto fail;
+
return VK_SUCCESS;
fail:
@@ -240,7 +505,7 @@ wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
bool supported = false;
VkResult result;
- result = iface->get_present_modes(surface, &present_mode_count, NULL);
+ result = iface->get_present_modes(surface, wsi, &present_mode_count, NULL);
if (result != VK_SUCCESS)
return supported;
@@ -248,7 +513,7 @@ wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
if (!present_modes)
return supported;
- result = iface->get_present_modes(surface, &present_mode_count,
+ result = iface->get_present_modes(surface, wsi, &present_mode_count,
present_modes);
if (result != VK_SUCCESS)
goto fail;
@@ -284,14 +549,30 @@ wsi_swapchain_get_present_mode(struct wsi_device *wsi,
void
wsi_swapchain_finish(struct wsi_swapchain *chain)
{
+ wsi_destroy_image_info(chain, &chain->image_info);
+
if (chain->fences) {
for (unsigned i = 0; i < chain->image_count; i++)
chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);
vk_free(&chain->alloc, chain->fences);
}
+ if (chain->blit.semaphores) {
+ for (unsigned i = 0; i < chain->image_count; i++)
+ chain->wsi->DestroySemaphore(chain->device, chain->blit.semaphores[i], &chain->alloc);
- for (uint32_t i = 0; i < chain->wsi->queue_family_count; i++) {
+ vk_free(&chain->alloc, chain->blit.semaphores);
+ }
+ chain->wsi->DestroySemaphore(chain->device, chain->dma_buf_semaphore,
+ &chain->alloc);
+ chain->wsi->DestroySemaphore(chain->device, chain->present_id_timeline,
+ &chain->alloc);
+
+ int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ?
+ 1 : chain->wsi->queue_family_count;
+ for (uint32_t i = 0; i < cmd_pools_count; i++) {
+ if (!chain->cmd_pools[i])
+ continue;
chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
&chain->alloc);
}
@@ -300,45 +581,256 @@ wsi_swapchain_finish(struct wsi_swapchain *chain)
vk_object_base_finish(&chain->base);
}
+VkResult
+wsi_configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ VkExternalMemoryHandleTypeFlags handle_types,
+ struct wsi_image_info *info)
+{
+ memset(info, 0, sizeof(*info));
+ uint32_t queue_family_count = 1;
+
+ if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
+ queue_family_count = pCreateInfo->queueFamilyIndexCount;
+
+ /*
+ * TODO: there should be no reason to allocate this, but
+ * 15331 shows that games crashed without doing this.
+ */
+ uint32_t *queue_family_indices =
+ vk_alloc(&chain->alloc,
+ sizeof(*queue_family_indices) *
+ queue_family_count,
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!queue_family_indices)
+ goto err_oom;
+
+ if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
+ for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; i++)
+ queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];
+
+ info->create = (VkImageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .flags = VK_IMAGE_CREATE_ALIAS_BIT,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .format = pCreateInfo->imageFormat,
+ .extent = {
+ .width = pCreateInfo->imageExtent.width,
+ .height = pCreateInfo->imageExtent.height,
+ .depth = 1,
+ },
+ .mipLevels = 1,
+ .arrayLayers = 1,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
+ .usage = pCreateInfo->imageUsage,
+ .sharingMode = pCreateInfo->imageSharingMode,
+ .queueFamilyIndexCount = queue_family_count,
+ .pQueueFamilyIndices = queue_family_indices,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ };
+
+ if (handle_types != 0) {
+ info->ext_mem = (VkExternalMemoryImageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
+ .handleTypes = handle_types,
+ };
+ __vk_append_struct(&info->create, &info->ext_mem);
+ }
+
+ info->wsi = (struct wsi_image_create_info) {
+ .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
+ };
+ __vk_append_struct(&info->create, &info->wsi);
+
+ if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
+ info->create.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
+ VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
+
+ const VkImageFormatListCreateInfo *format_list_in =
+ vk_find_struct_const(pCreateInfo->pNext,
+ IMAGE_FORMAT_LIST_CREATE_INFO);
+
+ assume(format_list_in && format_list_in->viewFormatCount > 0);
+
+ const uint32_t view_format_count = format_list_in->viewFormatCount;
+ VkFormat *view_formats =
+ vk_alloc(&chain->alloc, sizeof(VkFormat) * view_format_count,
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!view_formats)
+ goto err_oom;
+
+ ASSERTED bool format_found = false;
+ for (uint32_t i = 0; i < format_list_in->viewFormatCount; i++) {
+ if (pCreateInfo->imageFormat == format_list_in->pViewFormats[i])
+ format_found = true;
+ view_formats[i] = format_list_in->pViewFormats[i];
+ }
+ assert(format_found);
+
+ info->format_list = (VkImageFormatListCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
+ .viewFormatCount = view_format_count,
+ .pViewFormats = view_formats,
+ };
+ __vk_append_struct(&info->create, &info->format_list);
+ }
+
+ return VK_SUCCESS;
+
+err_oom:
+ wsi_destroy_image_info(chain, info);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+}
+
+void
+wsi_destroy_image_info(const struct wsi_swapchain *chain,
+ struct wsi_image_info *info)
+{
+ if (info->create.pQueueFamilyIndices != NULL) {
+ vk_free(&chain->alloc, (void *)info->create.pQueueFamilyIndices);
+ info->create.pQueueFamilyIndices = NULL;
+ }
+ if (info->format_list.pViewFormats != NULL) {
+ vk_free(&chain->alloc, (void *)info->format_list.pViewFormats);
+ info->format_list.pViewFormats = NULL;
+ }
+ if (info->drm_mod_list.pDrmFormatModifiers != NULL) {
+ vk_free(&chain->alloc, (void *)info->drm_mod_list.pDrmFormatModifiers);
+ info->drm_mod_list.pDrmFormatModifiers = NULL;
+ }
+ if (info->modifier_props != NULL) {
+ vk_free(&chain->alloc, info->modifier_props);
+ info->modifier_props = NULL;
+ }
+}
+
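+/* Create one presentable image: create the VkImage from the cached create
+ * info, allocate and bind its memory through the per-type create_mem hook,
+ * run the optional finish_create step (e.g. recording the blit command
+ * buffers), and set up the explicit-sync state when requested.
+ */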
+VkResult
+wsi_create_image(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result;
+
+ memset(image, 0, sizeof(*image));
+
+#ifndef _WIN32
+ image->dma_buf_fd = -1;
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++)
+ image->explicit_sync[i].fd = -1;
+#endif
+
+ result = wsi->CreateImage(chain->device, &info->create,
+ &chain->alloc, &image->image);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ result = info->create_mem(chain, info, image);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ result = wsi->BindImageMemory(chain->device, image->image,
+ image->memory, 0);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ if (info->finish_create) {
+ result = info->finish_create(chain, info, image);
+ if (result != VK_SUCCESS)
+ goto fail;
+ }
+
+ if (info->explicit_sync) {
+#ifdef HAVE_LIBDRM
+ result = wsi_create_image_explicit_sync_drm(chain, image);
+ if (result != VK_SUCCESS)
+ goto fail;
+#else
+ result = VK_ERROR_FEATURE_NOT_PRESENT;
+ goto fail;
+#endif
+ }
+
+ return VK_SUCCESS;
+
+fail:
+ wsi_destroy_image(chain, image);
+ return result;
+}
+
void
wsi_destroy_image(const struct wsi_swapchain *chain,
struct wsi_image *image)
{
const struct wsi_device *wsi = chain->wsi;
- if (image->prime.blit_cmd_buffers) {
- for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
+#ifndef _WIN32
+ if (image->dma_buf_fd >= 0)
+ close(image->dma_buf_fd);
+#endif
+
+ if (image->explicit_sync[WSI_ES_ACQUIRE].semaphore) {
+#ifdef HAVE_LIBDRM
+ wsi_destroy_image_explicit_sync_drm(chain, image);
+#endif
+ }
+
+ if (image->cpu_map != NULL) {
+ wsi->UnmapMemory(chain->device, image->blit.buffer != VK_NULL_HANDLE ?
+ image->blit.memory : image->memory);
+ }
+
+ if (image->blit.cmd_buffers) {
+ int cmd_buffer_count =
+ chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
+
+ for (uint32_t i = 0; i < cmd_buffer_count; i++) {
+ if (!chain->cmd_pools[i])
+ continue;
wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
- 1, &image->prime.blit_cmd_buffers[i]);
+ 1, &image->blit.cmd_buffers[i]);
}
- vk_free(&chain->alloc, image->prime.blit_cmd_buffers);
+ vk_free(&chain->alloc, image->blit.cmd_buffers);
}
wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
wsi->DestroyImage(chain->device, image->image, &chain->alloc);
- wsi->FreeMemory(chain->device, image->prime.memory, &chain->alloc);
- wsi->DestroyBuffer(chain->device, image->prime.buffer, &chain->alloc);
+ wsi->DestroyImage(chain->device, image->blit.image, &chain->alloc);
+ wsi->FreeMemory(chain->device, image->blit.memory, &chain->alloc);
+ wsi->DestroyBuffer(chain->device, image->blit.buffer, &chain->alloc);
}
-VkResult
-wsi_common_get_surface_support(struct wsi_device *wsi_device,
- uint32_t queueFamilyIndex,
- VkSurfaceKHR _surface,
- VkBool32* pSupported)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ VkSurfaceKHR _surface,
+ VkBool32 *pSupported)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
- return iface->get_support(surface, wsi_device,
- queueFamilyIndex, pSupported);
+ VkResult res = iface->get_support(surface, wsi_device,
+ queueFamilyIndex, pSupported);
+ if (res == VK_SUCCESS) {
+ bool blit = (wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)) != 0;
+ *pSupported = (bool)*pSupported && blit;
+ }
+
+ return res;
}
-VkResult
-wsi_common_get_surface_capabilities(struct wsi_device *wsi_device,
- VkSurfaceKHR _surface,
- VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR _surface,
+ VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
VkSurfaceCapabilities2KHR caps2 = {
@@ -353,25 +845,30 @@ wsi_common_get_surface_capabilities(struct wsi_device *wsi_device,
return result;
}
-VkResult
-wsi_common_get_surface_capabilities2(struct wsi_device *wsi_device,
- const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
- VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
+ VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
pSurfaceCapabilities);
}
-VkResult
-wsi_common_get_surface_capabilities2ext(
- struct wsi_device *wsi_device,
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
+ VkPhysicalDevice physicalDevice,
VkSurfaceKHR _surface,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
assert(pSurfaceCapabilities->sType ==
@@ -410,99 +907,204 @@ wsi_common_get_surface_capabilities2ext(
return result;
}
-VkResult
-wsi_common_get_surface_formats(struct wsi_device *wsi_device,
- VkSurfaceKHR _surface,
- uint32_t *pSurfaceFormatCount,
- VkSurfaceFormatKHR *pSurfaceFormats)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR _surface,
+ uint32_t *pSurfaceFormatCount,
+ VkSurfaceFormatKHR *pSurfaceFormats)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
return iface->get_formats(surface, wsi_device,
pSurfaceFormatCount, pSurfaceFormats);
}
-VkResult
-wsi_common_get_surface_formats2(struct wsi_device *wsi_device,
- const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
- uint32_t *pSurfaceFormatCount,
- VkSurfaceFormat2KHR *pSurfaceFormats)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,
+ uint32_t *pSurfaceFormatCount,
+ VkSurfaceFormat2KHR *pSurfaceFormats)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
pSurfaceFormatCount, pSurfaceFormats);
}
-VkResult
-wsi_common_get_surface_present_modes(struct wsi_device *wsi_device,
- VkSurfaceKHR _surface,
- uint32_t *pPresentModeCount,
- VkPresentModeKHR *pPresentModes)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR _surface,
+ uint32_t *pPresentModeCount,
+ VkPresentModeKHR *pPresentModes)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
- return iface->get_present_modes(surface, pPresentModeCount,
+ return iface->get_present_modes(surface, wsi_device, pPresentModeCount,
pPresentModes);
}
-VkResult
-wsi_common_get_present_rectangles(struct wsi_device *wsi_device,
- VkSurfaceKHR _surface,
- uint32_t* pRectCount,
- VkRect2D* pRects)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR _surface,
+ uint32_t *pRectCount,
+ VkRect2D *pRects)
{
+ VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
+ struct wsi_device *wsi_device = device->wsi_device;
struct wsi_interface *iface = wsi_device->wsi[surface->platform];
return iface->get_present_rectangles(surface, wsi_device,
pRectCount, pRects);
}
-VkResult
-wsi_common_create_swapchain(struct wsi_device *wsi,
- VkDevice device,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSwapchainKHR *pSwapchain)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateSwapchainKHR(VkDevice _device,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSwapchainKHR *pSwapchain)
{
+ MESA_TRACE_FUNC();
+ VK_FROM_HANDLE(vk_device, device, _device);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
- struct wsi_interface *iface = wsi->wsi[surface->platform];
+ struct wsi_device *wsi_device = device->physical->wsi_device;
+ struct wsi_interface *iface = wsi_device->force_headless_swapchain ?
+ wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] :
+ wsi_device->wsi[surface->platform];
+ const VkAllocationCallbacks *alloc;
struct wsi_swapchain *swapchain;
- VkResult result = iface->create_swapchain(surface, device, wsi,
- pCreateInfo, pAllocator,
+ if (pAllocator)
+ alloc = pAllocator;
+ else
+ alloc = &device->alloc;
+
+ VkSwapchainCreateInfoKHR info = *pCreateInfo;
+
+ if (wsi_device->force_swapchain_to_currentExtent) {
+ VkSurfaceCapabilities2KHR caps2 = {
+ .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
+ };
+ iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
+ info.imageExtent = caps2.surfaceCapabilities.currentExtent;
+ }
+
+ /* Ignore DEFERRED_MEMORY_ALLOCATION_BIT. Would require deep plumbing to be able to take advantage of it.
+ * bool deferred_allocation = pCreateInfo->flags & VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT;
+ */
+
+ VkResult result = iface->create_swapchain(surface, _device, wsi_device,
+ &info, alloc,
&swapchain);
if (result != VK_SUCCESS)
return result;
- swapchain->fences = vk_zalloc(pAllocator,
+ swapchain->fences = vk_zalloc(alloc,
sizeof (*swapchain->fences) * swapchain->image_count,
sizeof (*swapchain->fences),
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!swapchain->fences) {
- swapchain->destroy(swapchain, pAllocator);
+ swapchain->destroy(swapchain, alloc);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
+ if (wsi_device->khr_present_wait) {
+ const VkSemaphoreTypeCreateInfo type_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+ .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
+ };
+
+ const VkSemaphoreCreateInfo sem_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = &type_info,
+ .flags = 0,
+ };
+
+ /* We assume here that a driver exposing present_wait also exposes VK_KHR_timeline_semaphore. */
+ result = wsi_device->CreateSemaphore(_device, &sem_info, alloc, &swapchain->present_id_timeline);
+ if (result != VK_SUCCESS) {
+ swapchain->destroy(swapchain, alloc);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ }
+
+ if (swapchain->blit.queue != VK_NULL_HANDLE) {
+ swapchain->blit.semaphores = vk_zalloc(alloc,
+ sizeof (*swapchain->blit.semaphores) * swapchain->image_count,
+ sizeof (*swapchain->blit.semaphores),
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!swapchain->blit.semaphores) {
+ wsi_device->DestroySemaphore(_device, swapchain->present_id_timeline, alloc);
+ swapchain->destroy(swapchain, alloc);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ }
+
*pSwapchain = wsi_swapchain_to_handle(swapchain);
return VK_SUCCESS;
}
-void
-wsi_common_destroy_swapchain(VkDevice device,
- VkSwapchainKHR _swapchain,
- const VkAllocationCallbacks *pAllocator)
+VKAPI_ATTR void VKAPI_CALL
+wsi_DestroySwapchainKHR(VkDevice _device,
+ VkSwapchainKHR _swapchain,
+ const VkAllocationCallbacks *pAllocator)
{
+ MESA_TRACE_FUNC();
+ VK_FROM_HANDLE(vk_device, device, _device);
VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
+ const VkAllocationCallbacks *alloc;
+
if (!swapchain)
return;
- swapchain->destroy(swapchain, pAllocator);
+ if (pAllocator)
+ alloc = pAllocator;
+ else
+ alloc = &device->alloc;
+
+ swapchain->destroy(swapchain, alloc);
+}
+
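+/* VK_EXT_swapchain_maintenance1: mark the given images as no longer
+ * acquired, hand them back to the platform backend, and release memory
+ * ownership via set_memory_ownership() when the driver provides it.
+ */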
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_ReleaseSwapchainImagesEXT(VkDevice _device,
+ const VkReleaseSwapchainImagesInfoEXT *pReleaseInfo)
+{
+ VK_FROM_HANDLE(wsi_swapchain, swapchain, pReleaseInfo->swapchain);
+
+ for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
+ uint32_t index = pReleaseInfo->pImageIndices[i];
+ assert(index < swapchain->image_count);
+ struct wsi_image *image = swapchain->get_wsi_image(swapchain, index);
+ assert(image->acquired);
+ image->acquired = false;
+ }
+
+ VkResult result = swapchain->release_images(swapchain,
+ pReleaseInfo->imageIndexCount,
+ pReleaseInfo->pImageIndices);
+
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (swapchain->wsi->set_memory_ownership) {
+ for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
+ uint32_t image_index = pReleaseInfo->pImageIndices[i];
+ VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
+ swapchain->wsi->set_memory_ownership(swapchain->device, mem, false);
+ }
+ }
+
+ return VK_SUCCESS;
}
VkResult
@@ -522,39 +1124,230 @@ wsi_common_get_images(VkSwapchainKHR _swapchain,
return vk_outarray_status(&images);
}
+VkImage
+wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index)
+{
+ VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
+ assert(index < swapchain->image_count);
+ return swapchain->get_wsi_image(swapchain, index)->image;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetSwapchainImagesKHR(VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t *pSwapchainImageCount,
+ VkImage *pSwapchainImages)
+{
+ MESA_TRACE_FUNC();
+ return wsi_common_get_images(swapchain,
+ pSwapchainImageCount,
+ pSwapchainImages);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_AcquireNextImageKHR(VkDevice _device,
+ VkSwapchainKHR swapchain,
+ uint64_t timeout,
+ VkSemaphore semaphore,
+ VkFence fence,
+ uint32_t *pImageIndex)
+{
+ MESA_TRACE_FUNC();
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ const VkAcquireNextImageInfoKHR acquire_info = {
+ .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
+ .swapchain = swapchain,
+ .timeout = timeout,
+ .semaphore = semaphore,
+ .fence = fence,
+ .deviceMask = 0,
+ };
+
+ return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
+ pImageIndex);
+}
+
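+/* Give the application's acquire semaphore something to wait on: prefer a
+ * sync object imported from the image's explicit-sync timeline or dma-buf
+ * (libdrm builds), otherwise fall back to a memory-based sync object or a
+ * dummy sync type.
+ */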
+static VkResult
+wsi_signal_semaphore_for_image(struct vk_device *device,
+ const struct wsi_swapchain *chain,
+ const struct wsi_image *image,
+ VkSemaphore _semaphore)
+{
+ if (device->physical->supported_sync_types == NULL)
+ return VK_SUCCESS;
+
+ VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
+
+ vk_semaphore_reset_temporary(device, semaphore);
+
+#ifdef HAVE_LIBDRM
+ VkResult result = chain->image_info.explicit_sync ?
+ wsi_create_sync_for_image_syncobj(chain, image,
+ VK_SYNC_FEATURE_GPU_WAIT,
+ &semaphore->temporary) :
+ wsi_create_sync_for_dma_buf_wait(chain, image,
+ VK_SYNC_FEATURE_GPU_WAIT,
+ &semaphore->temporary);
+ if (result != VK_ERROR_FEATURE_NOT_PRESENT)
+ return result;
+#endif
+
+ if (chain->wsi->signal_semaphore_with_memory) {
+ return device->create_sync_for_memory(device, image->memory,
+ false /* signal_memory */,
+ &semaphore->temporary);
+ } else {
+ return vk_sync_create(device, &vk_sync_dummy_type,
+ 0 /* flags */, 0 /* initial_value */,
+ &semaphore->temporary);
+ }
+}
+
+static VkResult
+wsi_signal_fence_for_image(struct vk_device *device,
+ const struct wsi_swapchain *chain,
+ const struct wsi_image *image,
+ VkFence _fence)
+{
+ if (device->physical->supported_sync_types == NULL)
+ return VK_SUCCESS;
+
+ VK_FROM_HANDLE(vk_fence, fence, _fence);
+
+ vk_fence_reset_temporary(device, fence);
+
+#ifdef HAVE_LIBDRM
+ VkResult result = chain->image_info.explicit_sync ?
+ wsi_create_sync_for_image_syncobj(chain, image,
+ VK_SYNC_FEATURE_CPU_WAIT,
+ &fence->temporary) :
+ wsi_create_sync_for_dma_buf_wait(chain, image,
+ VK_SYNC_FEATURE_CPU_WAIT,
+ &fence->temporary);
+ if (result != VK_ERROR_FEATURE_NOT_PRESENT)
+ return result;
+#endif
+
+ if (chain->wsi->signal_fence_with_memory) {
+ return device->create_sync_for_memory(device, image->memory,
+ false /* signal_memory */,
+ &fence->temporary);
+ } else {
+ return vk_sync_create(device, &vk_sync_dummy_type,
+ 0 /* flags */, 0 /* initial_value */,
+ &fence->temporary);
+ }
+}
+
VkResult
wsi_common_acquire_next_image2(const struct wsi_device *wsi,
- VkDevice device,
+ VkDevice _device,
const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex)
{
VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
+ VK_FROM_HANDLE(vk_device, device, _device);
VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
pImageIndex);
if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
return result;
+ struct wsi_image *image =
+ swapchain->get_wsi_image(swapchain, *pImageIndex);
- if (wsi->set_memory_ownership) {
- VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, *pImageIndex)->memory;
- wsi->set_memory_ownership(swapchain->device, mem, true);
+ image->acquired = true;
+
+ if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
+ VkResult signal_result =
+ wsi_signal_semaphore_for_image(device, swapchain, image,
+ pAcquireInfo->semaphore);
+ if (signal_result != VK_SUCCESS)
+ return signal_result;
}
- if (pAcquireInfo->semaphore != VK_NULL_HANDLE &&
- wsi->signal_semaphore_for_memory != NULL) {
- struct wsi_image *image =
- swapchain->get_wsi_image(swapchain, *pImageIndex);
- wsi->signal_semaphore_for_memory(device, pAcquireInfo->semaphore,
- image->memory);
+ if (pAcquireInfo->fence != VK_NULL_HANDLE) {
+ VkResult signal_result =
+ wsi_signal_fence_for_image(device, swapchain, image,
+ pAcquireInfo->fence);
+ if (signal_result != VK_SUCCESS)
+ return signal_result;
}
- if (pAcquireInfo->fence != VK_NULL_HANDLE &&
- wsi->signal_fence_for_memory != NULL) {
- struct wsi_image *image =
- swapchain->get_wsi_image(swapchain, *pImageIndex);
- wsi->signal_fence_for_memory(device, pAcquireInfo->fence,
- image->memory);
+ if (wsi->set_memory_ownership)
+ wsi->set_memory_ownership(swapchain->device, image->memory, true);
+
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_AcquireNextImage2KHR(VkDevice _device,
+ const VkAcquireNextImageInfoKHR *pAcquireInfo,
+ uint32_t *pImageIndex)
+{
+ MESA_TRACE_FUNC();
+ VK_FROM_HANDLE(vk_device, device, _device);
+
+ return wsi_common_acquire_next_image2(device->physical->wsi_device,
+ _device, pAcquireInfo, pImageIndex);
+}
+
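+/* Signal the swapchain's present-id timeline semaphore (and the present
+ * fence, if any) with an empty queue submission, so that a later
+ * wsi_swapchain_wait_for_present_semaphore() call can wait for this
+ * present id.
+ */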
+static VkResult wsi_signal_present_id_timeline(struct wsi_swapchain *swapchain,
+ VkQueue queue, uint64_t present_id,
+ VkFence present_fence)
+{
+ assert(swapchain->present_id_timeline || present_fence);
+
+ const VkTimelineSemaphoreSubmitInfo timeline_info = {
+ .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
+ .pSignalSemaphoreValues = &present_id,
+ .signalSemaphoreValueCount = 1,
+ };
+
+ const VkSubmitInfo submit_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .pNext = &timeline_info,
+ .signalSemaphoreCount = 1,
+ .pSignalSemaphores = &swapchain->present_id_timeline,
+ };
+
+ uint32_t submit_count = present_id ? 1 : 0;
+ return swapchain->wsi->QueueSubmit(queue, submit_count, &submit_info, present_fence);
+}
+
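+/* Check the instance's trace triggers (target frame number, a trigger file
+ * on non-Windows builds, or a hotkey) and, when one fires, capture a trace
+ * of the presenting queue.
+ */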
+static VkResult
+handle_trace(VkQueue queue, struct vk_device *device)
+{
+ struct vk_instance *instance = device->physical->instance;
+ if (!instance->trace_mode)
+ return VK_SUCCESS;
+
+ simple_mtx_lock(&device->trace_mtx);
+
+ bool frame_trigger = device->current_frame == instance->trace_frame;
+ if (device->current_frame <= instance->trace_frame)
+ device->current_frame++;
+
+ bool file_trigger = false;
+#ifndef _WIN32
+ if (instance->trace_trigger_file && access(instance->trace_trigger_file, W_OK) == 0) {
+ if (unlink(instance->trace_trigger_file) == 0) {
+ file_trigger = true;
+ } else {
+ /* Do not enable tracing if we cannot remove the file,
+ * because by then we'll trace every frame ... */
+ fprintf(stderr, "Could not remove trace trigger file, ignoring\n");
+ }
}
+#endif
+
+ VkResult result = VK_SUCCESS;
+ if (frame_trigger || file_trigger || device->trace_hotkey_trigger)
+ result = device->capture_trace(queue);
+
+ device->trace_hotkey_trigger = false;
+
+ simple_mtx_unlock(&device->trace_mtx);
return result;
}
@@ -566,93 +1359,192 @@ wsi_common_queue_present(const struct wsi_device *wsi,
int queue_family_index,
const VkPresentInfoKHR *pPresentInfo)
{
- VkResult final_result = VK_SUCCESS;
+ VkResult final_result = handle_trace(queue, vk_device_from_handle(device));
+
+ STACK_ARRAY(VkPipelineStageFlags, stage_flags,
+ MAX2(1, pPresentInfo->waitSemaphoreCount));
+ for (uint32_t s = 0; s < MAX2(1, pPresentInfo->waitSemaphoreCount); s++)
+ stage_flags[s] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
const VkPresentRegionsKHR *regions =
vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
+ const VkPresentIdKHR *present_ids =
+ vk_find_struct_const(pPresentInfo->pNext, PRESENT_ID_KHR);
+ const VkSwapchainPresentFenceInfoEXT *present_fence_info =
+ vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_FENCE_INFO_EXT);
+ const VkSwapchainPresentModeInfoEXT *present_mode_info =
+ vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_MODE_INFO_EXT);
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
uint32_t image_index = pPresentInfo->pImageIndices[i];
VkResult result;
+ /* Update the present mode for this present and any subsequent present. */
+ if (present_mode_info && present_mode_info->pPresentModes && swapchain->set_present_mode)
+ swapchain->set_present_mode(swapchain, present_mode_info->pPresentModes[i]);
+
if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
const VkFenceCreateInfo fence_info = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.pNext = NULL,
- .flags = 0,
+ .flags = VK_FENCE_CREATE_SIGNALED_BIT,
};
result = wsi->CreateFence(device, &fence_info,
&swapchain->alloc,
&swapchain->fences[image_index]);
if (result != VK_SUCCESS)
goto fail_present;
+
+ if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT &&
+ swapchain->blit.queue != VK_NULL_HANDLE) {
+ const VkSemaphoreCreateInfo sem_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = NULL,
+ .flags = 0,
+ };
+ result = wsi->CreateSemaphore(device, &sem_info,
+ &swapchain->alloc,
+ &swapchain->blit.semaphores[image_index]);
+ if (result != VK_SUCCESS)
+ goto fail_present;
+ }
} else {
+ MESA_TRACE_SCOPE("throttle");
result =
wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
true, ~0ull);
if (result != VK_SUCCESS)
goto fail_present;
-
- result =
- wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
- if (result != VK_SUCCESS)
- goto fail_present;
}
- struct wsi_image *image =
- swapchain->get_wsi_image(swapchain, image_index);
+ result = wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
+ if (result != VK_SUCCESS)
+ goto fail_present;
- struct wsi_memory_signal_submit_info mem_signal = {
- .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
- .pNext = NULL,
- .memory = image->memory,
+ VkTimelineSemaphoreSubmitInfo timeline_submit_info = {
+ .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
};
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
- .pNext = &mem_signal,
};
- VkPipelineStageFlags *stage_flags = NULL;
if (i == 0) {
/* We only need/want to wait on semaphores once. After that, we're
* guaranteed ordering since it all happens on the same queue.
*/
submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
+ submit_info.pWaitDstStageMask = stage_flags;
+ }
- /* Set up the pWaitDstStageMasks */
- stage_flags = vk_alloc(&swapchain->alloc,
- sizeof(VkPipelineStageFlags) *
- pPresentInfo->waitSemaphoreCount,
- 8,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (!stage_flags) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail_present;
- }
- for (uint32_t s = 0; s < pPresentInfo->waitSemaphoreCount; s++)
- stage_flags[s] = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ struct wsi_image *image =
+ swapchain->get_wsi_image(swapchain, image_index);
- submit_info.pWaitDstStageMask = stage_flags;
+ VkQueue submit_queue = queue;
+ if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
+ if (swapchain->blit.queue == VK_NULL_HANDLE) {
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers =
+ &image->blit.cmd_buffers[queue_family_index];
+ } else {
+            /* If we are using the driver's private blit queue, first do an
+             * empty submit that only signals a semaphore, then submit the
+             * blit waiting on that semaphore. This ensures proper ordering
+             * between the vkQueueSubmit() calls.
+             */
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores =
+ &swapchain->blit.semaphores[image_index];
+
+ result = wsi->QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
+ if (result != VK_SUCCESS)
+ goto fail_present;
+
+            /* Now prepare the blit submit. It then needs to wait on the
+             * semaphore we signaled above.
+             */
+ submit_queue = swapchain->blit.queue;
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = submit_info.pSignalSemaphores;
+ submit_info.signalSemaphoreCount = 0;
+ submit_info.pSignalSemaphores = NULL;
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &image->blit.cmd_buffers[0];
+ submit_info.pWaitDstStageMask = stage_flags;
+ }
}
- if (swapchain->use_prime_blit) {
- /* If we are using prime blits, we need to perform the blit now. The
- * command buffer is attached to the image.
- */
- submit_info.commandBufferCount = 1;
- submit_info.pCommandBuffers =
- &image->prime.blit_cmd_buffers[queue_family_index];
- mem_signal.memory = image->prime.memory;
+ VkFence fence = swapchain->fences[image_index];
+
+ struct wsi_memory_signal_submit_info mem_signal;
+ bool has_signal_dma_buf = false;
+ bool explicit_sync = swapchain->image_info.explicit_sync;
+ if (explicit_sync) {
+ /* We will signal this acquire value ourselves when GPU work is done. */
+ image->explicit_sync[WSI_ES_ACQUIRE].timeline++;
+ /* The compositor will signal this value when it is done with the image. */
+ image->explicit_sync[WSI_ES_RELEASE].timeline++;
+
+ timeline_submit_info.signalSemaphoreValueCount = 1;
+ timeline_submit_info.pSignalSemaphoreValues = &image->explicit_sync[WSI_ES_ACQUIRE].timeline;
+
+ assert(submit_info.signalSemaphoreCount == 0);
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &image->explicit_sync[WSI_ES_ACQUIRE].semaphore;
+ __vk_append_struct(&submit_info, &timeline_submit_info);
+ } else {
+#ifdef HAVE_LIBDRM
+ result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
+ if (result == VK_SUCCESS) {
+ assert(submit_info.signalSemaphoreCount == 0);
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
+ has_signal_dma_buf = true;
+ } else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
+ result = VK_SUCCESS;
+ has_signal_dma_buf = false;
+ } else {
+ goto fail_present;
+ }
+#endif
+
+ if (!has_signal_dma_buf) {
+ /* If we don't have dma-buf signaling, signal the memory object by
+ * chaining wsi_memory_signal_submit_info into VkSubmitInfo.
+ */
+ result = VK_SUCCESS;
+ has_signal_dma_buf = false;
+ mem_signal = (struct wsi_memory_signal_submit_info) {
+ .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
+ .memory = image->memory,
+ };
+ __vk_append_struct(&submit_info, &mem_signal);
+ }
}
- result = wsi->QueueSubmit(queue, 1, &submit_info, swapchain->fences[image_index]);
- vk_free(&swapchain->alloc, stage_flags);
+ result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
if (result != VK_SUCCESS)
goto fail_present;
+      /* The app can only present images it has acquired. */
+ assert(image->acquired);
+ image->acquired = false;
+ image->present_serial = ++swapchain->present_serial;
+
+ if (!explicit_sync) {
+#ifdef HAVE_LIBDRM
+ if (has_signal_dma_buf) {
+ result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
+ if (result != VK_SUCCESS)
+ goto fail_present;
+ }
+#else
+ assert(!has_signal_dma_buf);
+#endif
+ }
+
if (wsi->sw)
wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
true, ~0ull);
@@ -661,7 +1553,20 @@ wsi_common_queue_present(const struct wsi_device *wsi,
if (regions && regions->pRegions)
region = &regions->pRegions[i];
- result = swapchain->queue_present(swapchain, image_index, region);
+ uint64_t present_id = 0;
+ if (present_ids && present_ids->pPresentIds)
+ present_id = present_ids->pPresentIds[i];
+ VkFence present_fence = VK_NULL_HANDLE;
+ if (present_fence_info && present_fence_info->pFences)
+ present_fence = present_fence_info->pFences[i];
+
+ if (present_id || present_fence) {
+ result = wsi_signal_present_id_timeline(swapchain, queue, present_id, present_fence);
+ if (result != VK_SUCCESS)
+ goto fail_present;
+ }
+
+ result = swapchain->queue_present(swapchain, image_index, present_id, region);
if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
goto fail_present;
@@ -679,11 +1584,663 @@ wsi_common_queue_present(const struct wsi_device *wsi,
final_result = result;
}
+ STACK_ARRAY_FINISH(stage_flags);
+
return final_result;
}
-uint64_t
-wsi_common_get_current_time(void)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
+{
+ MESA_TRACE_FUNC();
+ VK_FROM_HANDLE(vk_queue, queue, _queue);
+
+ return wsi_common_queue_present(queue->base.device->physical->wsi_device,
+ vk_device_to_handle(queue->base.device),
+ _queue,
+ queue->queue_family_index,
+ pPresentInfo);
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
+ VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
+{
+ memset(pCapabilities->presentMask, 0,
+ sizeof(pCapabilities->presentMask));
+ pCapabilities->presentMask[0] = 0x1;
+ pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
+
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
+ VkSurfaceKHR surface,
+ VkDeviceGroupPresentModeFlagsKHR *pModes)
+{
+ *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
+
+ return VK_SUCCESS;
+}
+
+bool
+wsi_common_vk_instance_supports_present_wait(const struct vk_instance *instance)
+{
+ /* We can only expose KHR_present_wait and KHR_present_id
+ * if we are guaranteed support on all potential VkSurfaceKHR objects. */
+ if (instance->enabled_extensions.KHR_win32_surface ||
+ instance->enabled_extensions.KHR_android_surface) {
+ return false;
+ }
+
+ return true;
+}
+
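+/* Create an image compatible with the swapchain's own images, as needed for
+ * VkImageSwapchainCreateInfoKHR; debug builds validate that the
+ * application's create info matches what the swapchain was configured with.
+ */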
+VkResult
+wsi_common_create_swapchain_image(const struct wsi_device *wsi,
+ const VkImageCreateInfo *pCreateInfo,
+ VkSwapchainKHR _swapchain,
+ VkImage *pImage)
+{
+ VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
+
+#ifndef NDEBUG
+ const VkImageCreateInfo *swcInfo = &chain->image_info.create;
+ assert(pCreateInfo->flags == 0);
+ assert(pCreateInfo->imageType == swcInfo->imageType);
+ assert(pCreateInfo->format == swcInfo->format);
+ assert(pCreateInfo->extent.width == swcInfo->extent.width);
+ assert(pCreateInfo->extent.height == swcInfo->extent.height);
+ assert(pCreateInfo->extent.depth == swcInfo->extent.depth);
+ assert(pCreateInfo->mipLevels == swcInfo->mipLevels);
+ assert(pCreateInfo->arrayLayers == swcInfo->arrayLayers);
+ assert(pCreateInfo->samples == swcInfo->samples);
+ assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
+ assert(!(pCreateInfo->usage & ~swcInfo->usage));
+
+ vk_foreach_struct_const(ext, pCreateInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: {
+ const VkImageFormatListCreateInfo *iflci =
+ (const VkImageFormatListCreateInfo *)ext;
+ const VkImageFormatListCreateInfo *swc_iflci =
+ &chain->image_info.format_list;
+
+ for (uint32_t i = 0; i < iflci->viewFormatCount; i++) {
+ bool found = false;
+ for (uint32_t j = 0; j < swc_iflci->viewFormatCount; j++) {
+ if (iflci->pViewFormats[i] == swc_iflci->pViewFormats[j]) {
+ found = true;
+ break;
+ }
+ }
+ assert(found);
+ }
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+ break;
+
+ default:
+ assert(!"Unsupported image create extension");
+ }
+ }
+#endif
+
+ return wsi->CreateImage(chain->device, &chain->image_info.create,
+ &chain->alloc, pImage);
+}
+
+VkResult
+wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
+ VkImage vk_image,
+ VkSwapchainKHR _swapchain,
+ uint32_t image_idx)
+{
+ VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
+ struct wsi_image *image = chain->get_wsi_image(chain, image_idx);
+
+ return wsi->BindImageMemory(chain->device, vk_image, image->memory, 0);
+}
+
+VkResult
+wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain *chain,
+ uint64_t present_id, uint64_t timeout)
+{
+ assert(chain->present_id_timeline);
+ const VkSemaphoreWaitInfo wait_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
+ .semaphoreCount = 1,
+ .pSemaphores = &chain->present_id_timeline,
+ .pValues = &present_id,
+ };
+
+ return chain->wsi->WaitSemaphores(chain->device, &wait_info, timeout);
+}
+
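+/* Pick a memory type from type_bits that has all of req_props and none of
+ * deny_props. If nothing qualifies because every candidate is device-local,
+ * retry while allowing device-local types (common on UMA platforms).
+ */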
+uint32_t
+wsi_select_memory_type(const struct wsi_device *wsi,
+ VkMemoryPropertyFlags req_props,
+ VkMemoryPropertyFlags deny_props,
+ uint32_t type_bits)
+{
+ assert(type_bits != 0);
+
+ VkMemoryPropertyFlags common_props = ~0;
+ u_foreach_bit(t, type_bits) {
+ const VkMemoryType type = wsi->memory_props.memoryTypes[t];
+
+ common_props &= type.propertyFlags;
+
+ if (deny_props & type.propertyFlags)
+ continue;
+
+ if (!(req_props & ~type.propertyFlags))
+ return t;
+ }
+
+ if ((deny_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
+ (common_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
+      /* If the caller asked for non-device-local memory and all the
+       * candidate types are device-local (commonly the case on UMA
+       * platforms), try again without denying device-local types.
+       */
+ deny_props &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ return wsi_select_memory_type(wsi, req_props, deny_props, type_bits);
+ }
+
+ unreachable("No memory type found");
+}
+
+uint32_t
+wsi_select_device_memory_type(const struct wsi_device *wsi,
+ uint32_t type_bits)
+{
+ return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ 0 /* deny_props */, type_bits);
+}
+
+static uint32_t
+wsi_select_host_memory_type(const struct wsi_device *wsi,
+ uint32_t type_bits)
+{
+ return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ 0 /* deny_props */, type_bits);
+}
+
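+/* Set up the buffer-blit path for one image: create a linear buffer that
+ * the window system can consume (exportable, or backed by a host pointer on
+ * software paths), bind dedicated memory to it, and then allocate the
+ * device memory for the VkImage that actually gets rendered to.
+ */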
+VkResult
+wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image,
+ VkExternalMemoryHandleTypeFlags handle_types)
+{
+ assert(chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
+
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result;
+
+ const VkExternalMemoryBufferCreateInfo buffer_external_info = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
+ .pNext = NULL,
+ .handleTypes = handle_types,
+ };
+ const VkBufferCreateInfo buffer_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .pNext = &buffer_external_info,
+ .size = info->linear_size,
+ .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ };
+ result = wsi->CreateBuffer(chain->device, &buffer_info,
+ &chain->alloc, &image->blit.buffer);
+ if (result != VK_SUCCESS)
+ return result;
+
+ VkMemoryRequirements reqs;
+ wsi->GetBufferMemoryRequirements(chain->device, image->blit.buffer, &reqs);
+ assert(reqs.size <= info->linear_size);
+
+ struct wsi_memory_allocate_info memory_wsi_info = {
+ .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
+ .pNext = NULL,
+ .implicit_sync = info->image_type == WSI_IMAGE_TYPE_DRM &&
+ !info->explicit_sync,
+ };
+ VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+ .pNext = &memory_wsi_info,
+ .image = VK_NULL_HANDLE,
+ .buffer = image->blit.buffer,
+ };
+ VkMemoryAllocateInfo buf_mem_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &buf_mem_dedicated_info,
+ .allocationSize = info->linear_size,
+ .memoryTypeIndex =
+ info->select_blit_dst_memory_type(wsi, reqs.memoryTypeBits),
+ };
+
+ void *sw_host_ptr = NULL;
+ if (info->alloc_shm)
+ sw_host_ptr = info->alloc_shm(image, info->linear_size);
+
+ VkExportMemoryAllocateInfo memory_export_info;
+ VkImportMemoryHostPointerInfoEXT host_ptr_info;
+ if (sw_host_ptr != NULL) {
+ host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
+ .pHostPointer = sw_host_ptr,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
+ };
+ __vk_append_struct(&buf_mem_info, &host_ptr_info);
+ } else if (handle_types != 0) {
+ memory_export_info = (VkExportMemoryAllocateInfo) {
+ .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
+ .handleTypes = handle_types,
+ };
+ __vk_append_struct(&buf_mem_info, &memory_export_info);
+ }
+
+ result = wsi->AllocateMemory(chain->device, &buf_mem_info,
+ &chain->alloc, &image->blit.memory);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = wsi->BindBufferMemory(chain->device, image->blit.buffer,
+ image->blit.memory, 0);
+ if (result != VK_SUCCESS)
+ return result;
+
+ wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
+
+ const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+ .pNext = NULL,
+ .image = image->image,
+ .buffer = VK_NULL_HANDLE,
+ };
+ const VkMemoryAllocateInfo memory_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &memory_dedicated_info,
+ .allocationSize = reqs.size,
+ .memoryTypeIndex =
+ info->select_image_memory_type(wsi, reqs.memoryTypeBits),
+ };
+
+ result = wsi->AllocateMemory(chain->device, &memory_info,
+ &chain->alloc, &image->memory);
+ if (result != VK_SUCCESS)
+ return result;
+
+ image->num_planes = 1;
+ image->sizes[0] = info->linear_size;
+ image->row_pitches[0] = info->linear_stride;
+ image->offsets[0] = 0;
+
+ return VK_SUCCESS;
+}
+
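+/* Record, once per usable queue family (or once for the dedicated blit
+ * queue), a command buffer that transitions the images, copies the
+ * presentable image into the linear buffer or blit image, and transitions
+ * everything back to PRESENT_SRC for the window system.
+ */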
+VkResult
+wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result;
+
+ int cmd_buffer_count =
+ chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
+ image->blit.cmd_buffers =
+ vk_zalloc(&chain->alloc,
+ sizeof(VkCommandBuffer) * cmd_buffer_count, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!image->blit.cmd_buffers)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ for (uint32_t i = 0; i < cmd_buffer_count; i++) {
+ if (!chain->cmd_pools[i])
+ continue;
+
+ const VkCommandBufferAllocateInfo cmd_buffer_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .pNext = NULL,
+ .commandPool = chain->cmd_pools[i],
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = 1,
+ };
+ result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
+ &image->blit.cmd_buffers[i]);
+ if (result != VK_SUCCESS)
+ return result;
+
+ const VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ };
+ wsi->BeginCommandBuffer(image->blit.cmd_buffers[i], &begin_info);
+
+ VkImageMemoryBarrier img_mem_barriers[] = {
+ {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .pNext = NULL,
+ .srcAccessMask = 0,
+ .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+ .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = image->image,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ },
+ {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .pNext = NULL,
+ .srcAccessMask = 0,
+ .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+ .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = image->blit.image,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ },
+ };
+ uint32_t img_mem_barrier_count =
+ chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT ? 1 : 2;
+ wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ 0,
+ 0, NULL,
+ 0, NULL,
+                              img_mem_barrier_count, img_mem_barriers);
+
+ if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
+ struct VkBufferImageCopy buffer_image_copy = {
+ .bufferOffset = 0,
+ .bufferRowLength = info->linear_stride /
+ vk_format_get_blocksize(info->create.format),
+ .bufferImageHeight = 0,
+ .imageSubresource = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .mipLevel = 0,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ .imageOffset = { .x = 0, .y = 0, .z = 0 },
+ .imageExtent = info->create.extent,
+ };
+ wsi->CmdCopyImageToBuffer(image->blit.cmd_buffers[i],
+ image->image,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ image->blit.buffer,
+ 1, &buffer_image_copy);
+ } else {
+ struct VkImageCopy image_copy = {
+ .srcSubresource = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .mipLevel = 0,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ .srcOffset = { .x = 0, .y = 0, .z = 0 },
+ .dstSubresource = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .mipLevel = 0,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ .dstOffset = { .x = 0, .y = 0, .z = 0 },
+ .extent = info->create.extent,
+ };
+
+ wsi->CmdCopyImage(image->blit.cmd_buffers[i],
+ image->image,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ image->blit.image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1, &image_copy);
+ }
+
+ img_mem_barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ img_mem_barriers[0].dstAccessMask = 0;
+ img_mem_barriers[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
+ img_mem_barriers[0].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ img_mem_barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ img_mem_barriers[1].dstAccessMask = 0;
+ img_mem_barriers[1].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ img_mem_barriers[1].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+ 0,
+ 0, NULL,
+ 0, NULL,
+ img_mem_barrier_count, img_mem_barriers);
+
+ result = wsi->EndCommandBuffer(image->blit.cmd_buffers[i]);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+wsi_configure_buffer_image(UNUSED const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ uint32_t stride_align, uint32_t size_align,
+ struct wsi_image_info *info)
+{
+ const struct wsi_device *wsi = chain->wsi;
+
+ assert(util_is_power_of_two_nonzero(stride_align));
+ assert(util_is_power_of_two_nonzero(size_align));
+
+ info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ info->wsi.blit_src = true;
+
+ const uint32_t cpp = vk_format_get_blocksize(pCreateInfo->imageFormat);
+ info->linear_stride = pCreateInfo->imageExtent.width * cpp;
+ info->linear_stride = align(info->linear_stride, stride_align);
+
+ /* Since we can pick the stride to be whatever we want, also align to the
+ * device's optimalBufferCopyRowPitchAlignment so we get efficient copies.
+ */
+ assert(wsi->optimalBufferCopyRowPitchAlignment > 0);
+ info->linear_stride = align(info->linear_stride,
+ wsi->optimalBufferCopyRowPitchAlignment);
+
+ info->linear_size = (uint64_t)info->linear_stride *
+ pCreateInfo->imageExtent.height;
+ info->linear_size = align64(info->linear_size, size_align);
+
+ info->finish_create = wsi_finish_create_blit_context;
+}
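
The stride and size arithmetic above is easier to follow with concrete numbers. A standalone sketch (the 256/64/4096 alignment values below are illustrative assumptions, not values taken from this patch):

   #include <inttypes.h>
   #include <stdint.h>
   #include <stdio.h>

   /* Round v up to the next multiple of the power-of-two alignment a. */
   static uint64_t
   align_pot(uint64_t v, uint64_t a)
   {
      return (v + a - 1) & ~(a - 1);
   }

   int
   main(void)
   {
      const uint32_t width = 1920, height = 1080;
      const uint32_t cpp = 4;              /* bytes per pixel, e.g. B8G8R8A8 */
      const uint32_t stride_align = 256;   /* assumed caller requirement */
      const uint32_t row_pitch_align = 64; /* assumed optimalBufferCopyRowPitchAlignment */
      const uint32_t size_align = 4096;    /* assumed caller requirement */

      uint64_t stride = align_pot((uint64_t)width * cpp, stride_align);
      stride = align_pot(stride, row_pitch_align);
      uint64_t size = align_pot(stride * height, size_align);

      /* With these numbers: stride = 7680 (already 256- and 64-aligned) and
       * size = 8294400 (already a multiple of 4096). */
      printf("linear_stride = %" PRIu64 ", linear_size = %" PRIu64 "\n",
             stride, size);
      return 0;
   }
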
+
+void
+wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain *chain,
+ struct wsi_image_info *info)
+{
+ info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ info->wsi.blit_src = true;
+ info->finish_create = wsi_finish_create_blit_context;
+}
+
+static VkResult
+wsi_create_cpu_linear_image_mem(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result;
+
+ VkMemoryRequirements reqs;
+ wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
+
+ VkSubresourceLayout layout;
+ wsi->GetImageSubresourceLayout(chain->device, image->image,
+ &(VkImageSubresource) {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .mipLevel = 0,
+ .arrayLayer = 0,
+ }, &layout);
+ assert(layout.offset == 0);
+
+ const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+ .image = image->image,
+ .buffer = VK_NULL_HANDLE,
+ };
+ VkMemoryAllocateInfo memory_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &memory_dedicated_info,
+ .allocationSize = reqs.size,
+ .memoryTypeIndex =
+ wsi_select_host_memory_type(wsi, reqs.memoryTypeBits),
+ };
+
+ void *sw_host_ptr = NULL;
+ if (info->alloc_shm)
+ sw_host_ptr = info->alloc_shm(image, layout.size);
+
+ VkImportMemoryHostPointerInfoEXT host_ptr_info;
+ if (sw_host_ptr != NULL) {
+ host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
+ .pHostPointer = sw_host_ptr,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
+ };
+ __vk_append_struct(&memory_info, &host_ptr_info);
+ }
+
+ result = wsi->AllocateMemory(chain->device, &memory_info,
+ &chain->alloc, &image->memory);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = wsi->MapMemory(chain->device, image->memory,
+ 0, VK_WHOLE_SIZE, 0, &image->cpu_map);
+ if (result != VK_SUCCESS)
+ return result;
+
+ image->num_planes = 1;
+ image->sizes[0] = reqs.size;
+ image->row_pitches[0] = layout.rowPitch;
+ image->offsets[0] = 0;
+
+ return VK_SUCCESS;
+}
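
For context, the cpu_map and row_pitches[0] filled in here are what a software present path later walks when copying the image out to the window system. A minimal sketch of such a copy, assuming a tightly packed destination buffer (the destination layout is an assumption, not something defined by this patch):

   #include <string.h>

   /* Sketch only: copy the mapped linear image out row by row. */
   static void
   copy_out_cpu_image(const struct wsi_image *image, char *dst,
                      uint32_t width, uint32_t height, uint32_t cpp)
   {
      const char *src = image->cpu_map;
      const size_t row_bytes = (size_t)width * cpp;

      for (uint32_t y = 0; y < height; y++) {
         memcpy(dst, src, row_bytes);  /* destination assumed tightly packed */
         src += image->row_pitches[0]; /* source rows use the driver's pitch */
         dst += row_bytes;
      }
   }
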
+
+static VkResult
+wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ VkResult result;
+
+ result = wsi_create_buffer_blit_context(chain, info, image, 0);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = chain->wsi->MapMemory(chain->device, image->blit.memory,
+ 0, VK_WHOLE_SIZE, 0, &image->cpu_map);
+ if (result != VK_SUCCESS)
+ return result;
+
+ return VK_SUCCESS;
+}
+
+bool
+wsi_cpu_image_needs_buffer_blit(const struct wsi_device *wsi,
+ const struct wsi_cpu_image_params *params)
+{
+ if (WSI_DEBUG & WSI_DEBUG_BUFFER)
+ return true;
+
+ if (wsi->wants_linear)
+ return false;
+
+ return true;
+}
+
+VkResult
+wsi_configure_cpu_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_cpu_image_params *params,
+ struct wsi_image_info *info)
+{
+ assert(params->base.image_type == WSI_IMAGE_TYPE_CPU);
+ assert(chain->blit.type == WSI_SWAPCHAIN_NO_BLIT ||
+ chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
+
+ VkExternalMemoryHandleTypeFlags handle_types = 0;
+ if (params->alloc_shm && chain->blit.type != WSI_SWAPCHAIN_NO_BLIT)
+ handle_types = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
+
+ VkResult result = wsi_configure_image(chain, pCreateInfo,
+ handle_types, info);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
+ wsi_configure_buffer_image(chain, pCreateInfo,
+ 1 /* stride_align */,
+ 1 /* size_align */,
+ info);
+
+ info->select_blit_dst_memory_type = wsi_select_host_memory_type;
+ info->select_image_memory_type = wsi_select_device_memory_type;
+ info->create_mem = wsi_create_cpu_buffer_image_mem;
+ } else {
+ /* Force the image to be linear */
+ info->create.tiling = VK_IMAGE_TILING_LINEAR;
+
+ info->create_mem = wsi_create_cpu_linear_image_mem;
+ }
+
+ info->alloc_shm = params->alloc_shm;
+
+ return VK_SUCCESS;
+}
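
As a usage note, a software driver's image-configuration hook might call this roughly as sketched below; the surrounding driver code is assumed, and only the wsi_cpu_image_params fields shown come from this patch:

   /* Sketch of a driver-side configuration hook for CPU swapchain images. */
   static VkResult
   driver_configure_swapchain_image(const struct wsi_swapchain *chain,
                                    const VkSwapchainCreateInfoKHR *pCreateInfo,
                                    struct wsi_image_info *info)
   {
      const struct wsi_cpu_image_params params = {
         .base.image_type = WSI_IMAGE_TYPE_CPU,
         .alloc_shm = NULL, /* only window systems with SHM support provide this */
      };

      return wsi_configure_cpu_image(chain, pCreateInfo, &params, info);
   }
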
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_WaitForPresentKHR(VkDevice device, VkSwapchainKHR _swapchain,
+ uint64_t presentId, uint64_t timeout)
+{
+ VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
+ assert(swapchain->wait_for_present);
+ return swapchain->wait_for_present(swapchain, presentId, timeout);
+}
+
+VkImageUsageFlags
+wsi_caps_get_image_usage(void)
+{
+ return VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT |
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+}
+
+bool
+wsi_device_supports_explicit_sync(struct wsi_device *device)
{
- return os_time_get_nano();
+ return !device->sw && device->has_timeline_semaphore &&
+ (device->timeline_semaphore_export_handle_types &
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
}
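
A driver would typically consult this helper once at device-creation time. A hypothetical call site (enable_explicit_sync() and the pdevice layout are assumptions, not part of this patch):

   /* Hypothetical driver-side use of the new helper: only advertise the
    * explicit-sync WSI path when opaque-fd timeline semaphore export works. */
   if (wsi_device_supports_explicit_sync(&pdevice->wsi_device))
      enable_explicit_sync(pdevice); /* assumed driver function */
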
diff --git a/src/vulkan/wsi/wsi_common.h b/src/vulkan/wsi/wsi_common.h
index 473efcc16a4..014cb718b17 100644
--- a/src/vulkan/wsi/wsi_common.h
+++ b/src/vulkan/wsi/wsi_common.h
@@ -26,10 +26,24 @@
#include <stdint.h>
#include <stdbool.h>
+#include "util/log.h"
#include "vk_alloc.h"
+#include "vk_dispatch_table.h"
#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef WSI_ENTRYPOINTS_H
+extern const struct vk_instance_entrypoint_table wsi_instance_entrypoints;
+extern const struct vk_physical_device_entrypoint_table wsi_physical_device_entrypoints;
+extern const struct vk_device_entrypoint_table wsi_device_entrypoints;
+#endif
+
+#include "util/list.h"
+
/* This is guaranteed to not collide with anything because it's in the
* VK_KHR_swapchain namespace but not actually used by the extension.
*/
@@ -38,6 +52,11 @@
#define VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA (VkStructureType)1000001005
#define VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA (VkStructureType)1000001006
+#define VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA_cast struct wsi_image_create_info
+#define VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA_cast struct wsi_memory_allocate_info
+#define VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA_cast struct wsi_surface_supported_counters
+#define VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA_cast struct wsi_memory_signal_submit_info
+
/* This is always chained to VkImageCreateInfo when a wsi image is created.
* It indicates that the image can be transitioned to/from
* VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
@@ -47,8 +66,8 @@ struct wsi_image_create_info {
const void *pNext;
bool scanout;
- /* if true, the image is a prime blit source */
- bool prime_blit_src;
+ /* if true, the image is a blit source */
+ bool blit_src;
};
struct wsi_memory_allocate_info {
@@ -73,20 +92,12 @@ struct wsi_memory_signal_submit_info {
VkDeviceMemory memory;
};
-struct wsi_fence {
- VkDevice device;
- const struct wsi_device *wsi_device;
- VkDisplayKHR display;
- const VkAllocationCallbacks *alloc;
- VkResult (*wait)(struct wsi_fence *fence, uint64_t abs_timeout);
- void (*destroy)(struct wsi_fence *fence);
-};
-
struct wsi_interface;
+struct vk_instance;
struct driOptionCache;
-#define VK_ICD_WSI_PLATFORM_MAX (VK_ICD_WSI_PLATFORM_DISPLAY + 1)
+#define VK_ICD_WSI_PLATFORM_MAX (VK_ICD_WSI_PLATFORM_HEADLESS + 1)
struct wsi_device {
/* Allocator for the instance */
@@ -95,11 +106,25 @@ struct wsi_device {
VkPhysicalDevice pdevice;
VkPhysicalDeviceMemoryProperties memory_props;
uint32_t queue_family_count;
+ uint64_t queue_supports_blit;
+ VkPhysicalDeviceDrmPropertiesEXT drm_info;
VkPhysicalDevicePCIBusInfoPropertiesEXT pci_bus_info;
+ VkExternalSemaphoreHandleTypeFlags semaphore_export_handle_types;
+ VkExternalSemaphoreHandleTypeFlags timeline_semaphore_export_handle_types;
+
+ bool has_import_memory_host;
+ bool has_timeline_semaphore;
+
+ /** Indicates if wsi_image_create_info::scanout is supported
+ *
+    * If false, WSI will always use either modifiers or the buffer blit path.
+ */
+ bool supports_scanout;
bool supports_modifiers;
uint32_t maxImageDimension2D;
+ uint32_t optimalBufferCopyRowPitchAlignment;
VkPresentModeKHR override_present_mode;
bool force_bgra8_unorm_first;
@@ -107,6 +132,14 @@ struct wsi_device {
* available. Not all window systems might support this. */
bool enable_adaptive_sync;
+ /* List of fences to signal when hotplug event happens. */
+ struct list_head hotplug_fences;
+
+ /* Create headless swapchains. */
+ bool force_headless_swapchain;
+
+ bool force_swapchain_to_currentExtent;
+
struct {
/* Override the minimum number of images on the swapchain.
* 0 = no override */
@@ -126,25 +159,49 @@ struct wsi_device {
* true.
*/
bool xwaylandWaitReady;
+
+ /* adds an extra minImageCount when running under xwayland */
+ bool extra_xwayland_image;
+   /* Adds an extra image to minImageCount when running under Xwayland. */
+   /* Never report VK_SUBOPTIMAL_KHR. Used to work around
+ * games that cannot handle SUBOPTIMAL correctly. */
+ bool ignore_suboptimal;
} x11;
+ struct {
+ void *(*get_d3d12_command_queue)(VkDevice device);
+ /* Needs to be per VkDevice, not VkPhysicalDevice, depends on queue config */
+ bool (*requires_blits)(VkDevice device);
+ VkResult (*create_image_memory)(VkDevice device, void *resource,
+ const VkAllocationCallbacks *alloc,
+ VkDeviceMemory *out);
+ } win32;
+
bool sw;
+ /* Set to true if the implementation is ok with linear WSI images. */
+ bool wants_linear;
+
/* Signals the semaphore such that any wait on the semaphore will wait on
    * any reads or writes on the given memory object. This is used to
- * implement the semaphore signal operation in vkAcquireNextImage.
+ * implement the semaphore signal operation in vkAcquireNextImage. This
+ * requires the driver to implement vk_device::create_sync_for_memory.
*/
- void (*signal_semaphore_for_memory)(VkDevice device,
- VkSemaphore semaphore,
- VkDeviceMemory memory);
+ bool signal_semaphore_with_memory;
/* Signals the fence such that any wait on the fence will wait on any reads
    * or writes on the given memory object. This is used to implement the
- * semaphore signal operation in vkAcquireNextImage.
+ * semaphore signal operation in vkAcquireNextImage. This requires the
+ * driver to implement vk_device::create_sync_for_memory. The resulting
+ * vk_sync must support CPU waits.
*/
- void (*signal_fence_for_memory)(VkDevice device,
- VkFence fence,
- VkDeviceMemory memory);
+ bool signal_fence_with_memory;
+
+ /* Whether present_wait functionality is enabled on the device.
+ * In this case, we have to create an extra timeline semaphore
+ * to be able to synchronize with the WSI present semaphore being unsignalled.
+ * This requires VK_KHR_timeline_semaphore. */
+ bool khr_present_wait;
/*
* This sets the ownership for a WSI memory object:
@@ -168,37 +225,50 @@ struct wsi_device {
*/
bool (*can_present_on_device)(VkPhysicalDevice pdevice, int fd);
+ /*
+ * A driver can implement this callback to return a special queue to execute
+ * buffer blits.
+ */
+ VkQueue (*get_blit_queue)(VkDevice device);
+
#define WSI_CB(cb) PFN_vk##cb cb
WSI_CB(AllocateMemory);
WSI_CB(AllocateCommandBuffers);
WSI_CB(BindBufferMemory);
WSI_CB(BindImageMemory);
WSI_CB(BeginCommandBuffer);
+ WSI_CB(CmdPipelineBarrier);
+ WSI_CB(CmdCopyImage);
WSI_CB(CmdCopyImageToBuffer);
WSI_CB(CreateBuffer);
WSI_CB(CreateCommandPool);
WSI_CB(CreateFence);
WSI_CB(CreateImage);
+ WSI_CB(CreateSemaphore);
WSI_CB(DestroyBuffer);
WSI_CB(DestroyCommandPool);
WSI_CB(DestroyFence);
WSI_CB(DestroyImage);
+ WSI_CB(DestroySemaphore);
WSI_CB(EndCommandBuffer);
WSI_CB(FreeMemory);
WSI_CB(FreeCommandBuffers);
WSI_CB(GetBufferMemoryRequirements);
+ WSI_CB(GetFenceStatus);
WSI_CB(GetImageDrmFormatModifierPropertiesEXT);
WSI_CB(GetImageMemoryRequirements);
WSI_CB(GetImageSubresourceLayout);
WSI_CB(GetMemoryFdKHR);
WSI_CB(GetPhysicalDeviceFormatProperties);
- WSI_CB(GetPhysicalDeviceFormatProperties2KHR);
+ WSI_CB(GetPhysicalDeviceFormatProperties2);
WSI_CB(GetPhysicalDeviceImageFormatProperties2);
+ WSI_CB(GetSemaphoreFdKHR);
WSI_CB(ResetFences);
WSI_CB(QueueSubmit);
WSI_CB(WaitForFences);
WSI_CB(MapMemory);
WSI_CB(UnmapMemory);
+ WSI_CB(WaitSemaphores);
#undef WSI_CB
struct wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];
@@ -206,6 +276,11 @@ struct wsi_device {
typedef PFN_vkVoidFunction (VKAPI_PTR *WSI_FN_GetPhysicalDeviceProcAddr)(VkPhysicalDevice physicalDevice, const char* pName);
+struct wsi_device_options {
+ bool sw_device;
+ bool extra_xwayland_image;
+};
+
VkResult
wsi_device_init(struct wsi_device *wsi,
VkPhysicalDevice pdevice,
@@ -213,12 +288,17 @@ wsi_device_init(struct wsi_device *wsi,
const VkAllocationCallbacks *alloc,
int display_fd,
const struct driOptionCache *dri_options,
- bool sw_device);
+ const struct wsi_device_options *device_options);
void
wsi_device_finish(struct wsi_device *wsi,
const VkAllocationCallbacks *alloc);
+/* Setup file descriptor to be used with imported sync_fd's in wsi fences. */
+void
+wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
+ int fd);
+
#define ICD_DEFINE_NONDISP_HANDLE_CASTS(__VkIcdType, __VkType) \
\
static inline __VkIcdType * \
@@ -239,56 +319,13 @@ wsi_device_finish(struct wsi_device *wsi,
ICD_DEFINE_NONDISP_HANDLE_CASTS(VkIcdSurfaceBase, VkSurfaceKHR)
VkResult
-wsi_common_get_surface_support(struct wsi_device *wsi_device,
- uint32_t queueFamilyIndex,
- VkSurfaceKHR surface,
- VkBool32* pSupported);
-
-VkResult
-wsi_common_get_surface_capabilities(struct wsi_device *wsi_device,
- VkSurfaceKHR surface,
- VkSurfaceCapabilitiesKHR *pSurfaceCapabilities);
-
-VkResult
-wsi_common_get_surface_capabilities2(struct wsi_device *wsi_device,
- const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
- VkSurfaceCapabilities2KHR *pSurfaceCapabilities);
-
-VkResult
-wsi_common_get_surface_formats(struct wsi_device *wsi_device,
- VkSurfaceKHR surface,
- uint32_t *pSurfaceFormatCount,
- VkSurfaceFormatKHR *pSurfaceFormats);
-
-VkResult
-wsi_common_get_surface_formats2(struct wsi_device *wsi_device,
- const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
- uint32_t *pSurfaceFormatCount,
- VkSurfaceFormat2KHR *pSurfaceFormats);
-
-VkResult
-wsi_common_get_surface_present_modes(struct wsi_device *wsi_device,
- VkSurfaceKHR surface,
- uint32_t *pPresentModeCount,
- VkPresentModeKHR *pPresentModes);
-
-VkResult
-wsi_common_get_present_rectangles(struct wsi_device *wsi,
- VkSurfaceKHR surface,
- uint32_t* pRectCount,
- VkRect2D* pRects);
-
-VkResult
-wsi_common_get_surface_capabilities2ext(
- struct wsi_device *wsi_device,
- VkSurfaceKHR surface,
- VkSurfaceCapabilities2EXT *pSurfaceCapabilities);
-
-VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,
uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages);
+VkImage
+wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index);
+
VkResult
wsi_common_acquire_next_image2(const struct wsi_device *wsi,
VkDevice device,
@@ -296,24 +333,43 @@ wsi_common_acquire_next_image2(const struct wsi_device *wsi,
uint32_t *pImageIndex);
VkResult
-wsi_common_create_swapchain(struct wsi_device *wsi,
- VkDevice device,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSwapchainKHR *pSwapchain);
-void
-wsi_common_destroy_swapchain(VkDevice device,
- VkSwapchainKHR swapchain,
- const VkAllocationCallbacks *pAllocator);
-
-VkResult
wsi_common_queue_present(const struct wsi_device *wsi,
VkDevice device_h,
VkQueue queue_h,
int queue_family_index,
const VkPresentInfoKHR *pPresentInfo);
-uint64_t
-wsi_common_get_current_time(void);
+VkResult
+wsi_common_create_swapchain_image(const struct wsi_device *wsi,
+ const VkImageCreateInfo *pCreateInfo,
+ VkSwapchainKHR _swapchain,
+ VkImage *pImage);
+VkResult
+wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
+ VkImage vk_image,
+ VkSwapchainKHR _swapchain,
+ uint32_t image_idx);
+
+bool
+wsi_common_vk_instance_supports_present_wait(const struct vk_instance *instance);
+
+VkImageUsageFlags
+wsi_caps_get_image_usage(void);
+
+bool
+wsi_device_supports_explicit_sync(struct wsi_device *device);
+
+#define wsi_common_vk_warn_once(warning) \
+ do { \
+      static bool warned = false; \
+ if (!warned) { \
+ mesa_loge(warning); \
+ warned = true; \
+ } \
+ } while (0)
+
+#ifdef __cplusplus
+}
+#endif
#endif
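
With the header change above, drivers now pass a wsi_device_options struct to wsi_device_init() instead of a bare sw_device flag. A sketch of an updated call site (the pdevice/instance fields and the proc-address callback name are assumptions about the caller, not part of this patch):

   /* Sketch of a driver adapting to the new wsi_device_init() signature. */
   const struct wsi_device_options wsi_options = {
      .sw_device = false,
      .extra_xwayland_image = false,
   };

   VkResult result =
      wsi_device_init(&pdevice->wsi_device,
                      pdevice->vk_handle,                    /* assumed */
                      driver_get_physical_device_proc_addr,  /* assumed */
                      &instance->alloc,                      /* assumed */
                      -1 /* display_fd */,
                      NULL /* dri_options */,
                      &wsi_options);
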
diff --git a/src/vulkan/wsi/wsi_common_display.c b/src/vulkan/wsi/wsi_common_display.c
index 312dfad20aa..3dc4a670cd7 100644
--- a/src/vulkan/wsi/wsi_common_display.c
+++ b/src/vulkan/wsi/wsi_common_display.c
@@ -23,6 +23,7 @@
#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
+#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
@@ -32,6 +33,9 @@
#include <math.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
+#ifdef HAVE_LIBUDEV
+#include <libudev.h>
+#endif
#include "drm-uapi/drm_fourcc.h"
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
#include <xcb/randr.h>
@@ -39,8 +43,16 @@
#endif
#include "util/hash_table.h"
#include "util/list.h"
-
+#include "util/os_time.h"
+#include "util/timespec.h"
+
+#include "vk_device.h"
+#include "vk_fence.h"
+#include "vk_instance.h"
+#include "vk_physical_device.h"
+#include "vk_sync.h"
#include "vk_util.h"
+#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_display.h"
#include "wsi_common_queue.h"
@@ -92,10 +104,16 @@ struct wsi_display {
int fd;
+ /* Used with syncobj imported from driver side. */
+ int syncobj_fd;
+
pthread_mutex_t wait_mutex;
pthread_cond_t wait_cond;
pthread_t wait_thread;
+ pthread_cond_t hotplug_cond;
+ pthread_t hotplug_thread;
+
struct list_head connectors; /* list of all discovered connectors */
};
@@ -122,6 +140,7 @@ struct wsi_display_image {
uint32_t fb_id;
uint32_t buffer[4];
uint64_t flip_sequence;
+ uint64_t present_id;
};
struct wsi_display_swapchain {
@@ -130,15 +149,28 @@ struct wsi_display_swapchain {
VkIcdSurfaceDisplay *surface;
uint64_t flip_sequence;
VkResult status;
+
+ pthread_mutex_t present_id_mutex;
+ pthread_cond_t present_id_cond;
+ uint64_t present_id;
+ VkResult present_id_error;
+
struct wsi_display_image images[0];
};
struct wsi_display_fence {
- struct wsi_fence base;
+ struct list_head link;
+ struct wsi_display *wsi;
bool event_received;
bool destroyed;
uint32_t syncobj; /* syncobj to signal on event */
uint64_t sequence;
+ bool device_event; /* fence is used for device events */
+};
+
+struct wsi_display_sync {
+ struct vk_sync sync;
+ struct wsi_display_fence *fence;
};
static uint64_t fence_sequence;
@@ -174,7 +206,7 @@ wsi_display_mode_refresh(struct wsi_display_mode *wsi)
static uint64_t wsi_rel_to_abs_time(uint64_t rel_time)
{
- uint64_t current_time = wsi_common_get_current_time();
+ uint64_t current_time = os_time_get_nano();
/* check for overflow */
if (rel_time > UINT64_MAX - current_time)
@@ -184,8 +216,7 @@ static uint64_t wsi_rel_to_abs_time(uint64_t rel_time)
}
static struct wsi_display_mode *
-wsi_display_find_drm_mode(struct wsi_device *wsi_device,
- struct wsi_display_connector *connector,
+wsi_display_find_drm_mode(struct wsi_display_connector *connector,
drmModeModeInfoPtr mode)
{
wsi_for_each_display_mode(display_mode, connector) {
@@ -196,8 +227,7 @@ wsi_display_find_drm_mode(struct wsi_device *wsi_device,
}
static void
-wsi_display_invalidate_connector_modes(struct wsi_device *wsi_device,
- struct wsi_display_connector *connector)
+wsi_display_invalidate_connector_modes(struct wsi_display_connector *connector)
{
wsi_for_each_display_mode(display_mode, connector) {
display_mode->valid = false;
@@ -212,7 +242,7 @@ wsi_display_register_drm_mode(struct wsi_device *wsi_device,
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
struct wsi_display_mode *display_mode =
- wsi_display_find_drm_mode(wsi_device, connector, drm_mode);
+ wsi_display_find_drm_mode(connector, drm_mode);
if (display_mode) {
display_mode->valid = true;
@@ -270,6 +300,8 @@ wsi_display_alloc_connector(struct wsi_display *wsi,
struct wsi_display_connector *connector =
vk_zalloc(wsi->alloc, sizeof (struct wsi_display_connector),
8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ if (!connector)
+ return NULL;
connector->id = connector_id;
connector->wsi = wsi;
@@ -327,7 +359,7 @@ wsi_display_get_connector(struct wsi_device *wsi_device,
}
/* Mark all connector modes as invalid */
- wsi_display_invalidate_connector_modes(wsi_device, connector);
+ wsi_display_invalidate_connector_modes(connector);
/*
* List current modes, adding new ones and marking existing ones as
@@ -358,8 +390,7 @@ mode_size(struct wsi_display_mode *mode)
}
static void
-wsi_display_fill_in_display_properties(struct wsi_device *wsi_device,
- struct wsi_display_connector *connector,
+wsi_display_fill_in_display_properties(struct wsi_display_connector *connector,
VkDisplayProperties2KHR *properties2)
{
assert(properties2->sType == VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR);
@@ -410,22 +441,20 @@ wsi_display_fill_in_display_properties(struct wsi_device *wsi_device,
properties->persistentContent = VK_FALSE;
}
-/*
- * Implement vkGetPhysicalDeviceDisplayPropertiesKHR (VK_KHR_display)
- */
-VkResult
-wsi_display_get_physical_device_display_properties(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayPropertiesKHR *properties)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pPropertyCount,
+ VkDisplayPropertiesKHR *pProperties)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
- if (properties == NULL) {
- return wsi_display_get_physical_device_display_properties2(
- physical_device, wsi_device, property_count, NULL);
+ if (pProperties == NULL) {
+ return wsi_GetPhysicalDeviceDisplayProperties2KHR(physicalDevice,
+ pPropertyCount,
+ NULL);
} else {
/* If we're actually returning properties, allocate a temporary array of
* VkDisplayProperties2KHR structs, call properties2 to fill them out,
@@ -435,20 +464,21 @@ wsi_display_get_physical_device_display_properties(
* allocations so this should get lost in the noise.
*/
VkDisplayProperties2KHR *props2 =
- vk_zalloc(wsi->alloc, sizeof(*props2) * *property_count, 8,
+ vk_zalloc(wsi->alloc, sizeof(*props2) * *pPropertyCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (props2 == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- for (uint32_t i = 0; i < *property_count; i++)
+ for (uint32_t i = 0; i < *pPropertyCount; i++)
props2[i].sType = VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR;
- VkResult result = wsi_display_get_physical_device_display_properties2(
- physical_device, wsi_device, property_count, props2);
+ VkResult result =
+ wsi_GetPhysicalDeviceDisplayProperties2KHR(physicalDevice,
+ pPropertyCount, props2);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
- for (uint32_t i = 0; i < *property_count; i++)
- properties[i] = props2[i].displayProperties;
+ for (uint32_t i = 0; i < *pPropertyCount; i++)
+ pProperties[i] = props2[i].displayProperties;
}
vk_free(wsi->alloc, props2);
@@ -457,54 +487,68 @@ wsi_display_get_physical_device_display_properties(
}
}
-VkResult
-wsi_display_get_physical_device_display_properties2(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayProperties2KHR *properties)
+static VkResult
+wsi_get_connectors(VkPhysicalDevice physicalDevice)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
if (wsi->fd < 0)
- goto bail;
+ return VK_SUCCESS;
drmModeResPtr mode_res = drmModeGetResources(wsi->fd);
if (!mode_res)
- goto bail;
-
- VK_OUTARRAY_MAKE(conn, properties, property_count);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
/* Get current information */
-
for (int c = 0; c < mode_res->count_connectors; c++) {
struct wsi_display_connector *connector =
wsi_display_get_connector(wsi_device, wsi->fd,
mode_res->connectors[c]);
-
if (!connector) {
drmModeFreeResources(mode_res);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
+ }
+
+ drmModeFreeResources(mode_res);
+ return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pPropertyCount,
+ VkDisplayProperties2KHR *pProperties)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
+ struct wsi_display *wsi =
+ (struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
+
+ /* Get current information */
+ VkResult result = wsi_get_connectors(physicalDevice);
+ if (result != VK_SUCCESS)
+ goto bail;
+ VK_OUTARRAY_MAKE_TYPED(VkDisplayProperties2KHR, conn,
+ pProperties, pPropertyCount);
+
+ wsi_for_each_connector(connector, wsi) {
if (connector->connected) {
- vk_outarray_append(&conn, prop) {
- wsi_display_fill_in_display_properties(wsi_device,
- connector,
- prop);
+ vk_outarray_append_typed(VkDisplayProperties2KHR, &conn, prop) {
+ wsi_display_fill_in_display_properties(connector, prop);
}
}
}
- drmModeFreeResources(mode_res);
-
return vk_outarray_status(&conn);
bail:
- *property_count = 0;
- return VK_SUCCESS;
+ *pPropertyCount = 0;
+ return result;
}
/*
@@ -512,7 +556,6 @@ bail:
*/
static void
wsi_display_fill_in_display_plane_properties(
- struct wsi_device *wsi_device,
struct wsi_display_connector *connector,
VkDisplayPlaneProperties2KHR *properties)
{
@@ -528,74 +571,91 @@ wsi_display_fill_in_display_plane_properties(
}
}
-VkResult
-wsi_display_get_physical_device_display_plane_properties(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayPlanePropertiesKHR *properties)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pPropertyCount,
+ VkDisplayPlanePropertiesKHR *pProperties)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
- VK_OUTARRAY_MAKE(conn, properties, property_count);
+ VkResult result = wsi_get_connectors(physicalDevice);
+ if (result != VK_SUCCESS)
+ goto bail;
+
+ VK_OUTARRAY_MAKE_TYPED(VkDisplayPlanePropertiesKHR, conn,
+ pProperties, pPropertyCount);
wsi_for_each_connector(connector, wsi) {
- vk_outarray_append(&conn, prop) {
+ vk_outarray_append_typed(VkDisplayPlanePropertiesKHR, &conn, prop) {
VkDisplayPlaneProperties2KHR prop2 = {
.sType = VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR,
};
- wsi_display_fill_in_display_plane_properties(wsi_device, connector,
- &prop2);
+ wsi_display_fill_in_display_plane_properties(connector, &prop2);
*prop = prop2.displayPlaneProperties;
}
}
return vk_outarray_status(&conn);
+
+bail:
+ *pPropertyCount = 0;
+ return result;
}
-VkResult
-wsi_display_get_physical_device_display_plane_properties2(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayPlaneProperties2KHR *properties)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pPropertyCount,
+ VkDisplayPlaneProperties2KHR *pProperties)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
- VK_OUTARRAY_MAKE(conn, properties, property_count);
+ /* Get current information */
+ VkResult result = wsi_get_connectors(physicalDevice);
+ if (result != VK_SUCCESS)
+ goto bail;
+
+ VK_OUTARRAY_MAKE_TYPED(VkDisplayPlaneProperties2KHR, conn,
+ pProperties, pPropertyCount);
wsi_for_each_connector(connector, wsi) {
- vk_outarray_append(&conn, prop) {
- wsi_display_fill_in_display_plane_properties(wsi_device, connector,
- prop);
+ vk_outarray_append_typed(VkDisplayPlaneProperties2KHR, &conn, prop) {
+ wsi_display_fill_in_display_plane_properties(connector, prop);
}
}
return vk_outarray_status(&conn);
+
+bail:
+ *pPropertyCount = 0;
+ return result;
}
/*
* Implement vkGetDisplayPlaneSupportedDisplaysKHR (VK_KHR_display)
*/
-VkResult
-wsi_display_get_display_plane_supported_displays(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t plane_index,
- uint32_t *display_count,
- VkDisplayKHR *displays)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice,
+ uint32_t planeIndex,
+ uint32_t *pDisplayCount,
+ VkDisplayKHR *pDisplays)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
- VK_OUTARRAY_MAKE(conn, displays, display_count);
+ VK_OUTARRAY_MAKE_TYPED(VkDisplayKHR, conn, pDisplays, pDisplayCount);
int c = 0;
wsi_for_each_connector(connector, wsi) {
- if (c == plane_index && connector->connected) {
- vk_outarray_append(&conn, display) {
+ if (c == planeIndex && connector->connected) {
+ vk_outarray_append_typed(VkDisplayKHR, &conn, display) {
*display = wsi_display_connector_to_handle(connector);
}
}
@@ -610,7 +670,6 @@ wsi_display_get_display_plane_supported_displays(
static void
wsi_display_fill_in_display_mode_properties(
- struct wsi_device *wsi_device,
struct wsi_display_mode *display_mode,
VkDisplayModeProperties2KHR *properties)
{
@@ -624,53 +683,51 @@ wsi_display_fill_in_display_mode_properties(
(uint32_t) (wsi_display_mode_refresh(display_mode) * 1000 + 0.5);
}
-VkResult
-wsi_display_get_display_mode_properties(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- uint32_t *property_count,
- VkDisplayModePropertiesKHR *properties)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ uint32_t *pPropertyCount,
+ VkDisplayModePropertiesKHR *pProperties)
{
struct wsi_display_connector *connector =
wsi_display_connector_from_handle(display);
- VK_OUTARRAY_MAKE(conn, properties, property_count);
+ VK_OUTARRAY_MAKE_TYPED(VkDisplayModePropertiesKHR, conn,
+ pProperties, pPropertyCount);
wsi_for_each_display_mode(display_mode, connector) {
if (!display_mode->valid)
continue;
- vk_outarray_append(&conn, prop) {
+ vk_outarray_append_typed(VkDisplayModePropertiesKHR, &conn, prop) {
VkDisplayModeProperties2KHR prop2 = {
.sType = VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR,
};
- wsi_display_fill_in_display_mode_properties(wsi_device,
- display_mode, &prop2);
+ wsi_display_fill_in_display_mode_properties(display_mode, &prop2);
*prop = prop2.displayModeProperties;
}
}
return vk_outarray_status(&conn);
}
-VkResult
-wsi_display_get_display_mode_properties2(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- uint32_t *property_count,
- VkDisplayModeProperties2KHR *properties)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ uint32_t *pPropertyCount,
+ VkDisplayModeProperties2KHR *pProperties)
{
struct wsi_display_connector *connector =
wsi_display_connector_from_handle(display);
- VK_OUTARRAY_MAKE(conn, properties, property_count);
+ VK_OUTARRAY_MAKE_TYPED(VkDisplayModeProperties2KHR, conn,
+ pProperties, pPropertyCount);
wsi_for_each_display_mode(display_mode, connector) {
if (!display_mode->valid)
continue;
- vk_outarray_append(&conn, prop) {
- wsi_display_fill_in_display_mode_properties(wsi_device,
- display_mode, prop);
+ vk_outarray_append_typed(VkDisplayModeProperties2KHR, &conn, prop) {
+ wsi_display_fill_in_display_mode_properties(display_mode, prop);
}
}
return vk_outarray_status(&conn);
@@ -688,18 +745,17 @@ wsi_display_mode_matches_vk(wsi_display_mode *wsi,
/*
* Implement vkCreateDisplayModeKHR (VK_KHR_display)
*/
-VkResult
-wsi_display_create_display_mode(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- const VkDisplayModeCreateInfoKHR *create_info,
- const VkAllocationCallbacks *allocator,
- VkDisplayModeKHR *mode)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateDisplayModeKHR(VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ const VkDisplayModeCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDisplayModeKHR *pMode)
{
struct wsi_display_connector *connector =
wsi_display_connector_from_handle(display);
- if (create_info->flags != 0)
+ if (pCreateInfo->flags != 0)
return VK_ERROR_INITIALIZATION_FAILED;
/* Check and see if the requested mode happens to match an existing one and
@@ -709,8 +765,8 @@ wsi_display_create_display_mode(VkPhysicalDevice physical_device,
*/
wsi_for_each_display_mode(display_mode, connector) {
if (display_mode->valid) {
- if (wsi_display_mode_matches_vk(display_mode, &create_info->parameters)) {
- *mode = wsi_display_mode_to_handle(display_mode);
+ if (wsi_display_mode_matches_vk(display_mode, &pCreateInfo->parameters)) {
+ *pMode = wsi_display_mode_to_handle(display_mode);
return VK_SUCCESS;
}
}
@@ -721,53 +777,50 @@ wsi_display_create_display_mode(VkPhysicalDevice physical_device,
/*
* Implement vkGetDisplayPlaneCapabilities
*/
-VkResult
-wsi_get_display_plane_capabilities(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayModeKHR mode_khr,
- uint32_t plane_index,
- VkDisplayPlaneCapabilitiesKHR *capabilities)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice,
+ VkDisplayModeKHR _mode,
+ uint32_t planeIndex,
+ VkDisplayPlaneCapabilitiesKHR *pCapabilities)
{
- struct wsi_display_mode *mode = wsi_display_mode_from_handle(mode_khr);
+ struct wsi_display_mode *mode = wsi_display_mode_from_handle(_mode);
/* XXX use actual values */
- capabilities->supportedAlpha = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR;
- capabilities->minSrcPosition.x = 0;
- capabilities->minSrcPosition.y = 0;
- capabilities->maxSrcPosition.x = 0;
- capabilities->maxSrcPosition.y = 0;
- capabilities->minSrcExtent.width = mode->hdisplay;
- capabilities->minSrcExtent.height = mode->vdisplay;
- capabilities->maxSrcExtent.width = mode->hdisplay;
- capabilities->maxSrcExtent.height = mode->vdisplay;
- capabilities->minDstPosition.x = 0;
- capabilities->minDstPosition.y = 0;
- capabilities->maxDstPosition.x = 0;
- capabilities->maxDstPosition.y = 0;
- capabilities->minDstExtent.width = mode->hdisplay;
- capabilities->minDstExtent.height = mode->vdisplay;
- capabilities->maxDstExtent.width = mode->hdisplay;
- capabilities->maxDstExtent.height = mode->vdisplay;
+ pCapabilities->supportedAlpha = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR;
+ pCapabilities->minSrcPosition.x = 0;
+ pCapabilities->minSrcPosition.y = 0;
+ pCapabilities->maxSrcPosition.x = 0;
+ pCapabilities->maxSrcPosition.y = 0;
+ pCapabilities->minSrcExtent.width = mode->hdisplay;
+ pCapabilities->minSrcExtent.height = mode->vdisplay;
+ pCapabilities->maxSrcExtent.width = mode->hdisplay;
+ pCapabilities->maxSrcExtent.height = mode->vdisplay;
+ pCapabilities->minDstPosition.x = 0;
+ pCapabilities->minDstPosition.y = 0;
+ pCapabilities->maxDstPosition.x = 0;
+ pCapabilities->maxDstPosition.y = 0;
+ pCapabilities->minDstExtent.width = mode->hdisplay;
+ pCapabilities->minDstExtent.height = mode->vdisplay;
+ pCapabilities->maxDstExtent.width = mode->hdisplay;
+ pCapabilities->maxDstExtent.height = mode->vdisplay;
return VK_SUCCESS;
}
-VkResult
-wsi_get_display_plane_capabilities2(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
- VkDisplayPlaneCapabilities2KHR *capabilities)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
+ const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
+ VkDisplayPlaneCapabilities2KHR *pCapabilities)
{
- assert(capabilities->sType ==
+ assert(pCapabilities->sType ==
VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR);
VkResult result =
- wsi_get_display_plane_capabilities(physical_device, wsi_device,
+ wsi_GetDisplayPlaneCapabilitiesKHR(physicalDevice,
pDisplayPlaneInfo->mode,
pDisplayPlaneInfo->planeIndex,
- &capabilities->capabilities);
+ &pCapabilities->capabilities);
- vk_foreach_struct(ext, capabilities->pNext) {
+ vk_foreach_struct(ext, pCapabilities->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
@@ -784,33 +837,37 @@ wsi_get_display_plane_capabilities2(
return result;
}
-VkResult
-wsi_create_display_surface(VkInstance instance,
- const VkAllocationCallbacks *allocator,
- const VkDisplaySurfaceCreateInfoKHR *create_info,
- VkSurfaceKHR *surface_khr)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateDisplayPlaneSurfaceKHR(VkInstance _instance,
+ const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface)
{
- VkIcdSurfaceDisplay *surface = vk_zalloc(allocator, sizeof *surface, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ VkIcdSurfaceDisplay *surface;
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR);
+
+ surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof(*surface), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (surface == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
surface->base.platform = VK_ICD_WSI_PLATFORM_DISPLAY;
- surface->displayMode = create_info->displayMode;
- surface->planeIndex = create_info->planeIndex;
- surface->planeStackIndex = create_info->planeStackIndex;
- surface->transform = create_info->transform;
- surface->globalAlpha = create_info->globalAlpha;
- surface->alphaMode = create_info->alphaMode;
- surface->imageExtent = create_info->imageExtent;
+ surface->displayMode = pCreateInfo->displayMode;
+ surface->planeIndex = pCreateInfo->planeIndex;
+ surface->planeStackIndex = pCreateInfo->planeStackIndex;
+ surface->transform = pCreateInfo->transform;
+ surface->globalAlpha = pCreateInfo->globalAlpha;
+ surface->alphaMode = pCreateInfo->alphaMode;
+ surface->imageExtent = pCreateInfo->imageExtent;
+
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
- *surface_khr = VkIcdSurfaceBase_to_handle(&surface->base);
return VK_SUCCESS;
}
-
static VkResult
wsi_display_surface_get_support(VkIcdSurfaceBase *surface,
struct wsi_device *wsi_device,
@@ -849,22 +906,19 @@ wsi_display_surface_get_capabilities(VkIcdSurfaceBase *surface_base,
caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
caps->maxImageArrayLayers = 1;
- caps->supportedUsageFlags =
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT |
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ caps->supportedUsageFlags = wsi_caps_get_image_usage();
+
+ VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
+ if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
+ caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
return VK_SUCCESS;
}
static VkResult
-wsi_display_surface_get_surface_counters(
- VkIcdSurfaceBase *surface_base,
- VkSurfaceCounterFlagsEXT *counters)
+wsi_display_surface_get_surface_counters(VkSurfaceCounterFlagsEXT *counters)
{
- *counters = VK_SURFACE_COUNTER_VBLANK_EXT;
+ *counters = VK_SURFACE_COUNTER_VBLANK_BIT_EXT;
return VK_SUCCESS;
}
@@ -884,35 +938,91 @@ wsi_display_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
struct wsi_surface_supported_counters *counters =
vk_find_struct( caps->pNext, WSI_SURFACE_SUPPORTED_COUNTERS_MESA);
+ const VkSurfacePresentModeEXT *present_mode =
+ vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
if (counters) {
- result = wsi_display_surface_get_surface_counters(
- icd_surface,
- &counters->supported_surface_counters);
+ result = wsi_display_surface_get_surface_counters(&counters->supported_surface_counters);
+ }
+
+ vk_foreach_struct(ext, caps->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
+ VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
+ protected->supportsProtected = VK_FALSE;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
+ /* Unsupported. */
+ VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
+ scaling->supportedPresentScaling = 0;
+ scaling->supportedPresentGravityX = 0;
+ scaling->supportedPresentGravityY = 0;
+ scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
+ scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
+ /* We only support FIFO. */
+ VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
+ if (compat->pPresentModes) {
+ if (compat->presentModeCount) {
+ assert(present_mode);
+ compat->pPresentModes[0] = present_mode->presentMode;
+ compat->presentModeCount = 1;
+ }
+ } else {
+ compat->presentModeCount = 1;
+ }
+ break;
+ }
+
+ default:
+ /* Ignored */
+ break;
+ }
}
return result;
}
-static const struct {
- VkFormat format;
- uint32_t drm_format;
-} available_surface_formats[] = {
- { .format = VK_FORMAT_B8G8R8A8_SRGB, .drm_format = DRM_FORMAT_XRGB8888 },
- { .format = VK_FORMAT_B8G8R8A8_UNORM, .drm_format = DRM_FORMAT_XRGB8888 },
+struct wsi_display_surface_format {
+ VkSurfaceFormatKHR surface_format;
+ uint32_t drm_format;
+};
+
+static const struct wsi_display_surface_format
+ available_surface_formats[] = {
+ {
+ .surface_format = {
+ .format = VK_FORMAT_B8G8R8A8_SRGB,
+ .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ },
+ .drm_format = DRM_FORMAT_XRGB8888
+ },
+ {
+ .surface_format = {
+ .format = VK_FORMAT_B8G8R8A8_UNORM,
+ .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ },
+ .drm_format = DRM_FORMAT_XRGB8888
+ },
};
static void
-get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
+get_sorted_vk_formats(struct wsi_device *wsi_device, VkSurfaceFormatKHR *sorted_formats)
{
for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++)
- sorted_formats[i] = available_surface_formats[i].format;
+ sorted_formats[i] = available_surface_formats[i].surface_format;
if (wsi_device->force_bgra8_unorm_first) {
for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++) {
- if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
+ if (sorted_formats[i].format == VK_FORMAT_B8G8R8A8_UNORM) {
+ VkSurfaceFormatKHR tmp = sorted_formats[i];
sorted_formats[i] = sorted_formats[0];
- sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
+ sorted_formats[0] = tmp;
break;
}
}
@@ -925,15 +1035,15 @@ wsi_display_surface_get_formats(VkIcdSurfaceBase *icd_surface,
uint32_t *surface_format_count,
VkSurfaceFormatKHR *surface_formats)
{
- VK_OUTARRAY_MAKE(out, surface_formats, surface_format_count);
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
+ surface_formats, surface_format_count);
- VkFormat sorted_formats[ARRAY_SIZE(available_surface_formats)];
+ VkSurfaceFormatKHR sorted_formats[ARRAY_SIZE(available_surface_formats)];
get_sorted_vk_formats(wsi_device, sorted_formats);
for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
- vk_outarray_append(&out, f) {
- f->format = sorted_formats[i];
- f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
+ *f = sorted_formats[i];
}
}
@@ -947,16 +1057,16 @@ wsi_display_surface_get_formats2(VkIcdSurfaceBase *surface,
uint32_t *surface_format_count,
VkSurfaceFormat2KHR *surface_formats)
{
- VK_OUTARRAY_MAKE(out, surface_formats, surface_format_count);
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
+ surface_formats, surface_format_count);
- VkFormat sorted_formats[ARRAY_SIZE(available_surface_formats)];
+ VkSurfaceFormatKHR sorted_formats[ARRAY_SIZE(available_surface_formats)];
get_sorted_vk_formats(wsi_device, sorted_formats);
for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
- vk_outarray_append(&out, f) {
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
- f->surfaceFormat.format = sorted_formats[i];
- f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ f->surfaceFormat = sorted_formats[i];
}
}
@@ -965,12 +1075,14 @@ wsi_display_surface_get_formats2(VkIcdSurfaceBase *surface,
static VkResult
wsi_display_surface_get_present_modes(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
uint32_t *present_mode_count,
VkPresentModeKHR *present_modes)
{
- VK_OUTARRAY_MAKE(conn, present_modes, present_mode_count);
+ VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, conn,
+ present_modes, present_mode_count);
- vk_outarray_append(&conn, present) {
+ vk_outarray_append_typed(VkPresentModeKHR, &conn, present) {
*present = VK_PRESENT_MODE_FIFO_KHR;
}
@@ -985,10 +1097,10 @@ wsi_display_surface_get_present_rectangles(VkIcdSurfaceBase *surface_base,
{
VkIcdSurfaceDisplay *surface = (VkIcdSurfaceDisplay *) surface_base;
wsi_display_mode *mode = wsi_display_mode_from_handle(surface->displayMode);
- VK_OUTARRAY_MAKE(out, pRects, pRectCount);
+ VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
if (wsi_device_matches_drm_fd(wsi_device, mode->connector->wsi->fd)) {
- vk_outarray_append(&out, rect) {
+ vk_outarray_append_typed(VkRect2D, &out, rect) {
*rect = (VkRect2D) {
.offset = { 0, 0 },
.extent = { mode->hdisplay, mode->vdisplay },
@@ -1008,10 +1120,8 @@ wsi_display_destroy_buffer(struct wsi_display *wsi,
}
static VkResult
-wsi_display_image_init(VkDevice device_h,
- struct wsi_swapchain *drv_chain,
+wsi_display_image_init(struct wsi_swapchain *drv_chain,
const VkSwapchainCreateInfoKHR *create_info,
- const VkAllocationCallbacks *allocator,
struct wsi_display_image *image)
{
struct wsi_display_swapchain *chain =
@@ -1020,7 +1130,8 @@ wsi_display_image_init(VkDevice device_h,
uint32_t drm_format = 0;
for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++) {
- if (create_info->imageFormat == available_surface_formats[i].format) {
+ if (create_info->imageFormat == available_surface_formats[i].surface_format.format &&
+ create_info->imageColorSpace == available_surface_formats[i].surface_format.colorSpace) {
drm_format = available_surface_formats[i].drm_format;
break;
}
@@ -1030,20 +1141,16 @@ wsi_display_image_init(VkDevice device_h,
if (drm_format == 0)
return VK_ERROR_DEVICE_LOST;
- VkResult result = wsi_create_native_image(&chain->base, create_info,
- 0, NULL, NULL, NULL,
- &image->base);
+ VkResult result = wsi_create_image(&chain->base, &chain->base.image_info,
+ &image->base);
if (result != VK_SUCCESS)
return result;
memset(image->buffer, 0, sizeof (image->buffer));
for (unsigned int i = 0; i < image->base.num_planes; i++) {
- int ret = drmPrimeFDToHandle(wsi->fd, image->base.fds[i],
+ int ret = drmPrimeFDToHandle(wsi->fd, image->base.dma_buf_fd,
&image->buffer[i]);
-
- close(image->base.fds[i]);
- image->base.fds[i] = -1;
if (ret < 0)
goto fail_handle;
}
@@ -1071,10 +1178,6 @@ fail_handle:
for (unsigned int i = 0; i < image->base.num_planes; i++) {
if (image->buffer[i])
wsi_display_destroy_buffer(wsi, image->buffer[i]);
- if (image->base.fds[i] != -1) {
- close(image->base.fds[i]);
- image->base.fds[i] = -1;
- }
}
wsi_destroy_image(&chain->base, &image->base);
@@ -1084,7 +1187,6 @@ fail_handle:
static void
wsi_display_image_finish(struct wsi_swapchain *drv_chain,
- const VkAllocationCallbacks *allocator,
struct wsi_display_image *image)
{
struct wsi_display_swapchain *chain =
@@ -1105,7 +1207,10 @@ wsi_display_swapchain_destroy(struct wsi_swapchain *drv_chain,
(struct wsi_display_swapchain *) drv_chain;
for (uint32_t i = 0; i < chain->base.image_count; i++)
- wsi_display_image_finish(drv_chain, allocator, &chain->images[i]);
+ wsi_display_image_finish(drv_chain, &chain->images[i]);
+
+ pthread_mutex_destroy(&chain->present_id_mutex);
+ pthread_cond_destroy(&chain->present_id_cond);
wsi_swapchain_finish(&chain->base);
vk_free(allocator, chain);
@@ -1142,6 +1247,30 @@ static VkResult
_wsi_display_queue_next(struct wsi_swapchain *drv_chain);
static void
+wsi_display_present_complete(struct wsi_display_swapchain *swapchain,
+ struct wsi_display_image *image)
+{
+ if (image->present_id) {
+ pthread_mutex_lock(&swapchain->present_id_mutex);
+ if (image->present_id > swapchain->present_id) {
+ swapchain->present_id = image->present_id;
+ pthread_cond_broadcast(&swapchain->present_id_cond);
+ }
+ pthread_mutex_unlock(&swapchain->present_id_mutex);
+ }
+}
+
+static void
+wsi_display_surface_error(struct wsi_display_swapchain *swapchain, VkResult result)
+{
+ pthread_mutex_lock(&swapchain->present_id_mutex);
+ swapchain->present_id = UINT64_MAX;
+ swapchain->present_id_error = result;
+ pthread_cond_broadcast(&swapchain->present_id_cond);
+ pthread_mutex_unlock(&swapchain->present_id_mutex);
+}
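
The two helpers above are the producer side of the present_id bookkeeping; the matching consumer, wherever the swapchain's wait_for_present hook ends up implemented, is essentially a condition-variable wait. A simplified sketch (timeout conversion and chain->status handling omitted; <pthread.h> and <errno.h> are already included by this file):

   /* Simplified sketch of waiting for a given present_id to complete. */
   static VkResult
   wait_for_present_id(struct wsi_display_swapchain *chain,
                       uint64_t present_id, const struct timespec *abs_timeout)
   {
      VkResult result = VK_SUCCESS;

      pthread_mutex_lock(&chain->present_id_mutex);
      while (chain->present_id < present_id && result == VK_SUCCESS) {
         int ret = pthread_cond_timedwait(&chain->present_id_cond,
                                          &chain->present_id_mutex, abs_timeout);
         if (ret == ETIMEDOUT)
            result = VK_TIMEOUT;
      }
      if (result == VK_SUCCESS && chain->present_id_error != VK_SUCCESS)
         result = chain->present_id_error;
      pthread_mutex_unlock(&chain->present_id_mutex);

      return result;
   }
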
+
+static void
wsi_display_page_flip_handler2(int fd,
unsigned int frame,
unsigned int sec,
@@ -1155,6 +1284,8 @@ wsi_display_page_flip_handler2(int fd,
wsi_display_debug("image %ld displayed at %d\n",
image - &(image->chain->images[0]), frame);
image->state = WSI_IMAGE_DISPLAYING;
+ wsi_display_present_complete(chain, image);
+
wsi_display_idle_old_displaying(image);
VkResult result = _wsi_display_queue_next(&(chain->base));
if (result != VK_SUCCESS)
@@ -1246,6 +1377,21 @@ wsi_display_stop_wait_thread(struct wsi_display *wsi)
pthread_mutex_unlock(&wsi->wait_mutex);
}
+static int
+cond_timedwait_ns(pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ uint64_t timeout_ns)
+{
+ struct timespec abs_timeout = {
+ .tv_sec = timeout_ns / 1000000000ULL,
+ .tv_nsec = timeout_ns % 1000000000ULL,
+ };
+
+ int ret = pthread_cond_timedwait(cond, mutex, &abs_timeout);
+ wsi_display_debug("%9ld done waiting for event %d\n", pthread_self(), ret);
+ return ret;
+}
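
Note that timeout_ns here is an absolute time, not a duration; callers with a relative timeout convert first, for example via wsi_rel_to_abs_time() defined earlier in this file. The 16 ms figure below is only an example:

   /* Example only: wait up to ~16 ms (one 60 Hz frame) for a display event.
    * Must be called with wsi->wait_mutex held. */
   uint64_t abs_ns = wsi_rel_to_abs_time(16 * 1000 * 1000ull);
   int ret = cond_timedwait_ns(&wsi->wait_cond, &wsi->wait_mutex, abs_ns);
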
+
/*
* Wait for at least one event from the kernel to be processed.
* Call with wait_mutex held
@@ -1254,23 +1400,40 @@ static int
wsi_display_wait_for_event(struct wsi_display *wsi,
uint64_t timeout_ns)
{
- int ret;
-
- ret = wsi_display_start_wait_thread(wsi);
+ int ret = wsi_display_start_wait_thread(wsi);
if (ret)
return ret;
- struct timespec abs_timeout = {
- .tv_sec = timeout_ns / 1000000000ULL,
- .tv_nsec = timeout_ns % 1000000000ULL,
- };
+ return cond_timedwait_ns(&wsi->wait_cond, &wsi->wait_mutex, timeout_ns);
+}
- ret = pthread_cond_timedwait(&wsi->wait_cond, &wsi->wait_mutex,
- &abs_timeout);
+/* Wait for device event to be processed.
+ * Call with wait_mutex held
+ */
+static int
+wsi_device_wait_for_event(struct wsi_display *wsi,
+ uint64_t timeout_ns)
+{
+ return cond_timedwait_ns(&wsi->hotplug_cond, &wsi->wait_mutex, timeout_ns);
+}
- wsi_display_debug("%9ld done waiting for event %d\n", pthread_self(), ret);
- return ret;
+static VkResult
+wsi_display_release_images(struct wsi_swapchain *drv_chain,
+ uint32_t count, const uint32_t *indices)
+{
+ struct wsi_display_swapchain *chain = (struct wsi_display_swapchain *)drv_chain;
+ if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
+ return chain->status;
+
+ for (uint32_t i = 0; i < count; i++) {
+ uint32_t index = indices[i];
+ assert(index < chain->base.image_count);
+ assert(chain->images[index].state == WSI_IMAGE_DRAWING);
+ chain->images[index].state = WSI_IMAGE_IDLE;
+ }
+
+ return VK_SUCCESS;
}
static VkResult
@@ -1314,6 +1477,7 @@ wsi_display_acquire_next_image(struct wsi_swapchain *drv_chain,
if (ret && ret != ETIMEDOUT) {
result = VK_ERROR_SURFACE_LOST_KHR;
+ wsi_display_surface_error(chain, result);
goto done;
}
}
@@ -1477,18 +1641,13 @@ bail:
}
static VkResult
-wsi_display_fence_wait(struct wsi_fence *fence_wsi, uint64_t timeout)
+wsi_display_fence_wait(struct wsi_display_fence *fence, uint64_t timeout)
{
- const struct wsi_device *wsi_device = fence_wsi->wsi_device;
- struct wsi_display *wsi =
- (struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
- struct wsi_display_fence *fence = (struct wsi_display_fence *) fence_wsi;
-
wsi_display_debug("%9lu wait fence %lu %ld\n",
pthread_self(), fence->sequence,
- (int64_t) (timeout - wsi_common_get_current_time()));
- wsi_display_debug_code(uint64_t start_ns = wsi_common_get_current_time());
- pthread_mutex_lock(&wsi->wait_mutex);
+ (int64_t) (timeout - os_time_get_nano()));
+ wsi_display_debug_code(uint64_t start_ns = os_time_get_nano());
+ pthread_mutex_lock(&fence->wsi->wait_mutex);
VkResult result;
int ret = 0;
@@ -1507,7 +1666,10 @@ wsi_display_fence_wait(struct wsi_fence *fence_wsi, uint64_t timeout)
break;
}
- ret = wsi_display_wait_for_event(wsi, timeout);
+ if (fence->device_event)
+ ret = wsi_device_wait_for_event(fence->wsi, timeout);
+ else
+ ret = wsi_display_wait_for_event(fence->wsi, timeout);
if (ret && ret != ETIMEDOUT) {
wsi_display_debug("%9lu fence %lu error\n",
@@ -1516,10 +1678,10 @@ wsi_display_fence_wait(struct wsi_fence *fence_wsi, uint64_t timeout)
break;
}
}
- pthread_mutex_unlock(&wsi->wait_mutex);
+ pthread_mutex_unlock(&fence->wsi->wait_mutex);
wsi_display_debug("%9lu fence wait %f ms\n",
pthread_self(),
- ((int64_t) (wsi_common_get_current_time() - start_ns)) /
+ ((int64_t) (os_time_get_nano() - start_ns)) /
1.0e6);
return result;
}
@@ -1528,17 +1690,14 @@ static void
wsi_display_fence_check_free(struct wsi_display_fence *fence)
{
if (fence->event_received && fence->destroyed)
- vk_free(fence->base.alloc, fence);
+ vk_free(fence->wsi->alloc, fence);
}
static void wsi_display_fence_event_handler(struct wsi_display_fence *fence)
{
- struct wsi_display *wsi =
- (struct wsi_display *) fence->base.wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
-
if (fence->syncobj) {
- (void) drmSyncobjSignal(wsi->fd, &fence->syncobj, 1);
- (void) drmSyncobjDestroy(wsi->fd, fence->syncobj);
+ (void) drmSyncobjSignal(fence->wsi->syncobj_fd, &fence->syncobj, 1);
+ (void) drmSyncobjDestroy(fence->wsi->syncobj_fd, fence->syncobj);
}
fence->event_received = true;
@@ -1546,9 +1705,15 @@ static void wsi_display_fence_event_handler(struct wsi_display_fence *fence)
}
static void
-wsi_display_fence_destroy(struct wsi_fence *fence_wsi)
+wsi_display_fence_destroy(struct wsi_display_fence *fence)
{
- struct wsi_display_fence *fence = (struct wsi_display_fence *) fence_wsi;
+ /* Destroy hotplug fence list. */
+ if (fence->device_event) {
+ pthread_mutex_lock(&fence->wsi->wait_mutex);
+ list_del(&fence->link);
+ pthread_mutex_unlock(&fence->wsi->wait_mutex);
+ fence->event_received = true;
+ }
assert(!fence->destroyed);
fence->destroyed = true;
@@ -1556,35 +1721,25 @@ wsi_display_fence_destroy(struct wsi_fence *fence_wsi)
}
static struct wsi_display_fence *
-wsi_display_fence_alloc(VkDevice device,
- const struct wsi_device *wsi_device,
- VkDisplayKHR display,
- const VkAllocationCallbacks *allocator,
- int sync_fd)
+wsi_display_fence_alloc(struct wsi_display *wsi, int sync_fd)
{
- struct wsi_display *wsi =
- (struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
struct wsi_display_fence *fence =
- vk_zalloc2(wsi->alloc, allocator, sizeof (*fence),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_zalloc(wsi->alloc, sizeof (*fence),
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!fence)
return NULL;
if (sync_fd >= 0) {
- int ret = drmSyncobjFDToHandle(wsi->fd, sync_fd, &fence->syncobj);
+ int ret = drmSyncobjFDToHandle(wsi->syncobj_fd, sync_fd, &fence->syncobj);
+
if (ret) {
- vk_free2(wsi->alloc, allocator, fence);
+ vk_free(wsi->alloc, fence);
return NULL;
}
}
- fence->base.device = device;
- fence->base.display = display;
- fence->base.wsi_device = wsi_device;
- fence->base.alloc = allocator ? allocator : wsi->alloc;
- fence->base.wait = wsi_display_fence_wait;
- fence->base.destroy = wsi_display_fence_destroy;
+ fence->wsi = wsi;
fence->event_received = false;
fence->destroyed = false;
fence->sequence = ++fence_sequence;
@@ -1592,6 +1747,69 @@ wsi_display_fence_alloc(VkDevice device,
}
static VkResult
+wsi_display_sync_init(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t initial_value)
+{
+ assert(initial_value == 0);
+ return VK_SUCCESS;
+}
+
+static void
+wsi_display_sync_finish(struct vk_device *device,
+ struct vk_sync *sync)
+{
+ struct wsi_display_sync *wsi_sync =
+ container_of(sync, struct wsi_display_sync, sync);
+ if (wsi_sync->fence)
+ wsi_display_fence_destroy(wsi_sync->fence);
+}
+
+static VkResult
+wsi_display_sync_wait(struct vk_device *device,
+ struct vk_sync *sync,
+ uint64_t wait_value,
+ enum vk_sync_wait_flags wait_flags,
+ uint64_t abs_timeout_ns)
+{
+ struct wsi_display_sync *wsi_sync =
+ container_of(sync, struct wsi_display_sync, sync);
+
+ assert(wait_value == 0);
+ assert(wait_flags == VK_SYNC_WAIT_COMPLETE);
+
+ return wsi_display_fence_wait(wsi_sync->fence, abs_timeout_ns);
+}
+
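+/* Expose display fences to the common runtime as a binary, CPU-waitable
+ * vk_sync so they can back the VkFence returned by
+ * vkRegisterDeviceEventEXT / vkRegisterDisplayEventEXT. */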
+static const struct vk_sync_type wsi_display_sync_type = {
+ .size = sizeof(struct wsi_display_sync),
+ .features = VK_SYNC_FEATURE_BINARY |
+ VK_SYNC_FEATURE_CPU_WAIT,
+ .init = wsi_display_sync_init,
+ .finish = wsi_display_sync_finish,
+ .wait = wsi_display_sync_wait,
+};
+
+static VkResult
+wsi_display_sync_create(struct vk_device *device,
+ struct wsi_display_fence *fence,
+ struct vk_sync **sync_out)
+{
+ VkResult result = vk_sync_create(device, &wsi_display_sync_type,
+ 0 /* flags */,
+ 0 /* initial_value */, sync_out);
+ if (result != VK_SUCCESS)
+ return result;
+
+ struct wsi_display_sync *sync =
+ container_of(*sync_out, struct wsi_display_sync, sync);
+
+ sync->fence = fence;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
wsi_register_vblank_event(struct wsi_display_fence *fence,
const struct wsi_device *wsi_device,
VkDisplayKHR display,
@@ -1607,6 +1825,17 @@ wsi_register_vblank_event(struct wsi_display_fence *fence,
if (wsi->fd < 0)
return VK_ERROR_INITIALIZATION_FAILED;
+   /* A display event may be registered before the first page flip, at which
+    * point crtc_id will still be 0. If so, set up the connector here so that
+    * drmCrtcQueueSequence can succeed.
+    */
+ if (!connector->crtc_id) {
+ VkResult ret = wsi_display_setup_connector(connector,
+ connector->current_mode);
+ if (ret != VK_SUCCESS)
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
for (;;) {
int ret = drmCrtcQueueSequence(wsi->fd, connector->crtc_id,
flags,
@@ -1662,8 +1891,10 @@ _wsi_display_queue_next(struct wsi_swapchain *drv_chain)
wsi_display_mode_from_handle(surface->displayMode);
wsi_display_connector *connector = display_mode->connector;
- if (wsi->fd < 0)
+ if (wsi->fd < 0) {
+ wsi_display_surface_error(chain, VK_ERROR_SURFACE_LOST_KHR);
return VK_ERROR_SURFACE_LOST_KHR;
+ }
if (display_mode != connector->current_mode)
connector->active = false;
@@ -1735,6 +1966,7 @@ _wsi_display_queue_next(struct wsi_swapchain *drv_chain)
* previous image is now idle.
*/
image->state = WSI_IMAGE_DISPLAYING;
+ wsi_display_present_complete(chain, image);
wsi_display_idle_old_displaying(image);
connector->active = true;
return VK_SUCCESS;
@@ -1744,6 +1976,7 @@ _wsi_display_queue_next(struct wsi_swapchain *drv_chain)
if (ret != -EACCES) {
connector->active = false;
image->state = WSI_IMAGE_IDLE;
+ wsi_display_surface_error(chain, VK_ERROR_SURFACE_LOST_KHR);
return VK_ERROR_SURFACE_LOST_KHR;
}
@@ -1758,6 +1991,7 @@ _wsi_display_queue_next(struct wsi_swapchain *drv_chain)
static VkResult
wsi_display_queue_present(struct wsi_swapchain *drv_chain,
uint32_t image_index,
+ uint64_t present_id,
const VkPresentRegionKHR *damage)
{
struct wsi_display_swapchain *chain =
@@ -1770,11 +2004,17 @@ wsi_display_queue_present(struct wsi_swapchain *drv_chain,
if (chain->status != VK_SUCCESS)
return chain->status;
+ image->present_id = present_id;
+
assert(image->state == WSI_IMAGE_DRAWING);
wsi_display_debug("present %d\n", image_index);
pthread_mutex_lock(&wsi->wait_mutex);
+   /* Make sure that the page flip handler is processed in finite time if
+    * using present wait. */
+ if (present_id)
+ wsi_display_start_wait_thread(wsi);
+
image->flip_sequence = ++chain->flip_sequence;
image->state = WSI_IMAGE_QUEUED;
@@ -1791,6 +2031,48 @@ wsi_display_queue_present(struct wsi_swapchain *drv_chain,
}
static VkResult
+wsi_display_wait_for_present(struct wsi_swapchain *wsi_chain,
+ uint64_t waitValue,
+ uint64_t timeout)
+{
+ struct wsi_display_swapchain *chain = (struct wsi_display_swapchain *)wsi_chain;
+ struct timespec abs_timespec;
+ uint64_t abs_timeout = 0;
+
+ if (timeout != 0)
+ abs_timeout = os_time_get_absolute_timeout(timeout);
+
+ /* Need to observe that the swapchain semaphore has been unsignalled,
+ * as this is guaranteed when a present is complete. */
+ VkResult result = wsi_swapchain_wait_for_present_semaphore(
+ &chain->base, waitValue, timeout);
+ if (result != VK_SUCCESS)
+ return result;
+
+ timespec_from_nsec(&abs_timespec, abs_timeout);
+
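+   /* present_id_cond is created with CLOCK_MONOTONIC, matching the absolute
+    * timeout computed above. */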
+ pthread_mutex_lock(&chain->present_id_mutex);
+ while (chain->present_id < waitValue) {
+ int ret = pthread_cond_timedwait(&chain->present_id_cond,
+ &chain->present_id_mutex,
+ &abs_timespec);
+ if (ret == ETIMEDOUT) {
+ result = VK_TIMEOUT;
+ break;
+ }
+ if (ret) {
+ result = VK_ERROR_DEVICE_LOST;
+ break;
+ }
+ }
+
+ if (result == VK_SUCCESS && chain->present_id_error)
+ result = chain->present_id_error;
+ pthread_mutex_unlock(&chain->present_id_mutex);
+ return result;
+}
+
+static VkResult
wsi_display_surface_create_swapchain(
VkIcdSurfaceBase *icd_surface,
VkDevice device,
@@ -1813,9 +2095,30 @@ wsi_display_surface_create_swapchain(
if (chain == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
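+   /* The display is driven through the device's own DRM fd, so images are
+    * allocated on the same GPU that renders them. */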
+ struct wsi_drm_image_params image_params = {
+ .base.image_type = WSI_IMAGE_TYPE_DRM,
+ .same_gpu = true,
+ };
+
+ int ret = pthread_mutex_init(&chain->present_id_mutex, NULL);
+ if (ret != 0) {
+ vk_free(allocator, chain);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ bool bret = wsi_init_pthread_cond_monotonic(&chain->present_id_cond);
+ if (!bret) {
+ pthread_mutex_destroy(&chain->present_id_mutex);
+ vk_free(allocator, chain);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
VkResult result = wsi_swapchain_init(wsi_device, &chain->base, device,
- create_info, allocator);
+ create_info, &image_params.base,
+ allocator);
if (result != VK_SUCCESS) {
+ pthread_cond_destroy(&chain->present_id_cond);
+ pthread_mutex_destroy(&chain->present_id_mutex);
vk_free(allocator, chain);
return result;
}
@@ -1823,7 +2126,9 @@ wsi_display_surface_create_swapchain(
chain->base.destroy = wsi_display_swapchain_destroy;
chain->base.get_wsi_image = wsi_display_get_wsi_image;
chain->base.acquire_next_image = wsi_display_acquire_next_image;
+ chain->base.release_images = wsi_display_release_images;
chain->base.queue_present = wsi_display_queue_present;
+ chain->base.wait_for_present = wsi_display_wait_for_present;
chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, create_info);
chain->base.image_count = num_images;
@@ -1833,15 +2138,18 @@ wsi_display_surface_create_swapchain(
chain->surface = (VkIcdSurfaceDisplay *) icd_surface;
for (uint32_t image = 0; image < chain->base.image_count; image++) {
- result = wsi_display_image_init(device, &chain->base,
- create_info, allocator,
+ result = wsi_display_image_init(&chain->base,
+ create_info,
&chain->images[image]);
if (result != VK_SUCCESS) {
while (image > 0) {
--image;
- wsi_display_image_finish(&chain->base, allocator,
+ wsi_display_image_finish(&chain->base,
&chain->images[image]);
}
+ pthread_cond_destroy(&chain->present_id_cond);
+ pthread_mutex_destroy(&chain->present_id_mutex);
+ wsi_swapchain_finish(&chain->base);
vk_free(allocator, chain);
goto fail_init_images;
}
@@ -1855,31 +2163,6 @@ fail_init_images:
return result;
}
-static bool
-wsi_init_pthread_cond_monotonic(pthread_cond_t *cond)
-{
- pthread_condattr_t condattr;
- bool ret = false;
-
- if (pthread_condattr_init(&condattr) != 0)
- goto fail_attr_init;
-
- if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0)
- goto fail_attr_set;
-
- if (pthread_cond_init(cond, &condattr) != 0)
- goto fail_cond_init;
-
- ret = true;
-
-fail_cond_init:
-fail_attr_set:
- pthread_condattr_destroy(&condattr);
-fail_attr_init:
- return ret;
-}
-
-
/*
 * Local version of the libdrm helper. Added to avoid depending on a
 * bleeding-edge version of the library.
@@ -1903,6 +2186,88 @@ local_drmIsMaster(int fd)
return drmAuthMagic(fd, 0) != -EACCES;
}
+#ifdef HAVE_LIBUDEV
+static void *
+udev_event_listener_thread(void *data)
+{
+ struct wsi_device *wsi_device = data;
+ struct wsi_display *wsi =
+ (struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
+
+ struct udev *u = udev_new();
+ if (!u)
+ goto fail;
+
+ struct udev_monitor *mon =
+ udev_monitor_new_from_netlink(u, "udev");
+ if (!mon)
+ goto fail_udev;
+
+ int ret =
+ udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", "drm_minor");
+ if (ret < 0)
+ goto fail_udev_monitor;
+
+ ret = udev_monitor_enable_receiving(mon);
+ if (ret < 0)
+ goto fail_udev_monitor;
+
+ int udev_fd = udev_monitor_get_fd(mon);
+
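+   /* Allow wsi_display_finish_wsi() to cancel this thread even while it is
+    * blocked in poll(). */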
+ pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+ for (;;) {
+ nfds_t nfds = 1;
+ struct pollfd fds[1] = {
+ {
+ .fd = udev_fd,
+ .events = POLLIN,
+ },
+ };
+
+ int ret = poll(fds, nfds, -1);
+ if (ret > 0) {
+ if (fds[0].revents & POLLIN) {
+            struct udev_device *dev = udev_monitor_receive_device(mon);
+            if (!dev)
+               continue;
+
+            /* Ignore the event if it is not a hotplug event. */
+            const char *hotplug = udev_device_get_property_value(dev, "HOTPLUG");
+            if (!hotplug || !atoi(hotplug)) {
+               udev_device_unref(dev);
+               continue;
+            }
+
+ /* Note, this supports both drmSyncobjWait for fence->syncobj
+ * and wsi_display_wait_for_event.
+ */
+ pthread_mutex_lock(&wsi->wait_mutex);
+ pthread_cond_broadcast(&wsi->hotplug_cond);
+ list_for_each_entry(struct wsi_display_fence, fence,
+ &wsi_device->hotplug_fences, link) {
+ if (fence->syncobj)
+ drmSyncobjSignal(wsi->syncobj_fd, &fence->syncobj, 1);
+ fence->event_received = true;
+ }
+ pthread_mutex_unlock(&wsi->wait_mutex);
+ udev_device_unref(dev);
+ }
+ } else if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ udev_monitor_unref(mon);
+ udev_unref(u);
+
+ return 0;
+
+fail_udev_monitor:
+ udev_monitor_unref(mon);
+fail_udev:
+ udev_unref(u);
+fail:
+ wsi_display_debug("critical hotplug thread error\n");
+ return 0;
+}
+#endif
+
VkResult
wsi_display_init_wsi(struct wsi_device *wsi_device,
const VkAllocationCallbacks *alloc,
@@ -1921,6 +2286,8 @@ wsi_display_init_wsi(struct wsi_device *wsi_device,
if (wsi->fd != -1 && !local_drmIsMaster(wsi->fd))
wsi->fd = -1;
+ wsi->syncobj_fd = wsi->fd;
+
wsi->alloc = alloc;
list_inithead(&wsi->connectors);
@@ -1936,6 +2303,11 @@ wsi_display_init_wsi(struct wsi_device *wsi_device,
goto fail_cond;
}
+ if (!wsi_init_pthread_cond_monotonic(&wsi->hotplug_cond)) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail_hotplug_cond;
+ }
+
wsi->base.get_support = wsi_display_surface_get_support;
wsi->base.get_capabilities2 = wsi_display_surface_get_capabilities2;
wsi->base.get_formats = wsi_display_surface_get_formats;
@@ -1948,6 +2320,8 @@ wsi_display_init_wsi(struct wsi_device *wsi_device,
return VK_SUCCESS;
+fail_hotplug_cond:
+ pthread_cond_destroy(&wsi->wait_cond);
fail_cond:
pthread_mutex_destroy(&wsi->wait_mutex);
fail_mutex:
@@ -1972,8 +2346,15 @@ wsi_display_finish_wsi(struct wsi_device *wsi_device,
}
wsi_display_stop_wait_thread(wsi);
+
+ if (wsi->hotplug_thread) {
+ pthread_cancel(wsi->hotplug_thread);
+ pthread_join(wsi->hotplug_thread, NULL);
+ }
+
pthread_mutex_destroy(&wsi->wait_mutex);
pthread_cond_destroy(&wsi->wait_cond);
+ pthread_cond_destroy(&wsi->hotplug_cond);
vk_free(alloc, wsi);
}
@@ -1982,11 +2363,12 @@ wsi_display_finish_wsi(struct wsi_device *wsi_device,
/*
* Implement vkReleaseDisplay
*/
-VkResult
-wsi_release_display(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_ReleaseDisplayEXT(VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
@@ -1997,6 +2379,8 @@ wsi_release_display(VkPhysicalDevice physical_device,
wsi->fd = -1;
}
+ wsi_display_connector_from_handle(display)->active = false;
+
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
wsi_display_connector_from_handle(display)->output = None;
#endif
@@ -2318,7 +2702,7 @@ wsi_display_get_output(struct wsi_device *wsi_device,
connector->connected =
oir->connection != XCB_RANDR_CONNECTION_DISCONNECTED;
- wsi_display_invalidate_connector_modes(wsi_device, connector);
+ wsi_display_invalidate_connector_modes(connector);
xcb_randr_mode_t *x_modes = xcb_randr_get_output_info_modes(oir);
for (int m = 0; m < oir->num_modes; m++) {
@@ -2400,12 +2784,13 @@ wsi_display_find_crtc_for_output(xcb_connection_t *connection,
return idle_crtc;
}
-VkResult
-wsi_acquire_xlib_display(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- Display *dpy,
- VkDisplayKHR display)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_AcquireXlibDisplayEXT(VkPhysicalDevice physicalDevice,
+ Display *dpy,
+ VkDisplayKHR display)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
xcb_connection_t *connection = XGetXCBConnection(dpy);
@@ -2463,33 +2848,36 @@ wsi_acquire_xlib_display(VkPhysicalDevice physical_device,
return VK_SUCCESS;
}
-VkResult
-wsi_get_randr_output_display(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetRandROutputDisplayEXT(VkPhysicalDevice physicalDevice,
Display *dpy,
- RROutput output,
- VkDisplayKHR *display)
+ RROutput rrOutput,
+ VkDisplayKHR *pDisplay)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
xcb_connection_t *connection = XGetXCBConnection(dpy);
struct wsi_display_connector *connector =
- wsi_display_get_output(wsi_device, connection, (xcb_randr_output_t) output);
+ wsi_display_get_output(wsi_device, connection,
+ (xcb_randr_output_t) rrOutput);
if (connector)
- *display = wsi_display_connector_to_handle(connector);
+ *pDisplay = wsi_display_connector_to_handle(connector);
else
- *display = VK_NULL_HANDLE;
+ *pDisplay = VK_NULL_HANDLE;
return VK_SUCCESS;
}
#endif
/* VK_EXT_display_control */
-VkResult
-wsi_display_power_control(VkDevice device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- const VkDisplayPowerInfoEXT *display_power_info)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_DisplayPowerControlEXT(VkDevice _device,
+ VkDisplayKHR display,
+ const VkDisplayPowerInfoEXT *pDisplayPowerInfo)
{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct wsi_device *wsi_device = device->physical->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
struct wsi_display_connector *connector =
@@ -2499,7 +2887,7 @@ wsi_display_power_control(VkDevice device,
if (wsi->fd < 0)
return VK_ERROR_INITIALIZATION_FAILED;
- switch (display_power_info->powerState) {
+ switch (pDisplayPowerInfo->powerState) {
case VK_DISPLAY_POWER_STATE_OFF_EXT:
mode = DRM_MODE_DPMS_OFF;
break;
@@ -2518,25 +2906,96 @@ wsi_display_power_control(VkDevice device,
}
VkResult
-wsi_register_device_event(VkDevice device,
+wsi_register_device_event(VkDevice _device,
struct wsi_device *wsi_device,
const VkDeviceEventInfoEXT *device_event_info,
const VkAllocationCallbacks *allocator,
- struct wsi_fence **fence_p,
+ struct vk_sync **sync_out,
int sync_fd)
{
- return VK_ERROR_FEATURE_NOT_PRESENT;
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct wsi_display *wsi =
+ (struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
+ VkResult ret = VK_SUCCESS;
+
+#ifdef HAVE_LIBUDEV
+   /* Start listening for output change notifications. The udev listener
+    * thread is created lazily on the first device event registration and
+    * cancelled in wsi_display_finish_wsi(). */
+ pthread_mutex_lock(&wsi->wait_mutex);
+ if (!wsi->hotplug_thread) {
+ if (pthread_create(&wsi->hotplug_thread, NULL, udev_event_listener_thread,
+ wsi_device)) {
+ pthread_mutex_unlock(&wsi->wait_mutex);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ }
+ pthread_mutex_unlock(&wsi->wait_mutex);
+#endif
+
+ struct wsi_display_fence *fence;
+ assert(device_event_info->deviceEvent ==
+ VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT);
+
+ fence = wsi_display_fence_alloc(wsi, sync_fd);
+
+ if (!fence)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ fence->device_event = true;
+
+ pthread_mutex_lock(&wsi->wait_mutex);
+ list_addtail(&fence->link, &wsi_device->hotplug_fences);
+ pthread_mutex_unlock(&wsi->wait_mutex);
+
+ if (sync_out) {
+ ret = wsi_display_sync_create(device, fence, sync_out);
+ if (ret != VK_SUCCESS)
+ wsi_display_fence_destroy(fence);
+ } else {
+ wsi_display_fence_destroy(fence);
+ }
+
+ return ret;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_RegisterDeviceEventEXT(VkDevice _device, const VkDeviceEventInfoEXT *device_event_info,
+ const VkAllocationCallbacks *allocator, VkFence *_fence)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_fence *fence;
+ VkResult ret;
+
+ const VkFenceCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ .flags = 0,
+ };
+ ret = vk_fence_create(device, &info, allocator, &fence);
+ if (ret != VK_SUCCESS)
+ return ret;
+
+ ret = wsi_register_device_event(_device,
+ device->physical->wsi_device,
+ device_event_info,
+ allocator,
+ &fence->temporary,
+ -1);
+ if (ret == VK_SUCCESS)
+ *_fence = vk_fence_to_handle(fence);
+ else
+ vk_fence_destroy(device, fence, allocator);
+ return ret;
}
VkResult
-wsi_register_display_event(VkDevice device,
+wsi_register_display_event(VkDevice _device,
struct wsi_device *wsi_device,
VkDisplayKHR display,
const VkDisplayEventInfoEXT *display_event_info,
const VkAllocationCallbacks *allocator,
- struct wsi_fence **fence_p,
+ struct vk_sync **sync_out,
int sync_fd)
{
+ VK_FROM_HANDLE(vk_device, device, _device);
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
struct wsi_display_fence *fence;
@@ -2545,7 +3004,7 @@ wsi_register_display_event(VkDevice device,
switch (display_event_info->displayEvent) {
case VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT:
- fence = wsi_display_fence_alloc(device, wsi_device, display, allocator, sync_fd);
+ fence = wsi_display_fence_alloc(wsi, sync_fd);
if (!fence)
return VK_ERROR_OUT_OF_HOST_MEMORY;
@@ -2554,13 +3013,16 @@ wsi_register_display_event(VkDevice device,
DRM_CRTC_SEQUENCE_RELATIVE, 1, NULL);
if (ret == VK_SUCCESS) {
- if (fence_p)
- *fence_p = &fence->base;
- else
- fence->base.destroy(&fence->base);
+ if (sync_out) {
+ ret = wsi_display_sync_create(device, fence, sync_out);
+ if (ret != VK_SUCCESS)
+ wsi_display_fence_destroy(fence);
+ } else {
+ wsi_display_fence_destroy(fence);
+ }
} else if (fence != NULL) {
if (fence->syncobj)
- drmSyncobjDestroy(wsi->fd, fence->syncobj);
+ drmSyncobjDestroy(wsi->syncobj_fd, fence->syncobj);
vk_free2(wsi->alloc, allocator, fence);
}
@@ -2573,14 +3035,51 @@ wsi_register_display_event(VkDevice device,
return ret;
}
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_RegisterDisplayEventEXT(VkDevice _device, VkDisplayKHR display,
+ const VkDisplayEventInfoEXT *display_event_info,
+ const VkAllocationCallbacks *allocator, VkFence *_fence)
+{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct vk_fence *fence;
+ VkResult ret;
-VkResult
-wsi_get_swapchain_counter(VkDevice device,
- struct wsi_device *wsi_device,
- VkSwapchainKHR _swapchain,
- VkSurfaceCounterFlagBitsEXT flag_bits,
- uint64_t *value)
+ const VkFenceCreateInfo info = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ .flags = 0,
+ };
+ ret = vk_fence_create(device, &info, allocator, &fence);
+ if (ret != VK_SUCCESS)
+ return ret;
+
+ ret = wsi_register_display_event(
+ _device, device->physical->wsi_device,
+ display, display_event_info, allocator, &fence->temporary, -1);
+
+ if (ret == VK_SUCCESS)
+ *_fence = vk_fence_to_handle(fence);
+ else
+ vk_fence_destroy(device, fence, allocator);
+ return ret;
+}
+
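+/* Let the driver override the DRM fd used for syncobj ioctls; it defaults to
+ * wsi->fd, which may be -1 when the device is not DRM master. */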
+void
+wsi_display_setup_syncobj_fd(struct wsi_device *wsi_device,
+ int fd)
+{
+ struct wsi_display *wsi =
+ (struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
+ wsi->syncobj_fd = fd;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetSwapchainCounterEXT(VkDevice _device,
+ VkSwapchainKHR _swapchain,
+ VkSurfaceCounterFlagBitsEXT counter,
+ uint64_t *pCounterValue)
{
+ VK_FROM_HANDLE(vk_device, device, _device);
+ struct wsi_device *wsi_device = device->physical->wsi_device;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
struct wsi_display_swapchain *swapchain =
@@ -2592,67 +3091,73 @@ wsi_get_swapchain_counter(VkDevice device,
return VK_ERROR_INITIALIZATION_FAILED;
if (!connector->active) {
- *value = 0;
+ *pCounterValue = 0;
return VK_SUCCESS;
}
- int ret = drmCrtcGetSequence(wsi->fd, connector->crtc_id, value, NULL);
+ int ret = drmCrtcGetSequence(wsi->fd, connector->crtc_id,
+ pCounterValue, NULL);
if (ret)
- *value = 0;
+ *pCounterValue = 0;
return VK_SUCCESS;
}
-VkResult
-wsi_acquire_drm_display(VkPhysicalDevice pDevice,
- struct wsi_device *wsi_device,
- int drm_fd,
- VkDisplayKHR display)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_AcquireDrmDisplayEXT(VkPhysicalDevice physicalDevice,
+ int32_t drmFd,
+ VkDisplayKHR display)
{
- if (!wsi_device_matches_drm_fd(wsi_device, drm_fd))
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
+
+ if (!wsi_device_matches_drm_fd(wsi_device, drmFd))
return VK_ERROR_UNKNOWN;
struct wsi_display *wsi =
(struct wsi_display *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_DISPLAY];
   /* XXX no support for multiple leases yet */
- if (wsi->fd >= 0 || !local_drmIsMaster(drm_fd))
+ if (wsi->fd >= 0 || !local_drmIsMaster(drmFd))
return VK_ERROR_INITIALIZATION_FAILED;
struct wsi_display_connector *connector =
wsi_display_connector_from_handle(display);
drmModeConnectorPtr drm_connector =
- drmModeGetConnectorCurrent(drm_fd, connector->id);
+ drmModeGetConnectorCurrent(drmFd, connector->id);
if (!drm_connector)
return VK_ERROR_INITIALIZATION_FAILED;
drmModeFreeConnector(drm_connector);
- wsi->fd = drm_fd;
+ wsi->fd = drmFd;
return VK_SUCCESS;
}
-VkResult
-wsi_get_drm_display(VkPhysicalDevice pDevice,
- struct wsi_device *wsi_device,
- int drm_fd,
- int connector_id,
- VkDisplayKHR *display)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_GetDrmDisplayEXT(VkPhysicalDevice physicalDevice,
+ int32_t drmFd,
+ uint32_t connectorId,
+ VkDisplayKHR *pDisplay)
{
- if (!wsi_device_matches_drm_fd(wsi_device, drm_fd))
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
+
+ if (!wsi_device_matches_drm_fd(wsi_device, drmFd)) {
+ *pDisplay = VK_NULL_HANDLE;
return VK_ERROR_UNKNOWN;
+ }
struct wsi_display_connector *connector =
- wsi_display_get_connector(wsi_device, drm_fd, connector_id);
+ wsi_display_get_connector(wsi_device, drmFd, connectorId);
if (!connector) {
- *display = VK_NULL_HANDLE;
+ *pDisplay = VK_NULL_HANDLE;
return VK_ERROR_UNKNOWN;
}
- *display = wsi_display_connector_to_handle(connector);
+ *pDisplay = wsi_display_connector_to_handle(connector);
return VK_SUCCESS;
}
-
diff --git a/src/vulkan/wsi/wsi_common_display.h b/src/vulkan/wsi/wsi_common_display.h
index 3f78d00dab5..dd54b9b775f 100644
--- a/src/vulkan/wsi/wsi_common_display.h
+++ b/src/vulkan/wsi/wsi_common_display.h
@@ -27,118 +27,15 @@
#include <xf86drm.h>
#include <xf86drmMode.h>
-VkResult
-wsi_display_get_physical_device_display_properties(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayPropertiesKHR *properties);
-
-VkResult
-wsi_display_get_physical_device_display_properties2(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *pPropertyCount,
- VkDisplayProperties2KHR *pProperties);
-
-VkResult
-wsi_display_get_physical_device_display_plane_properties(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayPlanePropertiesKHR *properties);
-
-VkResult
-wsi_display_get_physical_device_display_plane_properties2(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t *property_count,
- VkDisplayPlaneProperties2KHR *properties);
-
-VkResult
-wsi_display_get_display_plane_supported_displays(
- VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- uint32_t plane_index,
- uint32_t *display_count,
- VkDisplayKHR *displays);
-
-VkResult
-wsi_display_get_display_mode_properties(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- uint32_t *property_count,
- VkDisplayModePropertiesKHR *properties);
-
-VkResult
-wsi_display_get_display_mode_properties2(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- uint32_t *property_count,
- VkDisplayModeProperties2KHR *properties);
-
-VkResult
-wsi_display_create_display_mode(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- const VkDisplayModeCreateInfoKHR *create_info,
- const VkAllocationCallbacks *allocator,
- VkDisplayModeKHR *mode);
-
-VkResult
-wsi_get_display_plane_capabilities(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayModeKHR mode_khr,
- uint32_t plane_index,
- VkDisplayPlaneCapabilitiesKHR *capabilities);
-
-VkResult
-wsi_get_display_plane_capabilities2(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
- VkDisplayPlaneCapabilities2KHR *capabilities);
-
-VkResult
-wsi_create_display_surface(VkInstance instance,
- const VkAllocationCallbacks *pAllocator,
- const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface);
-
-VkResult
-wsi_release_display(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display);
-
-
-#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
-VkResult
-wsi_acquire_xlib_display(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- Display *dpy,
- VkDisplayKHR display);
-
-VkResult
-wsi_get_randr_output_display(VkPhysicalDevice physical_device,
- struct wsi_device *wsi_device,
- Display *dpy,
- RROutput output,
- VkDisplayKHR *display);
-
-#endif /* VK_USE_PLATFORM_XLIB_XRANDR_EXT */
+struct vk_sync;
/* VK_EXT_display_control */
VkResult
-wsi_display_power_control(VkDevice device,
- struct wsi_device *wsi_device,
- VkDisplayKHR display,
- const VkDisplayPowerInfoEXT *display_power_info);
-
-VkResult
wsi_register_device_event(VkDevice device,
struct wsi_device *wsi_device,
const VkDeviceEventInfoEXT *device_event_info,
const VkAllocationCallbacks *allocator,
- struct wsi_fence **fence,
+ struct vk_sync **sync,
int sync_fd);
VkResult
@@ -147,28 +44,7 @@ wsi_register_display_event(VkDevice device,
VkDisplayKHR display,
const VkDisplayEventInfoEXT *display_event_info,
const VkAllocationCallbacks *allocator,
- struct wsi_fence **fence,
+ struct vk_sync **sync,
int sync_fd);
-VkResult
-wsi_get_swapchain_counter(VkDevice device,
- struct wsi_device *wsi_device,
- VkSwapchainKHR swapchain,
- VkSurfaceCounterFlagBitsEXT flag_bits,
- uint64_t *value);
-
-/* VK_EXT_acquire_drm_display */
-VkResult
-wsi_acquire_drm_display(VkPhysicalDevice pDevice,
- struct wsi_device *wsi_device,
- int drmFd,
- VkDisplayKHR display);
-
-VkResult
-wsi_get_drm_display(VkPhysicalDevice pDevice,
- struct wsi_device *wsi_device,
- int drmFd,
- int connectorId,
- VkDisplayKHR *display);
-
#endif
diff --git a/src/vulkan/wsi/wsi_common_drm.c b/src/vulkan/wsi/wsi_common_drm.c
index 496143c6f52..474e59f88bd 100644
--- a/src/vulkan/wsi/wsi_common_drm.c
+++ b/src/vulkan/wsi/wsi_common_drm.c
@@ -22,18 +22,423 @@
*/
#include "wsi_common_private.h"
+#include "wsi_common_drm.h"
#include "util/macros.h"
#include "util/os_file.h"
+#include "util/log.h"
#include "util/xmlconfig.h"
+#include "vk_device.h"
+#include "vk_physical_device.h"
+#include "vk_log.h"
#include "vk_util.h"
#include "drm-uapi/drm_fourcc.h"
+#include "drm-uapi/dma-buf.h"
+#include "util/libsync.h"
+#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <xf86drm.h>
+static VkResult
+wsi_dma_buf_export_sync_file(int dma_buf_fd, int *sync_file_fd)
+{
+ /* Don't keep trying an IOCTL that doesn't exist. */
+ static bool no_dma_buf_sync_file = false;
+ if (no_dma_buf_sync_file)
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+
+ struct dma_buf_export_sync_file export = {
+ .flags = DMA_BUF_SYNC_RW,
+ .fd = -1,
+ };
+ int ret = drmIoctl(dma_buf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &export);
+ if (ret) {
+ if (errno == ENOTTY || errno == EBADF || errno == ENOSYS) {
+ no_dma_buf_sync_file = true;
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ } else {
+ mesa_loge("MESA: failed to export sync file '%s'", strerror(errno));
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ }
+
+ *sync_file_fd = export.fd;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_dma_buf_import_sync_file(int dma_buf_fd, int sync_file_fd)
+{
+ /* Don't keep trying an IOCTL that doesn't exist. */
+ static bool no_dma_buf_sync_file = false;
+ if (no_dma_buf_sync_file)
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+
+ struct dma_buf_import_sync_file import = {
+ .flags = DMA_BUF_SYNC_RW,
+ .fd = sync_file_fd,
+ };
+ int ret = drmIoctl(dma_buf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &import);
+ if (ret) {
+ if (errno == ENOTTY || errno == EBADF || errno == ENOSYS) {
+ no_dma_buf_sync_file = true;
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ } else {
+ mesa_loge("MESA: failed to import sync file '%s'", strerror(errno));
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+prepare_signal_dma_buf_from_semaphore(struct wsi_swapchain *chain,
+ const struct wsi_image *image)
+{
+ VkResult result;
+
+ if (!(chain->wsi->semaphore_export_handle_types &
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT))
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+
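+   /* Probe support first: round-trip a sync file through the dma-buf export
+    * and import ioctls before creating the semaphore we cache on the chain. */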
+ int sync_file_fd = -1;
+ result = wsi_dma_buf_export_sync_file(image->dma_buf_fd, &sync_file_fd);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = wsi_dma_buf_import_sync_file(image->dma_buf_fd, sync_file_fd);
+ close(sync_file_fd);
+ if (result != VK_SUCCESS)
+ return result;
+
+ /* If we got here, all our checks pass. Create the actual semaphore */
+ const VkExportSemaphoreCreateInfo export_info = {
+ .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
+ .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ };
+ const VkSemaphoreCreateInfo semaphore_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = &export_info,
+ };
+ result = chain->wsi->CreateSemaphore(chain->device, &semaphore_info,
+ &chain->alloc,
+ &chain->dma_buf_semaphore);
+ if (result != VK_SUCCESS)
+ return result;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+wsi_prepare_signal_dma_buf_from_semaphore(struct wsi_swapchain *chain,
+ const struct wsi_image *image)
+{
+ VkResult result;
+
+   /* We cache result - 1 in the swapchain: VK_SUCCESS is 0, so 0 still means
+    * "not yet probed" and any cached value is <= -1. */
+ if (unlikely(chain->signal_dma_buf_from_semaphore == 0)) {
+ result = prepare_signal_dma_buf_from_semaphore(chain, image);
+ assert(result <= 0);
+ chain->signal_dma_buf_from_semaphore = (int)result - 1;
+ } else {
+ result = (VkResult)(chain->signal_dma_buf_from_semaphore + 1);
+ }
+
+ return result;
+}
+
+VkResult
+wsi_signal_dma_buf_from_semaphore(const struct wsi_swapchain *chain,
+ const struct wsi_image *image)
+{
+ VkResult result;
+
+ const VkSemaphoreGetFdInfoKHR get_fd_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
+ .semaphore = chain->dma_buf_semaphore,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ };
+ int sync_file_fd = -1;
+ result = chain->wsi->GetSemaphoreFdKHR(chain->device, &get_fd_info,
+ &sync_file_fd);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = wsi_dma_buf_import_sync_file(image->dma_buf_fd, sync_file_fd);
+ close(sync_file_fd);
+ return result;
+}
+
+static const struct vk_sync_type *
+get_sync_file_sync_type(struct vk_device *device,
+ enum vk_sync_features req_features)
+{
+ for (const struct vk_sync_type *const *t =
+ device->physical->supported_sync_types; *t; t++) {
+ if (req_features & ~(*t)->features)
+ continue;
+
+ if ((*t)->import_sync_file != NULL)
+ return *t;
+ }
+
+ return NULL;
+}
+
+VkResult
+wsi_create_sync_for_dma_buf_wait(const struct wsi_swapchain *chain,
+ const struct wsi_image *image,
+ enum vk_sync_features req_features,
+ struct vk_sync **sync_out)
+{
+ VK_FROM_HANDLE(vk_device, device, chain->device);
+ VkResult result;
+
+ const struct vk_sync_type *sync_type =
+ get_sync_file_sync_type(device, req_features);
+ if (sync_type == NULL)
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+
+ int sync_file_fd = -1;
+ result = wsi_dma_buf_export_sync_file(image->dma_buf_fd, &sync_file_fd);
+ if (result != VK_SUCCESS)
+ return result;
+
+ struct vk_sync *sync = NULL;
+ result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
+ if (result != VK_SUCCESS)
+ goto fail_close_sync_file;
+
+ result = vk_sync_import_sync_file(device, sync, sync_file_fd);
+ if (result != VK_SUCCESS)
+ goto fail_destroy_sync;
+
+ close(sync_file_fd);
+ *sync_out = sync;
+
+ return VK_SUCCESS;
+
+fail_destroy_sync:
+ vk_sync_destroy(device, sync);
+fail_close_sync_file:
+ close(sync_file_fd);
+
+ return result;
+}
+
+VkResult
+wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
+ struct wsi_image *image)
+{
+ /* Cleanup of any failures is handled by the caller in wsi_create_image
+ * calling wsi_destroy_image -> wsi_destroy_image_explicit_sync_drm. */
+
+ VK_FROM_HANDLE(vk_device, device, chain->device);
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result = VK_SUCCESS;
+ int ret = 0;
+
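+   /* Each explicit-sync slot keeps three views of the same timeline: the
+    * VkSemaphore, its exported opaque fd, and a DRM syncobj handle imported
+    * from that fd. */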
+ const VkExportSemaphoreCreateInfo semaphore_export_info = {
+ .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
+ /* This is a syncobj fd for any drivers using syncobj. */
+ .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+ };
+
+ const VkSemaphoreTypeCreateInfo semaphore_type_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+ .pNext = &semaphore_export_info,
+ .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
+ };
+
+ const VkSemaphoreCreateInfo semaphore_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = &semaphore_type_info,
+ };
+
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ result = wsi->CreateSemaphore(chain->device,
+ &semaphore_info,
+ &chain->alloc,
+ &image->explicit_sync[i].semaphore);
+ if (result != VK_SUCCESS)
+ return result;
+
+ const VkSemaphoreGetFdInfoKHR semaphore_get_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
+ .semaphore = image->explicit_sync[i].semaphore,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+ };
+
+ result = wsi->GetSemaphoreFdKHR(chain->device, &semaphore_get_info, &image->explicit_sync[i].fd);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ ret = drmSyncobjFDToHandle(device->drm_fd, image->explicit_sync[i].fd, &image->explicit_sync[i].handle);
+ if (ret != 0)
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
+ struct wsi_image *image)
+{
+ VK_FROM_HANDLE(vk_device, device, chain->device);
+ const struct wsi_device *wsi = chain->wsi;
+
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ if (image->explicit_sync[i].handle != 0) {
+ drmSyncobjDestroy(device->drm_fd, image->explicit_sync[i].handle);
+ image->explicit_sync[i].handle = 0;
+ }
+
+ if (image->explicit_sync[i].fd >= 0) {
+ close(image->explicit_sync[i].fd);
+ image->explicit_sync[i].fd = -1;
+ }
+
+ if (image->explicit_sync[i].semaphore != VK_NULL_HANDLE) {
+ wsi->DestroySemaphore(chain->device, image->explicit_sync[i].semaphore, &chain->alloc);
+ image->explicit_sync[i].semaphore = VK_NULL_HANDLE;
+ }
+ }
+}
+
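+/* Create a vk_sync that is signalled immediately, for the case where there is
+ * no pending release point to forward. */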
+static VkResult
+wsi_create_sync_imm(struct vk_device *device, struct vk_sync **sync_out)
+{
+ const struct vk_sync_type *sync_type =
+ get_sync_file_sync_type(device, VK_SYNC_FEATURE_CPU_WAIT);
+ struct vk_sync *sync = NULL;
+ VkResult result;
+
+ result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
+ if (result != VK_SUCCESS)
+ goto error;
+
+ result = vk_sync_signal(device, sync, 0);
+ if (result != VK_SUCCESS)
+ goto error;
+
+ *sync_out = sync;
+ goto done;
+
+error:
+ vk_sync_destroy(device, sync);
+done:
+ return result;
+}
+
+VkResult
+wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
+ const struct wsi_image *image,
+ enum vk_sync_features req_features,
+ struct vk_sync **sync_out)
+{
+ VK_FROM_HANDLE(vk_device, device, chain->device);
+ const struct vk_sync_type *sync_type =
+ get_sync_file_sync_type(device, VK_SYNC_FEATURE_CPU_WAIT);
+ VkResult result = VK_SUCCESS;
+ struct vk_sync *sync = NULL;
+ int sync_file_fds[WSI_ES_COUNT] = { -1, -1 };
+ uint32_t tmp_handles[WSI_ES_COUNT] = { 0, 0 };
+ int merged_sync_fd = -1;
+ if (sync_type == NULL)
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+
+ if (image->explicit_sync[WSI_ES_RELEASE].timeline == 0) {
+ /* Signal immediately, there is no release to forward. */
+ return wsi_create_sync_imm(device, sync_out);
+ }
+
+   /* drmSyncobjExportSyncFile only understands binary payloads, so transfer
+    * each timeline point to a temporary binary syncobj (a surrogate handle)
+    * before exporting it as a sync file.
+    */
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ if (drmSyncobjCreate(device->drm_fd, 0, &tmp_handles[i])) {
+ result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to create temp syncobj. Errno: %d - %s", errno, strerror(errno));
+ goto fail;
+ }
+
+ if (drmSyncobjTransfer(device->drm_fd, tmp_handles[i], 0,
+ image->explicit_sync[i].handle, image->explicit_sync[i].timeline, 0)) {
+ result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to transfer syncobj. Was the timeline point materialized? Errno: %d - %s", errno, strerror(errno));
+ goto fail;
+ }
+ if (drmSyncobjExportSyncFile(device->drm_fd, tmp_handles[i], &sync_file_fds[i])) {
+ result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to export sync file. Errno: %d - %s", errno, strerror(errno));
+ goto fail;
+ }
+ }
+
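+   /* A merged sync file signals only once all of its inputs have signalled,
+    * so the resulting vk_sync covers both the acquire and release points. */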
+ merged_sync_fd = sync_merge("acquire merged sync", sync_file_fds[WSI_ES_ACQUIRE], sync_file_fds[WSI_ES_RELEASE]);
+ if (merged_sync_fd < 0) {
+ result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to merge acquire + release sync timelines. Errno: %d - %s", errno, strerror(errno));
+ goto fail;
+ }
+
+ result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ result = vk_sync_import_sync_file(device, sync, merged_sync_fd);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ *sync_out = sync;
+ goto done;
+
+fail:
+ if (sync)
+ vk_sync_destroy(device, sync);
+done:
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ if (tmp_handles[i])
+ drmSyncobjDestroy(device->drm_fd, tmp_handles[i]);
+ }
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ if (sync_file_fds[i] >= 0)
+ close(sync_file_fds[i]);
+ }
+ if (merged_sync_fd >= 0)
+ close(merged_sync_fd);
+ return result;
+}
+
+
+bool
+wsi_common_drm_devices_equal(int fd_a, int fd_b)
+{
+ drmDevicePtr device_a, device_b;
+ int ret;
+
+ ret = drmGetDevice2(fd_a, 0, &device_a);
+ if (ret)
+ return false;
+
+ ret = drmGetDevice2(fd_b, 0, &device_b);
+ if (ret) {
+ drmFreeDevice(&device_a);
+ return false;
+ }
+
+ bool result = drmDevicesEqual(device_a, device_b);
+
+ drmFreeDevice(&device_a);
+ drmFreeDevice(&device_b);
+
+ return result;
+}
+
bool
wsi_device_matches_drm_fd(const struct wsi_device *wsi, int drm_fd)
{
@@ -64,115 +469,49 @@ wsi_device_matches_drm_fd(const struct wsi_device *wsi, int drm_fd)
}
static uint32_t
-select_memory_type(const struct wsi_device *wsi,
- bool want_device_local,
- uint32_t type_bits)
+prime_select_buffer_memory_type(const struct wsi_device *wsi,
+ uint32_t type_bits)
{
- assert(type_bits);
-
- bool all_local = true;
- for (uint32_t i = 0; i < wsi->memory_props.memoryTypeCount; i++) {
- const VkMemoryType type = wsi->memory_props.memoryTypes[i];
- bool local = type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
- if ((type_bits & (1 << i)) && local == want_device_local)
- return i;
- all_local &= local;
- }
-
- /* ignore want_device_local when all memory types are device-local */
- if (all_local) {
- assert(!want_device_local);
- return ffs(type_bits) - 1;
- }
-
- unreachable("No memory type found");
+ return wsi_select_memory_type(wsi, 0 /* req_props */,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ type_bits);
}
-static uint32_t
-vk_format_size(VkFormat format)
+static const struct VkDrmFormatModifierPropertiesEXT *
+get_modifier_props(const struct wsi_image_info *info, uint64_t modifier)
{
- switch (format) {
- case VK_FORMAT_B8G8R8A8_UNORM:
- case VK_FORMAT_B8G8R8A8_SRGB:
- return 4;
- default:
- unreachable("Unknown WSI Format");
+ for (uint32_t i = 0; i < info->modifier_prop_count; i++) {
+ if (info->modifier_props[i].drmFormatModifier == modifier)
+ return &info->modifier_props[i];
}
+ return NULL;
}
-VkResult
-wsi_create_native_image(const struct wsi_swapchain *chain,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- uint32_t num_modifier_lists,
- const uint32_t *num_modifiers,
- const uint64_t *const *modifiers,
- uint8_t *(alloc_shm)(struct wsi_image *image, unsigned size),
- struct wsi_image *image)
+static VkResult
+wsi_create_native_image_mem(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image);
+
+static VkResult
+wsi_configure_native_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_drm_image_params *params,
+ struct wsi_image_info *info)
{
const struct wsi_device *wsi = chain->wsi;
- VkResult result;
-
- memset(image, 0, sizeof(*image));
- for (int i = 0; i < ARRAY_SIZE(image->fds); i++)
- image->fds[i] = -1;
-
- struct wsi_image_create_info image_wsi_info = {
- .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
- };
- VkImageCreateInfo image_info = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
- .pNext = &image_wsi_info,
- .flags = 0,
- .imageType = VK_IMAGE_TYPE_2D,
- .format = pCreateInfo->imageFormat,
- .extent = {
- .width = pCreateInfo->imageExtent.width,
- .height = pCreateInfo->imageExtent.height,
- .depth = 1,
- },
- .mipLevels = 1,
- .arrayLayers = 1,
- .samples = VK_SAMPLE_COUNT_1_BIT,
- .tiling = VK_IMAGE_TILING_OPTIMAL,
- .usage = pCreateInfo->imageUsage,
- .sharingMode = pCreateInfo->imageSharingMode,
- .queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount,
- .pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices,
- .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
- };
- VkImageFormatListCreateInfoKHR image_format_list;
- if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
- image_info.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
- VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR;
-
- const VkImageFormatListCreateInfoKHR *format_list =
- vk_find_struct_const(pCreateInfo->pNext,
- IMAGE_FORMAT_LIST_CREATE_INFO_KHR);
-
-#ifndef NDEBUG
- assume(format_list && format_list->viewFormatCount > 0);
- bool format_found = false;
- for (int i = 0; i < format_list->viewFormatCount; i++)
- if (pCreateInfo->imageFormat == format_list->pViewFormats[i])
- format_found = true;
- assert(format_found);
-#endif
+ VkExternalMemoryHandleTypeFlags handle_type =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- image_format_list = *format_list;
- image_format_list.pNext = NULL;
- __vk_append_struct(&image_info, &image_format_list);
- }
+ VkResult result = wsi_configure_image(chain, pCreateInfo, handle_type, info);
+ if (result != VK_SUCCESS)
+ return result;
- VkImageDrmFormatModifierListCreateInfoEXT image_modifier_list;
+ info->explicit_sync = params->explicit_sync;
- uint32_t image_modifier_count = 0, modifier_prop_count = 0;
- struct VkDrmFormatModifierPropertiesEXT *modifier_props = NULL;
- uint64_t *image_modifiers = NULL;
- if (num_modifier_lists == 0) {
+ if (params->num_modifier_lists == 0) {
/* If we don't have modifiers, fall back to the legacy "scanout" flag */
- image_wsi_info.scanout = true;
+ info->wsi.scanout = true;
} else {
/* The winsys can't request modifiers if we don't support them. */
assert(wsi->supports_modifiers);
@@ -183,33 +522,31 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
.pNext = &modifier_props_list,
};
- wsi->GetPhysicalDeviceFormatProperties2KHR(wsi->pdevice,
- pCreateInfo->imageFormat,
- &format_props);
+ wsi->GetPhysicalDeviceFormatProperties2(wsi->pdevice,
+ pCreateInfo->imageFormat,
+ &format_props);
assert(modifier_props_list.drmFormatModifierCount > 0);
- modifier_props = vk_alloc(&chain->alloc,
- sizeof(*modifier_props) *
- modifier_props_list.drmFormatModifierCount,
- 8,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (!modifier_props) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
- }
-
- modifier_props_list.pDrmFormatModifierProperties = modifier_props;
- wsi->GetPhysicalDeviceFormatProperties2KHR(wsi->pdevice,
- pCreateInfo->imageFormat,
- &format_props);
+ info->modifier_props =
+ vk_alloc(&chain->alloc,
+ sizeof(*info->modifier_props) *
+ modifier_props_list.drmFormatModifierCount,
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (info->modifier_props == NULL)
+ goto fail_oom;
+
+ modifier_props_list.pDrmFormatModifierProperties = info->modifier_props;
+ wsi->GetPhysicalDeviceFormatProperties2(wsi->pdevice,
+ pCreateInfo->imageFormat,
+ &format_props);
/* Call GetImageFormatProperties with every modifier and filter the list
* down to those that we know work.
*/
- modifier_prop_count = 0;
+ info->modifier_prop_count = 0;
for (uint32_t i = 0; i < modifier_props_list.drmFormatModifierCount; i++) {
VkPhysicalDeviceImageDrmFormatModifierInfoEXT mod_info = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT,
- .drmFormatModifier = modifier_props[i].drmFormatModifier,
+ .drmFormatModifier = info->modifier_props[i].drmFormatModifier,
.sharingMode = pCreateInfo->imageSharingMode,
.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount,
.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices,
@@ -220,16 +557,22 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
.type = VK_IMAGE_TYPE_2D,
.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,
.usage = pCreateInfo->imageUsage,
- .flags = image_info.flags,
+ .flags = info->create.flags,
};
- VkImageFormatListCreateInfoKHR format_list;
- if (image_info.flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
- format_list = image_format_list;
+ VkImageFormatListCreateInfo format_list;
+ if (info->create.flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
+ format_list = info->format_list;
format_list.pNext = NULL;
__vk_append_struct(&format_info, &format_list);
}
+ struct wsi_image_create_info wsi_info = (struct wsi_image_create_info) {
+ .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
+ .pNext = NULL,
+ };
+ __vk_append_struct(&format_info, &wsi_info);
+
VkImageFormatProperties2 format_props = {
.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
.pNext = NULL,
@@ -238,34 +581,30 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
result = wsi->GetPhysicalDeviceImageFormatProperties2(wsi->pdevice,
&format_info,
&format_props);
- if (result == VK_SUCCESS)
- modifier_props[modifier_prop_count++] = modifier_props[i];
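+      /* Also drop modifiers whose maximum image extent cannot hold the
+       * requested swapchain extent. */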
+ if (result == VK_SUCCESS &&
+ pCreateInfo->imageExtent.width <= format_props.imageFormatProperties.maxExtent.width &&
+ pCreateInfo->imageExtent.height <= format_props.imageFormatProperties.maxExtent.height)
+ info->modifier_props[info->modifier_prop_count++] = info->modifier_props[i];
}
uint32_t max_modifier_count = 0;
- for (uint32_t l = 0; l < num_modifier_lists; l++)
- max_modifier_count = MAX2(max_modifier_count, num_modifiers[l]);
-
- image_modifiers = vk_alloc(&chain->alloc,
- sizeof(*image_modifiers) *
- max_modifier_count,
- 8,
- VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (!image_modifiers) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
- }
+ for (uint32_t l = 0; l < params->num_modifier_lists; l++)
+ max_modifier_count = MAX2(max_modifier_count, params->num_modifiers[l]);
+
+ uint64_t *image_modifiers =
+ vk_alloc(&chain->alloc, sizeof(*image_modifiers) * max_modifier_count,
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!image_modifiers)
+ goto fail_oom;
- image_modifier_count = 0;
- for (uint32_t l = 0; l < num_modifier_lists; l++) {
+ uint32_t image_modifier_count = 0;
+ for (uint32_t l = 0; l < params->num_modifier_lists; l++) {
/* Walk the modifier lists and construct a list of supported
* modifiers.
*/
- for (uint32_t i = 0; i < num_modifiers[l]; i++) {
- for (uint32_t j = 0; j < modifier_prop_count; j++) {
- if (modifier_props[j].drmFormatModifier == modifiers[l][i])
- image_modifiers[image_modifier_count++] = modifiers[l][i];
- }
+ for (uint32_t i = 0; i < params->num_modifiers[l]; i++) {
+ if (get_modifier_props(info, params->modifiers[l][i]))
+ image_modifiers[image_modifier_count++] = params->modifiers[l][i];
}
/* We only want to take the modifiers from the first list */
@@ -274,47 +613,64 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
}
if (image_modifier_count > 0) {
- image_modifier_list = (VkImageDrmFormatModifierListCreateInfoEXT) {
+ info->create.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+ info->drm_mod_list = (VkImageDrmFormatModifierListCreateInfoEXT) {
.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
.drmFormatModifierCount = image_modifier_count,
.pDrmFormatModifiers = image_modifiers,
};
- image_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
- __vk_append_struct(&image_info, &image_modifier_list);
+ image_modifiers = NULL;
+ __vk_append_struct(&info->create, &info->drm_mod_list);
} else {
+ vk_free(&chain->alloc, image_modifiers);
/* TODO: Add a proper error here */
assert(!"Failed to find a supported modifier! This should never "
"happen because LINEAR should always be available");
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
+ goto fail_oom;
}
}
- result = wsi->CreateImage(chain->device, &image_info,
- &chain->alloc, &image->image);
- if (result != VK_SUCCESS)
- goto fail;
+ info->create_mem = wsi_create_native_image_mem;
- VkMemoryRequirements reqs;
- wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
+ return VK_SUCCESS;
- void *sw_host_ptr = NULL;
- if (alloc_shm) {
- VkSubresourceLayout layout;
+fail_oom:
+ wsi_destroy_image_info(chain, info);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+}
- wsi->GetImageSubresourceLayout(chain->device, image->image,
- &(VkImageSubresource) {
- .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .mipLevel = 0,
- .arrayLayer = 0,
- }, &layout);
- sw_host_ptr = (*alloc_shm)(image, layout.size);
- }
+static VkResult
+wsi_init_image_dmabuf_fd(const struct wsi_swapchain *chain,
+ struct wsi_image *image,
+ bool linear)
+{
+ const struct wsi_device *wsi = chain->wsi;
+ const VkMemoryGetFdInfoKHR memory_get_fd_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
+ .pNext = NULL,
+ .memory = linear ? image->blit.memory : image->memory,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ };
+
+ return wsi->GetMemoryFdKHR(chain->device, &memory_get_fd_info,
+ &image->dma_buf_fd);
+}
+
+static VkResult
+wsi_create_native_image_mem(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result;
+
+ VkMemoryRequirements reqs;
+ wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
const struct wsi_memory_allocate_info memory_wsi_info = {
.sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
.pNext = NULL,
- .implicit_sync = true,
+ .implicit_sync = !info->explicit_sync,
};
const VkExportMemoryAllocateInfo memory_export_info = {
.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
@@ -327,61 +683,38 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
.image = image->image,
.buffer = VK_NULL_HANDLE,
};
- const VkImportMemoryHostPointerInfoEXT host_ptr_info = {
- .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
- .pNext = &memory_dedicated_info,
- .pHostPointer = sw_host_ptr,
- };
const VkMemoryAllocateInfo memory_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
- .pNext = sw_host_ptr ? (void *)&host_ptr_info : (void *)&memory_dedicated_info,
+ .pNext = &memory_dedicated_info,
.allocationSize = reqs.size,
- .memoryTypeIndex = select_memory_type(wsi, true, reqs.memoryTypeBits),
+ .memoryTypeIndex =
+ wsi_select_device_memory_type(wsi, reqs.memoryTypeBits),
};
result = wsi->AllocateMemory(chain->device, &memory_info,
&chain->alloc, &image->memory);
if (result != VK_SUCCESS)
- goto fail;
+ return result;
- result = wsi->BindImageMemory(chain->device, image->image,
- image->memory, 0);
+ result = wsi_init_image_dmabuf_fd(chain, image, false);
if (result != VK_SUCCESS)
- goto fail;
-
- int fd = -1;
- if (!wsi->sw) {
- const VkMemoryGetFdInfoKHR memory_get_fd_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
- .pNext = NULL,
- .memory = image->memory,
- .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
+ return result;
- result = wsi->GetMemoryFdKHR(chain->device, &memory_get_fd_info, &fd);
- if (result != VK_SUCCESS)
- goto fail;
- }
-
- if (!wsi->sw && num_modifier_lists > 0) {
+ if (info->drm_mod_list.drmFormatModifierCount > 0) {
VkImageDrmFormatModifierPropertiesEXT image_mod_props = {
.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
};
result = wsi->GetImageDrmFormatModifierPropertiesEXT(chain->device,
image->image,
&image_mod_props);
- if (result != VK_SUCCESS) {
- close(fd);
- goto fail;
- }
+ if (result != VK_SUCCESS)
+ return result;
+
image->drm_modifier = image_mod_props.drmFormatModifier;
assert(image->drm_modifier != DRM_FORMAT_MOD_INVALID);
- for (uint32_t j = 0; j < modifier_prop_count; j++) {
- if (modifier_props[j].drmFormatModifier == image->drm_modifier) {
- image->num_planes = modifier_props[j].drmFormatModifierPlaneCount;
- break;
- }
- }
+ const struct VkDrmFormatModifierPropertiesEXT *mod_props =
+ get_modifier_props(info, image->drm_modifier);
+ image->num_planes = mod_props->drmFormatModifierPlaneCount;
for (uint32_t p = 0; p < image->num_planes; p++) {
const VkImageSubresource image_subresource = {
@@ -395,18 +728,6 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
image->sizes[p] = image_layout.size;
image->row_pitches[p] = image_layout.rowPitch;
image->offsets[p] = image_layout.offset;
- if (p == 0) {
- image->fds[p] = fd;
- } else {
- image->fds[p] = os_dupfd_cloexec(fd);
- if (image->fds[p] == -1) {
- for (uint32_t i = 0; i < p; i++)
- close(image->fds[i]);
-
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
- }
- }
}
} else {
const VkImageSubresource image_subresource = {
@@ -423,234 +744,252 @@ wsi_create_native_image(const struct wsi_swapchain *chain,
image->sizes[0] = reqs.size;
image->row_pitches[0] = image_layout.rowPitch;
image->offsets[0] = 0;
- image->fds[0] = fd;
}
- vk_free(&chain->alloc, modifier_props);
- vk_free(&chain->alloc, image_modifiers);
-
return VK_SUCCESS;
-
-fail:
- vk_free(&chain->alloc, modifier_props);
- vk_free(&chain->alloc, image_modifiers);
- wsi_destroy_image(chain, image);
-
- return result;
-}
-
-static inline uint32_t
-align_u32(uint32_t v, uint32_t a)
-{
- assert(a != 0 && a == (a & -a));
- return (v + a - 1) & ~(a - 1);
}
#define WSI_PRIME_LINEAR_STRIDE_ALIGN 256
-VkResult
-wsi_create_prime_image(const struct wsi_swapchain *chain,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- bool use_modifier,
- struct wsi_image *image)
+static VkResult
+wsi_create_prime_image_mem(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
{
- const struct wsi_device *wsi = chain->wsi;
- VkResult result;
+ VkResult result =
+ wsi_create_buffer_blit_context(chain, info, image,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ if (result != VK_SUCCESS)
+ return result;
- memset(image, 0, sizeof(*image));
+ result = wsi_init_image_dmabuf_fd(chain, image, true);
+ if (result != VK_SUCCESS)
+ return result;
- const uint32_t cpp = vk_format_size(pCreateInfo->imageFormat);
- const uint32_t linear_stride = align_u32(pCreateInfo->imageExtent.width * cpp,
- WSI_PRIME_LINEAR_STRIDE_ALIGN);
+ image->drm_modifier = info->prime_use_linear_modifier ?
+ DRM_FORMAT_MOD_LINEAR : DRM_FORMAT_MOD_INVALID;
- uint32_t linear_size = linear_stride * pCreateInfo->imageExtent.height;
- linear_size = align_u32(linear_size, 4096);
+ return VK_SUCCESS;
+}
- const VkExternalMemoryBufferCreateInfo prime_buffer_external_info = {
- .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
- .pNext = NULL,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- const VkBufferCreateInfo prime_buffer_info = {
- .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
- .pNext = &prime_buffer_external_info,
- .size = linear_size,
- .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
- .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
- };
- result = wsi->CreateBuffer(chain->device, &prime_buffer_info,
- &chain->alloc, &image->prime.buffer);
+static VkResult
+wsi_configure_prime_image(UNUSED const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_drm_image_params *params,
+ struct wsi_image_info *info)
+{
+ bool use_modifier = params->num_modifier_lists > 0;
+ wsi_memory_type_select_cb select_buffer_memory_type =
+ params->same_gpu ? wsi_select_device_memory_type :
+ prime_select_buffer_memory_type;
+
+ VkResult result = wsi_configure_image(chain, pCreateInfo,
+ 0 /* handle_types */, info);
if (result != VK_SUCCESS)
- goto fail;
+ return result;
- VkMemoryRequirements reqs;
- wsi->GetBufferMemoryRequirements(chain->device, image->prime.buffer, &reqs);
- assert(reqs.size <= linear_size);
+ info->explicit_sync = params->explicit_sync;
- const struct wsi_memory_allocate_info memory_wsi_info = {
- .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
- .pNext = NULL,
- .implicit_sync = true,
- };
- const VkExportMemoryAllocateInfo prime_memory_export_info = {
- .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
- .pNext = &memory_wsi_info,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- const VkMemoryDedicatedAllocateInfo prime_memory_dedicated_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
- .pNext = &prime_memory_export_info,
- .image = VK_NULL_HANDLE,
- .buffer = image->prime.buffer,
- };
- const VkMemoryAllocateInfo prime_memory_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
- .pNext = &prime_memory_dedicated_info,
- .allocationSize = linear_size,
- .memoryTypeIndex = select_memory_type(wsi, false, reqs.memoryTypeBits),
- };
- result = wsi->AllocateMemory(chain->device, &prime_memory_info,
- &chain->alloc, &image->prime.memory);
- if (result != VK_SUCCESS)
- goto fail;
+ wsi_configure_buffer_image(chain, pCreateInfo,
+ WSI_PRIME_LINEAR_STRIDE_ALIGN, 4096,
+ info);
+ info->prime_use_linear_modifier = use_modifier;
- result = wsi->BindBufferMemory(chain->device, image->prime.buffer,
- image->prime.memory, 0);
- if (result != VK_SUCCESS)
- goto fail;
+ info->create_mem = wsi_create_prime_image_mem;
+ info->select_blit_dst_memory_type = select_buffer_memory_type;
+ info->select_image_memory_type = wsi_select_device_memory_type;
- const struct wsi_image_create_info image_wsi_info = {
- .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
- .prime_blit_src = true,
- };
- const VkImageCreateInfo image_info = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
- .pNext = &image_wsi_info,
- .flags = 0,
- .imageType = VK_IMAGE_TYPE_2D,
- .format = pCreateInfo->imageFormat,
- .extent = {
- .width = pCreateInfo->imageExtent.width,
- .height = pCreateInfo->imageExtent.height,
- .depth = 1,
- },
- .mipLevels = 1,
- .arrayLayers = 1,
- .samples = VK_SAMPLE_COUNT_1_BIT,
- .tiling = VK_IMAGE_TILING_OPTIMAL,
- .usage = pCreateInfo->imageUsage | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
- .sharingMode = pCreateInfo->imageSharingMode,
- .queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount,
- .pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices,
- .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
- };
- result = wsi->CreateImage(chain->device, &image_info,
- &chain->alloc, &image->image);
- if (result != VK_SUCCESS)
- goto fail;
+ return VK_SUCCESS;
+}
- wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
+bool
+wsi_drm_image_needs_buffer_blit(const struct wsi_device *wsi,
+ const struct wsi_drm_image_params *params)
+{
+ if (!params->same_gpu)
+ return true;
- const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
- .pNext = NULL,
- .image = image->image,
- .buffer = VK_NULL_HANDLE,
- };
- const VkMemoryAllocateInfo memory_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
- .pNext = &memory_dedicated_info,
- .allocationSize = reqs.size,
- .memoryTypeIndex = select_memory_type(wsi, true, reqs.memoryTypeBits),
- };
- result = wsi->AllocateMemory(chain->device, &memory_info,
- &chain->alloc, &image->memory);
- if (result != VK_SUCCESS)
- goto fail;
+ if (params->num_modifier_lists > 0 || wsi->supports_scanout)
+ return false;
- result = wsi->BindImageMemory(chain->device, image->image,
- image->memory, 0);
- if (result != VK_SUCCESS)
- goto fail;
+ return true;
+}
- image->prime.blit_cmd_buffers =
- vk_zalloc(&chain->alloc,
- sizeof(VkCommandBuffer) * wsi->queue_family_count, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!image->prime.blit_cmd_buffers) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
- }
+VkResult
+wsi_drm_configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_drm_image_params *params,
+ struct wsi_image_info *info)
+{
+ assert(params->base.image_type == WSI_IMAGE_TYPE_DRM);
- for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
- const VkCommandBufferAllocateInfo cmd_buffer_info = {
- .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
- .pNext = NULL,
- .commandPool = chain->cmd_pools[i],
- .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
- .commandBufferCount = 1,
- };
- result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
- &image->prime.blit_cmd_buffers[i]);
- if (result != VK_SUCCESS)
- goto fail;
+ if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
+ return wsi_configure_prime_image(chain, pCreateInfo,
+ params,
+ info);
+ } else {
+ return wsi_configure_native_image(chain, pCreateInfo,
+ params,
+ info);
+ }
+}
- const VkCommandBufferBeginInfo begin_info = {
- .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
- };
- wsi->BeginCommandBuffer(image->prime.blit_cmd_buffers[i], &begin_info);
-
- struct VkBufferImageCopy buffer_image_copy = {
- .bufferOffset = 0,
- .bufferRowLength = linear_stride / cpp,
- .bufferImageHeight = 0,
- .imageSubresource = {
- .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .mipLevel = 0,
- .baseArrayLayer = 0,
- .layerCount = 1,
- },
- .imageOffset = { .x = 0, .y = 0, .z = 0 },
- .imageExtent = {
- .width = pCreateInfo->imageExtent.width,
- .height = pCreateInfo->imageExtent.height,
- .depth = 1,
- },
- };
- wsi->CmdCopyImageToBuffer(image->prime.blit_cmd_buffers[i],
- image->image,
- VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
- image->prime.buffer,
- 1, &buffer_image_copy);
+enum wsi_explicit_sync_state_flags
+{
+ WSI_ES_STATE_RELEASE_MATERIALIZED = (1u << 0),
+ WSI_ES_STATE_RELEASE_SIGNALLED = (1u << 1),
+ WSI_ES_STATE_ACQUIRE_SIGNALLED = (1u << 2),
+};
+
+/* Levels of "freeness"
+ * 0 -> Acquire Signalled + Release Signalled
+ * 1 -> Acquire Signalled + Release Materialized
+ * 2 -> Release Signalled
+ * 3 -> Release Materialized
+ */
+static const uint32_t wsi_explicit_sync_free_levels[] = {
+ (WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED),
+ (WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED),
+ (WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_RELEASE_SIGNALLED),
+ (WSI_ES_STATE_RELEASE_MATERIALIZED),
+};
- result = wsi->EndCommandBuffer(image->prime.blit_cmd_buffers[i]);
- if (result != VK_SUCCESS)
- goto fail;
+static uint32_t
+wsi_drm_image_explicit_sync_state(struct vk_device *device, struct wsi_image *image)
+{
+ if (image->explicit_sync[WSI_ES_RELEASE].timeline == 0) {
+ /* This image has never been used in a timeline.
+ * It must be free.
+ */
+ return WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED;
}
- const VkMemoryGetFdInfoKHR linear_memory_get_fd_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
- .pNext = NULL,
- .memory = image->prime.memory,
- .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ uint64_t points[WSI_ES_COUNT] = { 0 };
+ uint32_t handles[WSI_ES_COUNT] = {
+ image->explicit_sync[WSI_ES_ACQUIRE].handle,
+ image->explicit_sync[WSI_ES_RELEASE].handle
};
- int fd;
- result = wsi->GetMemoryFdKHR(chain->device, &linear_memory_get_fd_info, &fd);
- if (result != VK_SUCCESS)
- goto fail;
+ int ret = drmSyncobjQuery(device->drm_fd, handles, points, WSI_ES_COUNT);
+ if (ret)
+ return 0;
+
+ uint32_t flags = 0;
+ if (points[WSI_ES_ACQUIRE] >= image->explicit_sync[WSI_ES_ACQUIRE].timeline) {
+ flags |= WSI_ES_STATE_ACQUIRE_SIGNALLED;
+ }
- image->drm_modifier = use_modifier ? DRM_FORMAT_MOD_LINEAR : DRM_FORMAT_MOD_INVALID;
- image->num_planes = 1;
- image->sizes[0] = linear_size;
- image->row_pitches[0] = linear_stride;
- image->offsets[0] = 0;
- image->fds[0] = fd;
+ if (points[WSI_ES_RELEASE] >= image->explicit_sync[WSI_ES_RELEASE].timeline) {
+ flags |= WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED;
+ } else {
+ uint32_t first_signalled;
+ ret = drmSyncobjTimelineWait(device->drm_fd, &handles[WSI_ES_RELEASE], &image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled);
+ if (ret == 0)
+ flags |= WSI_ES_STATE_RELEASE_MATERIALIZED;
+ }
- return VK_SUCCESS;
+ return flags;
+}
-fail:
- wsi_destroy_image(chain, image);
+static uint64_t
+wsi_drm_rel_timeout_to_abs(uint64_t rel_timeout_ns)
+{
+ uint64_t cur_time_ns = os_time_get_nano();
- return result;
+ /* Syncobj timeouts are signed */
+ return rel_timeout_ns > INT64_MAX - cur_time_ns
+ ? INT64_MAX
+ : cur_time_ns + rel_timeout_ns;
}
+VkResult
+wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
+ uint32_t image_count,
+ struct wsi_image **images,
+ uint64_t rel_timeout_ns,
+ uint32_t *image_index)
+{
+#ifdef HAVE_LIBDRM
+ STACK_ARRAY(uint32_t, handles, image_count);
+ STACK_ARRAY(uint64_t, points, image_count);
+ STACK_ARRAY(uint32_t, indices, image_count);
+ STACK_ARRAY(uint32_t, flags, image_count);
+ VK_FROM_HANDLE(vk_device, device, chain->device);
+ int ret = 0;
+
+ /* We don't need to wait for the merged timeline on the CPU,
+ * only on the GPU side of things.
+ *
+    * We already know that the CPU side of the acquire has materialized
+    * for all images in this array.
+ * That's what "busy"/"free" essentially represents.
+ */
+ uint32_t unacquired_image_count = 0;
+ for (uint32_t i = 0; i < image_count; i++) {
+ if (images[i]->acquired)
+ continue;
+
+ flags[unacquired_image_count] = wsi_drm_image_explicit_sync_state(device, images[i]);
+ handles[unacquired_image_count] = images[i]->explicit_sync[WSI_ES_RELEASE].handle;
+ points[unacquired_image_count] = images[i]->explicit_sync[WSI_ES_RELEASE].timeline;
+ indices[unacquired_image_count] = i;
+ unacquired_image_count++;
+ }
+
+   /* Handle the case where there are no images to possibly acquire. */
+ if (!unacquired_image_count) {
+ ret = -ETIME;
+ goto done;
+ }
+
+   /* Find the best image using the free levels above. */
+ for (uint32_t free_level_idx = 0; free_level_idx < ARRAY_SIZE(wsi_explicit_sync_free_levels); free_level_idx++) {
+ uint32_t free_level = wsi_explicit_sync_free_levels[free_level_idx];
+
+ uint64_t present_serial = UINT64_MAX;
+ for (uint32_t i = 0; i < unacquired_image_count; i++) {
+         /* Within this free level, pick the image that was presented
+          * longest ago, as it has the best chance of becoming completely
+          * free the soonest.
+          */
+ if ((flags[i] & free_level) == free_level &&
+ images[indices[i]]->present_serial < present_serial) {
+ *image_index = indices[i];
+ present_serial = images[indices[i]]->present_serial;
+ }
+ }
+ if (present_serial != UINT64_MAX)
+ goto done;
+ }
+
+ /* Use DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE so we do not need to wait for the
+ * compositor's GPU work to be finished to acquire on the CPU side.
+ *
+ * We will forward the GPU signal to the VkSemaphore/VkFence of the acquire.
+ */
+ uint32_t first_signalled;
+ ret = drmSyncobjTimelineWait(device->drm_fd, handles, points, unacquired_image_count,
+ wsi_drm_rel_timeout_to_abs(rel_timeout_ns),
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
+ &first_signalled);
+
+ /* Return the first image that materialized. */
+ if (ret != 0)
+ goto done;
+
+ *image_index = indices[first_signalled];
+done:
+ STACK_ARRAY_FINISH(flags);
+ STACK_ARRAY_FINISH(indices);
+ STACK_ARRAY_FINISH(points);
+ STACK_ARRAY_FINISH(handles);
+
+ if (ret == 0)
+ return VK_SUCCESS;
+ else if (ret == -ETIME)
+ return rel_timeout_ns ? VK_TIMEOUT : VK_NOT_READY;
+ else
+ return VK_ERROR_OUT_OF_DATE_KHR;
+#else
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+#endif
+}
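
For orientation, here is a rough sketch of how a window-system backend could drive the helper above from its acquire path. It is not part of this patch: the wrapping function and its error handling are placeholders, and only the call to wsi_drm_wait_for_explicit_sync_release() and the acquired flag come from the code being added here.

/* Hypothetical acquire path built on the helper above ("my_acquire_next_image"
 * is a stand-in for a real backend's acquire implementation). */
static VkResult
my_acquire_next_image(struct wsi_swapchain *chain,
                      const VkAcquireNextImageInfoKHR *info,
                      struct wsi_image **images, uint32_t image_count,
                      uint32_t *image_index)
{
   VkResult result =
      wsi_drm_wait_for_explicit_sync_release(chain, image_count, images,
                                             info->timeout, image_index);
   if (result != VK_SUCCESS)
      return result; /* VK_TIMEOUT, VK_NOT_READY or an error */

   /* The application now owns this image until it is presented again. */
   images[*image_index]->acquired = true;
   return VK_SUCCESS;
}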
diff --git a/src/vulkan/wsi/wsi_common_drm.h b/src/vulkan/wsi/wsi_common_drm.h
new file mode 100644
index 00000000000..44774afd6fa
--- /dev/null
+++ b/src/vulkan/wsi/wsi_common_drm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2021 Igalia S.L.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef WSI_COMMON_DRM_H
+#define WSI_COMMON_DRM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+wsi_common_drm_devices_equal(int fd_a, int fd_b);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* WSI_COMMON_DRM_H */
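
The new header exposes a single helper. A minimal, hypothetical use of it, deciding whether the prime/blit path is needed for a swapchain, could look like the following; the two file descriptors and the wrapper function are placeholders, and only the two wsi_* calls are real:

#include "wsi_common_drm.h"
#include "wsi_common_private.h"

/* Hypothetical: check whether the display and render devices are the same
 * DRM device and, based on that, whether a buffer blit is required. */
static bool
my_backend_needs_blit(const struct wsi_device *wsi,
                      int display_fd, int render_fd)
{
   struct wsi_drm_image_params params = {
      .base.image_type = WSI_IMAGE_TYPE_DRM,
      .same_gpu = wsi_common_drm_devices_equal(display_fd, render_fd),
   };
   return wsi_drm_image_needs_buffer_blit(wsi, &params);
}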
diff --git a/src/vulkan/wsi/wsi_common_headless.c b/src/vulkan/wsi/wsi_common_headless.c
new file mode 100644
index 00000000000..136a4d11209
--- /dev/null
+++ b/src/vulkan/wsi/wsi_common_headless.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright 2021 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/** VK_EXT_headless_surface */
+
+#include "util/macros.h"
+#include "util/hash_table.h"
+#include "util/timespec.h"
+#include "util/u_thread.h"
+#include "util/xmlconfig.h"
+#include "vk_util.h"
+#include "vk_enum_to_str.h"
+#include "vk_instance.h"
+#include "vk_physical_device.h"
+#include "wsi_common_entrypoints.h"
+#include "wsi_common_private.h"
+#include "wsi_common_queue.h"
+
+#include "drm-uapi/drm_fourcc.h"
+
+struct wsi_headless_format {
+ VkFormat format;
+ struct u_vector modifiers;
+};
+
+struct wsi_headless {
+ struct wsi_interface base;
+
+ struct wsi_device *wsi;
+
+ const VkAllocationCallbacks *alloc;
+ VkPhysicalDevice physical_device;
+};
+
+static VkResult
+wsi_headless_surface_get_support(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ uint32_t queueFamilyIndex,
+ VkBool32* pSupported)
+{
+ *pSupported = true;
+
+ return VK_SUCCESS;
+}
+
+static const VkPresentModeKHR present_modes[] = {
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+};
+
+static VkResult
+wsi_headless_surface_get_capabilities(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ VkSurfaceCapabilitiesKHR* caps)
+{
+ /* For true mailbox mode, we need at least 4 images:
+ * 1) One to scan out from
+ * 2) One to have queued for scan-out
+    * 3) One to be currently held by the presentation engine
+ * 4) One to render to
+ */
+ caps->minImageCount = 4;
+ /* There is no real maximum */
+ caps->maxImageCount = 0;
+
+ caps->currentExtent = (VkExtent2D) { -1, -1 };
+ caps->minImageExtent = (VkExtent2D) { 1, 1 };
+ caps->maxImageExtent = (VkExtent2D) {
+ wsi_device->maxImageDimension2D,
+ wsi_device->maxImageDimension2D,
+ };
+
+ caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ caps->maxImageArrayLayers = 1;
+
+ caps->supportedCompositeAlpha =
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
+
+ caps->supportedUsageFlags = wsi_caps_get_image_usage();
+
+ VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
+ if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
+ caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_headless_surface_get_capabilities2(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ const void *info_next,
+ VkSurfaceCapabilities2KHR* caps)
+{
+ assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
+
+ VkResult result =
+ wsi_headless_surface_get_capabilities(surface, wsi_device,
+ &caps->surfaceCapabilities);
+
+ vk_foreach_struct(ext, caps->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
+ VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
+ protected->supportsProtected = VK_FALSE;
+ break;
+ }
+
+ default:
+ /* Ignored */
+ break;
+ }
+ }
+
+ return result;
+}
+
+static VkResult
+wsi_headless_surface_get_formats(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormatKHR* pSurfaceFormats)
+{
+ struct wsi_headless *wsi =
+ (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];
+
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out, pSurfaceFormats, pSurfaceFormatCount);
+
+ if (wsi->wsi->force_bgra8_unorm_first) {
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
+ out_fmt->format = VK_FORMAT_B8G8R8A8_UNORM;
+ out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
+ out_fmt->format = VK_FORMAT_R8G8B8A8_UNORM;
+ out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ } else {
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
+ out_fmt->format = VK_FORMAT_R8G8B8A8_UNORM;
+ out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
+ out_fmt->format = VK_FORMAT_B8G8R8A8_UNORM;
+ out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static VkResult
+wsi_headless_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
+ const void *info_next,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormat2KHR* pSurfaceFormats)
+{
+ struct wsi_headless *wsi =
+ (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];
+
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out, pSurfaceFormats, pSurfaceFormatCount);
+
+ if (wsi->wsi->force_bgra8_unorm_first) {
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
+ out_fmt->surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
+ out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
+ out_fmt->surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
+ out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ } else {
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
+ out_fmt->surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
+ out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
+ out_fmt->surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
+ out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static VkResult
+wsi_headless_surface_get_present_modes(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pPresentModeCount,
+ VkPresentModeKHR* pPresentModes)
+{
+ if (pPresentModes == NULL) {
+ *pPresentModeCount = ARRAY_SIZE(present_modes);
+ return VK_SUCCESS;
+ }
+
+ *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
+ typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
+
+ if (*pPresentModeCount < ARRAY_SIZE(present_modes))
+ return VK_INCOMPLETE;
+ else
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_headless_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pRectCount,
+ VkRect2D* pRects)
+{
+ VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
+
+ vk_outarray_append_typed(VkRect2D, &out, rect) {
+ /* We don't know a size so just return the usual "I don't know." */
+ *rect = (VkRect2D) {
+ .offset = { 0, 0 },
+ .extent = { UINT32_MAX, UINT32_MAX },
+ };
+ }
+
+ return vk_outarray_status(&out);
+}
+
+struct wsi_headless_image {
+ struct wsi_image base;
+ bool busy;
+};
+
+struct wsi_headless_swapchain {
+ struct wsi_swapchain base;
+
+ VkExtent2D extent;
+ VkFormat vk_format;
+
+ struct u_vector modifiers;
+
+ VkPresentModeKHR present_mode;
+ bool fifo_ready;
+
+ struct wsi_headless_image images[0];
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_headless_swapchain, base.base, VkSwapchainKHR,
+ VK_OBJECT_TYPE_SWAPCHAIN_KHR)
+
+static struct wsi_image *
+wsi_headless_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
+ uint32_t image_index)
+{
+ struct wsi_headless_swapchain *chain =
+ (struct wsi_headless_swapchain *)wsi_chain;
+ return &chain->images[image_index].base;
+}
+
+static VkResult
+wsi_headless_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
+ const VkAcquireNextImageInfoKHR *info,
+ uint32_t *image_index)
+{
+ struct wsi_headless_swapchain *chain =
+ (struct wsi_headless_swapchain *)wsi_chain;
+ struct timespec start_time, end_time;
+ struct timespec rel_timeout;
+
+ timespec_from_nsec(&rel_timeout, info->timeout);
+
+ clock_gettime(CLOCK_MONOTONIC, &start_time);
+ timespec_add(&end_time, &rel_timeout, &start_time);
+
+ while (1) {
+ /* Try to find a free image. */
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ if (!chain->images[i].busy) {
+ /* We found a non-busy image */
+ *image_index = i;
+ chain->images[i].busy = true;
+ return VK_SUCCESS;
+ }
+ }
+
+ /* Check for timeout. */
+ struct timespec current_time;
+ clock_gettime(CLOCK_MONOTONIC, &current_time);
+ if (timespec_after(&current_time, &end_time))
+ return VK_NOT_READY;
+ }
+}
+
+static VkResult
+wsi_headless_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+ uint32_t image_index,
+ uint64_t present_id,
+ const VkPresentRegionKHR *damage)
+{
+ struct wsi_headless_swapchain *chain =
+ (struct wsi_headless_swapchain *)wsi_chain;
+
+ assert(image_index < chain->base.image_count);
+
+ chain->images[image_index].busy = false;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_headless_swapchain_destroy(struct wsi_swapchain *wsi_chain,
+ const VkAllocationCallbacks *pAllocator)
+{
+ struct wsi_headless_swapchain *chain =
+ (struct wsi_headless_swapchain *)wsi_chain;
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ if (chain->images[i].base.image != VK_NULL_HANDLE)
+ wsi_destroy_image(&chain->base, &chain->images[i].base);
+ }
+
+ u_vector_finish(&chain->modifiers);
+
+ wsi_swapchain_finish(&chain->base);
+
+ vk_free(pAllocator, chain);
+
+ return VK_SUCCESS;
+}
+
+static const struct VkDrmFormatModifierPropertiesEXT *
+get_modifier_props(const struct wsi_image_info *info, uint64_t modifier)
+{
+ for (uint32_t i = 0; i < info->modifier_prop_count; i++) {
+ if (info->modifier_props[i].drmFormatModifier == modifier)
+ return &info->modifier_props[i];
+ }
+ return NULL;
+}
+
+static VkResult
+wsi_create_null_image_mem(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ const struct wsi_device *wsi = chain->wsi;
+ VkResult result;
+
+ VkMemoryRequirements reqs;
+ wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
+
+ const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+ .pNext = NULL,
+ .image = image->image,
+ .buffer = VK_NULL_HANDLE,
+ };
+ const VkMemoryAllocateInfo memory_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &memory_dedicated_info,
+ .allocationSize = reqs.size,
+ .memoryTypeIndex =
+ wsi_select_device_memory_type(wsi, reqs.memoryTypeBits),
+ };
+ result = wsi->AllocateMemory(chain->device, &memory_info,
+ &chain->alloc, &image->memory);
+ if (result != VK_SUCCESS)
+ return result;
+
+ image->dma_buf_fd = -1;
+
+ if (info->drm_mod_list.drmFormatModifierCount > 0) {
+ VkImageDrmFormatModifierPropertiesEXT image_mod_props = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
+ };
+ result = wsi->GetImageDrmFormatModifierPropertiesEXT(chain->device,
+ image->image,
+ &image_mod_props);
+ if (result != VK_SUCCESS)
+ return result;
+
+ image->drm_modifier = image_mod_props.drmFormatModifier;
+ assert(image->drm_modifier != DRM_FORMAT_MOD_INVALID);
+
+ const struct VkDrmFormatModifierPropertiesEXT *mod_props =
+ get_modifier_props(info, image->drm_modifier);
+ image->num_planes = mod_props->drmFormatModifierPlaneCount;
+
+ for (uint32_t p = 0; p < image->num_planes; p++) {
+ const VkImageSubresource image_subresource = {
+ .aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT << p,
+ .mipLevel = 0,
+ .arrayLayer = 0,
+ };
+ VkSubresourceLayout image_layout;
+ wsi->GetImageSubresourceLayout(chain->device, image->image,
+ &image_subresource, &image_layout);
+ image->sizes[p] = image_layout.size;
+ image->row_pitches[p] = image_layout.rowPitch;
+ image->offsets[p] = image_layout.offset;
+ }
+ } else {
+ const VkImageSubresource image_subresource = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .mipLevel = 0,
+ .arrayLayer = 0,
+ };
+ VkSubresourceLayout image_layout;
+ wsi->GetImageSubresourceLayout(chain->device, image->image,
+ &image_subresource, &image_layout);
+
+ image->drm_modifier = DRM_FORMAT_MOD_INVALID;
+ image->num_planes = 1;
+ image->sizes[0] = reqs.size;
+ image->row_pitches[0] = image_layout.rowPitch;
+ image->offsets[0] = 0;
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_headless_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ VkDevice device,
+ struct wsi_device *wsi_device,
+ const VkSwapchainCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ struct wsi_swapchain **swapchain_out)
+{
+ struct wsi_headless_swapchain *chain;
+ VkResult result;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+
+ int num_images = pCreateInfo->minImageCount;
+
+ size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
+ chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (chain == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ struct wsi_drm_image_params drm_params = {
+ .base.image_type = WSI_IMAGE_TYPE_DRM,
+ .same_gpu = true,
+ };
+
+ result = wsi_swapchain_init(wsi_device, &chain->base, device,
+ pCreateInfo, &drm_params.base, pAllocator);
+ if (result != VK_SUCCESS) {
+ vk_free(pAllocator, chain);
+ return result;
+ }
+
+ chain->base.destroy = wsi_headless_swapchain_destroy;
+ chain->base.get_wsi_image = wsi_headless_swapchain_get_wsi_image;
+ chain->base.acquire_next_image = wsi_headless_swapchain_acquire_next_image;
+ chain->base.queue_present = wsi_headless_swapchain_queue_present;
+ chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
+ chain->base.image_count = num_images;
+ chain->extent = pCreateInfo->imageExtent;
+ chain->vk_format = pCreateInfo->imageFormat;
+
+ result = wsi_configure_image(&chain->base, pCreateInfo,
+ 0, &chain->base.image_info);
+ if (result != VK_SUCCESS) {
+ goto fail;
+ }
+ chain->base.image_info.create_mem = wsi_create_null_image_mem;
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ result = wsi_create_image(&chain->base, &chain->base.image_info,
+ &chain->images[i].base);
+ if (result != VK_SUCCESS)
+         goto fail;
+
+ chain->images[i].busy = false;
+ }
+
+ *swapchain_out = &chain->base;
+
+ return VK_SUCCESS;
+
+fail:
+ wsi_headless_swapchain_destroy(&chain->base, pAllocator);
+
+ return result;
+}
+
+VkResult
+wsi_headless_init_wsi(struct wsi_device *wsi_device,
+ const VkAllocationCallbacks *alloc,
+ VkPhysicalDevice physical_device)
+{
+ struct wsi_headless *wsi;
+ VkResult result;
+
+ wsi = vk_alloc(alloc, sizeof(*wsi), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ if (!wsi) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail;
+ }
+
+ wsi->physical_device = physical_device;
+ wsi->alloc = alloc;
+ wsi->wsi = wsi_device;
+
+ wsi->base.get_support = wsi_headless_surface_get_support;
+ wsi->base.get_capabilities2 = wsi_headless_surface_get_capabilities2;
+ wsi->base.get_formats = wsi_headless_surface_get_formats;
+ wsi->base.get_formats2 = wsi_headless_surface_get_formats2;
+ wsi->base.get_present_modes = wsi_headless_surface_get_present_modes;
+ wsi->base.get_present_rectangles = wsi_headless_surface_get_present_rectangles;
+ wsi->base.create_swapchain = wsi_headless_surface_create_swapchain;
+
+ wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] = &wsi->base;
+
+ return VK_SUCCESS;
+
+fail:
+ wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] = NULL;
+
+ return result;
+}
+
+void
+wsi_headless_finish_wsi(struct wsi_device *wsi_device,
+ const VkAllocationCallbacks *alloc)
+{
+ struct wsi_headless *wsi =
+ (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];
+ if (!wsi)
+ return;
+
+ vk_free(alloc, wsi);
+}
+
+VkResult wsi_CreateHeadlessSurfaceEXT(
+ VkInstance _instance,
+ const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ VkIcdSurfaceHeadless *surface;
+
+ surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (surface == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ surface->base.platform = VK_ICD_WSI_PLATFORM_HEADLESS;
+
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
+ return VK_SUCCESS;
+}
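
From the application side this surface type is created through the standard VK_EXT_headless_surface entry point that the code above implements. A bare-bones usage sketch (not part of this patch; it assumes the instance was created with VK_EXT_headless_surface enabled and that the entry point is resolved, e.g. via vkGetInstanceProcAddr):

#include <vulkan/vulkan.h>

/* Illustrative only: create a headless surface to drive the swapchain code
 * above without any window system.  Error handling is omitted. */
static VkSurfaceKHR
create_headless_surface(VkInstance instance)
{
   const VkHeadlessSurfaceCreateInfoEXT info = {
      .sType = VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT,
   };
   VkSurfaceKHR surface = VK_NULL_HANDLE;
   vkCreateHeadlessSurfaceEXT(instance, &info, NULL, &surface);
   return surface;
}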
diff --git a/src/vulkan/wsi/wsi_common_private.h b/src/vulkan/wsi/wsi_common_private.h
index f08002abfaf..7767f7b8431 100644
--- a/src/vulkan/wsi/wsi_common_private.h
+++ b/src/vulkan/wsi/wsi_common_private.h
@@ -24,7 +24,117 @@
#define WSI_COMMON_PRIVATE_H
#include "wsi_common.h"
-#include "vulkan/util/vk_object.h"
+#include "util/perf/cpu_trace.h"
+#include "vk_object.h"
+#include "vk_sync.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct wsi_image;
+struct wsi_swapchain;
+
+#define WSI_DEBUG_BUFFER (1ull << 0)
+#define WSI_DEBUG_SW (1ull << 1)
+#define WSI_DEBUG_NOSHM (1ull << 2)
+#define WSI_DEBUG_LINEAR (1ull << 3)
+#define WSI_DEBUG_DXGI (1ull << 4)
+
+extern uint64_t WSI_DEBUG;
+
+enum wsi_image_type {
+ WSI_IMAGE_TYPE_CPU,
+ WSI_IMAGE_TYPE_DRM,
+ WSI_IMAGE_TYPE_DXGI,
+};
+
+struct wsi_base_image_params {
+ enum wsi_image_type image_type;
+};
+
+struct wsi_cpu_image_params {
+ struct wsi_base_image_params base;
+
+ uint8_t *(*alloc_shm)(struct wsi_image *image, unsigned size);
+};
+
+struct wsi_drm_image_params {
+ struct wsi_base_image_params base;
+
+ bool same_gpu;
+ bool explicit_sync;
+
+ uint32_t num_modifier_lists;
+ const uint32_t *num_modifiers;
+ const uint64_t *const *modifiers;
+};
+
+struct wsi_dxgi_image_params {
+ struct wsi_base_image_params base;
+ bool storage_image;
+};
+
+typedef uint32_t (*wsi_memory_type_select_cb)(const struct wsi_device *wsi,
+ uint32_t type_bits);
+
+struct wsi_image_info {
+ VkImageCreateInfo create;
+ struct wsi_image_create_info wsi;
+ VkExternalMemoryImageCreateInfo ext_mem;
+ VkImageFormatListCreateInfo format_list;
+ VkImageDrmFormatModifierListCreateInfoEXT drm_mod_list;
+
+ enum wsi_image_type image_type;
+ bool explicit_sync;
+ bool prime_use_linear_modifier;
+
+ /* Not really part of VkImageCreateInfo but needed to figure out the
+ * number of planes we need to bind.
+ */
+ uint32_t modifier_prop_count;
+ struct VkDrmFormatModifierPropertiesEXT *modifier_props;
+
+ /* For buffer blit images, the linear stride in bytes */
+ uint32_t linear_stride;
+
+ /* For buffer blit images, the size of the buffer in bytes */
+ uint64_t linear_size;
+
+ wsi_memory_type_select_cb select_image_memory_type;
+ wsi_memory_type_select_cb select_blit_dst_memory_type;
+
+ uint8_t *(*alloc_shm)(struct wsi_image *image, unsigned size);
+
+ VkResult (*create_mem)(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image);
+
+ VkResult (*finish_create)(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image);
+};
+
+enum wsi_explicit_sync_timelines
+{
+ WSI_ES_ACQUIRE,
+ WSI_ES_RELEASE,
+
+ WSI_ES_COUNT,
+};
+
+struct wsi_image_explicit_sync_timeline {
+ VkSemaphore semaphore;
+ uint64_t timeline;
+ int fd;
+ uint32_t handle;
+};
+
+enum wsi_swapchain_blit_type {
+ WSI_SWAPCHAIN_NO_BLIT,
+ WSI_SWAPCHAIN_BUFFER_BLIT,
+ WSI_SWAPCHAIN_IMAGE_BLIT,
+};
struct wsi_image {
VkImage image;
@@ -32,16 +142,29 @@ struct wsi_image {
struct {
VkBuffer buffer;
+ VkImage image;
VkDeviceMemory memory;
- VkCommandBuffer *blit_cmd_buffers;
- } prime;
+ VkCommandBuffer *cmd_buffers;
+ } blit;
+ /* Whether or not the image has been acquired
+ * on the CPU side via acquire_next_image.
+ */
+ bool acquired;
+ uint64_t present_serial;
+
+ struct wsi_image_explicit_sync_timeline explicit_sync[WSI_ES_COUNT];
+#ifndef _WIN32
uint64_t drm_modifier;
+#endif
int num_planes;
uint32_t sizes[4];
uint32_t offsets[4];
uint32_t row_pitches[4];
- int fds[4];
+#ifndef _WIN32
+ int dma_buf_fd;
+#endif
+ void *cpu_map;
};
struct wsi_swapchain {
@@ -53,9 +176,29 @@ struct wsi_swapchain {
VkAllocationCallbacks alloc;
VkFence* fences;
VkPresentModeKHR present_mode;
+ VkSemaphore present_id_timeline;
+
+ int signal_dma_buf_from_semaphore;
+ VkSemaphore dma_buf_semaphore;
+
+ struct wsi_image_info image_info;
uint32_t image_count;
+
+ uint64_t present_serial;
- bool use_prime_blit;
+ struct {
+ enum wsi_swapchain_blit_type type;
+ VkSemaphore *semaphores;
+
+ /* If the driver wants to use a special queue to execute the buffer blit,
+ * it'll implement the wsi_device::get_blit_queue callback.
+ * The created queue will be stored here and will be used to execute the
+ * buffer blit instead of using the present queue.
+ */
+ VkQueue queue;
+ } blit;
+
+ bool capture_key_pressed;
/* Command pools, one per queue family */
VkCommandPool *cmd_pools;
@@ -69,17 +212,35 @@ struct wsi_swapchain {
uint32_t *image_index);
VkResult (*queue_present)(struct wsi_swapchain *swap_chain,
uint32_t image_index,
+ uint64_t present_id,
const VkPresentRegionKHR *damage);
+ VkResult (*wait_for_present)(struct wsi_swapchain *swap_chain,
+ uint64_t present_id,
+ uint64_t timeout);
+ VkResult (*release_images)(struct wsi_swapchain *swap_chain,
+ uint32_t count,
+ const uint32_t *indices);
+ void (*set_present_mode)(struct wsi_swapchain *swap_chain,
+ VkPresentModeKHR mode);
};
bool
wsi_device_matches_drm_fd(const struct wsi_device *wsi, int drm_fd);
+void
+wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
+ const VkAllocationCallbacks *pAllocator);
+
+void
+wsi_win32_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
+ const VkAllocationCallbacks *pAllocator);
+
VkResult
wsi_swapchain_init(const struct wsi_device *wsi,
struct wsi_swapchain *chain,
VkDevice device,
const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_base_image_params *image_params,
const VkAllocationCallbacks *pAllocator);
enum VkPresentModeKHR
@@ -88,25 +249,123 @@ wsi_swapchain_get_present_mode(struct wsi_device *wsi,
void wsi_swapchain_finish(struct wsi_swapchain *chain);
+uint32_t
+wsi_select_memory_type(const struct wsi_device *wsi,
+ VkMemoryPropertyFlags req_flags,
+ VkMemoryPropertyFlags deny_flags,
+ uint32_t type_bits);
+uint32_t
+wsi_select_device_memory_type(const struct wsi_device *wsi,
+ uint32_t type_bits);
+
+bool
+wsi_drm_image_needs_buffer_blit(const struct wsi_device *wsi,
+ const struct wsi_drm_image_params *params);
+
+enum wsi_swapchain_blit_type
+wsi_dxgi_image_needs_blit(const struct wsi_device *wsi,
+ const struct wsi_dxgi_image_params *params,
+ VkDevice device);
+
+VkResult
+wsi_drm_configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_drm_image_params *params,
+ struct wsi_image_info *info);
+
+VkResult
+wsi_dxgi_configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_dxgi_image_params *params,
+ struct wsi_image_info *info);
+
+bool
+wsi_cpu_image_needs_buffer_blit(const struct wsi_device *wsi,
+ const struct wsi_cpu_image_params *params);
+
VkResult
-wsi_create_native_image(const struct wsi_swapchain *chain,
+wsi_configure_cpu_image(const struct wsi_swapchain *chain,
const VkSwapchainCreateInfoKHR *pCreateInfo,
- uint32_t num_modifier_lists,
- const uint32_t *num_modifiers,
- const uint64_t *const *modifiers,
- uint8_t *(alloc_shm)(struct wsi_image *image, unsigned size),
- struct wsi_image *image);
+ const struct wsi_cpu_image_params *params,
+ struct wsi_image_info *info);
+
+VkResult
+wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image,
+ VkExternalMemoryHandleTypeFlags handle_types);
+
+VkResult
+wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image);
+void
+wsi_configure_buffer_image(UNUSED const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ uint32_t stride_align, uint32_t size_align,
+ struct wsi_image_info *info);
+
+void
+wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain *chain,
+ struct wsi_image_info *info);
+
+VkResult
+wsi_configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ VkExternalMemoryHandleTypeFlags handle_types,
+ struct wsi_image_info *info);
+void
+wsi_destroy_image_info(const struct wsi_swapchain *chain,
+ struct wsi_image_info *info);
VkResult
-wsi_create_prime_image(const struct wsi_swapchain *chain,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- bool use_modifier,
- struct wsi_image *image);
+wsi_create_image(const struct wsi_swapchain *chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image);
+void
+wsi_image_init(struct wsi_image *image);
void
wsi_destroy_image(const struct wsi_swapchain *chain,
struct wsi_image *image);
+VkResult
+wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain *chain,
+ uint64_t present_id, uint64_t timeout);
+
+#ifdef HAVE_LIBDRM
+VkResult
+wsi_prepare_signal_dma_buf_from_semaphore(struct wsi_swapchain *chain,
+ const struct wsi_image *image);
+VkResult
+wsi_signal_dma_buf_from_semaphore(const struct wsi_swapchain *chain,
+ const struct wsi_image *image);
+VkResult
+wsi_create_sync_for_dma_buf_wait(const struct wsi_swapchain *chain,
+ const struct wsi_image *image,
+ enum vk_sync_features sync_features,
+ struct vk_sync **sync_out);
+VkResult
+wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
+ const struct wsi_image *image,
+ enum vk_sync_features req_features,
+ struct vk_sync **sync_out);
+
+VkResult
+wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
+ struct wsi_image *image);
+
+void
+wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
+ struct wsi_image *image);
+
+VkResult
+wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
+ uint32_t image_count,
+ struct wsi_image **images,
+ uint64_t rel_timeout_ns,
+ uint32_t *image_index);
+#endif
struct wsi_interface {
VkResult (*get_support)(VkIcdSurfaceBase *surface,
@@ -127,6 +386,7 @@ struct wsi_interface {
uint32_t* pSurfaceFormatCount,
VkSurfaceFormat2KHR* pSurfaceFormats);
VkResult (*get_present_modes)(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes);
VkResult (*get_present_rectangles)(VkIcdSurfaceBase *surface,
@@ -167,7 +427,27 @@ void
wsi_display_finish_wsi(struct wsi_device *wsi_device,
const VkAllocationCallbacks *alloc);
+void
+wsi_display_setup_syncobj_fd(struct wsi_device *wsi_device,
+ int fd);
+
+VkResult wsi_headless_init_wsi(struct wsi_device *wsi_device,
+ const VkAllocationCallbacks *alloc,
+ VkPhysicalDevice physical_device);
+
+void wsi_headless_finish_wsi(struct wsi_device *wsi_device,
+ const VkAllocationCallbacks *alloc);
+
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_swapchain, base, VkSwapchainKHR,
VK_OBJECT_TYPE_SWAPCHAIN_KHR)
+#if defined(HAVE_PTHREAD) && !defined(_WIN32)
+bool
+wsi_init_pthread_cond_monotonic(pthread_cond_t *cond);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* WSI_COMMON_PRIVATE_H */
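
To make the callback plumbing above concrete, here is a stripped-down sketch of a backend-specific configure step in the style of the headless and DRM paths earlier in this patch. Only wsi_configure_image() and the wsi_image_info fields are from this patch; my_configure_image and my_create_image_mem are placeholder names:

/* Minimal sketch: let the common code fill in the VkImageCreateInfo chain,
 * then hook in a backend-specific memory allocation callback. */
static VkResult
my_configure_image(const struct wsi_swapchain *chain,
                   const VkSwapchainCreateInfoKHR *pCreateInfo,
                   struct wsi_image_info *info)
{
   VkResult result = wsi_configure_image(chain, pCreateInfo,
                                         0 /* handle_types */, info);
   if (result != VK_SUCCESS)
      return result;

   info->create_mem = my_create_image_mem;
   return VK_SUCCESS;
}

wsi_create_image() then invokes create_mem (and finish_create, if set) for every swapchain image, as the headless path above does.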
diff --git a/src/vulkan/wsi/wsi_common_queue.h b/src/vulkan/wsi/wsi_common_queue.h
index 6d489cbf17e..3676876b903 100644
--- a/src/vulkan/wsi/wsi_common_queue.h
+++ b/src/vulkan/wsi/wsi_common_queue.h
@@ -39,12 +39,10 @@ wsi_queue_init(struct wsi_queue *queue, int length)
{
int ret;
- uint32_t length_pow2 = 4;
- while (length_pow2 < length)
- length_pow2 *= 2;
+ if (length < 4)
+ length = 4;
- ret = u_vector_init(&queue->vector, sizeof(uint32_t),
- sizeof(uint32_t) * length_pow2);
+ ret = u_vector_init(&queue->vector, length, sizeof(uint32_t));
if (!ret)
return ENOMEM;
diff --git a/src/vulkan/wsi/wsi_common_wayland.c b/src/vulkan/wsi/wsi_common_wayland.c
index 4d16ba60098..ebfc80a84a5 100644
--- a/src/vulkan/wsi/wsi_common_wayland.c
+++ b/src/vulkan/wsi/wsi_common_wayland.c
@@ -32,50 +32,100 @@
#include <pthread.h>
#include <poll.h>
#include <sys/mman.h>
+#include <sys/types.h>
#include "drm-uapi/drm_fourcc.h"
+#include "vk_instance.h"
+#include "vk_device.h"
+#include "vk_physical_device.h"
#include "vk_util.h"
+#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
-#include "wsi_common_wayland.h"
#include "linux-dmabuf-unstable-v1-client-protocol.h"
+#include "presentation-time-client-protocol.h"
+#include "linux-drm-syncobj-v1-client-protocol.h"
+#include "tearing-control-v1-client-protocol.h"
#include <util/compiler.h>
#include <util/hash_table.h>
#include <util/timespec.h>
+#include <util/u_endian.h>
#include <util/u_vector.h>
+#include <util/u_dynarray.h>
#include <util/anon_file.h>
+#include <util/os_time.h>
+
+#include <loader/loader_wayland_helper.h>
+
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
struct wsi_wayland;
struct wsi_wl_format {
VkFormat vk_format;
+ uint32_t flags;
struct u_vector modifiers;
};
+struct dmabuf_feedback_format_table {
+ unsigned int size;
+ struct {
+ uint32_t format;
+ uint32_t padding; /* unused */
+ uint64_t modifier;
+ } *data;
+};
+
+struct dmabuf_feedback_tranche {
+ dev_t target_device;
+ uint32_t flags;
+ struct u_vector formats;
+};
+
+struct dmabuf_feedback {
+ dev_t main_device;
+ struct dmabuf_feedback_format_table format_table;
+ struct util_dynarray tranches;
+ struct dmabuf_feedback_tranche pending_tranche;
+};
+
struct wsi_wl_display {
/* The real wl_display */
- struct wl_display * wl_display;
+ struct wl_display *wl_display;
/* Actually a proxy wrapper around the event queue */
- struct wl_display * wl_display_wrapper;
- struct wl_event_queue * queue;
+ struct wl_display *wl_display_wrapper;
+ struct wl_event_queue *queue;
- struct wl_shm * wl_shm;
- struct zwp_linux_dmabuf_v1 * wl_dmabuf;
+ struct wl_shm *wl_shm;
+ struct zwp_linux_dmabuf_v1 *wl_dmabuf;
+ struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
+ struct wp_tearing_control_manager_v1 *tearing_control_manager;
+ struct wp_linux_drm_syncobj_manager_v1 *wl_syncobj;
+
+ struct dmabuf_feedback_format_table format_table;
+
+ /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
+ struct wp_presentation *wp_presentation_notwrapped;
struct wsi_wayland *wsi_wl;
/* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
- struct u_vector formats;
-
- /* Only used for displays created by wsi_wl_display_create */
- uint32_t refcount;
+ struct u_vector formats;
bool sw;
+
+ dev_t main_device;
+ bool same_gpu;
};
struct wsi_wayland {
- struct wsi_interface base;
+ struct wsi_interface base;
struct wsi_device *wsi;
@@ -83,6 +133,90 @@ struct wsi_wayland {
VkPhysicalDevice physical_device;
};
+struct wsi_wl_image {
+ struct wsi_image base;
+ struct wl_buffer *buffer;
+ bool busy;
+ int shm_fd;
+ void *shm_ptr;
+ unsigned shm_size;
+
+ struct wp_linux_drm_syncobj_timeline_v1 *wl_syncobj_timeline[WSI_ES_COUNT];
+};
+
+enum wsi_wl_buffer_type {
+ WSI_WL_BUFFER_NATIVE,
+ WSI_WL_BUFFER_GPU_SHM,
+ WSI_WL_BUFFER_SHM_MEMCPY,
+};
+
+struct wsi_wl_surface {
+ VkIcdSurfaceWayland base;
+
+ unsigned int chain_count;
+
+ struct wsi_wl_swapchain *chain;
+ struct wl_surface *surface;
+ struct wsi_wl_display *display;
+
+ struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
+ struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;
+
+ struct wp_linux_drm_syncobj_surface_v1 *wl_syncobj_surface;
+};
+
+struct wsi_wl_swapchain {
+ struct wsi_swapchain base;
+
+ struct wsi_wl_surface *wsi_wl_surface;
+ struct wp_tearing_control_v1 *tearing_control;
+
+ struct wl_callback *frame;
+
+ VkExtent2D extent;
+ VkFormat vk_format;
+ enum wsi_wl_buffer_type buffer_type;
+ uint32_t drm_format;
+ enum wl_shm_format shm_format;
+
+ bool suboptimal;
+ bool retired;
+
+ uint32_t num_drm_modifiers;
+ const uint64_t *drm_modifiers;
+
+ VkPresentModeKHR present_mode;
+ bool fifo_ready;
+
+ struct {
+ pthread_mutex_t lock; /* protects all members */
+ uint64_t max_completed;
+ struct wl_list outstanding_list;
+ pthread_cond_t list_advanced;
+ struct wl_event_queue *queue;
+ struct wp_presentation *wp_presentation;
+ /* Fallback when wp_presentation is not supported */
+ struct wl_surface *surface;
+ bool dispatch_in_progress;
+ } present_ids;
+
+ struct wsi_wl_image images[0];
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
+ VK_OBJECT_TYPE_SWAPCHAIN_KHR)
+
+static bool
+wsi_wl_use_explicit_sync(struct wsi_wl_display *display, struct wsi_device *device)
+{
+ return wsi_device_supports_explicit_sync(device) &&
+ display->wl_syncobj != NULL;
+}
+
+enum wsi_wl_fmt_flag {
+ WSI_WL_FMT_ALPHA = 1 << 0,
+ WSI_WL_FMT_OPAQUE = 1 << 1,
+};
+
static struct wsi_wl_format *
find_format(struct u_vector *formats, VkFormat format)
{
@@ -97,12 +231,17 @@ find_format(struct u_vector *formats, VkFormat format)
static struct wsi_wl_format *
wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
- struct u_vector *formats, VkFormat format)
+ struct u_vector *formats,
+ VkFormat format, uint32_t flags)
{
+ assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));
+
/* Don't add a format that's already in the list */
struct wsi_wl_format *f = find_format(formats, format);
- if (f)
+ if (f) {
+ f->flags |= flags;
return f;
+ }
/* Don't add formats that aren't renderable. */
VkFormatProperties props;
@@ -113,7 +252,7 @@ wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
return NULL;
struct u_vector modifiers;
- if (!u_vector_init(&modifiers, sizeof(uint64_t), 32))
+ if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
return NULL;
f = u_vector_add(formats);
@@ -123,6 +262,7 @@ wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
}
f->vk_format = format;
+ f->flags = flags;
f->modifiers = modifiers;
return f;
@@ -146,73 +286,158 @@ wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
}
static void
+wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
+ struct u_vector *formats,
+ VkFormat vk_format, uint32_t flags,
+ uint64_t modifier)
+{
+ struct wsi_wl_format *format;
+
+ format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
+ if (format)
+ wsi_wl_format_add_modifier(format, modifier);
+}
+
+static void
wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
struct u_vector *formats,
uint32_t drm_format, uint64_t modifier)
{
- struct wsi_wl_format *format = NULL, *srgb_format = NULL;
-
switch (drm_format) {
#if 0
/* TODO: These are only available when VK_EXT_4444_formats is enabled, so
* we probably need to make their use conditional on this extension. */
case DRM_FORMAT_ARGB4444:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A4R4G4B4_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_XRGB4444:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A4R4G4B4_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_ABGR4444:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A4B4G4R4_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_XBGR4444:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A4B4G4R4_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
#endif
/* Vulkan _PACKN formats have the same component order as DRM formats
* on little endian systems, on big endian there exists no analog. */
-#if MESA_LITTLE_ENDIAN
+#if UTIL_ARCH_LITTLE_ENDIAN
case DRM_FORMAT_RGBA4444:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_RGBX4444:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R4G4B4A4_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_BGRA4444:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_BGRX4444:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B4G4R4A4_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_RGB565:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R5G6B5_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R5G6B5_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+ modifier);
break;
case DRM_FORMAT_BGR565:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B5G6R5_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B5G6R5_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+ modifier);
break;
case DRM_FORMAT_ARGB1555:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_XRGB1555:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_A1R5G5B5_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_RGBA5551:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_RGBX5551:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R5G5B5A1_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_BGRA5551:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_BGRX5551:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B5G5R5A1_UNORM_PACK16);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_ARGB2101010:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_XRGB2101010:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_A2R10G10B10_UNORM_PACK32);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
case DRM_FORMAT_ABGR2101010:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
case DRM_FORMAT_XBGR2101010:
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_A2B10G10R10_UNORM_PACK32);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ WSI_WL_FMT_OPAQUE, modifier);
+ break;
+
+ /* Vulkan 16-bits-per-channel formats have an inverted channel order
+ * compared to DRM formats, just like the 8-bits-per-channel ones.
+ * On little endian systems the memory representation of each channel
+ * matches the DRM formats'. */
+ case DRM_FORMAT_ABGR16161616:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R16G16B16A16_UNORM,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
+ case DRM_FORMAT_XBGR16161616:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R16G16B16A16_UNORM,
+ WSI_WL_FMT_OPAQUE, modifier);
+ break;
+ case DRM_FORMAT_ABGR16161616F:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ WSI_WL_FMT_ALPHA, modifier);
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ WSI_WL_FMT_OPAQUE, modifier);
break;
#endif
@@ -227,68 +452,79 @@ wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
* From Wayland's perspective nothing changes, the difference is just how
* Vulkan interprets the pixel data. */
case DRM_FORMAT_XBGR8888:
- srgb_format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8_SRGB);
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8_UNORM);
- FALLTHROUGH;
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R8G8B8_SRGB,
+ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+ modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R8G8B8_UNORM,
+ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+ modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R8G8B8A8_SRGB,
+ WSI_WL_FMT_OPAQUE, modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R8G8B8A8_UNORM,
+ WSI_WL_FMT_OPAQUE, modifier);
+ break;
case DRM_FORMAT_ABGR8888:
- srgb_format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8A8_SRGB);
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8A8_UNORM);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R8G8B8A8_SRGB,
+ WSI_WL_FMT_ALPHA, modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_R8G8B8A8_UNORM,
+ WSI_WL_FMT_ALPHA, modifier);
break;
case DRM_FORMAT_XRGB8888:
- srgb_format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8_SRGB);
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8_UNORM);
- FALLTHROUGH;
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B8G8R8_SRGB,
+ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+ modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B8G8R8_UNORM,
+ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+ modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B8G8R8A8_SRGB,
+ WSI_WL_FMT_OPAQUE, modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B8G8R8A8_UNORM,
+ WSI_WL_FMT_OPAQUE, modifier);
+ break;
case DRM_FORMAT_ARGB8888:
- srgb_format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8A8_SRGB);
- format = wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8A8_UNORM);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B8G8R8A8_SRGB,
+ WSI_WL_FMT_ALPHA, modifier);
+ wsi_wl_display_add_vk_format_modifier(display, formats,
+ VK_FORMAT_B8G8R8A8_UNORM,
+ WSI_WL_FMT_ALPHA, modifier);
break;
}
+}
- if (format)
- wsi_wl_format_add_modifier(format, modifier);
- if (srgb_format)
- wsi_wl_format_add_modifier(srgb_format, modifier);
+static uint32_t
+drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
+{
+ /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
+ switch (shm_format) {
+ case WL_SHM_FORMAT_ARGB8888:
+ return DRM_FORMAT_ARGB8888;
+ case WL_SHM_FORMAT_XRGB8888:
+ return DRM_FORMAT_XRGB8888;
+ default:
+ return shm_format;
+ }
}
static void
wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
struct u_vector *formats,
- uint32_t wl_shm_format)
-{
- switch (wl_shm_format) {
- case WL_SHM_FORMAT_XBGR8888:
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8_SRGB);
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8_UNORM);
- FALLTHROUGH;
- case WL_SHM_FORMAT_ABGR8888:
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8A8_SRGB);
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_R8G8B8A8_UNORM);
- break;
- case WL_SHM_FORMAT_XRGB8888:
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8_SRGB);
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8_UNORM);
- FALLTHROUGH;
- case WL_SHM_FORMAT_ARGB8888:
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8A8_SRGB);
- wsi_wl_display_add_vk_format(display, formats,
- VK_FORMAT_B8G8R8A8_UNORM);
- break;
- }
+ enum wl_shm_format shm_format)
+{
+ uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);
+
+ wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
+ DRM_FORMAT_MOD_INVALID);
}
static uint32_t
@@ -296,12 +532,12 @@ wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
switch (vk_format) {
#if 0
- case VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT:
+ case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
- case VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT:
+ case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
#endif
-#if MESA_LITTLE_ENDIAN
+#if UTIL_ARCH_LITTLE_ENDIAN
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
@@ -320,6 +556,10 @@ wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
#endif
case VK_FORMAT_R8G8B8_UNORM:
case VK_FORMAT_R8G8B8_SRGB:
@@ -336,24 +576,26 @@ wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
default:
assert(!"Unsupported Vulkan format");
- return 0;
+ return DRM_FORMAT_INVALID;
}
}
-static uint32_t
+static enum wl_shm_format
wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
- switch (vk_format) {
- case VK_FORMAT_R8G8B8A8_UNORM:
- case VK_FORMAT_R8G8B8A8_SRGB:
- return alpha ? WL_SHM_FORMAT_ABGR8888 : WL_SHM_FORMAT_XBGR8888;
- case VK_FORMAT_B8G8R8A8_UNORM:
- case VK_FORMAT_B8G8R8A8_SRGB:
- return alpha ? WL_SHM_FORMAT_ARGB8888 : WL_SHM_FORMAT_XRGB8888;
+ uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
+ if (drm_format == DRM_FORMAT_INVALID) {
+ return 0;
+ }
+ /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
+ switch (drm_format) {
+ case DRM_FORMAT_ARGB8888:
+ return WL_SHM_FORMAT_ARGB8888;
+ case DRM_FORMAT_XRGB8888:
+ return WL_SHM_FORMAT_XRGB8888;
default:
- assert(!"Unsupported Vulkan format");
- return 0;
+ return drm_format;
}
}
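Both helpers lean on the wl_shm convention that every format code other than ARGB8888 and XRGB8888 (which the protocol pins to 0 and 1) is numerically equal to the corresponding DRM fourcc, so converting a Vulkan format to a wl_shm code and back should round-trip cleanly. A small sanity check, written as a sketch rather than code from the patch:

/* Sketch only: round-tripping through the two helpers above must land on
 * the DRM format that wl_drm_format_for_vk_format() would have picked. */
assert(drm_format_for_wl_shm_format(
          wl_shm_format_for_vk_format(VK_FORMAT_B8G8R8A8_UNORM, true)) ==
       DRM_FORMAT_ARGB8888);
assert(drm_format_for_wl_shm_format(
          wl_shm_format_for_vk_format(VK_FORMAT_R8G8B8A8_UNORM, false)) ==
       DRM_FORMAT_XBGR8888);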
@@ -373,6 +615,12 @@ dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
struct wsi_wl_display *display = data;
uint64_t modifier;
+ /* Ignore this if the compositor advertised dma-buf feedback. From version 4
+ * onwards (when dma-buf feedback was introduced), the compositor should no
+ * longer send this event, but keep the check for safety. */
+ if (display->wl_dmabuf_feedback)
+ return;
+
modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
wsi_wl_display_add_drm_format_modifier(display, &display->formats,
format, modifier);
@@ -384,6 +632,158 @@ static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
};
static void
+dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
+{
+ if (format_table->data && format_table->data != MAP_FAILED)
+ munmap(format_table->data, format_table->size);
+}
+
+static void
+dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
+{
+ memset(format_table, 0, sizeof(*format_table));
+}
+
+static void
+dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
+{
+ struct wsi_wl_format *format;
+
+ u_vector_foreach(format, &tranche->formats)
+ u_vector_finish(&format->modifiers);
+
+ u_vector_finish(&tranche->formats);
+}
+
+static int
+dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
+{
+ memset(tranche, 0, sizeof(*tranche));
+
+ if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
+ return -1;
+
+ return 0;
+}
+
+static void
+dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
+{
+ dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);
+
+ util_dynarray_foreach(&dmabuf_feedback->tranches,
+ struct dmabuf_feedback_tranche, tranche)
+ dmabuf_feedback_tranche_fini(tranche);
+ util_dynarray_fini(&dmabuf_feedback->tranches);
+
+ dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
+}
+
+static int
+dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
+{
+ memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
+
+ if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
+ return -1;
+
+ util_dynarray_init(&dmabuf_feedback->tranches, NULL);
+
+ dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
+
+ return 0;
+}
+
+static void
+default_dmabuf_feedback_format_table(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
+ int32_t fd, uint32_t size)
+{
+ struct wsi_wl_display *display = data;
+
+ display->format_table.size = size;
+ display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+ close(fd);
+}
+
+static void
+default_dmabuf_feedback_main_device(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ struct wl_array *device)
+{
+ struct wsi_wl_display *display = data;
+
+ assert(device->size == sizeof(dev_t));
+ memcpy(&display->main_device, device->data, device->size);
+}
+
+static void
+default_dmabuf_feedback_tranche_target_device(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ struct wl_array *device)
+{
+ /* ignore this event */
+}
+
+static void
+default_dmabuf_feedback_tranche_flags(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ uint32_t flags)
+{
+ /* ignore this event */
+}
+
+static void
+default_dmabuf_feedback_tranche_formats(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ struct wl_array *indices)
+{
+ struct wsi_wl_display *display = data;
+ uint32_t format;
+ uint64_t modifier;
+ uint16_t *index;
+
+ /* We couldn't map the format table or the compositor didn't advertise it,
+ * so we have to ignore the feedback. */
+ if (display->format_table.data == MAP_FAILED ||
+ display->format_table.data == NULL)
+ return;
+
+ wl_array_for_each(index, indices) {
+ format = display->format_table.data[*index].format;
+ modifier = display->format_table.data[*index].modifier;
+ wsi_wl_display_add_drm_format_modifier(display, &display->formats,
+ format, modifier);
+ }
+}
+
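The format_table.data[*index].format indexing above assumes the 16-byte entry layout that the linux-dmabuf-v1 protocol mandates for the format table. As a sketch of that layout (the struct name here is illustrative; the real declaration accompanies struct dmabuf_feedback_format_table):

/* Illustrative only: each entry in the mmap'ed format table is 16 bytes. */
struct format_table_entry {
   uint32_t format;    /* DRM fourcc */
   uint32_t padding;   /* reserved */
   uint64_t modifier;  /* DRM format modifier */
};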
+static void
+default_dmabuf_feedback_tranche_done(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+{
+ /* ignore this event */
+}
+
+static void
+default_dmabuf_feedback_done(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+{
+ /* ignore this event */
+}
+
+static const struct zwp_linux_dmabuf_feedback_v1_listener
+dmabuf_feedback_listener = {
+ .format_table = default_dmabuf_feedback_format_table,
+ .main_device = default_dmabuf_feedback_main_device,
+ .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
+ .tranche_flags = default_dmabuf_feedback_tranche_flags,
+ .tranche_formats = default_dmabuf_feedback_tranche_formats,
+ .tranche_done = default_dmabuf_feedback_tranche_done,
+ .done = default_dmabuf_feedback_done,
+};
+
+static void
shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
{
struct wsi_wl_display *display = data;
@@ -402,18 +802,29 @@ registry_handle_global(void *data, struct wl_registry *registry,
struct wsi_wl_display *display = data;
if (display->sw) {
- if (strcmp(interface, "wl_shm") == 0) {
+ if (strcmp(interface, wl_shm_interface.name) == 0) {
display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
wl_shm_add_listener(display->wl_shm, &shm_listener, display);
}
- return;
+ } else {
+ if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
+ display->wl_dmabuf =
+ wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
+ MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
+ zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
+ &dmabuf_listener, display);
+ } else if (strcmp(interface, wp_linux_drm_syncobj_manager_v1_interface.name) == 0) {
+ display->wl_syncobj =
+ wl_registry_bind(registry, name, &wp_linux_drm_syncobj_manager_v1_interface, 1);
+ }
}
- if (strcmp(interface, "zwp_linux_dmabuf_v1") == 0 && version >= 3) {
- display->wl_dmabuf =
- wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface, 3);
- zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
- &dmabuf_listener, display);
+ if (strcmp(interface, wp_presentation_interface.name) == 0) {
+ display->wp_presentation_notwrapped =
+ wl_registry_bind(registry, name, &wp_presentation_interface, 1);
+ } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
+ display->tearing_control_manager =
+ wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
}
}
@@ -430,16 +841,20 @@ static const struct wl_registry_listener registry_listener = {
static void
wsi_wl_display_finish(struct wsi_wl_display *display)
{
- assert(display->refcount == 0);
-
struct wsi_wl_format *f;
u_vector_foreach(f, &display->formats)
u_vector_finish(&f->modifiers);
u_vector_finish(&display->formats);
if (display->wl_shm)
wl_shm_destroy(display->wl_shm);
+ if (display->wl_syncobj)
+ wp_linux_drm_syncobj_manager_v1_destroy(display->wl_syncobj);
if (display->wl_dmabuf)
zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
+ if (display->wp_presentation_notwrapped)
+ wp_presentation_destroy(display->wp_presentation_notwrapped);
+ if (display->tearing_control_manager)
+ wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
if (display->wl_display_wrapper)
wl_proxy_wrapper_destroy(display->wl_display_wrapper);
if (display->queue)
@@ -455,15 +870,15 @@ wsi_wl_display_init(struct wsi_wayland *wsi_wl,
VkResult result = VK_SUCCESS;
memset(display, 0, sizeof(*display));
- if (!u_vector_init(&display->formats, sizeof(struct wsi_wl_format),
- 8 * sizeof(struct wsi_wl_format)))
+ if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
return VK_ERROR_OUT_OF_HOST_MEMORY;
display->wsi_wl = wsi_wl;
display->wl_display = wl_display;
display->sw = sw;
- display->queue = wl_display_create_queue(wl_display);
+ display->queue = wl_display_create_queue_with_name(wl_display,
+ "mesa vk display queue");
if (!display->queue) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail;
@@ -498,6 +913,36 @@ wsi_wl_display_init(struct wsi_wayland *wsi_wl,
if (!get_format_list)
goto out;
+ /* Default assumption */
+ display->same_gpu = true;
+
+ /* Get the default dma-buf feedback */
+ if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
+ ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
+ dmabuf_feedback_format_table_init(&display->format_table);
+ display->wl_dmabuf_feedback =
+ zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
+ zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
+ &dmabuf_feedback_listener, display);
+
+ /* Round-trip again to fetch dma-buf feedback */
+ wl_display_roundtrip_queue(display->wl_display, display->queue);
+
+ if (wsi_wl->wsi->drm_info.hasRender ||
+ wsi_wl->wsi->drm_info.hasPrimary) {
+ /* Apparently some Wayland compositors do not send the render
+ * device node but the primary one, so test against both.
+ */
+ display->same_gpu =
+ (wsi_wl->wsi->drm_info.hasRender &&
+ major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
+ minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
+ (wsi_wl->wsi->drm_info.hasPrimary &&
+ major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
+ minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
+ }
+ }
+
/* Round-trip again to get formats and modifiers */
wl_display_roundtrip_queue(display->wl_display, display->queue);
@@ -505,7 +950,7 @@ wsi_wl_display_init(struct wsi_wayland *wsi_wl,
/* Find BGRA8_UNORM in the list and swap it to the first position if we
* can find it. Some apps get confused if SRGB is first in the list.
*/
- struct wsi_wl_format *first_fmt = u_vector_head(&display->formats);
+ struct wsi_wl_format *first_fmt = u_vector_tail(&display->formats);
struct wsi_wl_format *f, tmp_fmt;
f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
if (f) {
@@ -519,7 +964,12 @@ out:
/* We don't need this anymore */
wl_registry_destroy(registry);
- display->refcount = 0;
+ /* Destroy default dma-buf feedback object and format table */
+ if (display->wl_dmabuf_feedback) {
+ zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
+ display->wl_dmabuf_feedback = NULL;
+ dmabuf_feedback_format_table_fini(&display->format_table);
+ }
return VK_SUCCESS;
@@ -550,37 +1000,32 @@ wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
return result;
}
- display->refcount++;
*display_out = display;
return result;
}
-static struct wsi_wl_display *
-wsi_wl_display_ref(struct wsi_wl_display *display)
-{
- display->refcount++;
- return display;
-}
-
static void
-wsi_wl_display_unref(struct wsi_wl_display *display)
+wsi_wl_display_destroy(struct wsi_wl_display *display)
{
- if (display->refcount-- > 1)
- return;
-
struct wsi_wayland *wsi = display->wsi_wl;
wsi_wl_display_finish(display);
vk_free(wsi->alloc, display);
}
-VkBool32
-wsi_wl_get_presentation_support(struct wsi_device *wsi_device,
- struct wl_display *wl_display)
+VKAPI_ATTR VkBool32 VKAPI_CALL
+wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ struct wl_display *wl_display)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
struct wsi_wayland *wsi =
(struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+ if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
+ return false;
+
struct wsi_wl_display display;
VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
wsi_device->sw);
@@ -601,23 +1046,34 @@ wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
return VK_SUCCESS;
}
-static const VkPresentModeKHR present_modes[] = {
- VK_PRESENT_MODE_MAILBOX_KHR,
- VK_PRESENT_MODE_FIFO_KHR,
-};
+static uint32_t
+wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT *present_mode)
+{
+ if (present_mode && (present_mode->presentMode == VK_PRESENT_MODE_FIFO_KHR ||
+ present_mode->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)) {
+ /* If we receive a FIFO present mode, only 2 images are required for forward progress.
+ * Performance with 2 images will be questionable, but we only allow it for applications
+ * using the new API, so we don't risk breaking any existing apps this way.
+ * Other ICDs already expose 2 images here. */
+ return 2;
+ } else {
+ /* For true mailbox mode, we need at least 4 images:
+ * 1) One to scan out from
+ * 2) One to have queued for scan-out
+ * 3) One to be currently held by the Wayland compositor
+ * 4) One to render to
+ */
+ return 4;
+ }
+}
static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
struct wsi_device *wsi_device,
+ const VkSurfacePresentModeEXT *present_mode,
VkSurfaceCapabilitiesKHR* caps)
{
- /* For true mailbox mode, we need at least 4 images:
- * 1) One to scan out from
- * 2) One to have queued for scan-out
- * 3) One to be currently held by the Wayland compositor
- * 4) One to render to
- */
- caps->minImageCount = 4;
+ caps->minImageCount = wsi_wl_surface_get_min_image_count(present_mode);
/* There is no real maximum */
caps->maxImageCount = 0;
@@ -636,12 +1092,11 @@ wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
- caps->supportedUsageFlags =
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT |
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ caps->supportedUsageFlags = wsi_caps_get_image_usage();
+
+ VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
+ if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
+ caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
return VK_SUCCESS;
}
@@ -654,8 +1109,10 @@ wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
{
assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
+ const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
+
VkResult result =
- wsi_wl_surface_get_capabilities(surface, wsi_device,
+ wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
&caps->surfaceCapabilities);
vk_foreach_struct(ext, caps->pNext) {
@@ -666,6 +1123,62 @@ wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
break;
}
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
+ /* Unsupported. */
+ VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
+ scaling->supportedPresentScaling = 0;
+ scaling->supportedPresentGravityX = 0;
+ scaling->supportedPresentGravityY = 0;
+ scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
+ scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
+ /* Can easily toggle between FIFO and MAILBOX on Wayland. */
+ VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
+ if (compat->pPresentModes) {
+ assert(present_mode);
+ VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
+ /* Must always return the queried present mode, even when truncating. */
+ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+ *mode = present_mode->presentMode;
+ }
+ switch (present_mode->presentMode) {
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+ *mode = VK_PRESENT_MODE_FIFO_KHR;
+ }
+ break;
+ case VK_PRESENT_MODE_FIFO_KHR:
+ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+ *mode = VK_PRESENT_MODE_MAILBOX_KHR;
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (!present_mode) {
+ wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
+ "without a VkSurfacePresentModeEXT set. This is an "
+ "application bug.\n");
+ compat->presentModeCount = 1;
+ } else {
+ switch (present_mode->presentMode) {
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ case VK_PRESENT_MODE_FIFO_KHR:
+ compat->presentModeCount = 2;
+ break;
+ default:
+ compat->presentModeCount = 1;
+ break;
+ }
+ }
+ }
+ break;
+ }
+
default:
/* Ignored */
break;
@@ -690,13 +1203,21 @@ wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
wsi_device->sw))
return VK_ERROR_SURFACE_LOST_KHR;
- VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
+ pSurfaceFormats, pSurfaceFormatCount);
struct wsi_wl_format *disp_fmt;
u_vector_foreach(disp_fmt, &display.formats) {
- vk_outarray_append(&out, out_fmt) {
+ /* Skip formats for which we can't support both the alpha and
+ * opaque variants.
+ */
+ if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
+ !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
+ continue;
+
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
out_fmt->format = disp_fmt->vk_format;
- out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
}
}
@@ -721,13 +1242,21 @@ wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
wsi_device->sw))
return VK_ERROR_SURFACE_LOST_KHR;
- VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
+ pSurfaceFormats, pSurfaceFormatCount);
struct wsi_wl_format *disp_fmt;
u_vector_foreach(disp_fmt, &display.formats) {
- vk_outarray_append(&out, out_fmt) {
+ /* Skip formats for which we can't support both the alpha and
+ * opaque variants.
+ */
+ if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
+ !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
+ continue;
+
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
out_fmt->surfaceFormat.format = disp_fmt->vk_format;
- out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
}
}
@@ -737,19 +1266,42 @@ wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
}
static VkResult
-wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
+wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes)
{
+ VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
+ struct wsi_wayland *wsi =
+ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+
+ struct wsi_wl_display display;
+ if (wsi_wl_display_init(wsi, &display, surface->display, true,
+ wsi_device->sw))
+ return VK_ERROR_SURFACE_LOST_KHR;
+
+ VkPresentModeKHR present_modes[3];
+ uint32_t present_modes_count = 0;
+
+ /* The following two modes are always supported */
+ present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
+ present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;
+
+ if (display.tearing_control_manager)
+ present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;
+
+ assert(present_modes_count <= ARRAY_SIZE(present_modes));
+ wsi_wl_display_finish(&display);
+
if (pPresentModes == NULL) {
- *pPresentModeCount = ARRAY_SIZE(present_modes);
+ *pPresentModeCount = present_modes_count;
return VK_SUCCESS;
}
- *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
+ *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
- if (*pPresentModeCount < ARRAY_SIZE(present_modes))
+ if (*pPresentModeCount < present_modes_count)
return VK_INCOMPLETE;
else
return VK_SUCCESS;
@@ -761,9 +1313,9 @@ wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
uint32_t* pRectCount,
VkRect2D* pRects)
{
- VK_OUTARRAY_MAKE(out, pRects, pRectCount);
+ VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
- vk_outarray_append(&out, rect) {
+ vk_outarray_append_typed(VkRect2D, &out, rect) {
/* We don't know a size so just return the usual "I don't know." */
*rect = (VkRect2D) {
.offset = { 0, 0 },
@@ -774,17 +1326,317 @@ wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
return vk_outarray_status(&out);
}
-VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
- const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface)
+void
+wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ struct wsi_wl_surface *wsi_wl_surface =
+ wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
+
+ if (wsi_wl_surface->wl_syncobj_surface)
+ wp_linux_drm_syncobj_surface_v1_destroy(wsi_wl_surface->wl_syncobj_surface);
+
+ if (wsi_wl_surface->wl_dmabuf_feedback) {
+ zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
+ dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
+ dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
+ }
+
+ if (wsi_wl_surface->surface)
+ wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
+
+ if (wsi_wl_surface->display)
+ wsi_wl_display_destroy(wsi_wl_surface->display);
+
+ vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
+}
+
+static struct wsi_wl_format *
+pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
+ VkFormat vk_format)
+{
+ struct wsi_wl_format *f = NULL;
+
+ /* If the main_device was not advertised, we don't have valid feedback */
+ if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
+ return NULL;
+
+ util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
+ struct dmabuf_feedback_tranche, tranche) {
+ f = find_format(&tranche->formats, vk_format);
+ if (f)
+ break;
+ }
+
+ return f;
+}
+
+static void
+surface_dmabuf_feedback_format_table(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
+ int32_t fd, uint32_t size)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+
+ feedback->format_table.size = size;
+ feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+ close(fd);
+}
+
+static void
+surface_dmabuf_feedback_main_device(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ struct wl_array *device)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+
+ memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
+}
+
+static void
+surface_dmabuf_feedback_tranche_target_device(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ struct wl_array *device)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+
+ memcpy(&feedback->pending_tranche.target_device, device->data,
+ sizeof(feedback->pending_tranche.target_device));
+}
+
+static void
+surface_dmabuf_feedback_tranche_flags(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ uint32_t flags)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+
+ feedback->pending_tranche.flags = flags;
+}
+
+static void
+surface_dmabuf_feedback_tranche_formats(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+ struct wl_array *indices)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+ uint32_t format;
+ uint64_t modifier;
+ uint16_t *index;
+
+ /* The compositor may or may not advertise a format table. If it does, we use
+ * it. Otherwise, we steal the most recently advertised format table. If there
+ * is no previously advertised format table either, the compositor did
+ * something wrong. */
+ if (feedback->format_table.data == NULL) {
+ feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
+ dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
+ }
+ if (feedback->format_table.data == MAP_FAILED ||
+ feedback->format_table.data == NULL)
+ return;
+
+ wl_array_for_each(index, indices) {
+ format = feedback->format_table.data[*index].format;
+ modifier = feedback->format_table.data[*index].modifier;
+
+ wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
+ &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
+ format, modifier);
+ }
+}
+
+static void
+surface_dmabuf_feedback_tranche_done(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+
+ /* Add tranche to array of tranches. */
+ util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
+ feedback->pending_tranche);
+
+ dmabuf_feedback_tranche_init(&feedback->pending_tranche);
+}
+
+static bool
+sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
+ uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
+{
+ uint32_t i, j;
+ bool mod_found;
+
+ if (num_drm_modifiers_A != num_drm_modifiers_B)
+ return false;
+
+ for (i = 0; i < num_drm_modifiers_A; i++) {
+ mod_found = false;
+ for (j = 0; j < num_drm_modifiers_B; j++) {
+ if (modifiers_A[i] == modifiers_B[j]) {
+ mod_found = true;
+ break;
+ }
+ }
+ if (!mod_found)
+ return false;
+ }
+
+ return true;
+}
+
+static void
+surface_dmabuf_feedback_done(void *data,
+ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+{
+ struct wsi_wl_surface *wsi_wl_surface = data;
+ struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
+ struct wsi_wl_format *f;
+
+ dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
+ wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
+ dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);
+
+ /* Receiving dma-buf feedback does not by itself mean that re-allocation is a
+ * good idea. In order to know whether we should re-allocate, we must compare
+ * the most recent parameters that we used to allocate with the ones from the
+ * feedback we just received.
+ *
+ * The allocation parameters are: the format, its set of modifiers and the
+ * tranche flags. In WSI we are not using the tranche flags for anything, so
+ * we disregard them. As we can't switch to another format (it is selected by
+ * the client), we just need to compare the sets of modifiers.
+ *
+ * So we look for the vk_format in the tranches (respecting their
+ * preferences), and compare its set of modifiers with the set of modifiers
+ * we've used to allocate previously. If they differ, we are using suboptimal
+ * parameters and should re-allocate.
+ */
+ f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
+ if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
+ u_vector_tail(&f->modifiers),
+ chain->num_drm_modifiers,
+ chain->drm_modifiers))
+ wsi_wl_surface->chain->suboptimal = true;
+}
+
+static const struct zwp_linux_dmabuf_feedback_v1_listener
+surface_dmabuf_feedback_listener = {
+ .format_table = surface_dmabuf_feedback_format_table,
+ .main_device = surface_dmabuf_feedback_main_device,
+ .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
+ .tranche_flags = surface_dmabuf_feedback_tranche_flags,
+ .tranche_formats = surface_dmabuf_feedback_tranche_formats,
+ .tranche_done = surface_dmabuf_feedback_tranche_done,
+ .done = surface_dmabuf_feedback_done,
+};
+
+static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
+{
+ wsi_wl_surface->wl_dmabuf_feedback =
+ zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
+ wsi_wl_surface->surface);
+
+ zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
+ &surface_dmabuf_feedback_listener,
+ wsi_wl_surface);
+
+ if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
+ goto fail;
+ if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
+ goto fail_pending;
+
+ return VK_SUCCESS;
+
+fail_pending:
+ dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
+fail:
+ zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
+ wsi_wl_surface->wl_dmabuf_feedback = NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+}
+
+static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
+ struct wsi_device *wsi_device)
+{
+ struct wsi_wayland *wsi =
+ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+ VkResult result;
+
+ /* wsi_wl_surface has already been initialized. */
+ if (wsi_wl_surface->display)
+ return VK_SUCCESS;
+
+ result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
+ wsi_device->sw, &wsi_wl_surface->display);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
+ if (!wsi_wl_surface->surface) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail;
+ }
+ wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
+ wsi_wl_surface->display->queue);
+
+ /* Bind wsi_wl_surface to dma-buf feedback. */
+ if (wsi_wl_surface->display->wl_dmabuf &&
+ zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
+ ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
+ result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
+ wsi_wl_surface->display->queue);
+ }
+
+ if (wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device)) {
+ wsi_wl_surface->wl_syncobj_surface =
+ wp_linux_drm_syncobj_manager_v1_get_surface(wsi_wl_surface->display->wl_syncobj,
+ wsi_wl_surface->surface);
+
+ if (!wsi_wl_surface->wl_syncobj_surface)
+ goto fail;
+ }
+
+ return VK_SUCCESS;
+
+fail:
+ if (wsi_wl_surface->surface)
+ wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
+
+ if (wsi_wl_surface->display)
+ wsi_wl_display_destroy(wsi_wl_surface->display);
+ return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
+ const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface)
{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ struct wsi_wl_surface *wsi_wl_surface;
VkIcdSurfaceWayland *surface;
- surface = vk_alloc(pAllocator, sizeof *surface, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (surface == NULL)
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
+
+ wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (wsi_wl_surface == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
+ surface = &wsi_wl_surface->base;
+
surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
surface->display = pCreateInfo->display;
surface->surface = pCreateInfo->surface;
@@ -794,123 +1646,313 @@ VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
return VK_SUCCESS;
}
-struct wsi_wl_image {
- struct wsi_image base;
- struct wl_buffer * buffer;
- bool busy;
- void * data_ptr;
- uint32_t data_size;
+struct wsi_wl_present_id {
+ struct wp_presentation_feedback *feedback;
+ /* Fallback when wp_presentation is not supported.
+ * Using a frame callback is not the intended way to achieve this, but it is
+ * the best-effort alternative when the proper interface is not available.
+ * This approach also matches Xwayland, which uses a frame callback to signal
+ * DRI3 COMPLETE. */
+ struct wl_callback *frame;
+ uint64_t present_id;
+ const VkAllocationCallbacks *alloc;
+ struct wsi_wl_swapchain *chain;
+ struct wl_list link;
};
-struct wsi_wl_swapchain {
- struct wsi_swapchain base;
+static struct wsi_image *
+wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
+ uint32_t image_index)
+{
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ return &chain->images[image_index].base;
+}
- struct wsi_wl_display *display;
+static VkResult
+wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
+ uint32_t count, const uint32_t *indices)
+{
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ for (uint32_t i = 0; i < count; i++) {
+ uint32_t index = indices[i];
+ chain->images[index].busy = false;
+ }
+ return VK_SUCCESS;
+}
- struct wl_surface * surface;
+static void
+wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
+ VkPresentModeKHR mode)
+{
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ chain->base.present_mode = mode;
+}
- struct wl_callback * frame;
+static VkResult
+wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
+ uint64_t present_id,
+ uint64_t timeout)
+{
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
- VkExtent2D extent;
- VkFormat vk_format;
- uint32_t drm_format;
- uint32_t shm_format;
+ /* We might not own this surface if we're retired, but it is only used here to
+ * read events from the present ID queue. This queue is private to a given VkSwapchainKHR,
+ * so calling present wait on a retired swapchain cannot interfere with a non-retired swapchain. */
+ struct wl_display *wl_display = chain->wsi_wl_surface->display->wl_display;
- uint32_t num_drm_modifiers;
- const uint64_t * drm_modifiers;
+ struct timespec end_time;
+ VkResult ret;
+ int err;
- VkPresentModeKHR present_mode;
- bool fifo_ready;
+ uint64_t atimeout;
+ if (timeout == 0 || timeout == UINT64_MAX)
+ atimeout = timeout;
+ else
+ atimeout = os_time_get_absolute_timeout(timeout);
- struct wsi_wl_image images[0];
-};
-VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
- VK_OBJECT_TYPE_SWAPCHAIN_KHR)
+ /* Need to observe that the swapchain semaphore has been unsignalled,
+ * as this is guaranteed when a present is complete. */
+ VkResult result = wsi_swapchain_wait_for_present_semaphore(
+ &chain->base, present_id, timeout);
+ if (result != VK_SUCCESS)
+ return result;
-static struct wsi_image *
-wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
- uint32_t image_index)
+ /* If using a frame callback, guard against lack of forward progress of the
+ * frame callback in some situations, e.g. the surface might not be visible.
+ * If rendering has completed on the GPU and we still haven't received a
+ * callback after 100ms, unblock the application. 100ms is chosen arbitrarily.
+ * The queue depth in WL WSI is just one frame due to the frame callback in
+ * FIFO mode, so the time from when a frame finishes rendering to when it
+ * should be considered presented will not exceed 100ms except in contrived
+ * edge cases. */
+ uint64_t assumed_success_at = UINT64_MAX;
+ if (!chain->present_ids.wp_presentation)
+ assumed_success_at = os_time_get_absolute_timeout(100 * 1000 * 1000);
+
+ /* If the app's timeout is beyond the deadline we set for a reply,
+ * always treat the timeout as successful. */
+ VkResult timeout_result = assumed_success_at < atimeout ? VK_SUCCESS : VK_TIMEOUT;
+ timespec_from_nsec(&end_time, MIN2(atimeout, assumed_success_at));
+
+ /* PresentWait can be called concurrently.
+ * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
+ * The lock is only held while there is forward progress processing events from Wayland,
+ * so there should be no problem locking without timeout.
+ * We would like to be able to support timeout = 0 to query the current max_completed count.
+ * A timedlock with no timeout can be problematic in that scenario. */
+ err = pthread_mutex_lock(&chain->present_ids.lock);
+ if (err != 0)
+ return VK_ERROR_OUT_OF_DATE_KHR;
+
+ if (chain->present_ids.max_completed >= present_id) {
+ pthread_mutex_unlock(&chain->present_ids.lock);
+ return VK_SUCCESS;
+ }
+
+ /* Someone else is dispatching events; wait for them to update the chain
+ * status and wake us up. */
+ while (chain->present_ids.dispatch_in_progress) {
+ err = pthread_cond_timedwait(&chain->present_ids.list_advanced,
+ &chain->present_ids.lock, &end_time);
+
+ if (err == ETIMEDOUT) {
+ pthread_mutex_unlock(&chain->present_ids.lock);
+ return timeout_result;
+ } else if (err != 0) {
+ pthread_mutex_unlock(&chain->present_ids.lock);
+ return VK_ERROR_OUT_OF_DATE_KHR;
+ }
+
+ if (chain->present_ids.max_completed >= present_id) {
+ pthread_mutex_unlock(&chain->present_ids.lock);
+ return VK_SUCCESS;
+ }
+
+ /* Whoever was previously dispatching the events isn't anymore, so we
+ * will take over and fall through below. */
+ if (!chain->present_ids.dispatch_in_progress)
+ break;
+ }
+
+ assert(!chain->present_ids.dispatch_in_progress);
+ chain->present_ids.dispatch_in_progress = true;
+
+ /* Whether or not we were dispatching the events before, we are now. */
+ while (1) {
+ if (chain->present_ids.max_completed >= present_id) {
+ ret = VK_SUCCESS;
+ break;
+ }
+ /* We drop the lock now - we're still protected by dispatch_in_progress,
+ * and holding the lock while dispatch_queue_timeout waits in poll()
+ * might delay other threads unnecessarily.
+ *
+ * We'll pick up the lock again in the dispatched functions.
+ */
+ pthread_mutex_unlock(&chain->present_ids.lock);
+
+ struct timespec current_time, remaining_timeout;
+ clock_gettime(CLOCK_MONOTONIC, &current_time);
+ timespec_sub_saturate(&remaining_timeout, &end_time, &current_time);
+ ret = wl_display_dispatch_queue_timeout(wl_display,
+ chain->present_ids.queue,
+ &remaining_timeout);
+ pthread_mutex_lock(&chain->present_ids.lock);
+ if (ret == -1) {
+ ret = VK_ERROR_OUT_OF_DATE_KHR;
+ break;
+ }
+ if (ret == 0) {
+ ret = timeout_result;
+ break;
+ }
+
+ /* Wake up other waiters who may have been unblocked by the events
+ * we just read. */
+ pthread_cond_broadcast(&chain->present_ids.list_advanced);
+ }
+
+ assert(chain->present_ids.dispatch_in_progress);
+ chain->present_ids.dispatch_in_progress = false;
+ pthread_cond_broadcast(&chain->present_ids.list_advanced);
+ pthread_mutex_unlock(&chain->present_ids.lock);
+ return ret;
+}
+
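The locking scheme in wsi_wl_swapchain_wait_for_present, where exactly one waiter owns Wayland event dispatch while the others sleep on a condition variable, is easier to see stripped of the timeout handling. A minimal sketch with hypothetical names, not the actual implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct waiter_queue {
   pthread_mutex_t lock;
   pthread_cond_t advanced;
   bool dispatch_in_progress;
   uint64_t completed;
};

/* Stand-in for wl_display_dispatch_queue_timeout(); reads events and may
 * bump q->completed (taking q->lock) from a protocol callback. */
void dispatch_events(struct waiter_queue *q);

void
wait_until(struct waiter_queue *q, uint64_t wanted)
{
   pthread_mutex_lock(&q->lock);
   while (q->completed < wanted) {
      if (q->dispatch_in_progress) {
         /* Another thread is reading events; sleep until it reports progress. */
         pthread_cond_wait(&q->advanced, &q->lock);
         continue;
      }
      /* Become the dispatcher. Drop the lock while blocked reading events so
       * other waiters are not held up, then retake it to publish progress. */
      q->dispatch_in_progress = true;
      pthread_mutex_unlock(&q->lock);
      dispatch_events(q);
      pthread_mutex_lock(&q->lock);
      q->dispatch_in_progress = false;
      pthread_cond_broadcast(&q->advanced);
   }
   pthread_mutex_unlock(&q->lock);
}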
+static VkResult
+wsi_wl_swapchain_acquire_next_image_explicit(struct wsi_swapchain *wsi_chain,
+ const VkAcquireNextImageInfoKHR *info,
+ uint32_t *image_index)
{
struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
- return &chain->images[image_index].base;
+
+ /* See comments in queue_present() */
+ if (chain->retired)
+ return VK_ERROR_OUT_OF_DATE_KHR;
+
+ STACK_ARRAY(struct wsi_image*, images, wsi_chain->image_count);
+ for (uint32_t i = 0; i < chain->base.image_count; i++)
+ images[i] = &chain->images[i].base;
+
+ VkResult result = wsi_drm_wait_for_explicit_sync_release(wsi_chain,
+ wsi_chain->image_count,
+ images,
+ info->timeout,
+ image_index);
+ STACK_ARRAY_FINISH(images);
+
+ return result;
}
static VkResult
-wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
- const VkAcquireNextImageInfoKHR *info,
- uint32_t *image_index)
+wsi_wl_swapchain_acquire_next_image_implicit(struct wsi_swapchain *wsi_chain,
+ const VkAcquireNextImageInfoKHR *info,
+ uint32_t *image_index)
{
struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
struct timespec start_time, end_time;
struct timespec rel_timeout;
- int wl_fd = wl_display_get_fd(chain->display->wl_display);
+ /* See comments in queue_present() */
+ if (chain->retired)
+ return VK_ERROR_OUT_OF_DATE_KHR;
+
+ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
timespec_from_nsec(&rel_timeout, info->timeout);
clock_gettime(CLOCK_MONOTONIC, &start_time);
timespec_add(&end_time, &rel_timeout, &start_time);
while (1) {
- /* Try to dispatch potential events. */
- int ret = wl_display_dispatch_queue_pending(chain->display->wl_display,
- chain->display->queue);
- if (ret < 0)
- return VK_ERROR_OUT_OF_DATE_KHR;
-
/* Try to find a free image. */
for (uint32_t i = 0; i < chain->base.image_count; i++) {
if (!chain->images[i].busy) {
/* We found a non-busy image */
*image_index = i;
chain->images[i].busy = true;
- return VK_SUCCESS;
+ return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
}
}
- /* Check for timeout. */
- struct timespec current_time;
+ struct timespec current_time, remaining_timeout;
clock_gettime(CLOCK_MONOTONIC, &current_time);
- if (timespec_after(&current_time, &end_time))
- return VK_NOT_READY;
-
- /* Try to read events from the server. */
- ret = wl_display_prepare_read_queue(chain->display->wl_display,
- chain->display->queue);
- if (ret < 0) {
- /* Another thread might have read events for our queue already. Go
- * back to dispatch them.
- */
- if (errno == EAGAIN)
- continue;
- return VK_ERROR_OUT_OF_DATE_KHR;
- }
-
- struct pollfd pollfd = {
- .fd = wl_fd,
- .events = POLLIN
- };
- timespec_sub(&rel_timeout, &end_time, &current_time);
- ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
- if (ret <= 0) {
- int lerrno = errno;
- wl_display_cancel_read(chain->display->wl_display);
- if (ret < 0) {
- /* If ppoll() was interrupted, try again. */
- if (lerrno == EINTR || lerrno == EAGAIN)
- continue;
- return VK_ERROR_OUT_OF_DATE_KHR;
- }
- assert(ret == 0);
- continue;
- }
+ timespec_sub_saturate(&remaining_timeout, &end_time, &current_time);
- ret = wl_display_read_events(chain->display->wl_display);
- if (ret < 0)
+ /* Try to dispatch potential events. */
+ int ret = wl_display_dispatch_queue_timeout(wsi_wl_surface->display->wl_display,
+ wsi_wl_surface->display->queue,
+ &remaining_timeout);
+ if (ret == -1)
return VK_ERROR_OUT_OF_DATE_KHR;
+
+ /* Check for timeout. */
+ if (ret == 0)
+ return (info->timeout ? VK_TIMEOUT : VK_NOT_READY);
}
}
static void
+presentation_handle_sync_output(void *data,
+ struct wp_presentation_feedback *feedback,
+ struct wl_output *output)
+{
+}
+
+static void
+wsi_wl_presentation_update_present_id(struct wsi_wl_present_id *id)
+{
+ pthread_mutex_lock(&id->chain->present_ids.lock);
+ if (id->present_id > id->chain->present_ids.max_completed)
+ id->chain->present_ids.max_completed = id->present_id;
+
+ wl_list_remove(&id->link);
+ pthread_mutex_unlock(&id->chain->present_ids.lock);
+ vk_free(id->alloc, id);
+}
+
+static void
+presentation_handle_presented(void *data,
+ struct wp_presentation_feedback *feedback,
+ uint32_t tv_sec_hi, uint32_t tv_sec_lo,
+ uint32_t tv_nsec, uint32_t refresh,
+ uint32_t seq_hi, uint32_t seq_lo,
+ uint32_t flags)
+{
+ struct wsi_wl_present_id *id = data;
+ wsi_wl_presentation_update_present_id(id);
+ wp_presentation_feedback_destroy(feedback);
+}
+
+static void
+presentation_handle_discarded(void *data,
+ struct wp_presentation_feedback *feedback)
+{
+ struct wsi_wl_present_id *id = data;
+ wsi_wl_presentation_update_present_id(id);
+ wp_presentation_feedback_destroy(feedback);
+}
+
+static const struct wp_presentation_feedback_listener
+ pres_feedback_listener = {
+ presentation_handle_sync_output,
+ presentation_handle_presented,
+ presentation_handle_discarded,
+};
+
+static void
+presentation_frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
+{
+ struct wsi_wl_present_id *id = data;
+ wsi_wl_presentation_update_present_id(id);
+ wl_callback_destroy(callback);
+}
+
+static const struct wl_callback_listener pres_frame_listener = {
+ presentation_frame_handle_done,
+};
+
+static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
struct wsi_wl_swapchain *chain = data;
@@ -928,61 +1970,117 @@ static const struct wl_callback_listener frame_listener = {
static VkResult
wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
uint32_t image_index,
+ uint64_t present_id,
const VkPresentRegionKHR *damage)
{
struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ bool queue_dispatched = false;
- if (chain->display->sw) {
+ /* While the specification suggests we can keep presenting already acquired
+ * images on a retired swapchain, there is no requirement to support that.
+ * From spec 1.3.278:
+ *
+ * After oldSwapchain is retired, the application can pass to vkQueuePresentKHR
+ * any images it had already acquired from oldSwapchain.
+ * E.g., an application may present an image from the old swapchain
+ * before an image from the new swapchain is ready to be presented.
+ * As usual, vkQueuePresentKHR may fail if oldSwapchain has entered a state
+ * that causes VK_ERROR_OUT_OF_DATE_KHR to be returned. */
+ if (chain->retired)
+ return VK_ERROR_OUT_OF_DATE_KHR;
+
+ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
+
+ if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
struct wsi_wl_image *image = &chain->images[image_index];
- void *dptr = image->data_ptr;
- void *sptr;
- chain->base.wsi->MapMemory(chain->base.device,
- image->base.memory,
- 0, 0, 0, &sptr);
-
- for (unsigned r = 0; r < chain->extent.height; r++) {
- memcpy(dptr, sptr, image->base.row_pitches[0]);
- dptr += image->base.row_pitches[0];
- sptr += image->base.row_pitches[0];
- }
- chain->base.wsi->UnmapMemory(chain->base.device,
- image->base.memory);
+ memcpy(image->shm_ptr, image->base.cpu_map,
+ image->base.row_pitches[0] * chain->extent.height);
+ }
+ /* For EXT_swapchain_maintenance1. We might have transitioned from FIFO to MAILBOX.
+ * In this case we need to let the FIFO request complete before presenting MAILBOX. */
+ while (!chain->fifo_ready) {
+ int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
+ wsi_wl_surface->display->queue);
+ if (ret < 0)
+ return VK_ERROR_OUT_OF_DATE_KHR;
+
+ queue_dispatched = true;
}
- if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
- while (!chain->fifo_ready) {
- int ret = wl_display_dispatch_queue(chain->display->wl_display,
- chain->display->queue);
- if (ret < 0)
- return VK_ERROR_OUT_OF_DATE_KHR;
- }
+
+ if (chain->base.image_info.explicit_sync) {
+ struct wsi_wl_image *image = &chain->images[image_index];
+ /* Incremented by signal in base queue_present. */
+ uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
+ uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
+ wp_linux_drm_syncobj_surface_v1_set_acquire_point(wsi_wl_surface->wl_syncobj_surface,
+ image->wl_syncobj_timeline[WSI_ES_ACQUIRE],
+ (uint32_t)(acquire_point >> 32),
+ (uint32_t)(acquire_point & 0xffffffff));
+ wp_linux_drm_syncobj_surface_v1_set_release_point(wsi_wl_surface->wl_syncobj_surface,
+ image->wl_syncobj_timeline[WSI_ES_RELEASE],
+ (uint32_t)(release_point >> 32),
+ (uint32_t)(release_point & 0xffffffff));
}
assert(image_index < chain->base.image_count);
- wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);
+ wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
- if (wl_surface_get_version(chain->surface) >= 4 && damage &&
+ if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
damage->pRectangles && damage->rectangleCount > 0) {
for (unsigned i = 0; i < damage->rectangleCount; i++) {
const VkRectLayerKHR *rect = &damage->pRectangles[i];
assert(rect->layer == 0);
- wl_surface_damage_buffer(chain->surface,
+ wl_surface_damage_buffer(wsi_wl_surface->surface,
rect->offset.x, rect->offset.y,
rect->extent.width, rect->extent.height);
}
} else {
- wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);
+ wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
}
if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
- chain->frame = wl_surface_frame(chain->surface);
+ chain->frame = wl_surface_frame(wsi_wl_surface->surface);
wl_callback_add_listener(chain->frame, &frame_listener, chain);
chain->fifo_ready = false;
+ } else {
+ /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
+ chain->fifo_ready = true;
+ }
+
+ if (present_id > 0) {
+ struct wsi_wl_present_id *id =
+ vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ id->chain = chain;
+ id->present_id = present_id;
+ id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;
+
+ pthread_mutex_lock(&chain->present_ids.lock);
+
+ if (chain->present_ids.wp_presentation) {
+ id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
+ chain->wsi_wl_surface->surface);
+ wp_presentation_feedback_add_listener(id->feedback,
+ &pres_feedback_listener,
+ id);
+ } else {
+ id->frame = wl_surface_frame(chain->present_ids.surface);
+ wl_callback_add_listener(id->frame, &pres_frame_listener, id);
+ }
+
+ wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
+ pthread_mutex_unlock(&chain->present_ids.lock);
}
chain->images[image_index].busy = true;
- wl_surface_commit(chain->surface);
- wl_display_flush(chain->display->wl_display);
+ wl_surface_commit(wsi_wl_surface->surface);
+ wl_display_flush(wsi_wl_surface->display->wl_display);
+
+ if (!queue_dispatched && wsi_chain->image_info.explicit_sync) {
+ wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
+ wsi_wl_surface->display->queue);
+ }
return VK_SUCCESS;
}
@@ -1001,50 +2099,66 @@ static const struct wl_buffer_listener buffer_listener = {
buffer_handle_release,
};
+static uint8_t *
+wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
+{
+ struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;
+
+ /* Create a shareable buffer */
+ int fd = os_create_anonymous_file(size, NULL);
+ if (fd < 0)
+ return NULL;
+
+ void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (ptr == MAP_FAILED) {
+ close(fd);
+ return NULL;
+ }
+
+ image->shm_fd = fd;
+ image->shm_ptr = ptr;
+ image->shm_size = size;
+
+ return ptr;
+}
+
static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
struct wsi_wl_image *image,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks* pAllocator)
{
- struct wsi_wl_display *display = chain->display;
+ struct wsi_wl_display *display = chain->wsi_wl_surface->display;
VkResult result;
- memset(image, 0, sizeof(*image));
-
- result = wsi_create_native_image(&chain->base, pCreateInfo,
- chain->num_drm_modifiers > 0 ? 1 : 0,
- &chain->num_drm_modifiers,
- &chain->drm_modifiers, NULL, &image->base);
-
+ result = wsi_create_image(&chain->base, &chain->base.image_info,
+ &image->base);
if (result != VK_SUCCESS)
return result;
- if (display->sw) {
- int fd, stride;
-
- stride = image->base.row_pitches[0];
- image->data_size = stride * chain->extent.height;
-
- /* Create a shareable buffer */
- fd = os_create_anonymous_file(image->data_size, NULL);
- if (fd < 0)
- goto fail_image;
-
- image->data_ptr = mmap(NULL, image->data_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if (image->data_ptr == MAP_FAILED) {
- close(fd);
- goto fail_image;
+ switch (chain->buffer_type) {
+ case WSI_WL_BUFFER_GPU_SHM:
+ case WSI_WL_BUFFER_SHM_MEMCPY: {
+ if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
+ wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
+ chain->extent.height);
}
+ assert(image->shm_ptr != NULL);
+
/* Share it in a wl_buffer */
- struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm, fd, image->data_size);
+ struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
+ image->shm_fd,
+ image->shm_size);
wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
- chain->extent.height, stride,
+ chain->extent.height,
+ image->base.row_pitches[0],
chain->shm_format);
wl_shm_pool_destroy(pool);
- close(fd);
- } else {
+ break;
+ }
+
+ case WSI_WL_BUFFER_NATIVE: {
assert(display->wl_dmabuf);
struct zwp_linux_buffer_params_v1 *params =
@@ -1054,13 +2168,12 @@ wsi_wl_image_init(struct wsi_wl_swapchain *chain,
for (int i = 0; i < image->base.num_planes; i++) {
zwp_linux_buffer_params_v1_add(params,
- image->base.fds[i],
+ image->base.dma_buf_fd,
i,
image->base.offsets[i],
image->base.row_pitches[i],
image->base.drm_modifier >> 32,
image->base.drm_modifier & 0xffffffff);
- close(image->base.fds[i]);
}
image->buffer =
@@ -1070,45 +2183,123 @@ wsi_wl_image_init(struct wsi_wl_swapchain *chain,
chain->drm_format,
0);
zwp_linux_buffer_params_v1_destroy(params);
+
+ if (chain->base.image_info.explicit_sync) {
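+         /* Import each image's acquire/release syncobj timelines so the
+          * compositor can synchronize access to the buffer explicitly. */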
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ image->wl_syncobj_timeline[i] =
+ wp_linux_drm_syncobj_manager_v1_import_timeline(display->wl_syncobj,
+ image->base.explicit_sync[i].fd);
+ if (!image->wl_syncobj_timeline[i])
+ goto fail_image;
+ }
+ }
+
+ break;
+ }
+
+ default:
+ unreachable("Invalid buffer type");
}
if (!image->buffer)
goto fail_image;
- wl_buffer_add_listener(image->buffer, &buffer_listener, image);
+ /* No need to listen for release if we are explicit sync. */
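+   /* Buffer reuse is tracked through the release timeline point instead of
+    * wl_buffer.release events. */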
+ if (!chain->base.image_info.explicit_sync)
+ wl_buffer_add_listener(image->buffer, &buffer_listener, image);
return VK_SUCCESS;
fail_image:
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ if (image->wl_syncobj_timeline[i])
+ wp_linux_drm_syncobj_timeline_v1_destroy(image->wl_syncobj_timeline[i]);
+ }
wsi_destroy_image(&chain->base, &image->base);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
-static VkResult
-wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
- const VkAllocationCallbacks *pAllocator)
+static void
+wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
{
- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
-
for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ for (uint32_t j = 0; j < WSI_ES_COUNT; j++) {
+ if (chain->images[i].wl_syncobj_timeline[j])
+ wp_linux_drm_syncobj_timeline_v1_destroy(chain->images[i].wl_syncobj_timeline[j]);
+ }
if (chain->images[i].buffer) {
wl_buffer_destroy(chain->images[i].buffer);
wsi_destroy_image(&chain->base, &chain->images[i].base);
- if (chain->images[i].data_ptr)
- munmap(chain->images[i].data_ptr, chain->images[i].data_size);
+ if (chain->images[i].shm_size) {
+ close(chain->images[i].shm_fd);
+ munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
+ }
}
}
+}
+
+static void
+wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+ const VkAllocationCallbacks *pAllocator)
+{
+   /* Force wayland-client to release the fds sent during swapchain
+    * creation (see MAX_FDS_OUT) to avoid filling up VRAM with
+    * released buffers.
+    */
+ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
+ if (!chain->retired)
+ wl_display_flush(wsi_wl_surface->display->wl_display);
if (chain->frame)
wl_callback_destroy(chain->frame);
- if (chain->surface)
- wl_proxy_wrapper_destroy(chain->surface);
+ if (chain->tearing_control)
+ wp_tearing_control_v1_destroy(chain->tearing_control);
+
+ /* Only unregister if we are the non-retired swapchain, or
+ * we are a retired swapchain and memory allocation failed,
+ * in which case there are only retired swapchains. */
+ if (wsi_wl_surface->chain == chain)
+ wsi_wl_surface->chain = NULL;
+
+ assert(!chain->present_ids.dispatch_in_progress);
+
+   /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to be complete.
+    * Waiting for the swapchain fence is enough.
+    * Just clean up anything the user did not wait for. */
+ struct wsi_wl_present_id *id, *tmp;
+ wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
+ if (id->feedback)
+ wp_presentation_feedback_destroy(id->feedback);
+ if (id->frame)
+ wl_callback_destroy(id->frame);
+ wl_list_remove(&id->link);
+ vk_free(id->alloc, id);
+ }
+
+ if (chain->present_ids.wp_presentation)
+ wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
+ if (chain->present_ids.surface)
+ wl_proxy_wrapper_destroy(chain->present_ids.surface);
+ pthread_cond_destroy(&chain->present_ids.list_advanced);
+ pthread_mutex_destroy(&chain->present_ids.lock);
- if (chain->display)
- wsi_wl_display_unref(chain->display);
+ if (chain->present_ids.queue)
+ wl_event_queue_destroy(chain->present_ids.queue);
+
+ vk_free(pAllocator, (void *)chain->drm_modifiers);
wsi_swapchain_finish(&chain->base);
+}
+
+static VkResult
+wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
+ const VkAllocationCallbacks *pAllocator)
+{
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+
+ wsi_wl_swapchain_images_free(chain);
+ wsi_wl_swapchain_chain_free(chain, pAllocator);
vk_free(pAllocator, chain);
@@ -1123,14 +2314,24 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
const VkAllocationCallbacks* pAllocator,
struct wsi_swapchain **swapchain_out)
{
- VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
- struct wsi_wayland *wsi =
- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+ struct wsi_wl_surface *wsi_wl_surface =
+ wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
struct wsi_wl_swapchain *chain;
VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+   /* From the Vulkan 1.3.278 spec:
+    * Upon calling vkCreateSwapchainKHR with an oldSwapchain that is not VK_NULL_HANDLE,
+    * oldSwapchain is retired - even if creation of the new swapchain fails. */
+ if (pCreateInfo->oldSwapchain) {
+ VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
+      /* oldSwapchain is externally synchronized, so it is not possible to call AcquireNextImage or QueuePresent
+       * concurrently with this function. The next call to acquire or present will immediately
+       * return OUT_OF_DATE. */
+ old_chain->retired = true;
+ }
+
int num_images = pCreateInfo->minImageCount;
size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
@@ -1138,63 +2339,169 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
if (chain == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- result = wsi_swapchain_init(wsi_device, &chain->base, device,
- pCreateInfo, pAllocator);
- if (result != VK_SUCCESS) {
- vk_free(pAllocator, chain);
- return result;
+ wl_list_init(&chain->present_ids.outstanding_list);
+
+   /* We are taking ownership of the wsi_wl_surface, so remove ownership from
+    * oldSwapchain. If the surface is currently owned by a swapchain that is
+    * not oldSwapchain, we return an error.
+    */
+ if (wsi_wl_surface->chain &&
+ wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
+ result = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
+ goto fail;
+ }
+ if (pCreateInfo->oldSwapchain) {
+ VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
+ if (old_chain->tearing_control) {
+ wp_tearing_control_v1_destroy(old_chain->tearing_control);
+ old_chain->tearing_control = NULL;
+ }
}
+ /* Take ownership of the wsi_wl_surface */
+ chain->wsi_wl_surface = wsi_wl_surface;
+ wsi_wl_surface->chain = chain;
+
+ result = wsi_wl_surface_init(wsi_wl_surface, wsi_device);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
+ if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+ chain->tearing_control =
+ wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
+ wsi_wl_surface->surface);
+ if (!chain->tearing_control) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail;
+ }
+ wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
+ WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
+ }
+
+ enum wsi_wl_buffer_type buffer_type;
+ struct wsi_base_image_params *image_params = NULL;
+ struct wsi_cpu_image_params cpu_image_params;
+ struct wsi_drm_image_params drm_image_params;
+ uint32_t num_drm_modifiers = 0;
+ const uint64_t *drm_modifiers = NULL;
+ if (wsi_device->sw) {
+ cpu_image_params = (struct wsi_cpu_image_params) {
+ .base.image_type = WSI_IMAGE_TYPE_CPU,
+ };
+ if (wsi_device->has_import_memory_host &&
+ !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
+ buffer_type = WSI_WL_BUFFER_GPU_SHM;
+ cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
+ } else {
+ buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
+ }
+ image_params = &cpu_image_params.base;
+ } else {
+ drm_image_params = (struct wsi_drm_image_params) {
+ .base.image_type = WSI_IMAGE_TYPE_DRM,
+ .same_gpu = wsi_wl_surface->display->same_gpu,
+ .explicit_sync = wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device),
+ };
+ /* Use explicit DRM format modifiers when both the server and the driver
+ * support them.
+ */
+ if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
+ struct wsi_wl_format *f = NULL;
+      /* Try to select modifiers for our vk_format from surface dma-buf
+       * feedback. If that doesn't work, fall back to the list of
+       * formats/modifiers supported by the display. */
+ if (wsi_wl_surface->wl_dmabuf_feedback)
+ f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
+ pCreateInfo->imageFormat);
+ if (f == NULL)
+ f = find_format(&chain->wsi_wl_surface->display->formats,
+ pCreateInfo->imageFormat);
+ if (f != NULL) {
+ num_drm_modifiers = u_vector_length(&f->modifiers);
+ drm_modifiers = u_vector_tail(&f->modifiers);
+ if (num_drm_modifiers > 0)
+ drm_image_params.num_modifier_lists = 1;
+ else
+ drm_image_params.num_modifier_lists = 0;
+ drm_image_params.num_modifiers = &num_drm_modifiers;
+ drm_image_params.modifiers = &drm_modifiers;
+ }
+ }
+ buffer_type = WSI_WL_BUFFER_NATIVE;
+ image_params = &drm_image_params.base;
+ }
+
+ result = wsi_swapchain_init(wsi_device, &chain->base, device,
+ pCreateInfo, image_params, pAllocator);
+ if (result != VK_SUCCESS)
+ goto fail;
+
bool alpha = pCreateInfo->compositeAlpha ==
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
chain->base.destroy = wsi_wl_swapchain_destroy;
chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
- chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
+ chain->base.acquire_next_image = chain->base.image_info.explicit_sync
+ ? wsi_wl_swapchain_acquire_next_image_explicit
+ : wsi_wl_swapchain_acquire_next_image_implicit;
chain->base.queue_present = wsi_wl_swapchain_queue_present;
- chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
+ chain->base.release_images = wsi_wl_swapchain_release_images;
+ chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
+ chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
+ chain->base.present_mode = present_mode;
chain->base.image_count = num_images;
chain->extent = pCreateInfo->imageExtent;
chain->vk_format = pCreateInfo->imageFormat;
- if (wsi_device->sw)
- chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
- else
+ chain->buffer_type = buffer_type;
+ if (buffer_type == WSI_WL_BUFFER_NATIVE) {
chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
-
- if (pCreateInfo->oldSwapchain) {
- /* If we have an oldSwapchain parameter, copy the display struct over
- * from the old one so we don't have to fully re-initialize it.
- */
- VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
- chain->display = wsi_wl_display_ref(old_chain->display);
} else {
- chain->display = NULL;
- result = wsi_wl_display_create(wsi, surface->display,
- wsi_device->sw, &chain->display);
- if (result != VK_SUCCESS)
- goto fail;
+ chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
}
+ chain->num_drm_modifiers = num_drm_modifiers;
+ if (num_drm_modifiers) {
+ uint64_t *drm_modifiers_copy =
+ vk_alloc(pAllocator, sizeof(*drm_modifiers) * num_drm_modifiers, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!drm_modifiers_copy) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail_free_wl_chain;
+ }
- chain->surface = wl_proxy_create_wrapper(surface->surface);
- if (!chain->surface) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
+ typed_memcpy(drm_modifiers_copy, drm_modifiers, num_drm_modifiers);
+ chain->drm_modifiers = drm_modifiers_copy;
}
- wl_proxy_set_queue((struct wl_proxy *) chain->surface,
- chain->display->queue);
- chain->num_drm_modifiers = 0;
- chain->drm_modifiers = 0;
-
- /* Use explicit DRM format modifiers when both the server and the driver
- * support them.
- */
- if (chain->display->wl_dmabuf && chain->base.wsi->supports_modifiers) {
- struct wsi_wl_format *f = find_format(&chain->display->formats, chain->vk_format);
- if (f) {
- chain->drm_modifiers = u_vector_tail(&f->modifiers);
- chain->num_drm_modifiers = u_vector_length(&f->modifiers);
- }
+ if (!wsi_init_pthread_cond_monotonic(&chain->present_ids.list_advanced)) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail_free_wl_chain;
+ }
+ pthread_mutex_init(&chain->present_ids.lock, NULL);
+
+ char *queue_name = vk_asprintf(pAllocator,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
+ "mesa vk surface %d swapchain %d queue",
+ wl_proxy_get_id((struct wl_proxy *) wsi_wl_surface->surface),
+ wsi_wl_surface->chain_count++);
+ chain->present_ids.queue =
+ wl_display_create_queue_with_name(chain->wsi_wl_surface->display->wl_display,
+ queue_name);
+ vk_free(pAllocator, queue_name);
+
+ if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
+ chain->present_ids.wp_presentation =
+ wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
+ wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
+ chain->present_ids.queue);
+ } else {
+      /* Fall back to frame callbacks when the presentation protocol is not available.
+       * We already have a proxy for the surface, but need another one since
+       * presentID is pumped through a different queue so as not to disrupt the
+       * QueuePresentKHR frame callback's queue. */
+ chain->present_ids.surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
+ wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.surface,
+ chain->present_ids.queue);
}
chain->fifo_ready = true;
@@ -1203,7 +2510,7 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
result = wsi_wl_image_init(chain, &chain->images[i],
pCreateInfo, pAllocator);
if (result != VK_SUCCESS)
- goto fail;
+ goto fail_free_wl_images;
chain->images[i].busy = false;
}
@@ -1211,9 +2518,15 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
return VK_SUCCESS;
+fail_free_wl_images:
+ wsi_wl_swapchain_images_free(chain);
+fail_free_wl_chain:
+ wsi_wl_swapchain_chain_free(chain, pAllocator);
fail:
- wsi_wl_swapchain_destroy(&chain->base, pAllocator);
+ vk_free(pAllocator, chain);
+ wsi_wl_surface->chain = NULL;
+ assert(result != VK_SUCCESS);
return result;
}
diff --git a/src/vulkan/wsi/wsi_common_win32.c b/src/vulkan/wsi/wsi_common_win32.c
deleted file mode 100644
index 78d92206563..00000000000
--- a/src/vulkan/wsi/wsi_common_win32.c
+++ /dev/null
@@ -1,680 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <assert.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "vk_util.h"
-#include "wsi_common_private.h"
-#include "wsi_common_win32.h"
-
-#if defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" // warning: cast to pointer from integer of different size
-#endif
-
-struct wsi_win32;
-
-struct wsi_win32 {
- struct wsi_interface base;
-
- struct wsi_device *wsi;
-
- const VkAllocationCallbacks *alloc;
- VkPhysicalDevice physical_device;
-};
-
-struct wsi_win32_image {
- struct wsi_image base;
- struct wsi_win32_swapchain *chain;
- HDC dc;
- HBITMAP bmp;
- int bmp_row_pitch;
- void *ppvBits;
-};
-
-
-struct wsi_win32_swapchain {
- struct wsi_swapchain base;
- struct wsi_win32 *wsi;
- VkIcdSurfaceWin32 *surface;
- uint64_t flip_sequence;
- VkResult status;
- VkExtent2D extent;
- HWND wnd;
- HDC chain_dc;
- struct wsi_win32_image images[0];
-};
-
-VkBool32
-wsi_win32_get_presentation_support(struct wsi_device *wsi_device)
-{
- return TRUE;
-}
-
-VkResult
-wsi_create_win32_surface(VkInstance instance,
- const VkAllocationCallbacks *allocator,
- const VkWin32SurfaceCreateInfoKHR *create_info,
- VkSurfaceKHR *surface_khr)
-{
- VkIcdSurfaceWin32 *surface = vk_zalloc(allocator, sizeof *surface, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
- if (surface == NULL)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- surface->base.platform = VK_ICD_WSI_PLATFORM_WIN32;
-
- surface->hinstance = create_info->hinstance;
- surface->hwnd = create_info->hwnd;
-
- *surface_khr = VkIcdSurfaceBase_to_handle(&surface->base);
- return VK_SUCCESS;
-}
-
-static VkResult
-wsi_win32_surface_get_support(VkIcdSurfaceBase *surface,
- struct wsi_device *wsi_device,
- uint32_t queueFamilyIndex,
- VkBool32* pSupported)
-{
- *pSupported = true;
-
- return VK_SUCCESS;
-}
-
-static VkResult
-wsi_win32_surface_get_capabilities(VkIcdSurfaceBase *surface,
- struct wsi_device *wsi_device,
- VkSurfaceCapabilitiesKHR* caps)
-{
- caps->minImageCount = 1;
- /* There is no real maximum */
- caps->maxImageCount = 0;
-
- caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
- caps->minImageExtent = (VkExtent2D) { 1, 1 };
- caps->maxImageExtent = (VkExtent2D) {
- wsi_device->maxImageDimension2D,
- wsi_device->maxImageDimension2D,
- };
-
- caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
- caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
- caps->maxImageArrayLayers = 1;
-
- caps->supportedCompositeAlpha =
- VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
- VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
-
- caps->supportedUsageFlags =
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT |
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-
- return VK_SUCCESS;
-}
-
-static VkResult
-wsi_win32_surface_get_capabilities2(VkIcdSurfaceBase *surface,
- struct wsi_device *wsi_device,
- const void *info_next,
- VkSurfaceCapabilities2KHR* caps)
-{
- assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
-
- VkResult result =
- wsi_win32_surface_get_capabilities(surface, wsi_device,
- &caps->surfaceCapabilities);
-
- vk_foreach_struct(ext, caps->pNext) {
- switch (ext->sType) {
- case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
- VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
- protected->supportsProtected = VK_FALSE;
- break;
- }
-
- default:
- /* Ignored */
- break;
- }
- }
-
- return result;
-}
-
-
-static const struct {
- VkFormat format;
-} available_surface_formats[] = {
- { .format = VK_FORMAT_B8G8R8A8_SRGB },
- { .format = VK_FORMAT_B8G8R8A8_UNORM },
-};
-
-
-static void
-get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
-{
- for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++)
- sorted_formats[i] = available_surface_formats[i].format;
-
- if (wsi_device->force_bgra8_unorm_first) {
- for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++) {
- if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
- sorted_formats[i] = sorted_formats[0];
- sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
- break;
- }
- }
- }
-}
-
-static VkResult
-wsi_win32_surface_get_formats(VkIcdSurfaceBase *icd_surface,
- struct wsi_device *wsi_device,
- uint32_t* pSurfaceFormatCount,
- VkSurfaceFormatKHR* pSurfaceFormats)
-{
- VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out, pSurfaceFormats, pSurfaceFormatCount);
-
- VkFormat sorted_formats[ARRAY_SIZE(available_surface_formats)];
- get_sorted_vk_formats(wsi_device, sorted_formats);
-
- for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
- vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
- f->format = sorted_formats[i];
- f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
- }
- }
-
- return vk_outarray_status(&out);
-}
-
-static VkResult
-wsi_win32_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
- struct wsi_device *wsi_device,
- const void *info_next,
- uint32_t* pSurfaceFormatCount,
- VkSurfaceFormat2KHR* pSurfaceFormats)
-{
- VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out, pSurfaceFormats, pSurfaceFormatCount);
-
- VkFormat sorted_formats[ARRAY_SIZE(available_surface_formats)];
- get_sorted_vk_formats(wsi_device, sorted_formats);
-
- for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
- vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
- assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
- f->surfaceFormat.format = sorted_formats[i];
- f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
- }
- }
-
- return vk_outarray_status(&out);
-}
-
-static const VkPresentModeKHR present_modes[] = {
- //VK_PRESENT_MODE_MAILBOX_KHR,
- VK_PRESENT_MODE_FIFO_KHR,
-};
-
-static VkResult
-wsi_win32_surface_get_present_modes(VkIcdSurfaceBase *surface,
- uint32_t* pPresentModeCount,
- VkPresentModeKHR* pPresentModes)
-{
- if (pPresentModes == NULL) {
- *pPresentModeCount = ARRAY_SIZE(present_modes);
- return VK_SUCCESS;
- }
-
- *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
- typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
-
- if (*pPresentModeCount < ARRAY_SIZE(present_modes))
- return VK_INCOMPLETE;
- else
- return VK_SUCCESS;
-}
-
-static VkResult
-wsi_win32_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
- struct wsi_device *wsi_device,
- uint32_t* pRectCount,
- VkRect2D* pRects)
-{
- VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
-
- vk_outarray_append_typed(VkRect2D, &out, rect) {
- /* We don't know a size so just return the usual "I don't know." */
- *rect = (VkRect2D) {
- .offset = { 0, 0 },
- .extent = { UINT32_MAX, UINT32_MAX },
- };
- }
-
- return vk_outarray_status(&out);
-}
-
-static uint32_t
-select_memory_type(const struct wsi_device *wsi,
- VkMemoryPropertyFlags props,
- uint32_t type_bits)
-{
- for (uint32_t i = 0; i < wsi->memory_props.memoryTypeCount; i++) {
- const VkMemoryType type = wsi->memory_props.memoryTypes[i];
- if ((type_bits & (1 << i)) && (type.propertyFlags & props) == props)
- return i;
- }
-
- unreachable("No memory type found");
-}
-
-VkResult
-wsi_create_native_image(const struct wsi_swapchain *chain,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- uint32_t num_modifier_lists,
- const uint32_t *num_modifiers,
- const uint64_t *const *modifiers,
- uint8_t *(alloc_shm)(struct wsi_image *image, unsigned size),
- struct wsi_image *image)
-{
- const struct wsi_device *wsi = chain->wsi;
- VkResult result;
-
- memset(image, 0, sizeof(*image));
- for (int i = 0; i < ARRAY_SIZE(image->fds); i++)
- image->fds[i] = -1;
-
- const struct wsi_image_create_info image_wsi_info = {
- .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
- };
- VkImageCreateInfo image_info = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
- .pNext = &image_wsi_info,
- .flags = 0,
- .imageType = VK_IMAGE_TYPE_2D,
- .format = pCreateInfo->imageFormat,
- .extent = {
- .width = pCreateInfo->imageExtent.width,
- .height = pCreateInfo->imageExtent.height,
- .depth = 1,
- },
- .mipLevels = 1,
- .arrayLayers = 1,
- .samples = VK_SAMPLE_COUNT_1_BIT,
- .tiling = VK_IMAGE_TILING_OPTIMAL,
- .usage = pCreateInfo->imageUsage,
- .sharingMode = pCreateInfo->imageSharingMode,
- .queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount,
- .pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices,
- .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
- };
-
- VkImageFormatListCreateInfoKHR image_format_list;
- if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
- image_info.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
- VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR;
-
- const VkImageFormatListCreateInfoKHR *format_list =
- vk_find_struct_const(pCreateInfo->pNext,
- IMAGE_FORMAT_LIST_CREATE_INFO_KHR);
-
-#ifndef NDEBUG
- assume(format_list && format_list->viewFormatCount > 0);
- bool format_found = false;
- for (int i = 0; i < format_list->viewFormatCount; i++)
- if (pCreateInfo->imageFormat == format_list->pViewFormats[i])
- format_found = true;
- assert(format_found);
-#endif
-
- image_format_list = *format_list;
- image_format_list.pNext = NULL;
- __vk_append_struct(&image_info, &image_format_list);
- }
-
-
- result = wsi->CreateImage(chain->device, &image_info,
- &chain->alloc, &image->image);
- if (result != VK_SUCCESS)
- goto fail;
-
- VkMemoryRequirements reqs;
- wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
-
- const struct wsi_memory_allocate_info memory_wsi_info = {
- .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
- .pNext = NULL,
- .implicit_sync = true,
- };
- const VkExportMemoryAllocateInfo memory_export_info = {
- .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
- .pNext = &memory_wsi_info,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
- .pNext = &memory_export_info,
- .image = image->image,
- .buffer = VK_NULL_HANDLE,
- };
- const VkMemoryAllocateInfo memory_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
- .pNext = &memory_dedicated_info,
- .allocationSize = reqs.size,
- .memoryTypeIndex = select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
- reqs.memoryTypeBits),
- };
- result = wsi->AllocateMemory(chain->device, &memory_info,
- &chain->alloc, &image->memory);
- if (result != VK_SUCCESS)
- goto fail;
-
- result = wsi->BindImageMemory(chain->device, image->image,
- image->memory, 0);
- if (result != VK_SUCCESS)
- goto fail;
-
- const VkImageSubresource image_subresource = {
- .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .mipLevel = 0,
- .arrayLayer = 0,
- };
- VkSubresourceLayout image_layout;
- wsi->GetImageSubresourceLayout(chain->device, image->image,
- &image_subresource, &image_layout);
-
- image->num_planes = 1;
- image->sizes[0] = reqs.size;
- image->row_pitches[0] = image_layout.rowPitch;
- image->offsets[0] = 0;
-
- return VK_SUCCESS;
-
-fail:
- wsi_destroy_image(chain, image);
-
- return result;
-}
-
-static VkResult
-wsi_win32_image_init(VkDevice device_h,
- struct wsi_swapchain *drv_chain,
- const VkSwapchainCreateInfoKHR *create_info,
- const VkAllocationCallbacks *allocator,
- struct wsi_win32_image *image)
-{
- struct wsi_win32_swapchain *chain = (struct wsi_win32_swapchain *) drv_chain;
-
- VkResult result = wsi_create_native_image(&chain->base, create_info,
- 0, NULL, NULL, NULL,
- &image->base);
- if (result != VK_SUCCESS)
- return result;
-
- VkIcdSurfaceWin32 *win32_surface = (VkIcdSurfaceWin32 *)create_info->surface;
- chain->wnd = win32_surface->hwnd;
- chain->chain_dc = GetDC(chain->wnd);
-
- image->dc = CreateCompatibleDC(chain->chain_dc);
- HBITMAP bmp = NULL;
-
- BITMAPINFO info = { 0 };
- info.bmiHeader.biSize = sizeof(BITMAPINFO);
- info.bmiHeader.biWidth = create_info->imageExtent.width;
- info.bmiHeader.biHeight = -create_info->imageExtent.height;
- info.bmiHeader.biPlanes = 1;
- info.bmiHeader.biBitCount = 32;
- info.bmiHeader.biCompression = BI_RGB;
-
- bmp = CreateDIBSection(image->dc, &info, DIB_RGB_COLORS, &image->ppvBits, NULL, 0);
- assert(bmp && image->ppvBits);
-
- SelectObject(image->dc, bmp);
-
- BITMAP header;
- int status = GetObject(bmp, sizeof(BITMAP), &header);
- (void)status;
- image->bmp_row_pitch = header.bmWidthBytes;
- image->bmp = bmp;
- image->chain = chain;
-
- return VK_SUCCESS;
-}
-
-static void
-wsi_win32_image_finish(struct wsi_swapchain *drv_chain,
- const VkAllocationCallbacks *allocator,
- struct wsi_win32_image *image)
-{
- struct wsi_win32_swapchain *chain =
- (struct wsi_win32_swapchain *) drv_chain;
-
- DeleteDC(image->dc);
- if(image->bmp)
- DeleteObject(image->bmp);
- wsi_destroy_image(&chain->base, &image->base);
-}
-
-static VkResult
-wsi_win32_swapchain_destroy(struct wsi_swapchain *drv_chain,
- const VkAllocationCallbacks *allocator)
-{
- struct wsi_win32_swapchain *chain =
- (struct wsi_win32_swapchain *) drv_chain;
-
- for (uint32_t i = 0; i < chain->base.image_count; i++)
- wsi_win32_image_finish(drv_chain, allocator, &chain->images[i]);
-
- DeleteDC(chain->chain_dc);
-
- wsi_swapchain_finish(&chain->base);
- vk_free(allocator, chain);
- return VK_SUCCESS;
-}
-
-static struct wsi_image *
-wsi_win32_get_wsi_image(struct wsi_swapchain *drv_chain,
- uint32_t image_index)
-{
- struct wsi_win32_swapchain *chain =
- (struct wsi_win32_swapchain *) drv_chain;
-
- return &chain->images[image_index].base;
-}
-
-static VkResult
-wsi_win32_acquire_next_image(struct wsi_swapchain *drv_chain,
- const VkAcquireNextImageInfoKHR *info,
- uint32_t *image_index)
-{
- struct wsi_win32_swapchain *chain =
- (struct wsi_win32_swapchain *)drv_chain;
-
- /* Bail early if the swapchain is broken */
- if (chain->status != VK_SUCCESS)
- return chain->status;
-
- *image_index = 0;
- return VK_SUCCESS;
-}
-
-static VkResult
-wsi_win32_queue_present(struct wsi_swapchain *drv_chain,
- uint32_t image_index,
- const VkPresentRegionKHR *damage)
-{
- struct wsi_win32_swapchain *chain = (struct wsi_win32_swapchain *) drv_chain;
- assert(image_index < chain->base.image_count);
- struct wsi_win32_image *image = &chain->images[image_index];
- VkResult result;
-
- char *ptr;
- char *dptr = image->ppvBits;
- result = chain->base.wsi->MapMemory(chain->base.device,
- image->base.memory,
- 0, image->base.sizes[0], 0, (void**)&ptr);
-
- for (unsigned h = 0; h < chain->extent.height; h++) {
- memcpy(dptr, ptr, chain->extent.width * 4);
- dptr += image->bmp_row_pitch;
- ptr += image->base.row_pitches[0];
- }
- if(StretchBlt(chain->chain_dc, 0, 0, chain->extent.width, chain->extent.height, image->dc, 0, 0, chain->extent.width, chain->extent.height, SRCCOPY))
- result = VK_SUCCESS;
- else
- result = VK_ERROR_MEMORY_MAP_FAILED;
-
- chain->base.wsi->UnmapMemory(chain->base.device, image->base.memory);
- if (result != VK_SUCCESS)
- chain->status = result;
-
- if (result != VK_SUCCESS)
- return result;
-
- return chain->status;
-}
-
-static VkResult
-wsi_win32_surface_create_swapchain(
- VkIcdSurfaceBase *icd_surface,
- VkDevice device,
- struct wsi_device *wsi_device,
- const VkSwapchainCreateInfoKHR *create_info,
- const VkAllocationCallbacks *allocator,
- struct wsi_swapchain **swapchain_out)
-{
- VkIcdSurfaceWin32 *surface = (VkIcdSurfaceWin32 *)icd_surface;
- struct wsi_win32 *wsi =
- (struct wsi_win32 *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32];
-
- assert(create_info->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
-
- const unsigned num_images = create_info->minImageCount;
- struct wsi_win32_swapchain *chain;
- size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
-
- chain = vk_zalloc(allocator, size,
- 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
- if (chain == NULL)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- VkResult result = wsi_swapchain_init(wsi_device, &chain->base, device,
- create_info, allocator);
- if (result != VK_SUCCESS) {
- vk_free(allocator, chain);
- return result;
- }
-
- chain->base.destroy = wsi_win32_swapchain_destroy;
- chain->base.get_wsi_image = wsi_win32_get_wsi_image;
- chain->base.acquire_next_image = wsi_win32_acquire_next_image;
- chain->base.queue_present = wsi_win32_queue_present;
- chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, create_info);
- chain->base.image_count = num_images;
- chain->extent = create_info->imageExtent;
-
- chain->wsi = wsi;
- chain->status = VK_SUCCESS;
-
- chain->surface = surface;
-
- for (uint32_t image = 0; image < chain->base.image_count; image++) {
- result = wsi_win32_image_init(device, &chain->base,
- create_info, allocator,
- &chain->images[image]);
- if (result != VK_SUCCESS) {
- while (image > 0) {
- --image;
- wsi_win32_image_finish(&chain->base, allocator,
- &chain->images[image]);
- }
- vk_free(allocator, chain);
- goto fail_init_images;
- }
- }
-
- *swapchain_out = &chain->base;
-
- return VK_SUCCESS;
-
-fail_init_images:
- return result;
-}
-
-
-VkResult
-wsi_win32_init_wsi(struct wsi_device *wsi_device,
- const VkAllocationCallbacks *alloc,
- VkPhysicalDevice physical_device)
-{
- struct wsi_win32 *wsi;
- VkResult result;
-
- wsi = vk_alloc(alloc, sizeof(*wsi), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- if (!wsi) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
- }
-
- wsi->physical_device = physical_device;
- wsi->alloc = alloc;
- wsi->wsi = wsi_device;
-
- wsi->base.get_support = wsi_win32_surface_get_support;
- wsi->base.get_capabilities2 = wsi_win32_surface_get_capabilities2;
- wsi->base.get_formats = wsi_win32_surface_get_formats;
- wsi->base.get_formats2 = wsi_win32_surface_get_formats2;
- wsi->base.get_present_modes = wsi_win32_surface_get_present_modes;
- wsi->base.get_present_rectangles = wsi_win32_surface_get_present_rectangles;
- wsi->base.create_swapchain = wsi_win32_surface_create_swapchain;
-
- wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32] = &wsi->base;
-
- return VK_SUCCESS;
-
-fail:
- wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32] = NULL;
-
- return result;
-}
-
-void
-wsi_win32_finish_wsi(struct wsi_device *wsi_device,
- const VkAllocationCallbacks *alloc)
-{
- struct wsi_win32 *wsi =
- (struct wsi_win32 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32];
- if (!wsi)
- return;
-
- vk_free(alloc, wsi);
-}
diff --git a/src/vulkan/wsi/wsi_common_win32.cpp b/src/vulkan/wsi/wsi_common_win32.cpp
new file mode 100644
index 00000000000..e614dae9782
--- /dev/null
+++ b/src/vulkan/wsi/wsi_common_win32.cpp
@@ -0,0 +1,1013 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "vk_format.h"
+#include "vk_instance.h"
+#include "vk_physical_device.h"
+#include "vk_util.h"
+#include "wsi_common_entrypoints.h"
+#include "wsi_common_private.h"
+
+#define D3D12_IGNORE_SDK_LAYERS
+#include <dxgi1_4.h>
+#include <directx/d3d12.h>
+#include <dxguids/dxguids.h>
+
+#include <dcomp.h>
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" // warning: cast to pointer from integer of different size
+#endif
+
+struct wsi_win32;
+
+struct wsi_win32 {
+ struct wsi_interface base;
+
+ struct wsi_device *wsi;
+
+ const VkAllocationCallbacks *alloc;
+ VkPhysicalDevice physical_device;
+ struct {
+ IDXGIFactory4 *factory;
+ IDCompositionDevice *dcomp;
+ } dxgi;
+};
+
+enum wsi_win32_image_state {
+ WSI_IMAGE_IDLE,
+ WSI_IMAGE_DRAWING,
+ WSI_IMAGE_QUEUED,
+};
+
+struct wsi_win32_image {
+ struct wsi_image base;
+ enum wsi_win32_image_state state;
+ struct wsi_win32_swapchain *chain;
+ struct {
+ ID3D12Resource *swapchain_res;
+ } dxgi;
+ struct {
+ HDC dc;
+ HBITMAP bmp;
+ int bmp_row_pitch;
+ void *ppvBits;
+ } sw;
+};
+
+struct wsi_win32_surface {
+ VkIcdSurfaceWin32 base;
+
+   /* The first time a swapchain is created against this surface, a DComp
+    * target/visual will be created for it and that swapchain will be bound.
+    * When a new swapchain is created, we delay changing the visual's content
+    * until that swapchain has completed its first present; otherwise the
+    * window will flash white. When the currently-bound swapchain is destroyed,
+    * the visual's content is unset.
+    */
+ IDCompositionTarget *target;
+ IDCompositionVisual *visual;
+ struct wsi_win32_swapchain *current_swapchain;
+};
+
+struct wsi_win32_swapchain {
+ struct wsi_swapchain base;
+ IDXGISwapChain3 *dxgi;
+ struct wsi_win32 *wsi;
+ wsi_win32_surface *surface;
+ uint64_t flip_sequence;
+ VkResult status;
+ VkExtent2D extent;
+ HWND wnd;
+ HDC chain_dc;
+ struct wsi_win32_image images[0];
+};
+
+VKAPI_ATTR VkBool32 VKAPI_CALL
+wsi_GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex)
+{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
+ return (wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)) != 0;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateWin32SurfaceKHR(VkInstance _instance,
+ const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ wsi_win32_surface *surface;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR);
+
+ surface = (wsi_win32_surface *)vk_zalloc2(&instance->alloc, pAllocator, sizeof(*surface), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (surface == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ surface->base.base.platform = VK_ICD_WSI_PLATFORM_WIN32;
+
+ surface->base.hinstance = pCreateInfo->hinstance;
+ surface->base.hwnd = pCreateInfo->hwnd;
+
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->base.base);
+
+ return VK_SUCCESS;
+}
+
+void
+wsi_win32_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ wsi_win32_surface *surface = (wsi_win32_surface *)icd_surface;
+ if (surface->visual)
+ surface->visual->Release();
+ if (surface->target)
+ surface->target->Release();
+ vk_free2(&instance->alloc, pAllocator, icd_surface);
+}
+
+static VkResult
+wsi_win32_surface_get_support(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ uint32_t queueFamilyIndex,
+ VkBool32* pSupported)
+{
+ *pSupported = true;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_surface_get_capabilities(VkIcdSurfaceBase *surf,
+ struct wsi_device *wsi_device,
+ VkSurfaceCapabilitiesKHR* caps)
+{
+ VkIcdSurfaceWin32 *surface = (VkIcdSurfaceWin32 *)surf;
+
+ RECT win_rect;
+ if (!GetClientRect(surface->hwnd, &win_rect))
+ return VK_ERROR_SURFACE_LOST_KHR;
+
+ caps->minImageCount = 1;
+
+ if (!wsi_device->sw && wsi_device->win32.get_d3d12_command_queue) {
+ /* DXGI doesn't support random presenting order (images need to
+ * be presented in the order they were acquired), so we can't
+       * expose more than two images per swapchain.
+ */
+ caps->minImageCount = caps->maxImageCount = 2;
+ } else {
+ caps->minImageCount = 1;
+      /* Software callback; there is no real maximum */
+ caps->maxImageCount = 0;
+ }
+
+ caps->currentExtent = {
+ (uint32_t)win_rect.right - (uint32_t)win_rect.left,
+ (uint32_t)win_rect.bottom - (uint32_t)win_rect.top
+ };
+ caps->minImageExtent = { 1u, 1u };
+ caps->maxImageExtent = {
+ wsi_device->maxImageDimension2D,
+ wsi_device->maxImageDimension2D,
+ };
+
+ caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ caps->maxImageArrayLayers = 1;
+
+ caps->supportedCompositeAlpha =
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
+
+ caps->supportedUsageFlags = wsi_caps_get_image_usage();
+
+ VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
+ if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
+ caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_surface_get_capabilities2(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ const void *info_next,
+ VkSurfaceCapabilities2KHR* caps)
+{
+ assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
+
+ const VkSurfacePresentModeEXT *present_mode =
+ (const VkSurfacePresentModeEXT *)vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
+
+ VkResult result =
+ wsi_win32_surface_get_capabilities(surface, wsi_device,
+ &caps->surfaceCapabilities);
+
+ vk_foreach_struct(ext, caps->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
+ VkSurfaceProtectedCapabilitiesKHR *protected_cap = (VkSurfaceProtectedCapabilitiesKHR *)ext;
+ protected_cap->supportsProtected = VK_FALSE;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
+ /* Unsupported. */
+ VkSurfacePresentScalingCapabilitiesEXT *scaling =
+ (VkSurfacePresentScalingCapabilitiesEXT *)ext;
+ scaling->supportedPresentScaling = 0;
+ scaling->supportedPresentGravityX = 0;
+ scaling->supportedPresentGravityY = 0;
+ scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
+ scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
+ /* Unsupported, just report the input present mode. */
+ VkSurfacePresentModeCompatibilityEXT *compat =
+ (VkSurfacePresentModeCompatibilityEXT *)ext;
+ if (compat->pPresentModes) {
+ if (compat->presentModeCount) {
+ assert(present_mode);
+ compat->pPresentModes[0] = present_mode->presentMode;
+ compat->presentModeCount = 1;
+ }
+ } else {
+ if (!present_mode)
+ wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
+ "without a VkSurfacePresentModeEXT set. This is an "
+ "application bug.\n");
+ compat->presentModeCount = 1;
+ }
+ break;
+ }
+
+ default:
+ /* Ignored */
+ break;
+ }
+ }
+
+ return result;
+}
+
+
+static const struct {
+ VkFormat format;
+} available_surface_formats[] = {
+ { VK_FORMAT_B8G8R8A8_SRGB },
+ { VK_FORMAT_B8G8R8A8_UNORM },
+};
+
+
+static void
+get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
+{
+ for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++)
+ sorted_formats[i] = available_surface_formats[i].format;
+
+ if (wsi_device->force_bgra8_unorm_first) {
+ for (unsigned i = 0; i < ARRAY_SIZE(available_surface_formats); i++) {
+ if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
+ sorted_formats[i] = sorted_formats[0];
+ sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ }
+ }
+ }
+}
+
+static VkResult
+wsi_win32_surface_get_formats(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormatKHR* pSurfaceFormats)
+{
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out, pSurfaceFormats, pSurfaceFormatCount);
+
+ VkFormat sorted_formats[ARRAY_SIZE(available_surface_formats)];
+ get_sorted_vk_formats(wsi_device, sorted_formats);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
+ f->format = sorted_formats[i];
+ f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static VkResult
+wsi_win32_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
+ const void *info_next,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormat2KHR* pSurfaceFormats)
+{
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out, pSurfaceFormats, pSurfaceFormatCount);
+
+ VkFormat sorted_formats[ARRAY_SIZE(available_surface_formats)];
+ get_sorted_vk_formats(wsi_device, sorted_formats);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
+ assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
+ f->surfaceFormat.format = sorted_formats[i];
+ f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static const VkPresentModeKHR present_modes_gdi[] = {
+ VK_PRESENT_MODE_FIFO_KHR,
+};
+static const VkPresentModeKHR present_modes_dxgi[] = {
+ VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+};
+
+static VkResult
+wsi_win32_surface_get_present_modes(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pPresentModeCount,
+ VkPresentModeKHR* pPresentModes)
+{
+ const VkPresentModeKHR *array;
+ size_t array_size;
+ if (wsi_device->sw || !wsi_device->win32.get_d3d12_command_queue) {
+ array = present_modes_gdi;
+ array_size = ARRAY_SIZE(present_modes_gdi);
+ } else {
+ array = present_modes_dxgi;
+ array_size = ARRAY_SIZE(present_modes_dxgi);
+ }
+
+ if (pPresentModes == NULL) {
+ *pPresentModeCount = array_size;
+ return VK_SUCCESS;
+ }
+
+ *pPresentModeCount = MIN2(*pPresentModeCount, array_size);
+ typed_memcpy(pPresentModes, array, *pPresentModeCount);
+
+ if (*pPresentModeCount < array_size)
+ return VK_INCOMPLETE;
+ else
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pRectCount,
+ VkRect2D* pRects)
+{
+ VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
+
+ vk_outarray_append_typed(VkRect2D, &out, rect) {
+ /* We don't know a size so just return the usual "I don't know." */
+ *rect = {
+ { 0, 0 },
+ { UINT32_MAX, UINT32_MAX },
+ };
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static VkResult
+wsi_create_dxgi_image_mem(const struct wsi_swapchain *drv_chain,
+ const struct wsi_image_info *info,
+ struct wsi_image *image)
+{
+ struct wsi_win32_swapchain *chain = (struct wsi_win32_swapchain *)drv_chain;
+ const struct wsi_device *wsi = chain->base.wsi;
+
+ assert(chain->base.blit.type != WSI_SWAPCHAIN_BUFFER_BLIT);
+
+ struct wsi_win32_image *win32_image =
+ container_of(image, struct wsi_win32_image, base);
+ uint32_t image_idx =
+ ((uintptr_t)win32_image - (uintptr_t)chain->images) /
+ sizeof(*win32_image);
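+   /* Fetch the ID3D12Resource backing this DXGI back buffer so the driver
+    * can import it as the image's memory. */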
+ if (FAILED(chain->dxgi->GetBuffer(image_idx,
+ IID_PPV_ARGS(&win32_image->dxgi.swapchain_res))))
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ VkResult result =
+ wsi->win32.create_image_memory(chain->base.device,
+ win32_image->dxgi.swapchain_res,
+ &chain->base.alloc,
+ chain->base.blit.type == WSI_SWAPCHAIN_NO_BLIT ?
+ &image->memory : &image->blit.memory);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (chain->base.blit.type == WSI_SWAPCHAIN_NO_BLIT)
+ return VK_SUCCESS;
+
+ VkImageCreateInfo create = info->create;
+
+ create.usage &= ~VK_IMAGE_USAGE_STORAGE_BIT;
+ create.initialLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+ result = wsi->CreateImage(chain->base.device, &create,
+ &chain->base.alloc, &image->blit.image);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = wsi->BindImageMemory(chain->base.device, image->blit.image,
+ image->blit.memory, 0);
+ if (result != VK_SUCCESS)
+ return result;
+
+ VkMemoryRequirements reqs;
+ wsi->GetImageMemoryRequirements(chain->base.device, image->image, &reqs);
+
+ const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
+ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+ nullptr,
+ image->blit.image,
+ VK_NULL_HANDLE,
+ };
+ const VkMemoryAllocateInfo memory_info = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ &memory_dedicated_info,
+ reqs.size,
+ info->select_image_memory_type(wsi, reqs.memoryTypeBits),
+ };
+
+ return wsi->AllocateMemory(chain->base.device, &memory_info,
+ &chain->base.alloc, &image->memory);
+}
+
+enum wsi_swapchain_blit_type
+wsi_dxgi_image_needs_blit(const struct wsi_device *wsi,
+ const struct wsi_dxgi_image_params *params,
+ VkDevice device)
+{
+ if (wsi->win32.requires_blits && wsi->win32.requires_blits(device))
+ return WSI_SWAPCHAIN_IMAGE_BLIT;
+ else if (params->storage_image)
+ return WSI_SWAPCHAIN_IMAGE_BLIT;
+ return WSI_SWAPCHAIN_NO_BLIT;
+}
+
+VkResult
+wsi_dxgi_configure_image(const struct wsi_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const struct wsi_dxgi_image_params *params,
+ struct wsi_image_info *info)
+{
+ VkResult result =
+ wsi_configure_image(chain, pCreateInfo, 0, info);
+ if (result != VK_SUCCESS)
+ return result;
+
+ info->create_mem = wsi_create_dxgi_image_mem;
+
+ if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
+ wsi_configure_image_blit_image(chain, info);
+ info->select_image_memory_type = wsi_select_device_memory_type;
+ info->select_blit_dst_memory_type = wsi_select_device_memory_type;
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_image_init(VkDevice device_h,
+ struct wsi_win32_swapchain *chain,
+ const VkSwapchainCreateInfoKHR *create_info,
+ const VkAllocationCallbacks *allocator,
+ struct wsi_win32_image *image)
+{
+ VkResult result = wsi_create_image(&chain->base, &chain->base.image_info,
+ &image->base);
+ if (result != VK_SUCCESS)
+ return result;
+
+ VkIcdSurfaceWin32 *win32_surface = (VkIcdSurfaceWin32 *)create_info->surface;
+ chain->wnd = win32_surface->hwnd;
+ image->chain = chain;
+
+ if (chain->dxgi)
+ return VK_SUCCESS;
+
+ chain->chain_dc = GetDC(chain->wnd);
+ image->sw.dc = CreateCompatibleDC(chain->chain_dc);
+ HBITMAP bmp = NULL;
+
+ BITMAPINFO info = { 0 };
+ info.bmiHeader.biSize = sizeof(BITMAPINFO);
+ info.bmiHeader.biWidth = create_info->imageExtent.width;
+ info.bmiHeader.biHeight = -create_info->imageExtent.height;
+ info.bmiHeader.biPlanes = 1;
+ info.bmiHeader.biBitCount = 32;
+ info.bmiHeader.biCompression = BI_RGB;
+
+ bmp = CreateDIBSection(image->sw.dc, &info, DIB_RGB_COLORS, &image->sw.ppvBits, NULL, 0);
+ assert(bmp && image->sw.ppvBits);
+
+ SelectObject(image->sw.dc, bmp);
+
+ BITMAP header;
+ int status = GetObject(bmp, sizeof(BITMAP), &header);
+ (void)status;
+ image->sw.bmp_row_pitch = header.bmWidthBytes;
+ image->sw.bmp = bmp;
+
+ return VK_SUCCESS;
+}
+
+static void
+wsi_win32_image_finish(struct wsi_win32_swapchain *chain,
+ const VkAllocationCallbacks *allocator,
+ struct wsi_win32_image *image)
+{
+ if (image->dxgi.swapchain_res)
+ image->dxgi.swapchain_res->Release();
+
+ if (image->sw.dc)
+ DeleteDC(image->sw.dc);
+ if(image->sw.bmp)
+ DeleteObject(image->sw.bmp);
+ wsi_destroy_image(&chain->base, &image->base);
+}
+
+static VkResult
+wsi_win32_swapchain_destroy(struct wsi_swapchain *drv_chain,
+ const VkAllocationCallbacks *allocator)
+{
+ struct wsi_win32_swapchain *chain =
+ (struct wsi_win32_swapchain *) drv_chain;
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++)
+ wsi_win32_image_finish(chain, allocator, &chain->images[i]);
+
+ DeleteDC(chain->chain_dc);
+
+ if (chain->surface->current_swapchain == chain)
+ chain->surface->current_swapchain = NULL;
+
+ if (chain->dxgi)
+ chain->dxgi->Release();
+
+ wsi_swapchain_finish(&chain->base);
+ vk_free(allocator, chain);
+ return VK_SUCCESS;
+}
+
+static struct wsi_image *
+wsi_win32_get_wsi_image(struct wsi_swapchain *drv_chain,
+ uint32_t image_index)
+{
+ struct wsi_win32_swapchain *chain =
+ (struct wsi_win32_swapchain *) drv_chain;
+
+ return &chain->images[image_index].base;
+}
+
+static VkResult
+wsi_win32_release_images(struct wsi_swapchain *drv_chain,
+ uint32_t count, const uint32_t *indices)
+{
+ struct wsi_win32_swapchain *chain =
+ (struct wsi_win32_swapchain *)drv_chain;
+
+ if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
+ return chain->status;
+
+ for (uint32_t i = 0; i < count; i++) {
+ uint32_t index = indices[i];
+ assert(index < chain->base.image_count);
+ assert(chain->images[index].state == WSI_IMAGE_DRAWING);
+ chain->images[index].state = WSI_IMAGE_IDLE;
+ }
+
+ return VK_SUCCESS;
+}
+
+
+static VkResult
+wsi_win32_acquire_next_image(struct wsi_swapchain *drv_chain,
+ const VkAcquireNextImageInfoKHR *info,
+ uint32_t *image_index)
+{
+ struct wsi_win32_swapchain *chain =
+ (struct wsi_win32_swapchain *)drv_chain;
+
+ /* Bail early if the swapchain is broken */
+ if (chain->status != VK_SUCCESS)
+ return chain->status;
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ if (chain->images[i].state == WSI_IMAGE_IDLE) {
+ *image_index = i;
+ chain->images[i].state = WSI_IMAGE_DRAWING;
+ return VK_SUCCESS;
+ }
+ }
+
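+   /* No image is idle: query DXGI for the back buffer that will be reused
+    * next and wait for its fence before handing it back to the app. */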
+ assert(chain->dxgi);
+ uint32_t index = chain->dxgi->GetCurrentBackBufferIndex();
+ if (chain->images[index].state == WSI_IMAGE_DRAWING) {
+ index = (index + 1) % chain->base.image_count;
+ assert(chain->images[index].state == WSI_IMAGE_QUEUED);
+ }
+ if (chain->wsi->wsi->WaitForFences(chain->base.device, 1,
+ &chain->base.fences[index],
+ false, info->timeout) != VK_SUCCESS)
+ return VK_TIMEOUT;
+
+ *image_index = index;
+ chain->images[index].state = WSI_IMAGE_DRAWING;
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_queue_present_dxgi(struct wsi_win32_swapchain *chain,
+ struct wsi_win32_image *image,
+ const VkPresentRegionKHR *damage)
+{
+ uint32_t rect_count = damage ? damage->rectangleCount : 0;
+ STACK_ARRAY(RECT, rects, rect_count);
+
+ for (uint32_t r = 0; r < rect_count; r++) {
+ rects[r].left = damage->pRectangles[r].offset.x;
+ rects[r].top = damage->pRectangles[r].offset.y;
+ rects[r].right = damage->pRectangles[r].offset.x + damage->pRectangles[r].extent.width;
+ rects[r].bottom = damage->pRectangles[r].offset.y + damage->pRectangles[r].extent.height;
+ }
+
+ DXGI_PRESENT_PARAMETERS params = {
+ rect_count,
+ rects,
+ };
+
+ image->state = WSI_IMAGE_QUEUED;
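+   /* FIFO presents with a sync interval of 1 (vsync); other modes use 0,
+    * and IMMEDIATE additionally allows tearing. */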
+ UINT sync_interval = chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ? 1 : 0;
+ UINT present_flags = chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ?
+ DXGI_PRESENT_ALLOW_TEARING : 0;
+
+ HRESULT hres = chain->dxgi->Present1(sync_interval, present_flags, &params);
+ switch (hres) {
+ case DXGI_ERROR_DEVICE_REMOVED: return VK_ERROR_DEVICE_LOST;
+ case E_OUTOFMEMORY: return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ default:
+ if (FAILED(hres))
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ break;
+ }
+
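+   /* First present from this swapchain succeeded: bind it to the DComp
+    * visual now (see the comment on wsi_win32_surface). */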
+ if (chain->surface->current_swapchain != chain) {
+ chain->surface->visual->SetContent(chain->dxgi);
+ chain->wsi->dxgi.dcomp->Commit();
+ chain->surface->current_swapchain = chain;
+ }
+
+ /* Mark the other image idle */
+ chain->status = VK_SUCCESS;
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_queue_present(struct wsi_swapchain *drv_chain,
+ uint32_t image_index,
+ uint64_t present_id,
+ const VkPresentRegionKHR *damage)
+{
+ struct wsi_win32_swapchain *chain = (struct wsi_win32_swapchain *) drv_chain;
+ assert(image_index < chain->base.image_count);
+ struct wsi_win32_image *image = &chain->images[image_index];
+
+ assert(image->state == WSI_IMAGE_DRAWING);
+
+ if (chain->dxgi)
+ return wsi_win32_queue_present_dxgi(chain, image, damage);
+
+ char *ptr = (char *)image->base.cpu_map;
+ char *dptr = (char *)image->sw.ppvBits;
+
+ for (unsigned h = 0; h < chain->extent.height; h++) {
+ memcpy(dptr, ptr, chain->extent.width * 4);
+ dptr += image->sw.bmp_row_pitch;
+ ptr += image->base.row_pitches[0];
+ }
+ if (!StretchBlt(chain->chain_dc, 0, 0, chain->extent.width, chain->extent.height, image->sw.dc, 0, 0, chain->extent.width, chain->extent.height, SRCCOPY))
+ chain->status = VK_ERROR_MEMORY_MAP_FAILED;
+
+ image->state = WSI_IMAGE_IDLE;
+
+ return chain->status;
+}
+
+static VkResult
+wsi_win32_surface_create_swapchain_dxgi(
+ wsi_win32_surface *surface,
+ VkDevice device,
+ struct wsi_win32 *wsi,
+ const VkSwapchainCreateInfoKHR *create_info,
+ struct wsi_win32_swapchain *chain)
+{
+ IDXGIFactory4 *factory = wsi->dxgi.factory;
+ ID3D12CommandQueue *queue =
+ (ID3D12CommandQueue *)wsi->wsi->win32.get_d3d12_command_queue(device);
+
+ DXGI_SWAP_CHAIN_DESC1 desc = {
+ create_info->imageExtent.width,
+ create_info->imageExtent.height,
+ DXGI_FORMAT_B8G8R8A8_UNORM,
+ create_info->imageArrayLayers > 1, // Stereo
+ { 1 }, // SampleDesc
+ 0, // Usage (filled in below)
+ create_info->minImageCount,
+ DXGI_SCALING_STRETCH,
+ DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL,
+ DXGI_ALPHA_MODE_UNSPECIFIED,
+ chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ?
+ DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING : 0u
+ };
+
+ if (create_info->imageUsage &
+ (VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
+ desc.BufferUsage |= DXGI_USAGE_SHADER_INPUT;
+
+ if (create_info->imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
+ desc.BufferUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+
+ IDXGISwapChain1 *swapchain1;
+ if (FAILED(factory->CreateSwapChainForComposition(queue, &desc, NULL, &swapchain1)) ||
+ FAILED(swapchain1->QueryInterface(&chain->dxgi)))
+ return VK_ERROR_INITIALIZATION_FAILED;
+
+ swapchain1->Release();
+
+ if (!surface->target &&
+ FAILED(wsi->dxgi.dcomp->CreateTargetForHwnd(surface->base.hwnd, false, &surface->target)))
+ return VK_ERROR_INITIALIZATION_FAILED;
+
+ if (!surface->visual) {
+ if (FAILED(wsi->dxgi.dcomp->CreateVisual(&surface->visual)) ||
+ FAILED(surface->target->SetRoot(surface->visual)) ||
+ FAILED(surface->visual->SetContent(chain->dxgi)) ||
+ FAILED(wsi->dxgi.dcomp->Commit()))
+ return VK_ERROR_INITIALIZATION_FAILED;
+
+ surface->current_swapchain = chain;
+ }
+ return VK_SUCCESS;
+}
+
+static VkResult
+wsi_win32_surface_create_swapchain(
+ VkIcdSurfaceBase *icd_surface,
+ VkDevice device,
+ struct wsi_device *wsi_device,
+ const VkSwapchainCreateInfoKHR *create_info,
+ const VkAllocationCallbacks *allocator,
+ struct wsi_swapchain **swapchain_out)
+{
+ wsi_win32_surface *surface = (wsi_win32_surface *)icd_surface;
+ struct wsi_win32 *wsi =
+ (struct wsi_win32 *) wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32];
+
+ assert(create_info->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+
+ const unsigned num_images = create_info->minImageCount;
+ struct wsi_win32_swapchain *chain;
+ size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
+
+ chain = (wsi_win32_swapchain *)vk_zalloc(allocator, size,
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (chain == NULL)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ struct wsi_dxgi_image_params dxgi_image_params = {
+ { WSI_IMAGE_TYPE_DXGI },
+ };
+ dxgi_image_params.storage_image = (create_info->imageUsage & VK_IMAGE_USAGE_STORAGE_BIT) != 0;
+
+ struct wsi_cpu_image_params cpu_image_params = {
+ { WSI_IMAGE_TYPE_CPU },
+ };
+
+ bool supports_dxgi = wsi->dxgi.factory &&
+ wsi->dxgi.dcomp &&
+ wsi->wsi->win32.get_d3d12_command_queue;
+ struct wsi_base_image_params *image_params = supports_dxgi ?
+ &dxgi_image_params.base : &cpu_image_params.base;
+
+ VkResult result = wsi_swapchain_init(wsi_device, &chain->base, device,
+ create_info, image_params,
+ allocator);
+ if (result != VK_SUCCESS) {
+ vk_free(allocator, chain);
+ return result;
+ }
+
+ chain->base.destroy = wsi_win32_swapchain_destroy;
+ chain->base.get_wsi_image = wsi_win32_get_wsi_image;
+ chain->base.acquire_next_image = wsi_win32_acquire_next_image;
+ chain->base.release_images = wsi_win32_release_images;
+ chain->base.queue_present = wsi_win32_queue_present;
+ chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, create_info);
+ chain->extent = create_info->imageExtent;
+
+ chain->wsi = wsi;
+ chain->status = VK_SUCCESS;
+
+ chain->surface = surface;
+
+ if (image_params->image_type == WSI_IMAGE_TYPE_DXGI) {
+ result = wsi_win32_surface_create_swapchain_dxgi(surface, device, wsi, create_info, chain);
+ if (result != VK_SUCCESS)
+ goto fail;
+ }
+
+ for (uint32_t image = 0; image < num_images; image++) {
+ result = wsi_win32_image_init(device, chain,
+ create_info, allocator,
+ &chain->images[image]);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ chain->base.image_count++;
+ }
+
+ *swapchain_out = &chain->base;
+
+ return VK_SUCCESS;
+
+fail:
+ if (surface->visual) {
+ surface->visual->SetContent(NULL);
+ surface->current_swapchain = NULL;
+ wsi->dxgi.dcomp->Commit();
+ }
+ wsi_win32_swapchain_destroy(&chain->base, allocator);
+ return result;
+}
+
+static IDXGIFactory4 *
+dxgi_get_factory(bool debug)
+{
+ HMODULE dxgi_mod = LoadLibraryA("DXGI.DLL");
+ if (!dxgi_mod) {
+ return NULL;
+ }
+
+ typedef HRESULT(WINAPI *PFN_CREATE_DXGI_FACTORY2)(UINT flags, REFIID riid, void **ppFactory);
+ PFN_CREATE_DXGI_FACTORY2 CreateDXGIFactory2;
+
+ CreateDXGIFactory2 = (PFN_CREATE_DXGI_FACTORY2)GetProcAddress(dxgi_mod, "CreateDXGIFactory2");
+ if (!CreateDXGIFactory2) {
+ return NULL;
+ }
+
+ UINT flags = 0;
+ if (debug)
+ flags |= DXGI_CREATE_FACTORY_DEBUG;
+
+ IDXGIFactory4 *factory;
+ HRESULT hr = CreateDXGIFactory2(flags, IID_PPV_ARGS(&factory));
+ if (FAILED(hr)) {
+ return NULL;
+ }
+
+ return factory;
+}
+
+static IDCompositionDevice *
+dcomp_get_device()
+{
+ HMODULE dcomp_mod = LoadLibraryA("DComp.DLL");
+ if (!dcomp_mod) {
+ return NULL;
+ }
+
+ typedef HRESULT (STDAPICALLTYPE *PFN_DCOMP_CREATE_DEVICE)(IDXGIDevice *, REFIID, void **);
+ PFN_DCOMP_CREATE_DEVICE DCompositionCreateDevice;
+
+ DCompositionCreateDevice = (PFN_DCOMP_CREATE_DEVICE)GetProcAddress(dcomp_mod, "DCompositionCreateDevice");
+ if (!DCompositionCreateDevice) {
+ return NULL;
+ }
+
+ IDCompositionDevice *device;
+ HRESULT hr = DCompositionCreateDevice(NULL, IID_PPV_ARGS(&device));
+ if (FAILED(hr)) {
+ return NULL;
+ }
+
+ return device;
+}
+
+VkResult
+wsi_win32_init_wsi(struct wsi_device *wsi_device,
+ const VkAllocationCallbacks *alloc,
+ VkPhysicalDevice physical_device)
+{
+ struct wsi_win32 *wsi;
+ VkResult result;
+
+ wsi = (wsi_win32 *)vk_zalloc(alloc, sizeof(*wsi), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ if (!wsi) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail;
+ }
+
+ wsi->physical_device = physical_device;
+ wsi->alloc = alloc;
+ wsi->wsi = wsi_device;
+
+ if (!wsi_device->sw) {
+ wsi->dxgi.factory = dxgi_get_factory(WSI_DEBUG & WSI_DEBUG_DXGI);
+ if (!wsi->dxgi.factory) {
+ vk_free(alloc, wsi);
+ result = VK_ERROR_INITIALIZATION_FAILED;
+ goto fail;
+ }
+ wsi->dxgi.dcomp = dcomp_get_device();
+ if (!wsi->dxgi.dcomp) {
+ wsi->dxgi.factory->Release();
+ vk_free(alloc, wsi);
+ result = VK_ERROR_INITIALIZATION_FAILED;
+ goto fail;
+ }
+ }
+
+ wsi->base.get_support = wsi_win32_surface_get_support;
+ wsi->base.get_capabilities2 = wsi_win32_surface_get_capabilities2;
+ wsi->base.get_formats = wsi_win32_surface_get_formats;
+ wsi->base.get_formats2 = wsi_win32_surface_get_formats2;
+ wsi->base.get_present_modes = wsi_win32_surface_get_present_modes;
+ wsi->base.get_present_rectangles = wsi_win32_surface_get_present_rectangles;
+ wsi->base.create_swapchain = wsi_win32_surface_create_swapchain;
+
+ wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32] = &wsi->base;
+
+ return VK_SUCCESS;
+
+fail:
+ wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32] = NULL;
+
+ return result;
+}
+
+void
+wsi_win32_finish_wsi(struct wsi_device *wsi_device,
+ const VkAllocationCallbacks *alloc)
+{
+ struct wsi_win32 *wsi =
+ (struct wsi_win32 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WIN32];
+ if (!wsi)
+ return;
+
+ if (wsi->dxgi.factory)
+ wsi->dxgi.factory->Release();
+ if (wsi->dxgi.dcomp)
+ wsi->dxgi.dcomp->Release();
+
+ vk_free(alloc, wsi);
+}
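The Win32 backend above is reached through the standard VK_KHR_win32_surface and VK_KHR_swapchain entry points. A minimal sketch of the application side, assuming an already-created VkInstance `instance` and a native window pair `hinstance`/`hwnd` (illustrative only, not part of this change):

    VkWin32SurfaceCreateInfoKHR surf_info = {
       .sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
       .hinstance = hinstance,
       .hwnd = hwnd,
    };
    VkSurfaceKHR surface;
    VkResult res = vkCreateWin32SurfaceKHR(instance, &surf_info, NULL, &surface);
    /* A swapchain created against this surface lands in
     * wsi_win32_surface_create_swapchain() above: DXGI + DirectComposition when
     * available, otherwise the GDI software blit path. */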
diff --git a/src/vulkan/wsi/wsi_common_x11.c b/src/vulkan/wsi/wsi_common_x11.c
index d8d5aaa6d0d..29b123e624e 100644
--- a/src/vulkan/wsi/wsi_common_x11.c
+++ b/src/vulkan/wsi/wsi_common_x11.c
@@ -23,30 +23,44 @@
#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
+#define XK_MISCELLANY
+#define XK_LATIN1
+#include <X11/keysymdef.h>
#include <xcb/xcb.h>
+#ifdef XCB_KEYSYMS_AVAILABLE
+#include <xcb/xcb_keysyms.h>
+#endif
#include <xcb/dri3.h>
#include <xcb/present.h>
#include <xcb/shm.h>
#include "util/macros.h"
+#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
-#include <poll.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
+#include "util/mesa-blake3.h"
+#include "util/os_file.h"
+#include "util/os_time.h"
#include "util/u_debug.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"
+#include "util/timespec.h"
+#include "vk_format.h"
+#include "vk_instance.h"
+#include "vk_physical_device.h"
+#include "vk_device.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
+#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
-#include "wsi_common_x11.h"
#include "wsi_common_queue.h"
#ifdef HAVE_SYS_SHM_H
@@ -54,13 +68,22 @@
#include <sys/shm.h>
#endif
+#ifndef XCB_PRESENT_OPTION_ASYNC_MAY_TEAR
+#define XCB_PRESENT_OPTION_ASYNC_MAY_TEAR 16
+#endif
+#ifndef XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR
+#define XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR 8
+#endif
+
struct wsi_x11_connection {
bool has_dri3;
bool has_dri3_modifiers;
+ bool has_dri3_explicit_sync;
bool has_present;
bool is_proprietary_x11;
bool is_xwayland;
bool has_mit_shm;
+ bool has_xfixes;
};
struct wsi_x11 {
@@ -71,10 +94,16 @@ struct wsi_x11 {
struct hash_table *connections;
};
+struct wsi_x11_vk_surface {
+ union {
+ VkIcdSurfaceXlib xlib;
+ VkIcdSurfaceXcb xcb;
+ };
+ bool has_alpha;
+};
-/** wsi_dri3_open
- *
- * Wrapper around xcb_dri3_open
+/**
+ * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
*/
static int
wsi_dri3_open(xcb_connection_t *conn,
@@ -93,6 +122,7 @@ wsi_dri3_open(xcb_connection_t *conn,
if (!reply)
return -1;
+   /* According to the DRI3 extension, nfd must equal one. */
if (reply->nfd != 1) {
free(reply);
return -1;
@@ -105,6 +135,13 @@ wsi_dri3_open(xcb_connection_t *conn,
return fd;
}
+/**
+ * Checks compatibility of the device wsi_dev with the device the X server
+ * provides via DRI3.
+ *
+ * This returns true when no device could be retrieved from the X server or when
+ * the information for the X server device indicates that it is the same device.
+ */
static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
xcb_connection_t *conn)
@@ -113,6 +150,9 @@ wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
xcb_setup_roots_iterator(xcb_get_setup(conn));
xcb_screen_t *screen = screen_iter.data;
+ /* Open the DRI3 device from the X server. If we do not retrieve one we
+ * assume our local device is compatible.
+ */
int dri3_fd = wsi_dri3_open(conn, screen->root, None);
if (dri3_fd == -1)
return true;
@@ -125,8 +165,18 @@ wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
}
static bool
-wsi_x11_detect_xwayland(xcb_connection_t *conn)
+wsi_x11_detect_xwayland(xcb_connection_t *conn,
+ xcb_query_extension_reply_t *randr_reply,
+ xcb_query_extension_reply_t *xwl_reply)
{
+ /* Newer Xwayland exposes an X11 extension we can check for */
+ if (xwl_reply && xwl_reply->present)
+ return true;
+
+ /* Older Xwayland uses the word "XWAYLAND" in the RandR output names */
+ if (!randr_reply || !randr_reply->present)
+ return false;
+
xcb_randr_query_version_cookie_t ver_cookie =
xcb_randr_query_version_unchecked(conn, 1, 3);
xcb_randr_query_version_reply_t *ver_reply =
@@ -174,10 +224,18 @@ static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
xcb_connection_t *conn)
{
- xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie, amd_cookie, nv_cookie, shm_cookie;
- xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply, *amd_reply, *nv_reply, *shm_reply = NULL;
+ xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
+ amd_cookie, nv_cookie, shm_cookie, sync_cookie,
+ xfixes_cookie, xwl_cookie;
+ xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
+ *amd_reply, *nv_reply, *shm_reply = NULL,
+ *xfixes_reply, *xwl_reply;
+ bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
+ wsi_dev->has_import_memory_host;
bool has_dri3_v1_2 = false;
bool has_present_v1_2 = false;
+ bool has_dri3_v1_4 = false;
+ bool has_present_v1_4 = false;
struct wsi_x11_connection *wsi_conn =
vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
@@ -185,11 +243,14 @@ wsi_x11_connection_create(struct wsi_device *wsi_dev,
if (!wsi_conn)
return NULL;
+ sync_cookie = xcb_query_extension(conn, 4, "SYNC");
dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
pres_cookie = xcb_query_extension(conn, 7, "Present");
randr_cookie = xcb_query_extension(conn, 5, "RANDR");
+ xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
+ xwl_cookie = xcb_query_extension(conn, 8, "XWAYLAND");
- if (wsi_dev->sw)
+ if (wants_shm)
shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");
/* We try to be nice to users and emit a warning if they try to use a
@@ -204,20 +265,25 @@ wsi_x11_connection_create(struct wsi_device *wsi_dev,
amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
+ xcb_discard_reply(conn, sync_cookie.sequence);
dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
- if (wsi_dev->sw)
+ xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
+ xwl_reply = xcb_query_extension_reply(conn, xwl_cookie, NULL);
+ if (wants_shm)
shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
- if (!dri3_reply || !pres_reply) {
+ if (!dri3_reply || !pres_reply || !xfixes_reply) {
free(dri3_reply);
free(pres_reply);
+ free(xfixes_reply);
+ free(xwl_reply);
free(randr_reply);
free(amd_reply);
free(nv_reply);
- if (wsi_dev->sw)
+ if (wants_shm)
free(shm_reply);
vk_free(&wsi_dev->instance_alloc, wsi_conn);
return NULL;
@@ -229,10 +295,12 @@ wsi_x11_connection_create(struct wsi_device *wsi_dev,
xcb_dri3_query_version_cookie_t ver_cookie;
xcb_dri3_query_version_reply_t *ver_reply;
- ver_cookie = xcb_dri3_query_version(conn, 1, 2);
+ ver_cookie = xcb_dri3_query_version(conn, 1, 4);
ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
- has_dri3_v1_2 =
+ has_dri3_v1_2 = ver_reply != NULL &&
(ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
+ has_dri3_v1_4 = ver_reply != NULL &&
+ (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
free(ver_reply);
}
#endif
@@ -243,20 +311,32 @@ wsi_x11_connection_create(struct wsi_device *wsi_dev,
xcb_present_query_version_cookie_t ver_cookie;
xcb_present_query_version_reply_t *ver_reply;
- ver_cookie = xcb_present_query_version(conn, 1, 2);
+ ver_cookie = xcb_present_query_version(conn, 1, 4);
ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
has_present_v1_2 =
(ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
+ has_present_v1_4 =
+ (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
free(ver_reply);
}
#endif
- if (randr_reply && randr_reply->present != 0)
- wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn);
- else
- wsi_conn->is_xwayland = false;
+ wsi_conn->has_xfixes = xfixes_reply->present != 0;
+ if (wsi_conn->has_xfixes) {
+ xcb_xfixes_query_version_cookie_t ver_cookie;
+ xcb_xfixes_query_version_reply_t *ver_reply;
+
+ ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
+ ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
+ wsi_conn->has_xfixes = (ver_reply->major_version >= 2);
+ free(ver_reply);
+ }
+
+ wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn, randr_reply,
+ xwl_reply);
wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
+ wsi_conn->has_dri3_explicit_sync = has_dri3_v1_4 && has_present_v1_4;
wsi_conn->is_proprietary_x11 = false;
if (amd_reply && amd_reply->present)
wsi_conn->is_proprietary_x11 = true;
@@ -264,7 +344,7 @@ wsi_x11_connection_create(struct wsi_device *wsi_dev,
wsi_conn->is_proprietary_x11 = true;
wsi_conn->has_mit_shm = false;
- if (wsi_conn->has_dri3 && wsi_conn->has_present && wsi_dev->sw) {
+ if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
bool has_mit_shm = shm_reply->present != 0;
xcb_shm_query_version_cookie_t ver_cookie;
@@ -286,14 +366,17 @@ wsi_x11_connection_create(struct wsi_device *wsi_dev,
free(error);
}
}
- free(shm_reply);
}
free(dri3_reply);
free(pres_reply);
free(randr_reply);
+ free(xwl_reply);
free(amd_reply);
free(nv_reply);
+ free(xfixes_reply);
+ if (wants_shm)
+ free(shm_reply);
return wsi_conn;
}
@@ -317,6 +400,14 @@ wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
return false;
}
+/**
+ * Get internal struct representing an xcb_connection_t.
+ *
+ * This may allocate the struct, but the caller does not own it: it is
+ * freed in wsi_x11_finish_wsi via the hash table it is inserted into.
+ *
+ * If the allocation fails, NULL is returned.
+ */
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
xcb_connection_t *conn)
@@ -355,8 +446,10 @@ wsi_x11_get_connection(struct wsi_device *wsi_dev,
}
static const VkFormat formats[] = {
+ VK_FORMAT_R5G6B5_UNORM_PACK16,
VK_FORMAT_B8G8R8A8_SRGB,
VK_FORMAT_B8G8R8A8_UNORM,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
};
static const VkPresentModeKHR present_modes[] = {
@@ -404,8 +497,7 @@ screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
}
static xcb_visualtype_t *
-connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
- unsigned *depth)
+connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
{
xcb_screen_iterator_t screen_iter =
xcb_setup_roots_iterator(xcb_get_setup(conn));
@@ -415,7 +507,7 @@ connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
*/
for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
- visual_id, depth);
+ visual_id, NULL);
if (visual)
return visual;
}
@@ -425,7 +517,7 @@ connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
- unsigned *depth)
+ unsigned *depth, xcb_visualtype_t **rootvis)
{
xcb_query_tree_cookie_t tree_cookie;
xcb_get_window_attributes_cookie_t attrib_cookie;
@@ -452,6 +544,8 @@ get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
if (screen == NULL)
return NULL;
+ if (rootvis)
+ *rootvis = screen_get_visualtype(screen, screen->root_visual, depth);
return screen_get_visualtype(screen, visual_id, depth);
}
@@ -468,12 +562,27 @@ visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
return (all_mask & ~rgb_mask) != 0;
}
-VkBool32 wsi_get_physical_device_xcb_presentation_support(
- struct wsi_device *wsi_device,
- uint32_t queueFamilyIndex,
- xcb_connection_t* connection,
- xcb_visualid_t visual_id)
+static bool
+visual_supported(xcb_visualtype_t *visual)
+{
+ if (!visual)
+ return false;
+
+ return visual->_class == XCB_VISUAL_CLASS_TRUE_COLOR ||
+ visual->_class == XCB_VISUAL_CLASS_DIRECT_COLOR;
+}
+
+VKAPI_ATTR VkBool32 VKAPI_CALL
+wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ xcb_connection_t *connection,
+ xcb_visualid_t visual_id)
{
+ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+ struct wsi_device *wsi_device = pdevice->wsi_device;
+ if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
+ return false;
+
struct wsi_x11_connection *wsi_conn =
wsi_x11_get_connection(wsi_device, connection);
@@ -485,16 +594,24 @@ VkBool32 wsi_get_physical_device_xcb_presentation_support(
return false;
}
- unsigned visual_depth;
- if (!connection_get_visualtype(connection, visual_id, &visual_depth))
- return false;
-
- if (visual_depth != 24 && visual_depth != 32)
+ if (!visual_supported(connection_get_visualtype(connection, visual_id)))
return false;
return true;
}
+VKAPI_ATTR VkBool32 VKAPI_CALL
+wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ Display *dpy,
+ VisualID visualID)
+{
+ return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
+ queueFamilyIndex,
+ XGetXCBConnection(dpy),
+ visualID);
+}
+
static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
@@ -534,13 +651,7 @@ x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
}
}
- unsigned visual_depth;
- if (!get_visualtype_for_window(conn, window, &visual_depth)) {
- *pSupported = false;
- return VK_SUCCESS;
- }
-
- if (visual_depth != 24 && visual_depth != 32) {
+ if (!visual_supported(get_visualtype_for_window(conn, window, NULL, NULL))) {
*pSupported = false;
return VK_SUCCESS;
}
@@ -550,7 +661,7 @@ x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
}
static uint32_t
-x11_get_min_image_count(struct wsi_device *wsi_device)
+x11_get_min_image_count(const struct wsi_device *wsi_device, bool is_xwayland)
{
if (wsi_device->x11.override_minImageCount)
return wsi_device->x11.override_minImageCount;
@@ -572,55 +683,48 @@ x11_get_min_image_count(struct wsi_device *wsi_device)
*
* This is a tradeoff as it uses more memory than needed for non-fullscreen
* and non-performance intensive applications.
+ *
+    * For Xwayland, Venus reports four images, as described in
+    * wsi_wl_surface_get_capabilities().
*/
- return 3;
+ return is_xwayland && wsi_device->x11.extra_xwayland_image ? 4 : 3;
}
+static unsigned
+x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
+ struct wsi_x11_connection *wsi_conn,
+ VkPresentModeKHR present_mode);
+
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
struct wsi_device *wsi_device,
+ const VkSurfacePresentModeEXT *present_mode,
VkSurfaceCapabilitiesKHR *caps)
{
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
xcb_window_t window = x11_surface_get_window(icd_surface);
+ struct wsi_x11_vk_surface *surface = (struct wsi_x11_vk_surface*)icd_surface;
+ struct wsi_x11_connection *wsi_conn =
+ wsi_x11_get_connection(wsi_device, conn);
xcb_get_geometry_cookie_t geom_cookie;
xcb_generic_error_t *err;
xcb_get_geometry_reply_t *geom;
- unsigned visual_depth;
geom_cookie = xcb_get_geometry(conn, window);
- /* This does a round-trip. This is why we do get_geometry first and
- * wait to read the reply until after we have a visual.
- */
- xcb_visualtype_t *visual =
- get_visualtype_for_window(conn, window, &visual_depth);
-
- if (!visual)
- return VK_ERROR_SURFACE_LOST_KHR;
-
geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
- if (geom) {
+ if (!geom)
+ return VK_ERROR_SURFACE_LOST_KHR;
+ {
VkExtent2D extent = { geom->width, geom->height };
caps->currentExtent = extent;
caps->minImageExtent = extent;
caps->maxImageExtent = extent;
- } else {
- /* This can happen if the client didn't wait for the configure event
- * to come back from the compositor. In that case, we don't know the
- * size of the window so we just return valid "I don't know" stuff.
- */
- caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
- caps->minImageExtent = (VkExtent2D) { 1, 1 };
- caps->maxImageExtent = (VkExtent2D) {
- wsi_device->maxImageDimension2D,
- wsi_device->maxImageDimension2D,
- };
}
free(err);
free(geom);
- if (visual_has_alpha(visual, visual_depth)) {
+ if (surface->has_alpha) {
caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
} else {
@@ -628,19 +732,23 @@ x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
}
- caps->minImageCount = x11_get_min_image_count(wsi_device);
+ if (present_mode) {
+ caps->minImageCount = x11_get_min_image_count_for_present_mode(wsi_device, wsi_conn, present_mode->presentMode);
+ } else {
+ caps->minImageCount = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
+ }
+
/* There is no real maximum */
caps->maxImageCount = 0;
caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
caps->maxImageArrayLayers = 1;
- caps->supportedUsageFlags =
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT |
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ caps->supportedUsageFlags = wsi_caps_get_image_usage();
+
+ VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
+ if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
+ caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
return VK_SUCCESS;
}
@@ -653,10 +761,15 @@ x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
{
assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
+ const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
+
VkResult result =
- x11_surface_get_capabilities(icd_surface, wsi_device,
+ x11_surface_get_capabilities(icd_surface, wsi_device, present_mode,
&caps->surfaceCapabilities);
+ if (result != VK_SUCCESS)
+ return result;
+
vk_foreach_struct(ext, caps->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
@@ -665,6 +778,46 @@ x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
break;
}
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
+ /* Unsupported. */
+ VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
+ scaling->supportedPresentScaling = 0;
+ scaling->supportedPresentGravityX = 0;
+ scaling->supportedPresentGravityY = 0;
+ scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
+ scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
+ /* All present modes are compatible with each other. */
+ VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
+ if (compat->pPresentModes) {
+ assert(present_mode);
+ VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
+ /* Must always return queried present mode even when truncating. */
+ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+ *mode = present_mode->presentMode;
+ }
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(present_modes); i++) {
+ if (present_modes[i] != present_mode->presentMode) {
+ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+ *mode = present_modes[i];
+ }
+ }
+ }
+ } else {
+ if (!present_mode)
+ wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
+ "without a VkSurfacePresentModeEXT set. This is an "
+ "application bug.\n");
+
+ compat->presentModeCount = ARRAY_SIZE(present_modes);
+ }
+ break;
+ }
+
default:
/* Ignored */
break;
@@ -674,13 +827,50 @@ x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
return result;
}
-static void
-get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
+static int
+format_get_component_bits(VkFormat format, int comp)
+{
+ return vk_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, comp);
+}
+
+static bool
+rgb_component_bits_are_equal(VkFormat format, const xcb_visualtype_t* type)
+{
+ return format_get_component_bits(format, 0) == util_bitcount(type->red_mask) &&
+ format_get_component_bits(format, 1) == util_bitcount(type->green_mask) &&
+ format_get_component_bits(format, 2) == util_bitcount(type->blue_mask);
+}
+
+static bool
+get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
+ VkFormat *sorted_formats, unsigned *count)
{
- memcpy(sorted_formats, formats, sizeof(formats));
+ xcb_connection_t *conn = x11_surface_get_connection(surface);
+ xcb_window_t window = x11_surface_get_window(surface);
+ xcb_visualtype_t *rootvis = NULL;
+ xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL, &rootvis);
+
+ if (!visual)
+ return false;
+
+ /* use the root window's visual to set the default */
+ *count = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (rgb_component_bits_are_equal(formats[i], rootvis))
+ sorted_formats[(*count)++] = formats[i];
+ }
+
+ for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ for (unsigned j = 0; j < *count; j++)
+ if (formats[i] == sorted_formats[j])
+ goto next_format;
+ if (rgb_component_bits_are_equal(formats[i], visual))
+ sorted_formats[(*count)++] = formats[i];
+next_format:;
+ }
if (wsi_device->force_bgra8_unorm_first) {
- for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ for (unsigned i = 0; i < *count; i++) {
if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
sorted_formats[i] = sorted_formats[0];
sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
@@ -688,6 +878,8 @@ get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
}
}
}
+
+ return true;
}
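As an illustration of the matching above (not from the patch itself): a typical depth-24 or depth-32 TrueColor visual has 8 bits per RGB component, so only the B8G8R8A8 entries of formats[] survive the component-width comparison; a 10-bit (depth-30) visual would instead match VK_FORMAT_A2R10G10B10_UNORM_PACK32, and a 16-bit visual VK_FORMAT_R5G6B5_UNORM_PACK16.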
static VkResult
@@ -696,15 +888,18 @@ x11_surface_get_formats(VkIcdSurfaceBase *surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats)
{
- VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
+ pSurfaceFormats, pSurfaceFormatCount);
+ unsigned count;
VkFormat sorted_formats[ARRAY_SIZE(formats)];
- get_sorted_vk_formats(wsi_device, sorted_formats);
+ if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
+ return VK_ERROR_SURFACE_LOST_KHR;
- for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
- vk_outarray_append(&out, f) {
+ for (unsigned i = 0; i < count; i++) {
+ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
f->format = sorted_formats[i];
- f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
}
}
@@ -718,16 +913,19 @@ x11_surface_get_formats2(VkIcdSurfaceBase *surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats)
{
- VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
+ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
+ pSurfaceFormats, pSurfaceFormatCount);
+ unsigned count;
VkFormat sorted_formats[ARRAY_SIZE(formats)];
- get_sorted_vk_formats(wsi_device, sorted_formats);
+ if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
+ return VK_ERROR_SURFACE_LOST_KHR;
- for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
- vk_outarray_append(&out, f) {
+ for (unsigned i = 0; i < count; i++) {
+ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
f->surfaceFormat.format = sorted_formats[i];
- f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
}
}
@@ -736,6 +934,7 @@ x11_surface_get_formats2(VkIcdSurfaceBase *surface,
static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
+ struct wsi_device *wsi_device,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes)
{
@@ -759,9 +958,9 @@ x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
{
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
xcb_window_t window = x11_surface_get_window(icd_surface);
- VK_OUTARRAY_MAKE(out, pRects, pRectCount);
+ VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
- vk_outarray_append(&out, rect) {
+ vk_outarray_append_typed(VkRect2D, &out, rect) {
xcb_generic_error_t *err = NULL;
xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
xcb_get_geometry_reply_t *geom =
@@ -772,71 +971,109 @@ x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
.offset = { 0, 0 },
.extent = { geom->width, geom->height },
};
- } else {
- /* This can happen if the client didn't wait for the configure event
- * to come back from the compositor. In that case, we don't know the
- * size of the window so we just return valid "I don't know" stuff.
- */
- *rect = (VkRect2D) {
- .offset = { 0, 0 },
- .extent = { UINT32_MAX, UINT32_MAX },
- };
}
free(geom);
+ if (!geom)
+ return VK_ERROR_SURFACE_LOST_KHR;
}
return vk_outarray_status(&out);
}
-VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
- const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateXcbSurfaceKHR(VkInstance _instance,
+ const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface)
{
- VkIcdSurfaceXcb *surface;
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ struct wsi_x11_vk_surface *surface;
- surface = vk_alloc(pAllocator, sizeof *surface, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
+
+ unsigned visual_depth;
+ xcb_visualtype_t *visual =
+ get_visualtype_for_window(pCreateInfo->connection, pCreateInfo->window, &visual_depth, NULL);
+ if (!visual)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (surface == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
- surface->connection = pCreateInfo->connection;
- surface->window = pCreateInfo->window;
+ surface->xcb.base.platform = VK_ICD_WSI_PLATFORM_XCB;
+ surface->xcb.connection = pCreateInfo->connection;
+ surface->xcb.window = pCreateInfo->window;
+
+ surface->has_alpha = visual_has_alpha(visual, visual_depth);
- *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->xcb.base);
return VK_SUCCESS;
}
-VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
- const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
- VkSurfaceKHR *pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL
+wsi_CreateXlibSurfaceKHR(VkInstance _instance,
+ const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface)
{
- VkIcdSurfaceXlib *surface;
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
+ struct wsi_x11_vk_surface *surface;
- surface = vk_alloc(pAllocator, sizeof *surface, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
+
+ unsigned visual_depth;
+ xcb_visualtype_t *visual =
+ get_visualtype_for_window(XGetXCBConnection(pCreateInfo->dpy), pCreateInfo->window, &visual_depth, NULL);
+ if (!visual)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (surface == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
- surface->dpy = pCreateInfo->dpy;
- surface->window = pCreateInfo->window;
+ surface->xlib.base.platform = VK_ICD_WSI_PLATFORM_XLIB;
+ surface->xlib.dpy = pCreateInfo->dpy;
+ surface->xlib.window = pCreateInfo->window;
- *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
+ surface->has_alpha = visual_has_alpha(visual, visual_depth);
+
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->xlib.base);
return VK_SUCCESS;
}
+struct x11_image_pending_completion {
+ uint32_t serial;
+ uint64_t signal_present_id;
+};
+
struct x11_image {
struct wsi_image base;
xcb_pixmap_t pixmap;
- bool busy;
- bool present_queued;
+ xcb_xfixes_region_t update_region; /* long lived XID */
+ xcb_xfixes_region_t update_area; /* the above or None */
struct xshmfence * shm_fence;
uint32_t sync_fence;
- uint32_t serial;
xcb_shm_seg_t shmseg;
int shmid;
uint8_t * shmaddr;
+ uint64_t present_id;
+ VkPresentModeKHR present_mode;
+
+ /* In IMMEDIATE and MAILBOX modes, we can have multiple pending presentations per image.
+ * We need to keep track of them when considering present ID. */
+
+   /* This limit is arbitrarily chosen. With IMMEDIATE on a 3-deep swapchain,
+    * 16 pending completions per image allow up to 3 * 16 = 48 outstanding
+    * presentations per vblank, which is more than enough for any reasonable
+    * application. */
+#define X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS 16
+ uint32_t present_queued_count;
+ struct x11_image_pending_completion pending_completions[X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS];
+#ifdef HAVE_DRI3_EXPLICIT_SYNC
+ uint32_t dri3_syncobj[WSI_ES_COUNT];
+#endif
};
struct x11_swapchain {
@@ -844,6 +1081,7 @@ struct x11_swapchain {
bool has_dri3_modifiers;
bool has_mit_shm;
+ bool has_async_may_tear;
xcb_connection_t * conn;
xcb_window_t window;
@@ -851,26 +1089,82 @@ struct x11_swapchain {
uint32_t depth;
VkExtent2D extent;
+ blake3_hash dri3_modifier_hash;
+
xcb_present_event_t event_id;
xcb_special_event_t * special_event;
uint64_t send_sbc;
uint64_t last_present_msc;
uint32_t stamp;
- int sent_image_count;
+ uint32_t sent_image_count;
- bool has_present_queue;
- bool has_acquire_queue;
- VkResult status;
+ atomic_int status;
bool copy_is_suboptimal;
struct wsi_queue present_queue;
struct wsi_queue acquire_queue;
pthread_t queue_manager;
+ pthread_t event_manager;
+
+    * The lock is also taken when reading and writing status.
+ * Lock is also taken when reading and writing status.
+ * When reading status in application threads,
+ * x11_swapchain_read_status_atomic can be used as a wrapper function. */
+ pthread_mutex_t thread_state_lock;
+ pthread_cond_t thread_state_cond;
+
+ /* Lock and condition variable for present wait.
+ * Signalled by event thread and waited on by callers to PresentWaitKHR. */
+ pthread_mutex_t present_progress_mutex;
+ pthread_cond_t present_progress_cond;
+ uint64_t present_id;
+ VkResult present_progress_error;
struct x11_image images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
VK_OBJECT_TYPE_SWAPCHAIN_KHR)
+static void x11_present_complete(struct x11_swapchain *swapchain,
+ struct x11_image *image, uint32_t index)
+{
+ uint64_t signal_present_id = image->pending_completions[index].signal_present_id;
+ if (signal_present_id) {
+ pthread_mutex_lock(&swapchain->present_progress_mutex);
+ if (signal_present_id > swapchain->present_id) {
+ swapchain->present_id = signal_present_id;
+ pthread_cond_broadcast(&swapchain->present_progress_cond);
+ }
+ pthread_mutex_unlock(&swapchain->present_progress_mutex);
+ }
+
+ image->present_queued_count--;
+ if (image->present_queued_count) {
+ memmove(image->pending_completions + index,
+ image->pending_completions + index + 1,
+ (image->present_queued_count - index) *
+ sizeof(image->pending_completions[0]));
+ }
+
+ pthread_cond_signal(&swapchain->thread_state_cond);
+}
+
+static void x11_notify_pending_present(struct x11_swapchain *swapchain,
+ struct x11_image *image)
+{
+ pthread_cond_signal(&swapchain->thread_state_cond);
+}
+
+/* It is assumed that thread_state_lock is taken when calling this function. */
+static void x11_swapchain_notify_error(struct x11_swapchain *swapchain, VkResult result)
+{
+ pthread_mutex_lock(&swapchain->present_progress_mutex);
+ swapchain->present_id = UINT64_MAX;
+ swapchain->present_progress_error = result;
+ pthread_cond_broadcast(&swapchain->present_progress_cond);
+ pthread_mutex_unlock(&swapchain->present_progress_mutex);
+ pthread_cond_broadcast(&swapchain->thread_state_cond);
+}
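The two helpers above are the producer side of the present-wait machinery: the event thread advances present_id (or latches an error) and broadcasts the condition variable. A minimal sketch of the consumer side, waiting for a given present ID with an absolute timeout, is shown below; the function name is hypothetical and <pthread.h>/<errno.h> are assumed to be available:

    static VkResult
    x11_sketch_wait_for_present_id(struct x11_swapchain *chain,
                                   uint64_t present_id,
                                   const struct timespec *abs_timeout)
    {
       VkResult result = VK_SUCCESS;
       pthread_mutex_lock(&chain->present_progress_mutex);
       /* present_id only moves forward; an error latches it to UINT64_MAX. */
       while (chain->present_id < present_id) {
          if (pthread_cond_timedwait(&chain->present_progress_cond,
                                     &chain->present_progress_mutex,
                                     abs_timeout) == ETIMEDOUT) {
             result = VK_TIMEOUT;
             break;
          }
       }
       if (chain->present_progress_error != VK_SUCCESS)
          result = chain->present_progress_error;
       pthread_mutex_unlock(&chain->present_progress_mutex);
       return result;
    }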
+
/**
* Update the swapchain status with the result of an operation, and return
* the combined status. The chain status will eventually be returned from
@@ -879,11 +1173,16 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
* We make sure to 'stick' more pessimistic statuses: an out-of-date error
* is permanent once seen, and every subsequent call will return this. If
* this has not been seen, success will be returned.
+ *
+ * It is assumed that thread_state_lock is taken when calling this function.
*/
static VkResult
_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
const char *file, int line)
{
+ if (result < 0)
+ x11_swapchain_notify_error(chain, result);
+
/* Prioritise returning existing errors for consistency. */
if (chain->status < 0)
return chain->status;
@@ -929,6 +1228,32 @@ x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
return &chain->images[image_index].base;
}
+static bool
+wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain);
+
+static VkResult
+x11_wait_for_explicit_sync_release_submission(struct x11_swapchain *chain,
+ uint64_t rel_timeout_ns,
+ uint32_t *image_index)
+{
+ STACK_ARRAY(struct wsi_image*, images, chain->base.image_count);
+ for (uint32_t i = 0; i < chain->base.image_count; i++)
+ images[i] = &chain->images[i].base;
+
+ VkResult result =
+ wsi_drm_wait_for_explicit_sync_release(&chain->base,
+ chain->base.image_count,
+ images,
+ rel_timeout_ns,
+ image_index);
+ STACK_ARRAY_FINISH(images);
+ return result;
+}
+
+/* XXX this belongs in presentproto */
+#ifndef PresentWindowDestroyed
+#define PresentWindowDestroyed (1 << 0)
+#endif
/**
* Process an X11 Present event. Does not update chain->status.
*/
@@ -939,10 +1264,15 @@ x11_handle_dri3_present_event(struct x11_swapchain *chain,
switch (event->evtype) {
case XCB_PRESENT_CONFIGURE_NOTIFY: {
xcb_present_configure_notify_event_t *config = (void *) event;
-
- if (config->width != chain->extent.width ||
- config->height != chain->extent.height)
- return VK_SUBOPTIMAL_KHR;
+ if (config->pixmap_flags & PresentWindowDestroyed)
+ return VK_ERROR_SURFACE_LOST_KHR;
+
+ struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
+ if (!wsi_device->x11.ignore_suboptimal) {
+ if (config->width != chain->extent.width ||
+ config->height != chain->extent.height)
+ return VK_SUBOPTIMAL_KHR;
+ }
break;
}
@@ -950,13 +1280,12 @@ x11_handle_dri3_present_event(struct x11_swapchain *chain,
case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
xcb_present_idle_notify_event_t *idle = (void *) event;
+ assert(!chain->base.image_info.explicit_sync);
for (unsigned i = 0; i < chain->base.image_count; i++) {
if (chain->images[i].pixmap == idle->pixmap) {
- chain->images[i].busy = false;
chain->sent_image_count--;
assert(chain->sent_image_count >= 0);
- if (chain->has_acquire_queue)
- wsi_queue_push(&chain->acquire_queue, i);
+ wsi_queue_push(&chain->acquire_queue, i);
break;
}
}
@@ -967,16 +1296,24 @@ x11_handle_dri3_present_event(struct x11_swapchain *chain,
case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
xcb_present_complete_notify_event_t *complete = (void *) event;
if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
- unsigned i;
+ unsigned i, j;
for (i = 0; i < chain->base.image_count; i++) {
struct x11_image *image = &chain->images[i];
- if (image->present_queued && image->serial == complete->serial)
- image->present_queued = false;
+ for (j = 0; j < image->present_queued_count; j++) {
+ if (image->pending_completions[j].serial == complete->serial) {
+ x11_present_complete(chain, image, j);
+ }
+ }
}
chain->last_present_msc = complete->msc;
}
VkResult result = VK_SUCCESS;
+
+ struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
+ if (wsi_device->x11.ignore_suboptimal)
+ return result;
+
switch (complete->mode) {
case XCB_PRESENT_COMPLETE_MODE_COPY:
if (chain->copy_is_suboptimal)
@@ -994,7 +1331,14 @@ x11_handle_dri3_present_event(struct x11_swapchain *chain,
/* The winsys is now trying to flip directly and cannot due to our
* configuration. Request the user reallocate.
*/
- result = VK_SUBOPTIMAL_KHR;
+
+         /* Sometimes this complete mode is a spurious false positive:
+          * Xwayland may report SUBOPTIMAL_COPY even if there are no changes in the modifiers.
+          * See https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26616 for more details. */
+ if (chain->status == VK_SUCCESS &&
+ wsi_x11_swapchain_query_dri3_modifiers_changed(chain)) {
+ result = VK_SUBOPTIMAL_KHR;
+ }
break;
#endif
default:
@@ -1011,111 +1355,12 @@ x11_handle_dri3_present_event(struct x11_swapchain *chain,
return VK_SUCCESS;
}
-
-static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
-{
- uint64_t current_time = wsi_common_get_current_time();
-
- timeout = MIN2(UINT64_MAX - current_time, timeout);
-
- return current_time + timeout;
-}
-
-static VkResult
-x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
- uint32_t *image_index, uint64_t timeout)
-{
- xcb_generic_event_t *event;
- struct pollfd pfds;
- uint64_t atimeout;
- while (1) {
- for (uint32_t i = 0; i < chain->base.image_count; i++) {
- if (!chain->images[i].busy) {
- /* We found a non-busy image */
- xshmfence_await(chain->images[i].shm_fence);
- *image_index = i;
- chain->images[i].busy = true;
- return x11_swapchain_result(chain, VK_SUCCESS);
- }
- }
-
- xcb_flush(chain->conn);
-
- if (timeout == UINT64_MAX) {
- event = xcb_wait_for_special_event(chain->conn, chain->special_event);
- if (!event)
- return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
- } else {
- event = xcb_poll_for_special_event(chain->conn, chain->special_event);
- if (!event) {
- int ret;
- if (timeout == 0)
- return x11_swapchain_result(chain, VK_NOT_READY);
-
- atimeout = wsi_get_absolute_timeout(timeout);
-
- pfds.fd = xcb_get_file_descriptor(chain->conn);
- pfds.events = POLLIN;
- ret = poll(&pfds, 1, timeout / 1000 / 1000);
- if (ret == 0)
- return x11_swapchain_result(chain, VK_TIMEOUT);
- if (ret == -1)
- return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
-
- /* If a non-special event happens, the fd will still
- * poll. So recalculate the timeout now just in case.
- */
- uint64_t current_time = wsi_common_get_current_time();
- if (atimeout > current_time)
- timeout = atimeout - current_time;
- else
- timeout = 0;
- continue;
- }
- }
-
- /* Update the swapchain status here. We may catch non-fatal errors here,
- * in which case we need to update the status and continue.
- */
- VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
- /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
- result = x11_swapchain_result(chain, result);
- free(event);
- if (result < 0)
- return result;
- }
-}
-
-static VkResult
-x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
- uint32_t *image_index_out, uint64_t timeout)
-{
- assert(chain->has_acquire_queue);
-
- uint32_t image_index;
- VkResult result = wsi_queue_pull(&chain->acquire_queue,
- &image_index, timeout);
- if (result < 0 || result == VK_TIMEOUT) {
- /* On error, the thread has shut down, so safe to update chain->status.
- * Calling x11_swapchain_result with VK_TIMEOUT won't modify
- * chain->status so that is also safe.
- */
- return x11_swapchain_result(chain, result);
- } else if (chain->status < 0) {
- return chain->status;
- }
-
- assert(image_index < chain->base.image_count);
- xshmfence_await(chain->images[image_index].shm_fence);
-
- *image_index_out = image_index;
-
- return chain->status;
-}
-
+/**
+ * Send image to X server via Present extension.
+ */
static VkResult
x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
- uint64_t target_msc)
+ uint64_t target_msc, VkPresentModeKHR present_mode)
{
struct x11_image *image = &chain->images[image_index];
@@ -1131,99 +1376,338 @@ x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
if (!wsi_conn)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
- (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
+ if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
+ (present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
wsi_conn->is_xwayland) ||
- chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+ present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
options |= XCB_PRESENT_OPTION_ASYNC;
+ if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR
+ && chain->has_async_may_tear)
+ options |= XCB_PRESENT_OPTION_ASYNC_MAY_TEAR;
+
#ifdef HAVE_DRI3_MODIFIERS
if (chain->has_dri3_modifiers)
options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
- /* Poll for any available event and update the swapchain status. This could
- * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
- * associated X11 surface has been resized.
- */
- xcb_generic_event_t *event;
- while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
- VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
- /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
- result = x11_swapchain_result(chain, result);
- free(event);
- if (result < 0)
- return result;
- }
-
xshmfence_reset(image->shm_fence);
- ++chain->sent_image_count;
- assert(chain->sent_image_count <= chain->base.image_count);
+ if (!chain->base.image_info.explicit_sync) {
+ ++chain->sent_image_count;
+ assert(chain->sent_image_count <= chain->base.image_count);
+ }
++chain->send_sbc;
- image->present_queued = true;
- image->serial = (uint32_t) chain->send_sbc;
-
- xcb_void_cookie_t cookie =
- xcb_present_pixmap(chain->conn,
- chain->window,
- image->pixmap,
- image->serial,
- 0, /* valid */
- 0, /* update */
- 0, /* x_off */
- 0, /* y_off */
- XCB_NONE, /* target_crtc */
- XCB_NONE,
- image->sync_fence,
- options,
- target_msc,
- divisor,
- remainder, 0, NULL);
- xcb_discard_reply(chain->conn, cookie.sequence);
+ uint32_t serial = (uint32_t)chain->send_sbc;
- xcb_flush(chain->conn);
+ assert(image->present_queued_count < ARRAY_SIZE(image->pending_completions));
+ image->pending_completions[image->present_queued_count++] =
+ (struct x11_image_pending_completion) {
+ .signal_present_id = image->present_id,
+ .serial = serial,
+ };
+ xcb_void_cookie_t cookie;
+#ifdef HAVE_DRI3_EXPLICIT_SYNC
+ if (chain->base.image_info.explicit_sync) {
+ uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
+ uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
+ cookie = xcb_present_pixmap_synced(
+ chain->conn,
+ chain->window,
+ image->pixmap,
+ serial,
+ 0, /* valid */
+ image->update_area, /* update */
+ 0, /* x_off */
+ 0, /* y_off */
+ XCB_NONE, /* target_crtc */
+ image->dri3_syncobj[WSI_ES_ACQUIRE], /* acquire_syncobj */
+ image->dri3_syncobj[WSI_ES_RELEASE], /* release_syncobj */
+ acquire_point,
+ release_point,
+ options,
+ target_msc,
+ divisor,
+ remainder, 0, NULL);
+ } else
+#endif
+ {
+ cookie = xcb_present_pixmap(chain->conn,
+ chain->window,
+ image->pixmap,
+ serial,
+ 0, /* valid */
+ image->update_area, /* update */
+ 0, /* x_off */
+ 0, /* y_off */
+ XCB_NONE, /* target_crtc */
+ XCB_NONE,
+ image->sync_fence,
+ options,
+ target_msc,
+ divisor,
+ remainder, 0, NULL);
+ }
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ xcb_flush(chain->conn);
return x11_swapchain_result(chain, VK_SUCCESS);
}
+/**
+ * Send image to X server unaccelerated (software drivers).
+ */
static VkResult
-x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
- uint64_t target_msc)
+x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index)
{
+ assert(!chain->base.image_info.explicit_sync);
struct x11_image *image = &chain->images[image_index];
- xcb_void_cookie_t cookie;
- void *myptr;
- chain->base.wsi->MapMemory(chain->base.device,
- image->base.memory,
- 0, 0, 0, &myptr);
+ /* Begin querying this before submitting the frame for improved async performance.
+ * In this _sw() mode we're expecting network round-trip delay, not just UNIX socket delay. */
+ xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
- cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
- chain->window,
- chain->gc,
- image->base.row_pitches[0] / 4,
- chain->extent.height,
- 0,0,0,24,
- image->base.row_pitches[0] * chain->extent.height,
- myptr);
+ xcb_void_cookie_t cookie;
+ void *myptr = image->base.cpu_map;
+ size_t hdr_len = sizeof(xcb_put_image_request_t);
+ int stride_b = image->base.row_pitches[0];
+ size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
+ uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
+
+ if (size < max_req_len) {
+ cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
+ chain->window,
+ chain->gc,
+ image->base.row_pitches[0] / 4,
+ chain->extent.height,
+ 0,0,0,chain->depth,
+ image->base.row_pitches[0] * chain->extent.height,
+ image->base.cpu_map);
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ } else {
+ int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
+ int y_start = 0;
+ int y_todo = chain->extent.height;
+ while (y_todo) {
+ int this_lines = MIN2(num_lines, y_todo);
+ cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
+ chain->window,
+ chain->gc,
+ image->base.row_pitches[0] / 4,
+ this_lines,
+ 0,y_start,0,chain->depth,
+ this_lines * stride_b,
+ (const uint8_t *)myptr + (y_start * stride_b));
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ y_start += this_lines;
+ y_todo -= this_lines;
+ }
+ }
- chain->base.wsi->UnmapMemory(chain->base.device, image->base.memory);
- xcb_discard_reply(chain->conn, cookie.sequence);
xcb_flush(chain->conn);
- return x11_swapchain_result(chain, VK_SUCCESS);
+
+   /* There is no queued present here.
+    * Immediately let the application acquire again, but query geometry first so
+    * we can report OUT_OF_DATE on resize. */
+ xcb_generic_error_t *err;
+
+ xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
+ VkResult result = VK_SUCCESS;
+ if (geom) {
+ if (chain->extent.width != geom->width ||
+ chain->extent.height != geom->height)
+ result = VK_ERROR_OUT_OF_DATE_KHR;
+ } else {
+ result = VK_ERROR_SURFACE_LOST_KHR;
+ }
+ free(err);
+ free(geom);
+
+ wsi_queue_push(&chain->acquire_queue, image_index);
+ return result;
+}
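To put the request-size check above in concrete terms (a worked example, not part of the patch): xcb_get_maximum_request_length() reports the limit in 4-byte units, and `size` is computed in the same units. A 1920x1080 window with a 4-byte pixel format has a stride of 7680 bytes and a payload of about 8.3 MB, roughly 2.1M units, which fits under the ~4M-unit (16 MiB) limit typically advertised with BIG-REQUESTS, so a single xcb_put_image() is issued. Without BIG-REQUESTS the limit is usually 65535 units (about 256 KiB), so the fallback path sends chunks of num_lines = ((65535 << 2) - hdr_len) / 7680, i.e. about 34 rows per request.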
+
+static void
+x11_capture_trace(struct x11_swapchain *chain)
+{
+#ifdef XCB_KEYSYMS_AVAILABLE
+ VK_FROM_HANDLE(vk_device, device, chain->base.device);
+ if (!device->physical->instance->trace_mode)
+ return;
+
+ xcb_query_keymap_cookie_t keys_cookie = xcb_query_keymap(chain->conn);
+
+ xcb_generic_error_t *error = NULL;
+ xcb_query_keymap_reply_t *keys = xcb_query_keymap_reply(chain->conn, keys_cookie, &error);
+ if (error) {
+ free(error);
+ return;
+ }
+
+ xcb_key_symbols_t *key_symbols = xcb_key_symbols_alloc(chain->conn);
+ xcb_keycode_t *keycodes = xcb_key_symbols_get_keycode(key_symbols, XK_F1);
+ if (keycodes) {
+ xcb_keycode_t keycode = keycodes[0];
+ free(keycodes);
+
+ simple_mtx_lock(&device->trace_mtx);
+ bool capture_key_pressed = keys->keys[keycode / 8] & (1u << (keycode % 8));
+ device->trace_hotkey_trigger = capture_key_pressed && (capture_key_pressed != chain->base.capture_key_pressed);
+ chain->base.capture_key_pressed = capture_key_pressed;
+ simple_mtx_unlock(&device->trace_mtx);
+ }
+
+ xcb_key_symbols_free(key_symbols);
+ free(keys);
+#endif
+}
+
+/* Use a trivial helper to make it obvious in the code
+ * where we intend to access chain->status outside the thread lock. */
+static VkResult x11_swapchain_read_status_atomic(struct x11_swapchain *chain)
+{
+ return chain->status;
+}
+
+/**
+ * Decides whether an early wait on buffer fences before buffer submission is required.
+ * That is the case for mailbox mode: otherwise the latest image in the queue might not be
+ * fully rendered at present time, which could lead to a missed frame. This is an Xorg issue.
+ *
+ * On Wayland compositors, this used to be a problem as well, but not anymore,
+ * and this check assumes that Mesa is running on a reasonable compositor.
+ * The wait behavior can be forced by setting the 'vk_xwayland_wait_ready' DRIConf option to true.
+ * Some drivers, e.g. Venus, may still want to require wait_ready by default,
+ * so the option is kept around for now.
+ *
+ * On Xwayland, we don't know at this point whether the tearing protocol is or can be used,
+ * so we have to make the MAILBOX assumption.
+ */
+static bool
+x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
+ struct wsi_x11_connection *wsi_conn,
+ VkPresentModeKHR present_mode)
+{
+ if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
+ return false;
+ }
+
+ switch (present_mode) {
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ return true;
+ case VK_PRESENT_MODE_IMMEDIATE_KHR:
+ return wsi_conn->is_xwayland;
+ default:
+ return false;
+ }
+}
+
+/* This matches Wayland. */
+#define X11_SWAPCHAIN_MAILBOX_IMAGES 4
+
+static bool
+x11_requires_mailbox_image_count(const struct wsi_device *device,
+ struct wsi_x11_connection *wsi_conn,
+ VkPresentModeKHR present_mode)
+{
+ /* If we're resorting to wait for fences, we're assuming a MAILBOX-like model,
+ * and we should allocate accordingly.
+ *
+ * One potential concern here is IMMEDIATE mode on Wayland.
+ * This situation could arise:
+ * - Fullscreen FLIP mode
+ * - Compositor does not support tearing protocol (we cannot know this here)
+ *
+    * With 3 images, during the window between latch and flip, there is only one image left to the app,
+ * so peak FPS may not be reached if the window between latch and flip is large,
+ * but tests on contemporary compositors suggest this effect is minor.
+ * Frame rate in the thousands can easily be reached.
+ *
+ * There are pragmatic reasons to expose 3 images for IMMEDIATE on Xwl.
+    * - minImageCount is not intended as a tool to tune performance; its intent is to signal forward progress.
+ * Our X11 and WL implementations do so for pragmatic reasons due to sync acquire interacting poorly with 2 images.
+ * A jump from 3 to 4 is at best a minor improvement which only affects applications
+ * running at extremely high frame rates, way beyond the monitor refresh rate.
+ * On the other hand, lowering minImageCount to 2 would break the fundamental idea of MAILBOX
+ * (and IMMEDIATE without tear), since FPS > refresh rate would not be possible.
+ *
+ * - Several games developed for other platforms and other Linux WSI implementations
+ * do not expect that image counts arbitrarily change when changing present mode,
+ * and will crash when Mesa does so.
+ * There are several games using the strict_image_count drirc to work around this,
+ * and it would be good to be friendlier in the first place, so we don't have to work around more games.
+ * IMMEDIATE is a common presentation mode on those platforms, but MAILBOX is more Wayland-centric in nature,
+ * so increasing image count for that mode is more reasonable.
+ *
+ * - IMMEDIATE expects tearing, and when tearing, 3 images are more than enough.
+ *
+ * - With EXT_swapchain_maintenance1, toggling between FIFO / IMMEDIATE (used extensively by D3D layering)
+ * would require the application to allocate >3 images, which is unfortunate for memory usage,
+ * and potentially disastrous for latency unless KHR_present_wait is used.
+ */
+ return x11_needs_wait_for_fences(device, wsi_conn, present_mode) ||
+ present_mode == VK_PRESENT_MODE_MAILBOX_KHR;
}
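+
+/* Illustrative numbers (not taken from this change): for a MAILBOX swapchain that
+ * requested minImageCount = 2, the rule above bumps the allocation to
+ *
+ *    MAX2(x11_get_min_image_count(...), X11_SWAPCHAIN_MAILBOX_IMAGES) == 4
+ *
+ * images (see x11_get_min_image_count_for_present_mode() further down), while a
+ * plain FIFO swapchain on Xorg keeps the driver's usual minimum.
+ */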
+
+/**
+ * Send image to the X server for presentation at target_msc.
+ */
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
- uint64_t target_msc)
+ uint64_t target_msc, VkPresentModeKHR present_mode)
{
+ x11_capture_trace(chain);
+
+ VkResult result;
if (chain->base.wsi->sw && !chain->has_mit_shm)
- return x11_present_to_x11_sw(chain, image_index, target_msc);
- return x11_present_to_x11_dri3(chain, image_index, target_msc);
+ result = x11_present_to_x11_sw(chain, image_index);
+ else
+ result = x11_present_to_x11_dri3(chain, image_index, target_msc, present_mode);
+
+ if (result < 0)
+ x11_swapchain_notify_error(chain, result);
+ else
+ x11_notify_pending_present(chain, &chain->images[image_index]);
+
+ return result;
}
static VkResult
+x11_release_images(struct wsi_swapchain *wsi_chain,
+ uint32_t count, const uint32_t *indices)
+{
+ struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
+ if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
+ return chain->status;
+
+ /* If we're using implicit sync, push images to the acquire queue */
+ if (!chain->base.image_info.explicit_sync) {
+ for (uint32_t i = 0; i < count; i++) {
+ uint32_t index = indices[i];
+ assert(index < chain->base.image_count);
+ wsi_queue_push(&chain->acquire_queue, index);
+ }
+ }
+
+ return VK_SUCCESS;
+}
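+
+/* Minimal usage sketch, assuming EXT_swapchain_maintenance1 semantics: an image
+ * index the app acquired but never presented can be handed back as
+ *
+ *    uint32_t unused = acquired_index;   // hypothetical caller variable
+ *    x11_release_images(&chain->base, 1, &unused);
+ *
+ * which simply requeues it on acquire_queue. With explicit sync there is no
+ * acquire queue, so the call does nothing beyond the surface-lost check.
+ */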
+
+static void
+x11_set_present_mode(struct wsi_swapchain *wsi_chain,
+ VkPresentModeKHR mode)
+{
+ struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
+ chain->base.present_mode = mode;
+}
+
+/**
+ * Acquire a ready-to-use image from the swapchain.
+ *
+ * This usually means that the image is not waiting on presentation and that the
+ * image has been released by the X server to be used again by the consumer.
+ */
+static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
const VkAcquireNextImageInfoKHR *info,
uint32_t *image_index)
@@ -1232,135 +1716,283 @@ x11_acquire_next_image(struct wsi_swapchain *anv_chain,
uint64_t timeout = info->timeout;
/* If the swapchain is in an error state, don't go any further. */
- if (chain->status < 0)
- return chain->status;
+ VkResult result = x11_swapchain_read_status_atomic(chain);
+ if (result < 0)
+ return result;
- if (chain->base.wsi->sw && !chain->has_mit_shm) {
- *image_index = 0;
- return VK_SUCCESS;
+ if (chain->base.image_info.explicit_sync) {
+ result = x11_wait_for_explicit_sync_release_submission(chain, timeout,
+ image_index);
+ } else {
+ result = wsi_queue_pull(&chain->acquire_queue,
+ image_index, timeout);
}
- if (chain->has_acquire_queue) {
- return x11_acquire_next_image_from_queue(chain, image_index, timeout);
+
+ if (result == VK_TIMEOUT)
+ return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
+
+ if (result < 0) {
+ pthread_mutex_lock(&chain->thread_state_lock);
+ result = x11_swapchain_result(chain, result);
+ pthread_mutex_unlock(&chain->thread_state_lock);
} else {
- return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
+ result = x11_swapchain_read_status_atomic(chain);
}
+
+ if (result < 0)
+ return result;
+
+ assert(*image_index < chain->base.image_count);
+ if (chain->images[*image_index].shm_fence &&
+ !chain->base.image_info.explicit_sync)
+ xshmfence_await(chain->images[*image_index].shm_fence);
+
+ return result;
}
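+
+/* Illustrative call (arguments made up, other members elided): a non-blocking acquire
+ *
+ *    VkAcquireNextImageInfoKHR info = { .timeout = 0 };
+ *    VkResult r = x11_acquire_next_image(&chain->base, &info, &index);
+ *
+ * maps an internal VK_TIMEOUT to VK_NOT_READY as required for a zero timeout,
+ * while a non-zero timeout keeps VK_TIMEOUT. Implicit-sync chains additionally
+ * wait on the image's xshmfence before returning the index to the caller.
+ */
+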
+#define MAX_DAMAGE_RECTS 64
+
+/**
+ * Queue a new presentation of an image that was previously acquired by the
+ * consumer.
+ *
+ * Note that in immediate presentation mode this does not really queue the
+ * presentation but directly asks the X server to show it.
+ */
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
uint32_t image_index,
+ uint64_t present_id,
const VkPresentRegionKHR *damage)
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+ xcb_xfixes_region_t update_area = 0;
/* If the swapchain is in an error state, don't go any further. */
- if (chain->status < 0)
- return chain->status;
-
- chain->images[image_index].busy = true;
- if (chain->has_present_queue) {
- wsi_queue_push(&chain->present_queue, image_index);
- return chain->status;
- } else {
- return x11_present_to_x11(chain, image_index, 0);
+ VkResult status = x11_swapchain_read_status_atomic(chain);
+ if (status < 0)
+ return status;
+
+ if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
+ damage->rectangleCount <= MAX_DAMAGE_RECTS) {
+ xcb_rectangle_t rects[MAX_DAMAGE_RECTS];
+
+ update_area = chain->images[image_index].update_region;
+ for (unsigned i = 0; i < damage->rectangleCount; i++) {
+ const VkRectLayerKHR *rect = &damage->pRectangles[i];
+ assert(rect->layer == 0);
+ rects[i].x = rect->offset.x;
+ rects[i].y = rect->offset.y;
+ rects[i].width = rect->extent.width;
+ rects[i].height = rect->extent.height;
+ }
+ xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
}
+ chain->images[image_index].update_area = update_area;
+ chain->images[image_index].present_id = present_id;
+ /* With EXT_swapchain_maintenance1, the present mode can change per present. */
+ chain->images[image_index].present_mode = chain->base.present_mode;
+
+ wsi_queue_push(&chain->present_queue, image_index);
+ return x11_swapchain_read_status_atomic(chain);
}
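+
+/* For illustration (values made up): a damage list of two rectangles
+ *
+ *    VkRectLayerKHR rects[2] = {
+ *       { .offset = {0, 0},   .extent = {256, 256}, .layer = 0 },
+ *       { .offset = {256, 0}, .extent = {128, 128}, .layer = 0 },
+ *    };
+ *
+ * is converted 1:1 into xcb_rectangle_t entries and attached to the image's
+ * XFixes region above. A list with more than MAX_DAMAGE_RECTS entries, or no
+ * damage at all, leaves update_area at 0, i.e. the whole surface is presented.
+ */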
-static bool
-x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
- struct wsi_x11_connection *wsi_conn,
- VkPresentModeKHR present_mode)
+/**
+ * The number of images that are not owned by X11:
+ * (1) images owned by the app, or
+ * (2) images waiting for the app to take ownership through an acquire, or
+ * (3) images in the present queue waiting for the FIFO thread to present to X11.
+ */
+static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
{
- if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
- return false;
- }
+ return chain->base.image_count - chain->sent_image_count;
+}
- switch (present_mode) {
- case VK_PRESENT_MODE_MAILBOX_KHR:
- return true;
- case VK_PRESENT_MODE_IMMEDIATE_KHR:
- return wsi_conn->is_xwayland;
- default:
- return false;
+/* This thread is responsible for pumping PRESENT replies.
+ * This is done in a separate thread from the X11 presentation thread
+ * to be able to support non-blocking modes like IMMEDIATE and MAILBOX.
+ * Frame completion events can happen at any time, and we need to handle
+ * the events as soon as they come in to have a quality implementation.
+ * The presentation thread may go to sleep waiting for new presentation events to come in,
+ * and it cannot wait for both X events and application events at the same time.
+ * If we only cared about FIFO, this thread wouldn't be very useful.
+ * An earlier implementation of the X11 WSI had a single FIFO thread that blocked on X events after presenting.
+ * For IMMEDIATE and MAILBOX, the application thread pumped the event queue, which caused a lot of pain
+ * when trying to deal with present wait.
+ */
+static void *
+x11_manage_event_queue(void *state)
+{
+ struct x11_swapchain *chain = state;
+ u_thread_setname("WSI swapchain event");
+
+ /* While there is an outstanding IDLE we should wait for it.
+ * In FLIP modes at most one image will not be driver owned eventually.
+ * In BLIT modes, we expect that all images will eventually be driver owned,
+ * but we don't know which mode is being used. */
+ unsigned forward_progress_guaranteed_acquired_images = chain->base.image_count - 1;
+
+ pthread_mutex_lock(&chain->thread_state_lock);
+
+ while (chain->status >= 0) {
+ /* This thread should only go to sleep waiting for X events when we know there are pending events.
+ * We expect COMPLETION events when there is at least one image marked as present_queued.
+ * We also expect IDLE events, but we only consider waiting for them when all images are busy,
+ * and the application has fewer than N images acquired. */
+
+ bool assume_forward_progress = false;
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ if (chain->images[i].present_queued_count != 0) {
+ /* We must pump through a present wait and unblock FIFO thread if using FIFO mode. */
+ assume_forward_progress = true;
+ break;
+ }
+ }
+
+ if (!assume_forward_progress && !chain->base.image_info.explicit_sync) {
+ /* If true, application expects acquire (IDLE) to happen in finite time. */
+ assume_forward_progress = x11_driver_owned_images(chain) <
+ forward_progress_guaranteed_acquired_images;
+ }
+
+ if (assume_forward_progress) {
+ /* Only yield lock when blocking on X11 event. */
+ pthread_mutex_unlock(&chain->thread_state_lock);
+ xcb_generic_event_t *event =
+ xcb_wait_for_special_event(chain->conn, chain->special_event);
+ pthread_mutex_lock(&chain->thread_state_lock);
+
+ /* Re-check status since we dropped the lock while waiting for X. */
+ VkResult result = chain->status;
+
+ if (result >= 0) {
+ if (event) {
+ /* Queue thread will be woken up if anything interesting happened in handler.
+ * Queue thread blocks on:
+ * - Presentation events completing
+ * - Presentation requests from application
+ * - WaitForFence workaround if applicable */
+ result = x11_handle_dri3_present_event(chain, (void *) event);
+ } else {
+ result = VK_ERROR_SURFACE_LOST_KHR;
+ }
+ }
+
+ /* Updates chain->status and wakes up threads as necessary on error. */
+ x11_swapchain_result(chain, result);
+ free(event);
+ } else {
+ /* Nothing important to do, go to sleep until queue thread wakes us up. */
+ pthread_cond_wait(&chain->thread_state_cond, &chain->thread_state_lock);
+ }
}
+
+ pthread_mutex_unlock(&chain->thread_state_lock);
+ return NULL;
}
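+
+/* Worked example with made-up numbers: for a 4-image swapchain using implicit
+ * sync, forward_progress_guaranteed_acquired_images is 3. If the X server still
+ * holds 2 images (sent_image_count == 2), the driver owns 4 - 2 = 2 < 3 images,
+ * so the loop above assumes the application is waiting on an IDLE event and
+ * blocks in xcb_wait_for_special_event(). Once all four images are back in
+ * driver ownership and nothing is present_queued, it parks on
+ * thread_state_cond instead.
+ */
+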
+/**
+ * Presentation thread.
+ *
+ * Runs in a separate thread, blocks and reacts to queued images on the
+ * present-queue.
+ *
+ * This must be a thread since we have to block in two cases:
+ * - FIFO:
+ * We must wait for the previous presentation to complete
+ * in some way so we can compute the target MSC.
+ * - WaitForFence workaround:
+ * In some cases, we need to wait for the image to complete rendering before submitting it to X.
+ */
static void *
-x11_manage_fifo_queues(void *state)
+x11_manage_present_queue(void *state)
{
struct x11_swapchain *chain = state;
struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
+ wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
VkResult result = VK_SUCCESS;
- assert(chain->has_present_queue);
-
u_thread_setname("WSI swapchain queue");
- while (chain->status >= 0) {
- /* We can block here unconditionally because after an image was sent to
- * the server (later on in this loop) we ensure at least one image is
- * acquirable by the consumer or wait there on such an event.
- */
+ uint64_t target_msc = 0;
+
+ while (x11_swapchain_read_status_atomic(chain) >= 0) {
uint32_t image_index = 0;
- result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
- assert(result != VK_TIMEOUT);
- if (result < 0) {
- goto fail;
- } else if (chain->status < 0) {
- /* The status can change underneath us if the swapchain is destroyed
- * from another thread.
- */
- return NULL;
+ {
+ MESA_TRACE_SCOPE("pull present queue");
+ result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
+ assert(result != VK_TIMEOUT);
}
+ /* The status can change underneath us if the swapchain is destroyed
+ * from another thread. */
+ if (result >= 0)
+ result = x11_swapchain_read_status_atomic(chain);
+ if (result < 0)
+ break;
+
+ VkPresentModeKHR present_mode = chain->images[image_index].present_mode;
+
if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
- chain->base.present_mode)) {
+ present_mode) &&
+ /* not necessary with explicit sync */
+ !chain->base.image_info.explicit_sync) {
+ MESA_TRACE_SCOPE("wait fence");
result = chain->base.wsi->WaitForFences(chain->base.device, 1,
- &chain->base.fences[image_index],
- true, UINT64_MAX);
+ &chain->base.fences[image_index],
+ true, UINT64_MAX);
if (result != VK_SUCCESS) {
result = VK_ERROR_OUT_OF_DATE_KHR;
- goto fail;
+ break;
}
}
- uint64_t target_msc = 0;
- if (chain->has_acquire_queue)
- target_msc = chain->last_present_msc + 1;
+ pthread_mutex_lock(&chain->thread_state_lock);
- result = x11_present_to_x11(chain, image_index, target_msc);
- if (result < 0)
- goto fail;
+ /* In IMMEDIATE and MAILBOX modes, there is a risk that we have exhausted the presentation queue,
+ * since IDLE can arrive multiple times before we observe a COMPLETE. */
+ while (chain->status >= 0 &&
+ chain->images[image_index].present_queued_count ==
+ ARRAY_SIZE(chain->images[image_index].pending_completions)) {
+ pthread_cond_wait(&chain->thread_state_cond, &chain->thread_state_lock);
+ }
- if (chain->has_acquire_queue) {
- /* Wait for our presentation to occur and ensure we have at least one
- * image that can be acquired by the client afterwards. This ensures we
- * can pull on the present-queue on the next loop.
- */
- while (chain->images[image_index].present_queued ||
- chain->sent_image_count == chain->base.image_count) {
- xcb_generic_event_t *event =
- xcb_wait_for_special_event(chain->conn, chain->special_event);
- if (!event) {
- result = VK_ERROR_OUT_OF_DATE_KHR;
- goto fail;
- }
+ if (chain->status < 0) {
+ pthread_mutex_unlock(&chain->thread_state_lock);
+ break;
+ }
+
+ result = x11_present_to_x11(chain, image_index, target_msc, present_mode);
- result = x11_handle_dri3_present_event(chain, (void *)event);
- /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
- result = x11_swapchain_result(chain, result);
- free(event);
- if (result < 0)
- goto fail;
+ if (result < 0) {
+ pthread_mutex_unlock(&chain->thread_state_lock);
+ break;
+ }
+
+ if (present_mode == VK_PRESENT_MODE_FIFO_KHR ||
+ present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
+ MESA_TRACE_SCOPE("wait present");
+
+ while (chain->status >= 0 && chain->images[image_index].present_queued_count != 0) {
+ /* In FIFO mode, we need to make sure we observe a COMPLETE before queueing up
+ * another present. */
+ pthread_cond_wait(&chain->thread_state_cond, &chain->thread_state_lock);
}
+
+ /* If the next present is not FIFO, we still need to ensure we don't override that
+ * present. If it is FIFO, we need to ensure the target MSC is larger than the COMPLETED frame's. */
+ target_msc = chain->last_present_msc + 1;
}
+
+ pthread_mutex_unlock(&chain->thread_state_lock);
}
-fail:
+ pthread_mutex_lock(&chain->thread_state_lock);
x11_swapchain_result(chain, result);
- if (chain->has_acquire_queue)
+ if (!chain->base.image_info.explicit_sync)
wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
+ pthread_mutex_unlock(&chain->thread_state_lock);
return NULL;
}
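+
+/* Sketch of the FIFO pacing above, with hypothetical values: after a FIFO present
+ * of image N completes with last_present_msc == 1000, the loop sets
+ * target_msc = 1001 before pulling the next image off the present queue, so the
+ * following PresentPixmap request cannot be displayed before MSC 1001.
+ * IMMEDIATE and MAILBOX presents skip the wait and leave target_msc unchanged,
+ * so they are handed to the server without waiting for a COMPLETE event.
+ */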
@@ -1392,30 +2024,24 @@ static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- const uint64_t *const *modifiers,
- const uint32_t *num_modifiers,
- int num_tranches, struct x11_image *image)
+ struct x11_image *image)
{
xcb_void_cookie_t cookie;
+ xcb_generic_error_t *error = NULL;
VkResult result;
uint32_t bpp = 32;
int fence_fd;
- if (chain->base.use_prime_blit) {
- bool use_modifier = num_tranches > 0;
- result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier, &image->base);
- } else {
- result = wsi_create_native_image(&chain->base, pCreateInfo,
- num_tranches, num_modifiers, modifiers,
- chain->has_mit_shm ? &alloc_shm : NULL,
- &image->base);
- }
- if (result < 0)
+ result = wsi_create_image(&chain->base, &chain->base.image_info,
+ &image->base);
+ if (result != VK_SUCCESS)
return result;
+ image->update_region = xcb_generate_id(chain->conn);
+ xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);
+
if (chain->base.wsi->sw) {
if (!chain->has_mit_shm) {
- image->busy = false;
return VK_SUCCESS;
}
@@ -1443,6 +2069,18 @@ x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
/* If the image has a modifier, we must have DRI3 v1.2. */
assert(chain->has_dri3_modifiers);
+ /* XCB requires an array of file descriptors but we only have one */
+ int fds[4] = { -1, -1, -1, -1 };
+ for (int i = 0; i < image->base.num_planes; i++) {
+ fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
+ if (fds[i] == -1) {
+ for (int j = 0; j < i; j++)
+ close(fds[j]);
+
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ }
+
cookie =
xcb_dri3_pixmap_from_buffers_checked(chain->conn,
image->pixmap,
@@ -1460,13 +2098,18 @@ x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
image->base.offsets[3],
chain->depth, bpp,
image->base.drm_modifier,
- image->base.fds);
+ fds);
} else
#endif
{
/* Without passing modifiers, we can't have multi-plane RGB images. */
assert(image->base.num_planes == 1);
+ /* XCB will take ownership of the FD we pass it. */
+ int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
+ if (fd == -1)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
cookie =
xcb_dri3_pixmap_from_buffer_checked(chain->conn,
image->pixmap,
@@ -1475,15 +2118,35 @@ x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
pCreateInfo->imageExtent.width,
pCreateInfo->imageExtent.height,
image->base.row_pitches[0],
- chain->depth, bpp,
- image->base.fds[0]);
+ chain->depth, bpp, fd);
}
- xcb_discard_reply(chain->conn, cookie.sequence);
+ error = xcb_request_check(chain->conn, cookie);
+ if (error != NULL) {
+ free(error);
+ goto fail_image;
+ }
- /* XCB has now taken ownership of the FDs. */
- for (int i = 0; i < image->base.num_planes; i++)
- image->base.fds[i] = -1;
+#ifdef HAVE_DRI3_EXPLICIT_SYNC
+ if (chain->base.image_info.explicit_sync) {
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ image->dri3_syncobj[i] = xcb_generate_id(chain->conn);
+ int fd = dup(image->base.explicit_sync[i].fd);
+ if (fd < 0)
+ goto fail_image;
+
+ cookie = xcb_dri3_import_syncobj_checked(chain->conn,
+ image->dri3_syncobj[i],
+ chain->window,
+ fd /* libxcb closes the fd */);
+ error = xcb_request_check(chain->conn, cookie);
+ if (error != NULL) {
+ free(error);
+ goto fail_image;
+ }
+ }
+ }
+#endif
out_fence:
fence_fd = xshmfence_alloc_shm();
@@ -1501,7 +2164,6 @@ out_fence:
false,
fence_fd);
- image->busy = false;
xshmfence_trigger(image->shm_fence);
return VK_SUCCESS;
@@ -1513,9 +2175,10 @@ fail_pixmap:
cookie = xcb_free_pixmap(chain->conn, image->pixmap);
xcb_discard_reply(chain->conn, cookie.sequence);
+fail_image:
wsi_destroy_image(&chain->base, &image->base);
- return result;
+ return VK_ERROR_INITIALIZATION_FAILED;
}
static void
@@ -1525,13 +2188,25 @@ x11_image_finish(struct x11_swapchain *chain,
{
xcb_void_cookie_t cookie;
- if (!chain->base.wsi->sw) {
+ if (!chain->base.wsi->sw || chain->has_mit_shm) {
cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
xcb_discard_reply(chain->conn, cookie.sequence);
xshmfence_unmap_shm(image->shm_fence);
cookie = xcb_free_pixmap(chain->conn, image->pixmap);
xcb_discard_reply(chain->conn, cookie.sequence);
+
+ cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
+ xcb_discard_reply(chain->conn, cookie.sequence);
+
+#ifdef HAVE_DRI3_EXPLICIT_SYNC
+ if (chain->base.image_info.explicit_sync) {
+ for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
+ cookie = xcb_dri3_free_syncobj(chain->conn, image->dri3_syncobj[i]);
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ }
+ }
+#endif
}
wsi_destroy_image(&chain->base, &image->base);
@@ -1542,10 +2217,24 @@ x11_image_finish(struct x11_swapchain *chain,
}
static void
+wsi_x11_recompute_dri3_modifier_hash(blake3_hash *hash, const struct wsi_drm_image_params *params)
+{
+ mesa_blake3 ctx;
+ _mesa_blake3_init(&ctx);
+ _mesa_blake3_update(&ctx, &params->num_modifier_lists, sizeof(params->num_modifier_lists));
+ for (uint32_t i = 0; i < params->num_modifier_lists; i++) {
+ _mesa_blake3_update(&ctx, &i, sizeof(i));
+ _mesa_blake3_update(&ctx, params->modifiers[i],
+ params->num_modifiers[i] * sizeof(*params->modifiers[i]));
+ }
+ _mesa_blake3_update(&ctx, &params->same_gpu, sizeof(params->same_gpu));
+ _mesa_blake3_final(&ctx, *hash);
+}
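+
+/* Sketch of how this hash is consumed (the call site is outside this hunk): the
+ * value computed at swapchain creation is stored in chain->dri3_modifier_hash,
+ * and wsi_x11_swapchain_query_dri3_modifiers_changed() below recomputes it from
+ * a fresh modifier query. A mismatch can then be surfaced to the application,
+ * e.g. as
+ *
+ *    if (wsi_x11_swapchain_query_dri3_modifiers_changed(chain))
+ *       result = VK_SUBOPTIMAL_KHR;   // illustrative use only
+ *
+ * so the swapchain gets recreated after the supported modifier set changed,
+ * for instance after a hotplug.
+ */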
+
+static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
xcb_connection_t *conn, xcb_window_t window,
uint8_t depth, uint8_t bpp,
- VkCompositeAlphaFlagsKHR vk_alpha,
uint64_t **modifiers_in, uint32_t *num_modifiers_in,
uint32_t *num_tranches_in,
const VkAllocationCallbacks *pAllocator)
@@ -1618,6 +2307,51 @@ out:
*num_tranches_in = 0;
}
+static bool
+wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain)
+{
+ const struct wsi_device *wsi_device = chain->base.wsi;
+
+ if (wsi_device->sw || !wsi_device->supports_modifiers)
+ return false;
+
+ struct wsi_drm_image_params drm_image_params;
+ uint64_t *modifiers[2] = {NULL, NULL};
+ uint32_t num_modifiers[2] = {0, 0};
+
+ struct wsi_x11_connection *wsi_conn =
+ wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
+
+ xcb_get_geometry_reply_t *geometry =
+ xcb_get_geometry_reply(chain->conn, xcb_get_geometry(chain->conn, chain->window), NULL);
+ if (geometry == NULL)
+ return false;
+ uint32_t bit_depth = geometry->depth;
+ free(geometry);
+
+ drm_image_params = (struct wsi_drm_image_params){
+ .base.image_type = WSI_IMAGE_TYPE_DRM,
+ .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, chain->conn),
+ .explicit_sync = chain->base.image_info.explicit_sync,
+ };
+
+ wsi_x11_get_dri3_modifiers(wsi_conn, chain->conn, chain->window, bit_depth, 32,
+ modifiers, num_modifiers,
+ &drm_image_params.num_modifier_lists,
+ &wsi_device->instance_alloc);
+
+ drm_image_params.num_modifiers = num_modifiers;
+ drm_image_params.modifiers = (const uint64_t **)modifiers;
+
+ blake3_hash hash;
+ wsi_x11_recompute_dri3_modifier_hash(&hash, &drm_image_params);
+
+ for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
+ vk_free(&wsi_device->instance_alloc, modifiers[i]);
+
+ return memcmp(hash, chain->dri3_modifier_hash, sizeof(hash)) != 0;
+}
+
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
const VkAllocationCallbacks *pAllocator)
@@ -1625,16 +2359,19 @@ x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
xcb_void_cookie_t cookie;
- if (chain->has_present_queue) {
- chain->status = VK_ERROR_OUT_OF_DATE_KHR;
- /* Push a UINT32_MAX to wake up the manager */
- wsi_queue_push(&chain->present_queue, UINT32_MAX);
- pthread_join(chain->queue_manager, NULL);
+ pthread_mutex_lock(&chain->thread_state_lock);
+ chain->status = VK_ERROR_OUT_OF_DATE_KHR;
+ pthread_cond_broadcast(&chain->thread_state_cond);
+ pthread_mutex_unlock(&chain->thread_state_lock);
- if (chain->has_acquire_queue)
- wsi_queue_destroy(&chain->acquire_queue);
- wsi_queue_destroy(&chain->present_queue);
- }
+ /* Push a UINT32_MAX to wake up the manager */
+ wsi_queue_push(&chain->present_queue, UINT32_MAX);
+ pthread_join(chain->queue_manager, NULL);
+ pthread_join(chain->event_manager, NULL);
+
+ if (!chain->base.image_info.explicit_sync)
+ wsi_queue_destroy(&chain->acquire_queue);
+ wsi_queue_destroy(&chain->present_queue);
for (uint32_t i = 0; i < chain->base.image_count; i++)
x11_image_finish(chain, pAllocator, &chain->images[i]);
@@ -1645,6 +2382,11 @@ x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
XCB_PRESENT_EVENT_MASK_NO_EVENT);
xcb_discard_reply(chain->conn, cookie.sequence);
+ pthread_mutex_destroy(&chain->present_progress_mutex);
+ pthread_cond_destroy(&chain->present_progress_cond);
+ pthread_mutex_destroy(&chain->thread_state_lock);
+ pthread_cond_destroy(&chain->thread_state_cond);
+
wsi_swapchain_finish(&chain->base);
vk_free(pAllocator, chain);
@@ -1678,7 +2420,63 @@ wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
free(reply);
}
+static VkResult x11_wait_for_present(struct wsi_swapchain *wsi_chain,
+ uint64_t waitValue,
+ uint64_t timeout)
+{
+ struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
+ struct timespec abs_timespec;
+ uint64_t abs_timeout = 0;
+ if (timeout != 0)
+ abs_timeout = os_time_get_absolute_timeout(timeout);
+
+ /* Need to observe that the swapchain semaphore has been unsignalled,
+ * as this is guaranteed when a present is complete. */
+ VkResult result = wsi_swapchain_wait_for_present_semaphore(
+ &chain->base, waitValue, timeout);
+ if (result != VK_SUCCESS)
+ return result;
+
+ timespec_from_nsec(&abs_timespec, abs_timeout);
+
+ pthread_mutex_lock(&chain->present_progress_mutex);
+ while (chain->present_id < waitValue) {
+ int ret = pthread_cond_timedwait(&chain->present_progress_cond,
+ &chain->present_progress_mutex,
+ &abs_timespec);
+ if (ret == ETIMEDOUT) {
+ result = VK_TIMEOUT;
+ break;
+ }
+ if (ret) {
+ result = VK_ERROR_DEVICE_LOST;
+ break;
+ }
+ }
+ if (result == VK_SUCCESS && chain->present_progress_error)
+ result = chain->present_progress_error;
+ pthread_mutex_unlock(&chain->present_progress_mutex);
+ return result;
+}
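+
+/* Illustrative timeout handling (values made up): a caller waiting for
+ * present_id 42 with a 1 ms budget would issue
+ *
+ *    VkResult r = x11_wait_for_present(&chain->base, 42, 1000000ull);
+ *
+ * which first waits on the swapchain's present semaphore and then sleeps on
+ * present_progress_cond until chain->present_id reaches 42, the absolute
+ * deadline derived from os_time_get_absolute_timeout() expires (VK_TIMEOUT),
+ * or an error has been latched in present_progress_error.
+ */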
+
+static unsigned
+x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
+ struct wsi_x11_connection *wsi_conn,
+ VkPresentModeKHR present_mode)
+{
+ uint32_t min_image_count = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
+ if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode))
+ return MAX2(min_image_count, X11_SWAPCHAIN_MAILBOX_IMAGES);
+ else
+ return min_image_count;
+}
+/**
+ * Create the swapchain.
+ *
+ * Supports the immediate, FIFO and mailbox presentation modes.
+ */
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
VkDevice device,
@@ -1694,22 +2492,36 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+ /* Get the xcb connection from the icd_surface and, from that, our internal struct
+ * representing it.
+ */
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
struct wsi_x11_connection *wsi_conn =
wsi_x11_get_connection(wsi_device, conn);
if (!wsi_conn)
return VK_ERROR_OUT_OF_HOST_MEMORY;
+ /* Get the number of images in our swapchain. This count depends on:
+ * - the requested minimum image count
+ * - device characteristics
+ * - the presentation mode.
+ */
unsigned num_images = pCreateInfo->minImageCount;
- if (wsi_device->x11.strict_imageCount)
- num_images = pCreateInfo->minImageCount;
- else if (x11_needs_wait_for_fences(wsi_device, wsi_conn, present_mode))
- num_images = MAX2(num_images, 5);
- else if (wsi_device->x11.ensure_minImageCount)
- num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));
-
- /* Check for whether or not we have a window up-front */
+ if (!wsi_device->x11.strict_imageCount) {
+ if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode) ||
+ wsi_device->x11.ensure_minImageCount) {
+ unsigned present_mode_images = x11_get_min_image_count_for_present_mode(
+ wsi_device, wsi_conn, pCreateInfo->presentMode);
+ num_images = MAX2(num_images, present_mode_images);
+ }
+ }
+
+ /* Check that we have a window up-front. It is an error to not have one. */
xcb_window_t window = x11_surface_get_window(icd_surface);
+
+ /* Get the geometry of that window. The swapchain's bit depth will be matched to it, and the
+ * chain's image extents should match it so that presentation can use performance-optimizing flips.
+ */
xcb_get_geometry_reply_t *geometry =
xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
if (geometry == NULL)
@@ -1719,14 +2531,96 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
const uint16_t cur_height = geometry->height;
free(geometry);
+ /* Allocate the actual swapchain. The size depends on image count. */
size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
- chain = vk_alloc(pAllocator, size, 8,
+ chain = vk_zalloc(pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (chain == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- result = wsi_swapchain_init(wsi_device, &chain->base, device,
- pCreateInfo, pAllocator);
+ int ret = pthread_mutex_init(&chain->present_progress_mutex, NULL);
+ if (ret != 0) {
+ vk_free(pAllocator, chain);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ ret = pthread_mutex_init(&chain->thread_state_lock, NULL);
+ if (ret != 0) {
+ pthread_mutex_destroy(&chain->present_progress_mutex);
+ vk_free(pAllocator, chain);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ ret = pthread_cond_init(&chain->thread_state_cond, NULL);
+ if (ret != 0) {
+ pthread_mutex_destroy(&chain->present_progress_mutex);
+ pthread_mutex_destroy(&chain->thread_state_lock);
+ vk_free(pAllocator, chain);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ bool bret = wsi_init_pthread_cond_monotonic(&chain->present_progress_cond);
+ if (!bret) {
+ pthread_mutex_destroy(&chain->present_progress_mutex);
+ pthread_mutex_destroy(&chain->thread_state_lock);
+ pthread_cond_destroy(&chain->thread_state_cond);
+ vk_free(pAllocator, chain);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ uint32_t present_caps = 0;
+ xcb_present_query_capabilities_cookie_t present_query_cookie;
+ xcb_present_query_capabilities_reply_t *present_query_reply;
+ present_query_cookie = xcb_present_query_capabilities(conn, window);
+ present_query_reply = xcb_present_query_capabilities_reply(conn, present_query_cookie, NULL);
+ if (present_query_reply) {
+ present_caps = present_query_reply->capabilities;
+ free(present_query_reply);
+ }
+
+ struct wsi_base_image_params *image_params = NULL;
+ struct wsi_cpu_image_params cpu_image_params;
+ struct wsi_drm_image_params drm_image_params;
+ uint64_t *modifiers[2] = {NULL, NULL};
+ uint32_t num_modifiers[2] = {0, 0};
+ if (wsi_device->sw) {
+ cpu_image_params = (struct wsi_cpu_image_params) {
+ .base.image_type = WSI_IMAGE_TYPE_CPU,
+ .alloc_shm = wsi_conn->has_mit_shm ? &alloc_shm : NULL,
+ };
+ image_params = &cpu_image_params.base;
+ } else {
+ drm_image_params = (struct wsi_drm_image_params) {
+ .base.image_type = WSI_IMAGE_TYPE_DRM,
+ .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, conn),
+ .explicit_sync =
+#ifdef HAVE_DRI3_EXPLICIT_SYNC
+ wsi_conn->has_dri3_explicit_sync &&
+ (present_caps & XCB_PRESENT_CAPABILITY_SYNCOBJ) &&
+ wsi_device_supports_explicit_sync(wsi_device),
+#else
+ false,
+#endif
+ };
+ if (wsi_device->supports_modifiers) {
+ wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, bit_depth, 32,
+ modifiers, num_modifiers,
+ &drm_image_params.num_modifier_lists,
+ pAllocator);
+ drm_image_params.num_modifiers = num_modifiers;
+ drm_image_params.modifiers = (const uint64_t **)modifiers;
+
+ wsi_x11_recompute_dri3_modifier_hash(&chain->dri3_modifier_hash, &drm_image_params);
+ }
+ image_params = &drm_image_params.base;
+ }
+
+ result = wsi_swapchain_init(wsi_device, &chain->base, device, pCreateInfo,
+ image_params, pAllocator);
+
+ for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
+ vk_free(pAllocator, modifiers[i]);
+
if (result != VK_SUCCESS)
goto fail_alloc;
@@ -1734,6 +2628,9 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
chain->base.get_wsi_image = x11_get_wsi_image;
chain->base.acquire_next_image = x11_acquire_next_image;
chain->base.queue_present = x11_queue_present;
+ chain->base.wait_for_present = x11_wait_for_present;
+ chain->base.release_images = x11_release_images;
+ chain->base.set_present_mode = x11_set_present_mode;
chain->base.present_mode = present_mode;
chain->base.image_count = num_images;
chain->conn = conn;
@@ -1743,34 +2640,52 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
chain->send_sbc = 0;
chain->sent_image_count = 0;
chain->last_present_msc = 0;
- chain->has_acquire_queue = false;
- chain->has_present_queue = false;
chain->status = VK_SUCCESS;
chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
chain->has_mit_shm = wsi_conn->has_mit_shm;
+ chain->has_async_may_tear = present_caps & XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR;
- if (chain->extent.width != cur_width || chain->extent.height != cur_height)
- chain->status = VK_SUBOPTIMAL_KHR;
+ /* When images in the swapchain don't fit the window, X can still present them, but it won't
+ * happen by flip, only by copy. So this is a suboptimal copy: if the client changed the chain's
+ * extents, X might be able to flip again.
+ */
+ if (!wsi_device->x11.ignore_suboptimal) {
+ if (chain->extent.width != cur_width || chain->extent.height != cur_height)
+ chain->status = VK_SUBOPTIMAL_KHR;
+ }
- /* We used to inherit copy_is_suboptimal from pCreateInfo->oldSwapchain.
- * When it was true, and when the next present was completed with copying,
- * we would return VK_SUBOPTIMAL_KHR and hint the app to reallocate again
- * for no good reason. If all following presents on the surface were
- * completed with copying because of some surface state change, we would
- * always return VK_SUBOPTIMAL_KHR no matter how many times the app had
- * reallocated.
+ /* On a new swapchain this helper variable is set to false. It only has an effect once we have
+ * done at least one flip and gone back to copying afterwards. It is presumed that in this case
+ * there is a high likelihood X could do flips again if the client reallocates a new swapchain.
+ *
+ * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
+ * was true, and when the next present was completed with copying, we would return
+ * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
+ * presents on the surface were completed with copying because of some surface state change, we
+ * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
+ *
+ * Note also that it is questionable in general whether that mechanism is really useful. It is not
+ * clear why, on a change from flipping to copying, we can assume a reallocation has a high chance
+ * of making flips work again per se. In other words, it is not clear why there is a need for
+ * another way to inform clients about suboptimal copies besides forwarding the
+ * 'PresentOptionSuboptimal' complete mode.
*/
chain->copy_is_suboptimal = false;
- if (!wsi_device->sw)
- if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
- chain->base.use_prime_blit = true;
-
+ /* For our swapchain we need to listen to the following Present extension events:
+ * - Configure: Window dimensions changed. Images in the swapchain might need
+ * to be reallocated.
+ * - Complete: An image from our swapchain was presented on the output.
+ * - Idle: An image from our swapchain is no longer accessed by the X
+ * server and can be reused.
+ */
chain->event_id = xcb_generate_id(chain->conn);
- xcb_present_select_input(chain->conn, chain->event_id, chain->window,
- XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
- XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
- XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
+ uint32_t event_mask = XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
+ XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY;
+ if (!chain->base.image_info.explicit_sync)
+ event_mask |= XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY;
+ xcb_present_select_input(chain->conn, chain->event_id, chain->window, event_mask);
/* Create an XCB event queue to hold present events outside of the usual
* application event queue
@@ -1779,6 +2694,7 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
xcb_register_for_special_xge(chain->conn, &xcb_present_id,
chain->event_id, NULL);
+ /* Create the graphics context. */
chain->gc = xcb_generate_id(chain->conn);
if (!chain->gc) {
/* FINISHME: Choose a better error. */
@@ -1793,71 +2709,44 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
(uint32_t []) { 0 });
xcb_discard_reply(chain->conn, cookie.sequence);
- uint64_t *modifiers[2] = {NULL, NULL};
- uint32_t num_modifiers[2] = {0, 0};
- uint32_t num_tranches = 0;
- if (wsi_device->supports_modifiers)
- wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
- pCreateInfo->compositeAlpha,
- modifiers, num_modifiers, &num_tranches,
- pAllocator);
-
uint32_t image = 0;
for (; image < chain->base.image_count; image++) {
result = x11_image_init(device, chain, pCreateInfo, pAllocator,
- (const uint64_t *const *)modifiers,
- num_modifiers, num_tranches,
&chain->images[image]);
if (result != VK_SUCCESS)
goto fail_init_images;
}
- if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
- chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
- x11_needs_wait_for_fences(wsi_device, wsi_conn,
- chain->base.present_mode)) &&
- !chain->base.wsi->sw) {
- chain->has_present_queue = true;
-
- /* Initialize our queues. We make them base.image_count + 1 because we will
- * occasionally use UINT32_MAX to signal the other thread that an error
- * has occurred and we don't want an overflow.
- */
- int ret;
- ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
- if (ret) {
- goto fail_init_images;
- }
-
- if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
- chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
- chain->has_acquire_queue = true;
-
- ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
- if (ret) {
- wsi_queue_destroy(&chain->present_queue);
- goto fail_init_images;
- }
-
- for (unsigned i = 0; i < chain->base.image_count; i++)
- wsi_queue_push(&chain->acquire_queue, i);
- }
+ /* The queues have a length of base.image_count + 1 because we will
+ * occasionally use UINT32_MAX to signal the other thread that an error
+ * has occurred and we don't want an overflow.
+ */
+ ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
+ if (ret) {
+ goto fail_init_images;
+ }
- ret = pthread_create(&chain->queue_manager, NULL,
- x11_manage_fifo_queues, chain);
+ /* Acquire queue is only needed when using implicit sync */
+ if (!chain->base.image_info.explicit_sync) {
+ ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
if (ret) {
wsi_queue_destroy(&chain->present_queue);
- if (chain->has_acquire_queue)
- wsi_queue_destroy(&chain->acquire_queue);
-
goto fail_init_images;
}
+
+ for (unsigned i = 0; i < chain->base.image_count; i++)
+ wsi_queue_push(&chain->acquire_queue, i);
}
- assert(chain->has_present_queue || !chain->has_acquire_queue);
+ ret = pthread_create(&chain->queue_manager, NULL,
+ x11_manage_present_queue, chain);
+ if (ret)
+ goto fail_init_fifo_queue;
- for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
- vk_free(pAllocator, modifiers[i]);
+ ret = pthread_create(&chain->event_manager, NULL,
+ x11_manage_event_queue, chain);
+ if (ret)
+ goto fail_init_event_queue;
/* It is safe to set it here as only one swapchain can be associated with
* the window, and swapchain creation does the association. At this point
@@ -1869,13 +2758,20 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
return VK_SUCCESS;
+fail_init_event_queue:
+ /* Push a UINT32_MAX to wake up the manager */
+ wsi_queue_push(&chain->present_queue, UINT32_MAX);
+ pthread_join(chain->queue_manager, NULL);
+
+fail_init_fifo_queue:
+ wsi_queue_destroy(&chain->present_queue);
+ if (!chain->base.image_info.explicit_sync)
+ wsi_queue_destroy(&chain->acquire_queue);
+
fail_init_images:
for (uint32_t j = 0; j < image; j++)
x11_image_finish(chain, pAllocator, &chain->images[j]);
- for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
- vk_free(pAllocator, modifiers[i]);
-
fail_register:
xcb_unregister_for_special_event(chain->conn, chain->special_event);
@@ -1939,6 +2835,11 @@ wsi_x11_init_wsi(struct wsi_device *wsi_device,
wsi_device->x11.xwaylandWaitReady =
driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
}
+
+ if (driCheckOption(dri_options, "vk_x11_ignore_suboptimal", DRI_BOOL)) {
+ wsi_device->x11.ignore_suboptimal =
+ driQueryOptionb(dri_options, "vk_x11_ignore_suboptimal");
+ }
}
wsi->base.get_support = x11_surface_get_support;