author     Jason Ekstrand <jason@jlekstrand.net>      2021-09-24 12:06:32 -0500
committer  Marge Bot <eric+marge@anholt.net>          2021-10-07 20:51:36 +0000
commit     88a8b937b5f3c9620a3edc90897ab267e00785b4
tree       20f8fdb51901998dd9e5c0cab46bc39c117c8086   /src/intel
parent     f6d52768d636beffb7a9e8e96d127ce2bbecd696
anv: Use the common vk_error and vk_errorf helpers
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13045>
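The change is mechanical throughout: every error return that used the driver-local anv_error()/anv_errorf()/anv_errorfi() macros now goes through the common vk_error()/vk_errorf() helpers from the shared Vulkan runtime, which take the Vulkan object the error relates to (instance, physical device, device, queue, command buffer, pool, or pipeline) rather than an explicit (instance, vk_object_base) pair. The two call shapes, taken from the anv_allocator.c hunk below, look like this:

    /* before: driver-local helper, explicit object pointer */
    return anv_errorf(device, &device->vk.base,
                      VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");

    /* after: common helper, the object argument is enough */
    return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");

A few entry points that previously returned a bare error code gain an ANV_FROM_HANDLE() conversion so they have a device to hand to vk_error().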
Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/vulkan/anv_acceleration_structure.c       27
-rw-r--r--  src/intel/vulkan/anv_allocator.c                    59
-rw-r--r--  src/intel/vulkan/anv_android.c                      25
-rw-r--r--  src/intel/vulkan/anv_batch_chain.c                  24
-rw-r--r--  src/intel/vulkan/anv_cmd_buffer.c                    4
-rw-r--r--  src/intel/vulkan/anv_descriptor_set.c               10
-rw-r--r--  src/intel/vulkan/anv_device.c                      146
-rw-r--r--  src/intel/vulkan/anv_formats.c                      36
-rw-r--r--  src/intel/vulkan/anv_image.c                        21
-rw-r--r--  src/intel/vulkan/anv_pass.c                          2
-rw-r--r--  src/intel/vulkan/anv_perf.c                          2
-rw-r--r--  src/intel/vulkan/anv_pipeline.c                     33
-rw-r--r--  src/intel/vulkan/anv_pipeline_cache.c                2
-rw-r--r--  src/intel/vulkan/anv_private.h                      44
-rw-r--r--  src/intel/vulkan/anv_queue.c                        84
-rw-r--r--  src/intel/vulkan/anv_util.c                         58
-rw-r--r--  src/intel/vulkan/anv_wsi.c                           4
-rw-r--r--  src/intel/vulkan/anv_wsi_display.c                   2
-rw-r--r--  src/intel/vulkan/genX_cmd_buffer.c                   3
-rw-r--r--  src/intel/vulkan/genX_pipeline.c                     6
-rw-r--r--  src/intel/vulkan/genX_query.c                        2
-rw-r--r--  src/intel/vulkan/genX_state.c                        4
-rw-r--r--  src/intel/vulkan/tests/block_pool_grow_first.c       2
-rw-r--r--  src/intel/vulkan/tests/block_pool_no_free.c          2
-rw-r--r--  src/intel/vulkan/tests/state_pool.c                  2
-rw-r--r--  src/intel/vulkan/tests/state_pool_free_list_only.c   2
-rw-r--r--  src/intel/vulkan/tests/state_pool_no_free.c          2
-rw-r--r--  src/intel/vulkan/tests/state_pool_padding.c          2
28 files changed, 255 insertions, 355 deletions
diff --git a/src/intel/vulkan/anv_acceleration_structure.c b/src/intel/vulkan/anv_acceleration_structure.c
index 8bb412af3be..20dc202f4d4 100644
--- a/src/intel/vulkan/anv_acceleration_structure.c
+++ b/src/intel/vulkan/anv_acceleration_structure.c
@@ -83,7 +83,7 @@ anv_CreateAccelerationStructureKHR(
accel = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*accel), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (accel == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &accel->base,
VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR);
@@ -137,49 +137,53 @@ anv_GetDeviceAccelerationStructureCompatibilityKHR(
VkResult
anv_BuildAccelerationStructuresKHR(
- VkDevice device,
+ VkDevice _device,
VkDeferredOperationKHR deferredOperation,
uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
unreachable("Unimplemented");
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
anv_CopyAccelerationStructureKHR(
- VkDevice device,
+ VkDevice _device,
VkDeferredOperationKHR deferredOperation,
const VkCopyAccelerationStructureInfoKHR* pInfo)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
unreachable("Unimplemented");
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
anv_CopyAccelerationStructureToMemoryKHR(
- VkDevice device,
+ VkDevice _device,
VkDeferredOperationKHR deferredOperation,
const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
unreachable("Unimplemented");
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
anv_CopyMemoryToAccelerationStructureKHR(
- VkDevice device,
+ VkDevice _device,
VkDeferredOperationKHR deferredOperation,
const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
unreachable("Unimplemented");
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
anv_WriteAccelerationStructuresPropertiesKHR(
- VkDevice device,
+ VkDevice _device,
uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR* pAccelerationStructures,
VkQueryType queryType,
@@ -187,8 +191,9 @@ anv_WriteAccelerationStructuresPropertiesKHR(
void* pData,
size_t stride)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
unreachable("Unimplemented");
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
}
void
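Note on the hunk above: the unimplemented acceleration-structure entry points used to return a bare error code, so they never dereferenced the VkDevice handle. With the common helpers they need an object to report against, hence the _device rename and the added ANV_FROM_HANDLE() lines, which (roughly, via the usual anv handle-cast macros) expand to something like:

    struct anv_device *device = anv_device_from_handle(_device);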
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 5e35fe35ab4..f00807b6578 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -156,14 +156,14 @@ anv_state_table_init(struct anv_state_table *table,
*/
table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
if (table->fd == -1) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
if (!u_vector_init(&table->cleanups,
round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
128)) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
@@ -197,11 +197,11 @@ anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
/* Make sure that we don't go outside the bounds of the memfd */
if (size > BLOCK_POOL_MEMFD_SIZE)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(table->device, VK_ERROR_OUT_OF_HOST_MEMORY);
cleanup = u_vector_add(&table->cleanups);
if (!cleanup)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(table->device, VK_ERROR_OUT_OF_HOST_MEMORY);
*cleanup = ANV_STATE_TABLE_CLEANUP_INIT;
@@ -214,8 +214,8 @@ anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
map = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, table->fd, 0);
if (map == MAP_FAILED) {
- return anv_errorf(table->device, &table->device->vk.base,
- VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
+ return vk_errorf(table->device, VK_ERROR_OUT_OF_HOST_MEMORY,
+ "mmap failed: %m");
}
cleanup->map = map;
@@ -395,7 +395,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
*/
pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
if (pool->fd == -1)
- return anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ return vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
pool->wrapper_bo = (struct anv_bo) {
.refcount = 1,
@@ -408,7 +408,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
if (!u_vector_init(&pool->mmap_cleanups,
round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
128)) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
@@ -531,8 +531,8 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
MAP_SHARED | MAP_POPULATE, pool->fd,
BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
if (map == MAP_FAILED)
- return anv_errorf(pool->device, &pool->device->vk.base,
- VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
+ return vk_errorf(pool->device, VK_ERROR_MEMORY_MAP_FAILED,
+ "mmap failed: %m");
struct anv_bo *new_bo;
VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
@@ -549,7 +549,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
if (!cleanup) {
munmap(map, size);
anv_device_release_bo(pool->device, new_bo);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pool->device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
cleanup->map = map;
cleanup->size = size;
@@ -1547,13 +1547,13 @@ anv_scratch_pool_get_surf(struct anv_device *device,
}
VkResult
-anv_bo_cache_init(struct anv_bo_cache *cache)
+anv_bo_cache_init(struct anv_bo_cache *cache, struct anv_device *device)
{
util_sparse_array_init(&cache->bo_map, sizeof(struct anv_bo), 1024);
if (pthread_mutex_init(&cache->mutex, NULL)) {
util_sparse_array_finish(&cache->bo_map);
- return anv_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
+ return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
"pthread_mutex_init failed: %m");
}
@@ -1672,7 +1672,7 @@ anv_device_alloc_bo(struct anv_device *device,
}
if (gem_handle == 0)
- return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
struct anv_bo new_bo = {
.name = name,
@@ -1692,8 +1692,7 @@ anv_device_alloc_bo(struct anv_device *device,
new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
if (new_bo.map == MAP_FAILED) {
anv_gem_close(device, new_bo.gem_handle);
- return anv_errorf(device, &device->vk.base,
- VK_ERROR_OUT_OF_HOST_MEMORY,
+ return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
"mmap failed: %m");
}
}
@@ -1728,7 +1727,7 @@ anv_device_alloc_bo(struct anv_device *device,
if (new_bo.map)
anv_gem_munmap(device, new_bo.map, size);
anv_gem_close(device, new_bo.gem_handle);
- return anv_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
} else {
@@ -1778,7 +1777,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
if (!gem_handle)
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
pthread_mutex_lock(&cache->mutex);
@@ -1791,21 +1790,21 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
assert(bo->gem_handle == gem_handle);
if (bo_flags != bo->flags) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"same host pointer imported two different ways");
}
if (bo->has_client_visible_address !=
((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported with and without buffer "
"device address");
}
if (client_address && client_address != intel_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported at two different "
"addresses");
}
@@ -1836,7 +1835,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
if (new_bo.offset == 0) {
anv_gem_close(device, new_bo.gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
} else {
@@ -1877,7 +1876,7 @@ anv_device_import_bo(struct anv_device *device,
uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
if (!gem_handle) {
pthread_mutex_unlock(&cache->mutex);
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
@@ -1902,7 +1901,7 @@ anv_device_import_bo(struct anv_device *device,
if ((bo->flags & EXEC_OBJECT_PINNED) !=
(bo_flags & EXEC_OBJECT_PINNED)) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported two different ways");
}
@@ -1917,21 +1916,21 @@ anv_device_import_bo(struct anv_device *device,
(bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
(bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported on two different heaps");
}
if (bo->has_client_visible_address !=
((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported with and without buffer "
"device address");
}
if (client_address && client_address != intel_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported at two different "
"addresses");
}
@@ -1944,7 +1943,7 @@ anv_device_import_bo(struct anv_device *device,
if (size == (off_t)-1) {
anv_gem_close(device, gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
struct anv_bo new_bo = {
@@ -1969,7 +1968,7 @@ anv_device_import_bo(struct anv_device *device,
if (new_bo.offset == 0) {
anv_gem_close(device, new_bo.gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return anv_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
} else {
@@ -1999,7 +1998,7 @@ anv_device_export_bo(struct anv_device *device,
int fd = anv_gem_handle_to_fd(device, bo->gem_handle);
if (fd < 0)
- return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return vk_error(device, VK_ERROR_TOO_MANY_OBJECTS);
*fd_out = fd;
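One non-mechanical bit in anv_allocator.c: anv_bo_cache_init() has no object in scope when pthread_mutex_init() fails (it used to pass NULL), so the function grows a struct anv_device * parameter and the prototype in anv_private.h changes to match; the call in anv_CreateDevice() passes the device being created (see the anv_device.c and anv_private.h hunks further down). Sketch of the new shape, matching those hunks:

    VkResult anv_bo_cache_init(struct anv_bo_cache *cache,
                               struct anv_device *device);

    /* in anv_CreateDevice() */
    result = anv_bo_cache_init(&device->bo_cache, device);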
diff --git a/src/intel/vulkan/anv_android.c b/src/intel/vulkan/anv_android.c
index 00a11bce6a8..df624abca31 100644
--- a/src/intel/vulkan/anv_android.c
+++ b/src/intel/vulkan/anv_android.c
@@ -466,8 +466,7 @@ anv_image_init_from_gralloc(struct anv_device *device,
};
if (gralloc_info->handle->numFds != 1) {
- return anv_errorf(device, &device->vk.base,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"VkNativeBufferANDROID::handle::numFds is %d, "
"expected 1", gralloc_info->handle->numFds);
}
@@ -493,7 +492,7 @@ anv_image_init_from_gralloc(struct anv_device *device,
0 /* client_address */,
&bo);
if (result != VK_SUCCESS) {
- return anv_errorf(device, &device->vk.base, result,
+ return vk_errorf(device, result,
"failed to import dma-buf from VkNativeBufferANDROID");
}
@@ -509,14 +508,12 @@ anv_image_init_from_gralloc(struct anv_device *device,
anv_info.isl_tiling_flags = ISL_TILING_Y0_BIT;
break;
case -1:
- result = anv_errorf(device, &device->vk.base,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"DRM_IOCTL_I915_GEM_GET_TILING failed for "
"VkNativeBufferANDROID");
goto fail_tiling;
default:
- result = anv_errorf(device, &device->vk.base,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"DRM_IOCTL_I915_GEM_GET_TILING returned unknown "
"tiling %d for VkNativeBufferANDROID", i915_tiling);
goto fail_tiling;
@@ -544,8 +541,7 @@ anv_image_init_from_gralloc(struct anv_device *device,
mem_reqs.memoryRequirements.alignment);
if (bo->size < aligned_image_size) {
- result = anv_errorf(device, &device->vk.base,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"dma-buf from VkNativeBufferANDROID is too small for "
"VkImage: %"PRIu64"B < %"PRIu64"B",
bo->size, aligned_image_size);
@@ -599,13 +595,13 @@ anv_image_bind_from_gralloc(struct anv_device *device,
0 /* client_address */,
&bo);
if (result != VK_SUCCESS) {
- return anv_errorf(device, &device->vk.base, result,
+ return vk_errorf(device, result,
"failed to import dma-buf from VkNativeBufferANDROID");
}
uint64_t img_size = image->bindings[ANV_IMAGE_MEMORY_BINDING_MAIN].memory_range.size;
if (img_size < bo->size) {
- result = anv_errorf(device, &device->vk.base, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"dma-buf from VkNativeBufferANDROID is too small for "
"VkImage: %"PRIu64"B < %"PRIu64"B",
bo->size, img_size);
@@ -649,7 +645,7 @@ format_supported_with_usage(VkDevice device_h, VkFormat format,
result = anv_GetPhysicalDeviceImageFormatProperties2(phys_dev_h,
&image_format_info, &image_format_props);
if (result != VK_SUCCESS) {
- return anv_errorf(device, &device->vk.base, result,
+ return vk_errorf(device, result,
"anv_GetPhysicalDeviceImageFormatProperties2 failed "
"inside %s", __func__);
}
@@ -688,7 +684,7 @@ setup_gralloc0_usage(struct anv_device *device, VkFormat format,
* gralloc swapchains.
*/
if (imageUsage != 0) {
- return anv_errorf(device, &device->vk.base, VK_ERROR_FORMAT_NOT_SUPPORTED,
+ return vk_errorf(device, VK_ERROR_FORMAT_NOT_SUPPORTED,
"unsupported VkImageUsageFlags(0x%x) for gralloc "
"swapchain", imageUsage);
}
@@ -793,6 +789,7 @@ anv_AcquireImageANDROID(
VkSemaphore semaphore_h,
VkFence fence_h)
{
+ ANV_FROM_HANDLE(anv_device, device, device_h);
VkResult result = VK_SUCCESS;
/* From https://source.android.com/devices/graphics/implement-vulkan :
@@ -817,7 +814,7 @@ anv_AcquireImageANDROID(
VkResult err = (errno == EMFILE) ? VK_ERROR_TOO_MANY_OBJECTS :
VK_ERROR_OUT_OF_HOST_MEMORY;
close(nativeFenceFd);
- return anv_error(err);
+ return vk_error(device, err);
}
} else if (semaphore_h != VK_NULL_HANDLE) {
semaphore_fd = nativeFenceFd;
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index c3c00f5f962..bc159e28308 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -70,14 +70,14 @@ anv_reloc_list_init_clone(struct anv_reloc_list *list,
vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (list->relocs == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
list->reloc_bos =
vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (list->reloc_bos == NULL) {
vk_free(alloc, list->relocs);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
}
memcpy(list->relocs, other_list->relocs,
@@ -130,7 +130,7 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
new_length * sizeof(*list->relocs), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_relocs == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
list->relocs = new_relocs;
struct anv_bo **new_reloc_bos =
@@ -138,7 +138,7 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
new_length * sizeof(*list->reloc_bos), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_reloc_bos == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
list->reloc_bos = new_reloc_bos;
list->array_length = new_length;
@@ -162,7 +162,7 @@ anv_reloc_list_grow_deps(struct anv_reloc_list *list,
vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_deps == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
list->deps = new_deps;
/* Zero out the new data */
@@ -349,7 +349,7 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (bbo == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
size, &bbo->bo);
@@ -382,7 +382,7 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (bbo == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
other_bbo->bo->size, &bbo->bo);
@@ -676,7 +676,7 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
if (seen_bbo == NULL) {
anv_batch_bo_destroy(new_bbo, cmd_buffer);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
*seen_bbo = new_bbo;
@@ -817,7 +817,7 @@ anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
if (bt_block == NULL) {
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
*bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
@@ -1075,7 +1075,7 @@ anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
if (bbo_ptr == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
*bbo_ptr = bbo;
}
@@ -1264,13 +1264,13 @@ anv_execbuf_add_bo(struct anv_device *device,
struct drm_i915_gem_exec_object2 *new_objects =
vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
if (new_objects == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_bo **new_bos =
vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
if (new_bos == NULL) {
vk_free(exec->alloc, new_objects);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (exec->objects) {
diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index 959a6640df7..48571622707 100644
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -271,7 +271,7 @@ static VkResult anv_create_cmd_buffer(
cmd_buffer = vk_alloc2(&device->vk.alloc, &pool->alloc, sizeof(*cmd_buffer),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
if (result != VK_SUCCESS)
@@ -1268,7 +1268,7 @@ VkResult anv_CreateCommandPool(
pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
VK_OBJECT_TYPE_COMMAND_POOL);
if (pool == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
assert(pCreateInfo->queueFamilyIndex < device->physical->queue.family_count);
pool->queue_family =
diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index 06ba3be345e..578ecfb9b93 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -391,7 +391,7 @@ VkResult anv_CreateDescriptorSetLayout(
if (!vk_object_multizalloc(&device->vk, &ma, NULL,
VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT))
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
set_layout->ref_cnt = 1;
set_layout->binding_count = num_bindings;
@@ -714,7 +714,7 @@ VkResult anv_CreatePipelineLayout(
layout = vk_object_alloc(&device->vk, pAllocator, sizeof(*layout),
VK_OBJECT_TYPE_PIPELINE_LAYOUT);
if (layout == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
layout->num_sets = pCreateInfo->setLayoutCount;
@@ -860,7 +860,7 @@ VkResult anv_CreateDescriptorPool(
pool = vk_object_alloc(&device->vk, pAllocator, total_size,
VK_OBJECT_TYPE_DESCRIPTOR_POOL);
if (!pool)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
pool->size = pool_size;
pool->next = 0;
@@ -1071,7 +1071,7 @@ anv_descriptor_set_create(struct anv_device *device,
ANV_UBO_ALIGNMENT);
if (pool_vma_offset == 0) {
anv_descriptor_pool_free_set(pool, set);
- return anv_error(VK_ERROR_FRAGMENTED_POOL);
+ return vk_error(pool, VK_ERROR_FRAGMENTED_POOL);
}
assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
@@ -1811,7 +1811,7 @@ VkResult anv_CreateDescriptorUpdateTemplate(
template = vk_object_alloc(&device->vk, pAllocator, size,
VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);
if (template == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
template->bind_point = pCreateInfo->pipelineBindPoint;
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index d0047c444fa..3ad59e9b0c4 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -347,16 +347,14 @@ anv_gather_meminfo(struct anv_physical_device *device, int fd, bool update)
intel_i915_query_alloc(fd, DRM_I915_QUERY_MEMORY_REGIONS);
if (mem_regions == NULL) {
if (device->info.has_local_mem) {
- return anv_errorfi(device->instance, NULL,
- VK_ERROR_INCOMPATIBLE_DRIVER,
- "failed to memory regions: %m");
+ return vk_errorf(device, VK_ERROR_INCOMPATIBLE_DRIVER,
+ "failed to memory regions: %m");
}
uint64_t total_phys;
if (!os_get_total_physical_memory(&total_phys)) {
- return anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "failed to get total physical memory: %m");
+ return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get total physical memory: %m");
}
uint64_t available;
@@ -440,9 +438,8 @@ anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
"Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
if (intel_get_aperture_size(fd, &device->gtt_size) == -1) {
- return anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "failed to get aperture size: %m");
+ return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get aperture size: %m");
}
}
@@ -555,16 +552,14 @@ anv_physical_device_init_uuids(struct anv_physical_device *device)
const struct build_id_note *note =
build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
if (!note) {
- return anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "Failed to find build-id");
+ return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "Failed to find build-id");
}
unsigned build_id_len = build_id_length(note);
if (build_id_len < 20) {
- return anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "build-id too short. It needs to be a SHA");
+ return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "build-id too short. It needs to be a SHA");
}
memcpy(device->driver_build_sha1, build_id_data(note), 20);
@@ -759,16 +754,16 @@ anv_physical_device_try_create(struct anv_instance *instance,
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0) {
if (errno == ENOMEM) {
- return anv_errorfi(instance, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
- "Unable to open device %s: out of memory", path);
+ return vk_errorf(instance, VK_ERROR_OUT_OF_HOST_MEMORY,
+ "Unable to open device %s: out of memory", path);
}
- return anv_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
- "Unable to open device %s: %m", path);
+ return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
+ "Unable to open device %s: %m", path);
}
struct intel_device_info devinfo;
if (!intel_get_device_info_from_fd(fd, &devinfo)) {
- result = anv_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ result = vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
goto fail_fd;
}
@@ -781,8 +776,8 @@ anv_physical_device_try_create(struct anv_instance *instance,
} else if (devinfo.ver >= 8 && devinfo.ver <= 12) {
/* Gfx8-12 fully supported */
} else {
- result = anv_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
- "Vulkan not yet supported on %s", devinfo.name);
+ result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
+ "Vulkan not yet supported on %s", devinfo.name);
goto fail_fd;
}
@@ -790,7 +785,7 @@ anv_physical_device_try_create(struct anv_instance *instance,
vk_zalloc(&instance->vk.alloc, sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (device == NULL) {
- result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_fd;
}
@@ -802,7 +797,7 @@ anv_physical_device_try_create(struct anv_instance *instance,
NULL, /* We set up extensions later */
&dispatch_table);
if (result != VK_SUCCESS) {
- anv_error(result);
+ vk_error(instance, result);
goto fail_alloc;
}
device->instance = instance;
@@ -822,47 +817,41 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->cmd_parser_version =
anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
if (device->cmd_parser_version == -1) {
- result = anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "failed to get command parser version");
+ result = vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get command parser version");
goto fail_base;
}
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
- result = anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "kernel missing gem wait");
+ result = vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing gem wait");
goto fail_base;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
- result = anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "kernel missing execbuf2");
+ result = vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing execbuf2");
goto fail_base;
}
if (!device->info.has_llc &&
anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
- result = anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "kernel missing wc mmap");
+ result = vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing wc mmap");
goto fail_base;
}
if (device->info.ver >= 8 && !device->info.is_cherryview &&
!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN)) {
- result = anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "kernel missing softpin");
+ result = vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing softpin");
goto fail_alloc;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY)) {
- result = anv_errorfi(device->instance, NULL,
- VK_ERROR_INITIALIZATION_FAILED,
- "kernel missing syncobj support");
+ result = vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing syncobj support");
goto fail_base;
}
@@ -940,7 +929,7 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->compiler = brw_compiler_create(NULL, &device->info);
if (device->compiler == NULL) {
- result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_base;
}
device->compiler->shader_debug_log = compiler_debug_log;
@@ -1069,7 +1058,7 @@ VkResult anv_EnumerateInstanceExtensionProperties(
VkExtensionProperties* pProperties)
{
if (pLayerName)
- return anv_error(VK_ERROR_LAYER_NOT_PRESENT);
+ return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
return vk_enumerate_instance_extension_properties(
&instance_extensions, pPropertyCount, pProperties);
@@ -1104,7 +1093,7 @@ VkResult anv_CreateInstance(
instance = vk_alloc(pAllocator, sizeof(*instance), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!instance)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
struct vk_instance_dispatch_table dispatch_table;
vk_instance_dispatch_table_from_entrypoints(
@@ -1114,7 +1103,7 @@ VkResult anv_CreateInstance(
&dispatch_table, pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
vk_free(pAllocator, instance);
- return anv_error(result);
+ return vk_error(NULL, result);
}
instance->physical_devices_enumerated = false;
@@ -2918,7 +2907,7 @@ VkResult anv_CreateDevice(
assert(pCreateInfo->queueCreateInfoCount > 0);
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
- return anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ return vk_error(physical_device, VK_ERROR_INITIALIZATION_FAILED);
}
/* Check if client specified queue priority. */
@@ -2934,7 +2923,7 @@ VkResult anv_CreateDevice(
sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(physical_device, VK_ERROR_OUT_OF_HOST_MEMORY);
struct vk_device_dispatch_table dispatch_table;
vk_device_dispatch_table_from_entrypoints(&dispatch_table,
@@ -2945,7 +2934,7 @@ VkResult anv_CreateDevice(
result = vk_device_init(&device->vk, &physical_device->vk,
&dispatch_table, pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
- anv_error(result);
+ vk_error(physical_device, result);
goto fail_alloc;
}
@@ -2968,7 +2957,7 @@ VkResult anv_CreateDevice(
/* XXX(chadv): Can we dup() physicalDevice->fd here? */
device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
if (device->fd == -1) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_device;
}
@@ -3002,7 +2991,7 @@ VkResult anv_CreateDevice(
device->context_id = anv_gem_create_context(device);
}
if (device->context_id == -1) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
@@ -3021,7 +3010,7 @@ VkResult anv_CreateDevice(
vk_zalloc(&device->vk.alloc, num_queues * sizeof(*device->queues), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (device->queues == NULL) {
- result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_context_id;
}
@@ -3049,7 +3038,7 @@ VkResult anv_CreateDevice(
if (physical_device->use_softpin) {
if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_queues;
}
@@ -3081,7 +3070,7 @@ VkResult anv_CreateDevice(
I915_CONTEXT_PARAM_PRIORITY,
vk_priority_to_gen(priority));
if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
- result = anv_error(VK_ERROR_NOT_PERMITTED_EXT);
+ result = vk_error(device, VK_ERROR_NOT_PERMITTED_EXT);
goto fail_vmas;
}
}
@@ -3099,28 +3088,28 @@ VkResult anv_CreateDevice(
device->robust_buffer_access = robust_buffer_access;
if (pthread_mutex_init(&device->mutex, NULL) != 0) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_queues;
}
pthread_condattr_t condattr;
if (pthread_condattr_init(&condattr) != 0) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
pthread_condattr_destroy(&condattr);
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
pthread_condattr_destroy(&condattr);
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
pthread_condattr_destroy(&condattr);
- result = anv_bo_cache_init(&device->bo_cache);
+ result = anv_bo_cache_init(&device->bo_cache, device);
if (result != VK_SUCCESS)
goto fail_queue_cond;
@@ -3389,7 +3378,7 @@ VkResult anv_EnumerateInstanceLayerProperties(
}
/* None supported at this time */
- return anv_error(VK_ERROR_LAYER_NOT_PRESENT);
+ return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}
void
@@ -3402,8 +3391,7 @@ _anv_device_report_lost(struct anv_device *device)
for (uint32_t i = 0; i < device->queue_count; i++) {
struct anv_queue *queue = &device->queues[i];
if (queue->lost) {
- __anv_errorf(device->physical->instance, &device->vk.base,
- VK_ERROR_DEVICE_LOST,
+ __vk_errorf(queue, VK_ERROR_DEVICE_LOST,
queue->error_file, queue->error_line,
"%s", queue->error_msg);
}
@@ -3425,8 +3413,7 @@ _anv_device_set_lost(struct anv_device *device,
device->lost_reported = true;
va_start(ap, msg);
- err = __anv_errorv(device->physical->instance, &device->vk.base,
- VK_ERROR_DEVICE_LOST, file, line, msg, ap);
+ err = __vk_errorv(device, VK_ERROR_DEVICE_LOST, file, line, msg, ap);
va_end(ap);
if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
@@ -3613,7 +3600,7 @@ VkResult anv_AllocateMemory(
align_u64(pAllocateInfo->allocationSize, 4096);
if (aligned_alloc_size > MAX_MEMORY_ALLOCATION_SIZE)
- return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
struct anv_memory_type *mem_type =
@@ -3624,12 +3611,12 @@ VkResult anv_AllocateMemory(
uint64_t mem_heap_used = p_atomic_read(&mem_heap->used);
if (mem_heap_used + aligned_alloc_size > mem_heap->size)
- return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
VK_OBJECT_TYPE_DEVICE_MEMORY);
if (mem == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
mem->type = mem_type;
mem->map = NULL;
@@ -3755,8 +3742,7 @@ VkResult anv_AllocateMemory(
* this sort of attack but only if it can trust the buffer size.
*/
if (mem->bo->size < aligned_alloc_size) {
- result = anv_errorf(device, &device->vk.base,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"aligned allocationSize too large for "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: "
"%"PRIu64"B > %"PRIu64"B",
@@ -3781,7 +3767,7 @@ VkResult anv_AllocateMemory(
if (host_ptr_info && host_ptr_info->handleType) {
if (host_ptr_info->handleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
- result = anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ result = vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
goto fail;
}
@@ -3828,8 +3814,7 @@ VkResult anv_AllocateMemory(
i915_tiling);
if (ret) {
anv_device_release_bo(device, mem->bo);
- result = anv_errorf(device, &device->vk.base,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ result = vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to set BO tiling: %m");
goto fail;
}
@@ -3841,8 +3826,7 @@ VkResult anv_AllocateMemory(
if (mem_heap_used > mem_heap->size) {
p_atomic_add(&mem_heap->used, -mem->bo->size);
anv_device_release_bo(device, mem->bo);
- result = anv_errorf(device, &device->vk.base,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ result = vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"Out of heap memory");
goto fail;
}
@@ -3900,7 +3884,7 @@ VkResult anv_GetMemoryFdPropertiesKHR(
*
* So opaque handle types fall into the default "unsupported" case.
*/
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
}
@@ -4020,7 +4004,7 @@ VkResult anv_MapMemory(
void *map = anv_gem_mmap(device, mem->bo->gem_handle,
map_offset, map_size, gem_flags);
if (map == MAP_FAILED)
- return anv_error(VK_ERROR_MEMORY_MAP_FAILED);
+ return vk_error(device, VK_ERROR_MEMORY_MAP_FAILED);
mem->map = map;
mem->map_size = map_size;
@@ -4149,7 +4133,7 @@ VkResult anv_QueueBindSparse(
if (anv_device_is_lost(queue->device))
return VK_ERROR_DEVICE_LOST;
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(queue, VK_ERROR_FEATURE_NOT_PRESENT);
}
// Event functions
@@ -4168,7 +4152,7 @@ VkResult anv_CreateEvent(
event = vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
VK_OBJECT_TYPE_EVENT);
if (event == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
event->state = anv_state_pool_alloc(&device->dynamic_state_pool,
sizeof(uint64_t), 8);
@@ -4326,14 +4310,14 @@ VkResult anv_CreateBuffer(
* allocating a buffer larger than our GTT size.
*/
if (pCreateInfo->size > device->physical->gtt_size)
- return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
VK_OBJECT_TYPE_BUFFER);
if (buffer == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
buffer->create_flags = pCreateInfo->flags;
buffer->size = pCreateInfo->size;
@@ -4455,7 +4439,7 @@ VkResult anv_CreateFramebuffer(
framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
VK_OBJECT_TYPE_FRAMEBUFFER);
if (framebuffer == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
framebuffer->width = pCreateInfo->width;
framebuffer->height = pCreateInfo->height;
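Also in anv_device.c, the device-lost paths stop going through the removed __anv_errorf()/__anv_errorv() wrappers and call the underlying common entry points directly, forwarding the file/line captured at the point the queue was marked lost; the error is reported against the queue or device object rather than the instance. Both calls below are copied from the hunks above:

    /* _anv_device_report_lost(), per lost queue */
    __vk_errorf(queue, VK_ERROR_DEVICE_LOST,
                queue->error_file, queue->error_line,
                "%s", queue->error_msg);

    /* _anv_device_set_lost() */
    err = __vk_errorv(device, VK_ERROR_DEVICE_LOST, file, line, msg, ap);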
diff --git a/src/intel/vulkan/anv_formats.c b/src/intel/vulkan/anv_formats.c
index 1572cfa1302..6c9d766826e 100644
--- a/src/intel/vulkan/anv_formats.c
+++ b/src/intel/vulkan/anv_formats.c
@@ -922,7 +922,6 @@ anv_get_image_format_properties(
uint32_t maxMipLevels;
uint32_t maxArraySize;
VkSampleCountFlags sampleCounts;
- struct anv_instance *instance = physical_device->instance;
const struct intel_device_info *devinfo = &physical_device->info;
const struct anv_format *format = anv_get_format(info->format);
const struct isl_drm_modifier_info *isl_mod_info = NULL;
@@ -1011,10 +1010,9 @@ anv_get_image_format_properties(
* non-mipmapped single-sample) 2D images.
*/
if (info->type != VK_IMAGE_TYPE_2D) {
- anv_errorfi(instance, &physical_device->vk.base,
- VK_ERROR_FORMAT_NOT_SUPPORTED,
- "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT "
- "requires VK_IMAGE_TYPE_2D");
+ vk_errorf(physical_device, VK_ERROR_FORMAT_NOT_SUPPORTED,
+ "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT "
+ "requires VK_IMAGE_TYPE_2D");
goto unsupported;
}
@@ -1271,7 +1269,6 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties2(
VkImageFormatProperties2* base_props)
{
ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
- struct anv_instance *instance = physical_device->instance;
const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL;
VkExternalImageFormatProperties *external_props = NULL;
VkSamplerYcbcrConversionImageFormatProperties *ycbcr_props = NULL;
@@ -1407,11 +1404,10 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties2(
* and therefore requires explicit memory layout.
*/
if (!tiling_has_explicit_layout) {
- result = anv_errorfi(instance, &physical_device->vk.base,
- VK_ERROR_FORMAT_NOT_SUPPORTED,
- "VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT "
- "requires VK_IMAGE_TILING_LINEAR or "
- "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT");
+ result = vk_errorf(physical_device, VK_ERROR_FORMAT_NOT_SUPPORTED,
+ "VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT "
+ "requires VK_IMAGE_TILING_LINEAR or "
+ "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT");
goto fail;
}
@@ -1427,11 +1423,10 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties2(
* and therefore requires explicit memory layout.
*/
if (!tiling_has_explicit_layout) {
- result = anv_errorfi(instance, &physical_device->vk.base,
- VK_ERROR_FORMAT_NOT_SUPPORTED,
- "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT "
- "requires VK_IMAGE_TILING_LINEAR or "
- "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT");
+ result = vk_errorf(physical_device, VK_ERROR_FORMAT_NOT_SUPPORTED,
+ "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT "
+ "requires VK_IMAGE_TILING_LINEAR or "
+ "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT");
goto fail;
}
@@ -1457,10 +1452,9 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties2(
* vkGetPhysicalDeviceImageFormatProperties2 returns
* VK_ERROR_FORMAT_NOT_SUPPORTED.
*/
- result = anv_errorfi(instance, &physical_device->vk.base,
- VK_ERROR_FORMAT_NOT_SUPPORTED,
- "unsupported VkExternalMemoryTypeFlagBits 0x%x",
- external_info->handleType);
+ result = vk_errorf(physical_device, VK_ERROR_FORMAT_NOT_SUPPORTED,
+ "unsupported VkExternalMemoryTypeFlagBits 0x%x",
+ external_info->handleType);
goto fail;
}
}
@@ -1582,7 +1576,7 @@ VkResult anv_CreateSamplerYcbcrConversion(
conversion = vk_object_zalloc(&device->vk, pAllocator, sizeof(*conversion),
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);
if (!conversion)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
conversion->format = anv_get_format(pCreateInfo->format);
conversion->ycbcr_model = pCreateInfo->ycbcrModel;
diff --git a/src/intel/vulkan/anv_image.c b/src/intel/vulkan/anv_image.c
index 06b0d6c40f3..bcc2e8de2cd 100644
--- a/src/intel/vulkan/anv_image.c
+++ b/src/intel/vulkan/anv_image.c
@@ -132,7 +132,7 @@ image_binding_grow(const struct anv_device *device,
* VkImageDrmFormatModifierExplicitCreateInfoEXT.
*/
if (unlikely(!anv_is_aligned(offset, alignment))) {
- return anv_errorf(device, &device->vk.base,
+ return vk_errorf(device,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[]::offset is misaligned");
@@ -143,7 +143,7 @@ image_binding_grow(const struct anv_device *device,
* VkImageDrmFormatModifierExplicitCreateInfoEXT,
*/
if (unlikely(offset < container->size)) {
- return anv_errorf(device, &device->vk.base,
+ return vk_errorf(device,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[]::offset is too small");
@@ -153,11 +153,10 @@ image_binding_grow(const struct anv_device *device,
if (__builtin_add_overflow(offset, size, &container->size)) {
if (has_implicit_offset) {
assert(!"overflow");
- return anv_errorf(device, &device->vk.base,
- VK_ERROR_UNKNOWN,
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
"internal error: overflow in %s", __func__);
} else {
- return anv_errorf(device, &device->vk.base,
+ return vk_errorf(device,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[]::offset is too large");
@@ -1000,7 +999,7 @@ check_drm_format_mod(const struct anv_device *device,
* usage, then we may enable a private aux surface.
*/
if (plane->aux_usage != isl_mod_info->aux_usage) {
- return anv_errorf(device, &image->vk.base, VK_ERROR_UNKNOWN,
+ return vk_errorf(device, VK_ERROR_UNKNOWN,
"image with modifier unexpectedly has wrong aux "
"usage");
}
@@ -1132,14 +1131,14 @@ add_all_surfaces_explicit_layout(
/* Reject special values in the app-provided plane layouts. */
for (uint32_t i = 0; i < mod_plane_count; ++i) {
if (drm_info->pPlaneLayouts[i].rowPitch == 0) {
- return anv_errorf(device, &device->vk.base,
+ return vk_errorf(device,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[%u]::rowPitch is 0", i);
}
if (drm_info->pPlaneLayouts[i].offset == ANV_OFFSET_IMPLICIT) {
- return anv_errorf(device, &device->vk.base,
+ return vk_errorf(device,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[%u]::offset is %" PRIu64,
@@ -1481,7 +1480,7 @@ VkResult anv_CreateImage(
vk_object_zalloc(&device->vk, pAllocator, sizeof(*image),
VK_OBJECT_TYPE_IMAGE);
if (!image)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result = anv_image_init_from_create_info(device, image,
pCreateInfo);
@@ -2520,7 +2519,7 @@ anv_CreateImageView(VkDevice _device,
iview = vk_image_view_create(&device->vk, pCreateInfo,
pAllocator, sizeof(*iview));
if (iview == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
iview->image = image;
iview->n_planes = anv_image_aspect_get_planes(iview->vk.aspects);
@@ -2721,7 +2720,7 @@ anv_CreateBufferView(VkDevice _device,
view = vk_object_alloc(&device->vk, pAllocator, sizeof(*view),
VK_OBJECT_TYPE_BUFFER_VIEW);
if (!view)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
/* TODO: Handle the format swizzle? */
diff --git a/src/intel/vulkan/anv_pass.c b/src/intel/vulkan/anv_pass.c
index be5feaa25b0..e41b2756b03 100644
--- a/src/intel/vulkan/anv_pass.c
+++ b/src/intel/vulkan/anv_pass.c
@@ -344,7 +344,7 @@ VkResult anv_CreateRenderPass2(
if (!vk_object_multizalloc(&device->vk, &ma, pAllocator,
VK_OBJECT_TYPE_RENDER_PASS))
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
/* Clear the subpasses along with the parent pass. This required because
* each array member of anv_subpass must be a valid pointer if not NULL.
diff --git a/src/intel/vulkan/anv_perf.c b/src/intel/vulkan/anv_perf.c
index 8c225244abc..aa1edf62ffa 100644
--- a/src/intel/vulkan/anv_perf.c
+++ b/src/intel/vulkan/anv_perf.c
@@ -223,7 +223,7 @@ VkResult anv_AcquirePerformanceConfigurationINTEL(
config = vk_object_alloc(&device->vk, NULL, sizeof(*config),
VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL);
if (!config)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
config->register_config =
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index cb5df4b4612..30e71e9bb81 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -1589,7 +1589,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
pipeline_ctx,
&stages[s]);
if (stages[s].nir == NULL) {
- result = anv_error(VK_ERROR_UNKNOWN);
+ result = vk_error(pipeline, VK_ERROR_UNKNOWN);
goto fail;
}
@@ -1728,7 +1728,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
}
if (stages[s].code == NULL) {
ralloc_free(stage_ctx);
- result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = vk_error(pipeline->base.device, VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
}
@@ -1747,7 +1747,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
xfb_info, &stages[s].bind_map);
if (!bin) {
ralloc_free(stage_ctx);
- result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
}
@@ -1886,7 +1886,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
if (stage.nir == NULL) {
ralloc_free(mem_ctx);
- return anv_error(VK_ERROR_UNKNOWN);
+ return vk_error(pipeline, VK_ERROR_UNKNOWN);
}
NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
@@ -1908,7 +1908,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
stage.code = brw_compile_cs(compiler, mem_ctx, &params);
if (stage.code == NULL) {
ralloc_free(mem_ctx);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
}
anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
@@ -1930,7 +1930,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
NULL, &stage.bind_map);
if (!bin) {
ralloc_free(mem_ctx);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
}
stage.feedback.duration = os_time_get_nano() - stage_start;
@@ -2540,7 +2540,7 @@ compile_upload_rt_shader(struct anv_ray_tracing_pipeline *pipeline,
&stage->key.bs, &stage->prog_data.bs, nir,
num_resume_shaders, resume_shaders, stage->stats, NULL);
if (stage->code == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
/* Ray-tracing shaders don't have a "real" bind map */
struct anv_pipeline_bind_map empty_bind_map = {};
@@ -2557,7 +2557,7 @@ compile_upload_rt_shader(struct anv_ray_tracing_pipeline *pipeline,
stage->stats, 1,
NULL, &empty_bind_map);
if (bin == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
/* TODO: Figure out executables for resume shaders */
anv_pipeline_add_executables(&pipeline->base, stage, bin);
@@ -2798,7 +2798,7 @@ anv_pipeline_compile_ray_tracing(struct anv_ray_tracing_pipeline *pipeline,
pipeline_ctx, &stages[i]);
if (stages[i].nir == NULL) {
ralloc_free(pipeline_ctx);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
}
anv_pipeline_lower_nir(&pipeline->base, pipeline_ctx, &stages[i], layout);
@@ -3015,7 +3015,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
ralloc_free(tmp_ctx);
if (device->rt_trampoline == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
struct brw_rt_trivial_return {
@@ -3057,7 +3057,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
if (device->rt_trivial_return == NULL) {
anv_shader_bin_unref(device, device->rt_trampoline);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
@@ -3326,16 +3326,18 @@ VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
VkResult
anv_GetRayTracingShaderGroupHandlesKHR(
- VkDevice device,
+ VkDevice _device,
VkPipeline _pipeline,
uint32_t firstGroup,
uint32_t groupCount,
size_t dataSize,
void* pData)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
+
if (pipeline->type != ANV_PIPELINE_RAY_TRACING)
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
struct anv_ray_tracing_pipeline *rt_pipeline =
anv_pipeline_to_ray_tracing(pipeline);
@@ -3351,15 +3353,16 @@ anv_GetRayTracingShaderGroupHandlesKHR(
VkResult
anv_GetRayTracingCaptureReplayShaderGroupHandlesKHR(
- VkDevice device,
+ VkDevice _device,
VkPipeline pipeline,
uint32_t firstGroup,
uint32_t groupCount,
size_t dataSize,
void* pData)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
unreachable("Unimplemented");
- return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device, VK_ERROR_FEATURE_NOT_PRESENT);
}
VkDeviceSize
diff --git a/src/intel/vulkan/anv_pipeline_cache.c b/src/intel/vulkan/anv_pipeline_cache.c
index 89572395776..3fb90a05c00 100644
--- a/src/intel/vulkan/anv_pipeline_cache.c
+++ b/src/intel/vulkan/anv_pipeline_cache.c
@@ -550,7 +550,7 @@ VkResult anv_CreatePipelineCache(
sizeof(*cache), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cache == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
anv_pipeline_cache_init(cache, device,
device->physical->instance->pipeline_cache_enabled,
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 551f1fdaf05..41c8b0d9972 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -364,41 +364,6 @@ static inline uintptr_t anv_pack_ptr(void *ptr, int bits, int flags)
return value | (mask & flags);
}
-/* Whenever we generate an error, pass it through this function. Useful for
- * debugging, where we can break on it. Only call at error site, not when
- * propagating errors. Might be useful to plug in a stack trace here.
- */
-
-VkResult __anv_errorv(struct anv_instance *instance,
- const struct vk_object_base *object, VkResult error,
- const char *file, int line, const char *format,
- va_list args);
-
-VkResult __anv_errorf(struct anv_instance *instance,
- const struct vk_object_base *object, VkResult error,
- const char *file, int line, const char *format, ...)
- anv_printflike(6, 7);
-
-#ifdef DEBUG
-#define anv_error(error) __anv_errorf(NULL, NULL, error, __FILE__, __LINE__, NULL)
-#define anv_errorfi(instance, obj, error, format, ...)\
- __anv_errorf(instance, obj, error,\
- __FILE__, __LINE__, format, ## __VA_ARGS__)
-#define anv_errorf(device, obj, error, format, ...)\
- anv_errorfi(anv_device_instance_or_null(device),\
- obj, error, format, ## __VA_ARGS__)
-#else
-
-static inline VkResult __dummy_anv_error(VkResult error, UNUSED const void *ignored)
-{
- return error;
-}
-
-#define anv_error(error) __dummy_anv_error(error, NULL)
-#define anv_errorfi(instance, obj, error, format, ...) __dummy_anv_error(error, instance)
-#define anv_errorf(device, obj, error, format, ...) __dummy_anv_error(error, device)
-#endif
-
/**
* Warn on ignored extension structs.
*
@@ -847,7 +812,8 @@ struct anv_bo_cache {
pthread_mutex_t mutex;
};
-VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
+VkResult anv_bo_cache_init(struct anv_bo_cache *cache,
+ struct anv_device *device);
void anv_bo_cache_finish(struct anv_bo_cache *cache);
struct anv_queue_family {
@@ -1294,12 +1260,6 @@ anv_use_softpin(const struct anv_physical_device *pdevice)
#endif
}
-static inline struct anv_instance *
-anv_device_instance_or_null(const struct anv_device *device)
-{
- return device ? device->physical->instance : NULL;
-}
-
static inline struct anv_state_pool *
anv_binding_table_pool(struct anv_device *device)
{
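
With the anv_error()/anv_errorf() macros and anv_device_instance_or_null() gone from anv_private.h, every caller switches to the common vk_error()/vk_errorf() helpers, which take whichever Vulkan object is at hand (device, queue, fence, semaphore, ...) and locate the instance themselves. A before/after sketch, lifted from the anv_queue.c hunks below:

    /* Before: the instance had to be derived from the device explicitly,
     * and the object was a separate argument.
     */
    return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                      "syncobj sync file import failed: %m");

    /* After: the common helper logs against whatever object it is given. */
    return vk_errorf(fence, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                     "syncobj sync file import failed: %m");
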
diff --git a/src/intel/vulkan/anv_queue.c b/src/intel/vulkan/anv_queue.c
index 03222c30580..d485aeeb51a 100644
--- a/src/intel/vulkan/anv_queue.c
+++ b/src/intel/vulkan/anv_queue.c
@@ -167,7 +167,7 @@ anv_timeline_add_point_locked(struct anv_device *device,
vk_zalloc(&device->vk.alloc, sizeof(**point),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!(*point))
- result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
if (result == VK_SUCCESS) {
result = anv_device_alloc_bo(device, "timeline-semaphore", 4096,
ANV_BO_ALLOC_EXTERNAL |
@@ -501,15 +501,15 @@ anv_queue_init(struct anv_device *device, struct anv_queue *queue,
*/
if (device->has_thread_submit) {
if (pthread_mutex_init(&queue->mutex, NULL) != 0) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_queue;
}
if (pthread_cond_init(&queue->cond, NULL) != 0) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
if (pthread_create(&queue->thread, NULL, anv_queue_task, queue)) {
- result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
goto fail_cond;
}
}
@@ -558,7 +558,7 @@ anv_queue_submit_add_fence_bo(struct anv_queue *queue,
submit->fence_bos, new_len * sizeof(*submit->fence_bos),
8, submit->alloc_scope);
if (new_fence_bos == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->fence_bos = new_fence_bos;
submit->fence_bo_array_length = new_len;
@@ -590,7 +590,7 @@ anv_queue_submit_add_syncobj(struct anv_queue *queue,
new_len * sizeof(*submit->wait_timeline_syncobjs),
8, submit->alloc_scope);
if (new_wait_timeline_syncobjs == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timeline_syncobjs = new_wait_timeline_syncobjs;
@@ -599,7 +599,7 @@ anv_queue_submit_add_syncobj(struct anv_queue *queue,
submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
8, submit->alloc_scope);
if (new_wait_timeline_values == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timeline_values = new_wait_timeline_values;
submit->wait_timeline_array_length = new_len;
@@ -618,7 +618,7 @@ anv_queue_submit_add_syncobj(struct anv_queue *queue,
submit->fences, new_len * sizeof(*submit->fences),
8, submit->alloc_scope);
if (new_fences == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->fences = new_fences;
@@ -627,7 +627,7 @@ anv_queue_submit_add_syncobj(struct anv_queue *queue,
submit->fence_values, new_len * sizeof(*submit->fence_values),
8, submit->alloc_scope);
if (new_fence_values == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->fence_values = new_fence_values;
submit->fence_array_length = new_len;
@@ -656,7 +656,7 @@ anv_queue_submit_add_timeline_wait(struct anv_queue *queue,
submit->wait_timelines, new_len * sizeof(*submit->wait_timelines),
8, submit->alloc_scope);
if (new_wait_timelines == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timelines = new_wait_timelines;
@@ -665,7 +665,7 @@ anv_queue_submit_add_timeline_wait(struct anv_queue *queue,
submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
8, submit->alloc_scope);
if (new_wait_timeline_values == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timeline_values = new_wait_timeline_values;
@@ -695,7 +695,7 @@ anv_queue_submit_add_timeline_signal(struct anv_queue *queue,
submit->signal_timelines, new_len * sizeof(*submit->signal_timelines),
8, submit->alloc_scope);
if (new_signal_timelines == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->signal_timelines = new_signal_timelines;
@@ -704,7 +704,7 @@ anv_queue_submit_add_timeline_signal(struct anv_queue *queue,
submit->signal_timeline_values, new_len * sizeof(*submit->signal_timeline_values),
8, submit->alloc_scope);
if (new_signal_timeline_values == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->signal_timeline_values = new_signal_timeline_values;
@@ -748,7 +748,7 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
struct anv_device *device = queue->device;
struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
if (!submit)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
bool has_syncobj_wait = device->physical->has_syncobj_wait;
VkResult result;
@@ -758,7 +758,7 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
if (has_syncobj_wait) {
syncobj = anv_gem_syncobj_create(device, 0);
if (!syncobj) {
- result = anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ result = vk_error(queue, VK_ERROR_OUT_OF_DEVICE_MEMORY);
goto err_free_submit;
}
@@ -873,7 +873,7 @@ maybe_transfer_temporary_semaphore(struct anv_queue *queue,
new_len * sizeof(*submit->temporary_semaphores),
8, submit->alloc_scope);
if (new_array == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->temporary_semaphores = new_array;
submit->temporary_semaphore_array_length = new_len;
@@ -1120,7 +1120,7 @@ anv_queue_submit_add_cmd_buffer(struct anv_queue *queue,
submit->cmd_buffers, new_len * sizeof(*submit->cmd_buffers),
8, submit->alloc_scope);
if (new_cmd_buffers == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
submit->cmd_buffers = new_cmd_buffers;
submit->cmd_buffer_array_length = new_len;
@@ -1209,7 +1209,7 @@ anv_queue_submit_post_and_alloc_new(struct anv_queue *queue,
*submit = anv_queue_submit_alloc(queue->device);
if (!*submit)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
}
@@ -1239,7 +1239,7 @@ VkResult anv_QueueSubmit2KHR(
struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
if (!submit)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < submitCount; i++) {
const struct wsi_memory_signal_submit_info *mem_signal_info =
@@ -1378,7 +1378,7 @@ VkResult anv_CreateFence(
fence = vk_object_zalloc(&device->vk, pAllocator, sizeof(*fence),
VK_OBJECT_TYPE_FENCE);
if (fence == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
if (device->physical->has_syncobj_wait) {
fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;
@@ -1389,7 +1389,7 @@ VkResult anv_CreateFence(
fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
if (!fence->permanent.syncobj)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
} else {
fence->permanent.type = ANV_FENCE_TYPE_BO;
@@ -1594,7 +1594,7 @@ anv_wait_for_syncobj_fences(struct anv_device *device,
sizeof(*syncobjs) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!syncobjs)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
@@ -1911,7 +1911,7 @@ VkResult anv_ImportFenceFdKHR(
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
if (!new_impl.syncobj)
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(fence, VK_ERROR_INVALID_EXTERNAL_HANDLE);
break;
@@ -1934,19 +1934,19 @@ VkResult anv_ImportFenceFdKHR(
new_impl.syncobj = anv_gem_syncobj_create(device, create_flags);
if (!new_impl.syncobj)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(fence, VK_ERROR_OUT_OF_HOST_MEMORY);
if (fd != -1 &&
anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(fence, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
break;
}
default:
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(fence, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
/* From the Vulkan 1.0.53 spec:
@@ -2016,7 +2016,7 @@ VkResult anv_GetFenceFdKHR(
case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
if (fd < 0)
- return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return vk_error(fence, VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
@@ -2029,7 +2029,7 @@ VkResult anv_GetFenceFdKHR(
int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
if (fd < 0)
- return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return vk_error(fence, VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
@@ -2076,7 +2076,7 @@ binary_semaphore_create(struct anv_device *device,
impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
impl->syncobj = anv_gem_syncobj_create(device, 0);
if (!impl->syncobj)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
}
@@ -2089,13 +2089,13 @@ timeline_semaphore_create(struct anv_device *device,
impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ_TIMELINE;
impl->syncobj = anv_gem_syncobj_create(device, 0);
if (!impl->syncobj)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
if (initial_value) {
if (anv_gem_syncobj_timeline_signal(device,
&impl->syncobj,
&initial_value, 1)) {
anv_gem_syncobj_destroy(device, impl->syncobj);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
} else {
@@ -2123,7 +2123,7 @@ VkResult anv_CreateSemaphore(
semaphore = vk_object_alloc(&device->vk, NULL, sizeof(*semaphore),
VK_OBJECT_TYPE_SEMAPHORE);
if (semaphore == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
const VkExportSemaphoreCreateInfo *export =
vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
@@ -2157,12 +2157,12 @@ VkResult anv_CreateSemaphore(
semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
if (!semaphore->permanent.syncobj) {
vk_object_free(&device->vk, pAllocator, semaphore);
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else {
assert(!"Unknown handle type");
vk_object_free(&device->vk, pAllocator, semaphore);
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
@@ -2305,7 +2305,7 @@ VkResult anv_ImportSemaphoreFdKHR(
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
if (!new_impl.syncobj)
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(semaphore, VK_ERROR_INVALID_EXTERNAL_HANDLE);
/* From the Vulkan spec:
*
@@ -2331,12 +2331,12 @@ VkResult anv_ImportSemaphoreFdKHR(
};
if (!new_impl.syncobj)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
if (fd != -1) {
if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(semaphore, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
/* Ownership of the FD is transferred to Anv. Since we don't need it
@@ -2349,7 +2349,7 @@ VkResult anv_ImportSemaphoreFdKHR(
}
default:
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(semaphore, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
@@ -2391,7 +2391,7 @@ VkResult anv_GetSemaphoreFdKHR(
fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
}
if (fd < 0)
- return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return vk_error(device, VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
@@ -2399,12 +2399,12 @@ VkResult anv_GetSemaphoreFdKHR(
assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
if (fd < 0)
- return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return vk_error(device, VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
default:
- return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return vk_error(semaphore, VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
/* From the Vulkan 1.0.53 spec:
@@ -2584,7 +2584,7 @@ VkResult anv_WaitSemaphores(
if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t handle_count = 0;
for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
diff --git a/src/intel/vulkan/anv_util.c b/src/intel/vulkan/anv_util.c
index 9d673ade8dc..ceae38f6e23 100644
--- a/src/intel/vulkan/anv_util.c
+++ b/src/intel/vulkan/anv_util.c
@@ -38,62 +38,22 @@ __anv_perf_warn(struct anv_device *device,
{
va_list ap;
char buffer[256];
- char report[512];
va_start(ap, format);
vsnprintf(buffer, sizeof(buffer), format, ap);
va_end(ap);
- snprintf(report, sizeof(report), "%s: %s", file, buffer);
-
- vk_debug_report(&device->physical->instance->vk,
- VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
- object, line, 0, "anv", report);
-
- mesa_logw("%s:%d: PERF: %s", file, line, buffer);
-}
-
-VkResult
-__anv_errorv(struct anv_instance *instance,
- const struct vk_object_base *object, VkResult error,
- const char *file, int line, const char *format, va_list ap)
-{
- char buffer[256];
- char report[512];
-
- const char *error_str = vk_Result_to_str(error);
-
- if (format) {
- vsnprintf(buffer, sizeof(buffer), format, ap);
-
- snprintf(report, sizeof(report), "%s:%d: %s (%s)", file, line, buffer,
- error_str);
+ if (object) {
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+ VK_LOG_OBJS(object), file, line,
+ "PERF: %s", buffer);
} else {
- snprintf(report, sizeof(report), "%s:%d: %s", file, line, error_str);
- }
-
- if (instance) {
- vk_debug_report(&instance->vk, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- object, line, 0, "anv", report);
+ __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT,
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+ VK_LOG_NO_OBJS(device->physical->instance), file, line,
+ "PERF: %s", buffer);
}
-
- mesa_loge("%s", report);
-
- return error;
-}
-
-VkResult
-__anv_errorf(struct anv_instance *instance,
- const struct vk_object_base *object, VkResult error,
- const char *file, int line, const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
- __anv_errorv(instance, object, error, file, line, format, ap);
- va_end(ap);
-
- return error;
}
void
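
__anv_perf_warn() likewise stops hand-formatting a report string and calling vk_debug_report() plus mesa_logw(); it now routes through the common __vk_log(), choosing VK_LOG_OBJS() or VK_LOG_NO_OBJS() depending on whether an object is available. The call shape, copied from the hunk above (assuming __vk_log also covers console output, so the explicit mesa_logw() is no longer needed):

    /* Object-attached variant of the new logging call used above. */
    __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT,
             VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
             VK_LOG_OBJS(object), file, line,
             "PERF: %s", buffer);
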
diff --git a/src/intel/vulkan/anv_wsi.c b/src/intel/vulkan/anv_wsi.c
index 1a396b778c4..42b47076083 100644
--- a/src/intel/vulkan/anv_wsi.c
+++ b/src/intel/vulkan/anv_wsi.c
@@ -313,7 +313,7 @@ VkResult anv_QueuePresentKHR(
if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t wait_count = 0;
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
@@ -343,7 +343,7 @@ VkResult anv_QueuePresentKHR(
vk_free(&device->vk.alloc, values);
if (ret)
- return anv_error(VK_ERROR_DEVICE_LOST);
+ return vk_error(queue, VK_ERROR_DEVICE_LOST);
}
VkResult result = wsi_common_queue_present(&device->physical->wsi_device,
diff --git a/src/intel/vulkan/anv_wsi_display.c b/src/intel/vulkan/anv_wsi_display.c
index 9ed2a725ad5..93b9625caa8 100644
--- a/src/intel/vulkan/anv_wsi_display.c
+++ b/src/intel/vulkan/anv_wsi_display.c
@@ -257,7 +257,7 @@ anv_RegisterDeviceEventEXT(VkDevice _device,
fence = vk_zalloc2(&device->vk.alloc, allocator, sizeof (*fence), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
fence->permanent.type = ANV_FENCE_TYPE_WSI;
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index a9a076621cc..38250983b48 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -4815,8 +4815,7 @@ verify_cmd_parser(const struct anv_device *device,
const char *function)
{
if (device->physical->cmd_parser_version < required_version) {
- return anv_errorf(device, &device->physical->vk.base,
- VK_ERROR_FEATURE_NOT_PRESENT,
+ return vk_errorf(device->physical, VK_ERROR_FEATURE_NOT_PRESENT,
"cmd parser version %d is required for %s",
required_version, function);
} else {
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
index b5138f3dc86..7675475e17f 100644
--- a/src/intel/vulkan/genX_pipeline.c
+++ b/src/intel/vulkan/genX_pipeline.c
@@ -2473,7 +2473,7 @@ genX(graphics_pipeline_create)(
pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_graphics_pipeline_init(pipeline, device, cache,
pCreateInfo, pAllocator);
@@ -2742,7 +2742,7 @@ compute_pipeline_create(
pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_pipeline_init(&pipeline->base, device,
ANV_PIPELINE_COMPUTE, pCreateInfo->flags,
@@ -2889,7 +2889,7 @@ ray_tracing_pipeline_create(
VK_MULTIALLOC_DECL(&ma, struct anv_rt_shader_group, groups, pCreateInfo->groupCount);
if (!vk_multialloc_zalloc2(&ma, &device->vk.alloc, pAllocator,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_pipeline_init(&pipeline->base, device,
ANV_PIPELINE_RAY_TRACING, pCreateInfo->flags,
diff --git a/src/intel/vulkan/genX_query.c b/src/intel/vulkan/genX_query.c
index 9e81f0859f3..9fb746654bb 100644
--- a/src/intel/vulkan/genX_query.c
+++ b/src/intel/vulkan/genX_query.c
@@ -163,7 +163,7 @@ VkResult genX(CreateQueryPool)(
if (!vk_object_multialloc(&device->vk, &ma, pAllocator,
VK_OBJECT_TYPE_QUERY_POOL))
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
pool->type = pCreateInfo->queryType;
pool->pipeline_statistics = pipeline_statistics;
diff --git a/src/intel/vulkan/genX_state.c b/src/intel/vulkan/genX_state.c
index 6c0acdad9f6..66964bf0b19 100644
--- a/src/intel/vulkan/genX_state.c
+++ b/src/intel/vulkan/genX_state.c
@@ -341,7 +341,7 @@ genX(init_device_state)(struct anv_device *device)
res = init_render_queue_state(queue);
break;
default:
- res = anv_error(VK_ERROR_INITIALIZATION_FAILED);
+ res = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
break;
}
if (res != VK_SUCCESS)
@@ -708,7 +708,7 @@ VkResult genX(CreateSampler)(
sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
VK_OBJECT_TYPE_SAMPLER);
if (!sampler)
- return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
sampler->n_planes = 1;
diff --git a/src/intel/vulkan/tests/block_pool_grow_first.c b/src/intel/vulkan/tests/block_pool_grow_first.c
index e50f65c8d68..a993fda48a8 100644
--- a/src/intel/vulkan/tests/block_pool_grow_first.c
+++ b/src/intel/vulkan/tests/block_pool_grow_first.c
@@ -41,7 +41,7 @@ int main(void)
const uint32_t initial_size = block_size / 2;
pthread_mutex_init(&device.mutex, NULL);
- anv_bo_cache_init(&device.bo_cache);
+ anv_bo_cache_init(&device.bo_cache, &device);
anv_block_pool_init(&pool, &device, "test", 4096, initial_size);
ASSERT(pool.size == initial_size);
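
anv_bo_cache_init() now takes the anv_device as well, presumably so failures inside the cache can be reported with vk_error() against it, and each unit-test fixture passes its stub device accordingly. A condensed sketch of the updated setup (the test's device struct details are omitted here):

    struct anv_device device = { /* test stub, as set up by each test */ };

    pthread_mutex_init(&device.mutex, NULL);
    anv_bo_cache_init(&device.bo_cache, &device);   /* device is now required */
    anv_block_pool_init(&pool, &device, "test", 4096, initial_size);
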
diff --git a/src/intel/vulkan/tests/block_pool_no_free.c b/src/intel/vulkan/tests/block_pool_no_free.c
index 37030bdd7a3..1dac4f00f23 100644
--- a/src/intel/vulkan/tests/block_pool_no_free.c
+++ b/src/intel/vulkan/tests/block_pool_no_free.c
@@ -117,7 +117,7 @@ static void run_test()
struct anv_block_pool pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_bo_cache_init(&device.bo_cache);
+ anv_bo_cache_init(&device.bo_cache, &device);
anv_block_pool_init(&pool, &device, "test", 4096, 4096);
for (unsigned i = 0; i < NUM_THREADS; i++) {
diff --git a/src/intel/vulkan/tests/state_pool.c b/src/intel/vulkan/tests/state_pool.c
index 2f54efe783c..30180cc7bb8 100644
--- a/src/intel/vulkan/tests/state_pool.c
+++ b/src/intel/vulkan/tests/state_pool.c
@@ -42,7 +42,7 @@ int main(void)
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_bo_cache_init(&device.bo_cache);
+ anv_bo_cache_init(&device.bo_cache, &device);
for (unsigned i = 0; i < NUM_RUNS; i++) {
anv_state_pool_init(&state_pool, &device, "test", 4096, 0, 256);
diff --git a/src/intel/vulkan/tests/state_pool_free_list_only.c b/src/intel/vulkan/tests/state_pool_free_list_only.c
index 193169867c1..8d27f652f14 100644
--- a/src/intel/vulkan/tests/state_pool_free_list_only.c
+++ b/src/intel/vulkan/tests/state_pool_free_list_only.c
@@ -41,7 +41,7 @@ int main(void)
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_bo_cache_init(&device.bo_cache);
+ anv_bo_cache_init(&device.bo_cache, &device);
anv_state_pool_init(&state_pool, &device, "test", 4096, 0, 4096);
/* Grab one so a zero offset is impossible */
diff --git a/src/intel/vulkan/tests/state_pool_no_free.c b/src/intel/vulkan/tests/state_pool_no_free.c
index 4288e1a1b87..b8446863f46 100644
--- a/src/intel/vulkan/tests/state_pool_no_free.c
+++ b/src/intel/vulkan/tests/state_pool_no_free.c
@@ -62,7 +62,7 @@ static void run_test()
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_bo_cache_init(&device.bo_cache);
+ anv_bo_cache_init(&device.bo_cache, &device);
anv_state_pool_init(&state_pool, &device, "test", 4096, 0, 64);
pthread_barrier_init(&barrier, NULL, NUM_THREADS);
diff --git a/src/intel/vulkan/tests/state_pool_padding.c b/src/intel/vulkan/tests/state_pool_padding.c
index 70fb773b5b1..d6703cf315f 100644
--- a/src/intel/vulkan/tests/state_pool_padding.c
+++ b/src/intel/vulkan/tests/state_pool_padding.c
@@ -35,7 +35,7 @@ int main(void)
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_bo_cache_init(&device.bo_cache);
+ anv_bo_cache_init(&device.bo_cache, &device);
anv_state_pool_init(&state_pool, &device, "test", 4096, 0, 4096);
/* Get the size of the underlying block_pool */