author    Dave Airlie <airlied@redhat.com>  2020-09-30 05:29:04 +1000
committer Marge Bot <eric+marge@anholt.net>  2020-10-01 00:23:40 +0000
commit    e94fd4cc65899bccceb4642363bc4376c6831580 (patch)
tree      6d7fff3c6891b764a1738bb2e1b4414f5a317f67
parent    5e8791a0bf00384cbd7e3a7231bddbc48bd550a8 (diff)
lavapipe: rename vallium to lavapipe
Just a cooler name, and a lot easier to search for.
thanks Marek

Acked-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6921>
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_cmd_buffer.c (renamed from src/gallium/frontends/vallium/val_cmd_buffer.c)  584
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_conv.h (renamed from src/gallium/frontends/vallium/val_conv.h)  0
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_descriptor_set.c (renamed from src/gallium/frontends/vallium/val_descriptor_set.c)  164
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_device.c (renamed from src/gallium/frontends/vallium/val_device.c)  450
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_entrypoints_gen.py (renamed from src/gallium/frontends/vallium/val_entrypoints_gen.py)  102
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_execute.c (renamed from src/gallium/frontends/vallium/val_execute.c)  374
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_extensions.py (renamed from src/gallium/frontends/vallium/val_extensions.py)  12
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_formats.c (renamed from src/gallium/frontends/vallium/val_formats.c)  36
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_image.c (renamed from src/gallium/frontends/vallium/val_image.c)  76
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_lower_input_attachments.c (renamed from src/gallium/frontends/vallium/val_lower_input_attachments.c)  4
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c (renamed from src/gallium/frontends/vallium/val_lower_vulkan_resource.c)  20
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.h (renamed from src/gallium/frontends/vallium/val_lower_vulkan_resource.h)  14
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_pass.c (renamed from src/gallium/frontends/vallium/val_pass.c)  66
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_pipeline.c (renamed from src/gallium/frontends/vallium/val_pipeline.c)  106
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_pipeline_cache.c (renamed from src/gallium/frontends/vallium/val_pipeline_cache.c)  24
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_private.h (renamed from src/gallium/frontends/vallium/val_private.h)  640
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_query.c (renamed from src/gallium/frontends/vallium/val_query.c)  24
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_util.c (renamed from src/gallium/frontends/vallium/val_util.c)  8
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_wsi.c (renamed from src/gallium/frontends/vallium/val_wsi.c)  80
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_wsi.h (renamed from src/gallium/frontends/vallium/val_wsi.h)  44
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_wsi_wayland.c (renamed from src/gallium/frontends/vallium/val_wsi_wayland.c)  10
-rw-r--r--  src/gallium/frontends/lavapipe/lvp_wsi_x11.c (renamed from src/gallium/frontends/vallium/val_wsi_x11.c)  18
-rw-r--r--  src/gallium/frontends/lavapipe/meson.build  66
-rw-r--r--  src/gallium/frontends/vallium/meson.build  66
-rw-r--r--  src/gallium/meson.build  4
-rw-r--r--  src/gallium/targets/lavapipe/lvp_icd.py (renamed from src/gallium/targets/vallium/val_icd.py)  4
-rw-r--r--  src/gallium/targets/lavapipe/meson.build (renamed from src/gallium/targets/vallium/meson.build)  16
-rw-r--r--  src/gallium/targets/lavapipe/target.c (renamed from src/gallium/targets/vallium/target.c)  0
28 files changed, 1506 insertions, 1506 deletions
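
The change is almost entirely mechanical: directories move from vallium/ to lavapipe/, file prefixes change from val_ to lvp_, and identifiers are renamed case-sensitively (val_* to lvp_*, VAL_* to LVP_*), which is why the insertion and deletion counts match exactly. The commit does not say how the rename was performed; purely as a sketch of the idea, a minimal word-boundary-aware filter that rewrites one file this way could look like the following. Everything in it is an assumption for illustration, not part of the commit, and the file/directory moves (val_*.c to lvp_*.c, vallium/ to lavapipe/) would happen separately, e.g. via `git mv`, so that git records them as renames.

/* rename_prefix.c -- illustrative sketch only; NOT the tool used for
 * this commit (none is included in it). Rewrites one file's
 * identifiers case-sensitively at word boundaries:
 *   val_ -> lvp_ and VAL_ -> LVP_
 * Emits the result on stdout; redirect to apply. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int word_char(unsigned char c)
{
   return isalnum(c) || c == '_';
}

int main(int argc, char **argv)
{
   if (argc != 2) {
      fprintf(stderr, "usage: %s <source-file>\n", argv[0]);
      return 1;
   }

   FILE *in = fopen(argv[1], "r");
   if (!in) { perror(argv[1]); return 1; }

   /* Read the whole file into memory (NUL-terminated). */
   fseek(in, 0, SEEK_END);
   long size = ftell(in);
   rewind(in);
   char *text = malloc(size + 1);
   if (!text || fread(text, 1, size, in) != (size_t)size) {
      fclose(in);
      return 1;
   }
   text[size] = '\0';
   fclose(in);

   for (long i = 0; i < size; ) {
      /* Only rewrite at a word boundary, so e.g. "interval_" survives. */
      int at_boundary = (i == 0) || !word_char((unsigned char)text[i - 1]);
      if (at_boundary && strncmp(&text[i], "val_", 4) == 0) {
         fputs("lvp_", stdout);
         i += 4;
      } else if (at_boundary && strncmp(&text[i], "VAL_", 4) == 0) {
         fputs("LVP_", stdout);
         i += 4;
      } else {
         putchar(text[i++]);
      }
   }
   free(text);
   return 0;
}

Consistent with that reading, the two entries above with 0 changed lines (lvp_conv.h and target.c) are pure moves: presumably nothing in them carries the old prefix, so only the path changed.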
diff --git a/src/gallium/frontends/vallium/val_cmd_buffer.c b/src/gallium/frontends/lavapipe/lvp_cmd_buffer.c
index fd0f8cd622d..71a28205aee 100644
--- a/src/gallium/frontends/vallium/val_cmd_buffer.c
+++ b/src/gallium/frontends/lavapipe/lvp_cmd_buffer.c
@@ -21,16 +21,16 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "pipe/p_context.h"
-static VkResult val_create_cmd_buffer(
- struct val_device * device,
- struct val_cmd_pool * pool,
+static VkResult lvp_create_cmd_buffer(
+ struct lvp_device * device,
+ struct lvp_cmd_pool * pool,
VkCommandBufferLevel level,
VkCommandBuffer* pCommandBuffer)
{
- struct val_cmd_buffer *cmd_buffer;
+ struct lvp_cmd_buffer *cmd_buffer;
cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -42,7 +42,7 @@ static VkResult val_create_cmd_buffer(
cmd_buffer->device = device;
cmd_buffer->pool = pool;
list_inithead(&cmd_buffer->cmds);
- cmd_buffer->status = VAL_CMD_BUFFER_STATUS_INITIAL;
+ cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
if (pool) {
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
} else {
@@ -51,36 +51,36 @@ static VkResult val_create_cmd_buffer(
*/
list_inithead(&cmd_buffer->pool_link);
}
- *pCommandBuffer = val_cmd_buffer_to_handle(cmd_buffer);
+ *pCommandBuffer = lvp_cmd_buffer_to_handle(cmd_buffer);
return VK_SUCCESS;
}
static void
-val_cmd_buffer_free_all_cmds(struct val_cmd_buffer *cmd_buffer)
+lvp_cmd_buffer_free_all_cmds(struct lvp_cmd_buffer *cmd_buffer)
{
- struct val_cmd_buffer_entry *tmp, *cmd;
+ struct lvp_cmd_buffer_entry *tmp, *cmd;
LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &cmd_buffer->cmds, cmd_link) {
list_del(&cmd->cmd_link);
vk_free(&cmd_buffer->pool->alloc, cmd);
}
}
-static VkResult val_reset_cmd_buffer(struct val_cmd_buffer *cmd_buffer)
+static VkResult lvp_reset_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer)
{
- val_cmd_buffer_free_all_cmds(cmd_buffer);
+ lvp_cmd_buffer_free_all_cmds(cmd_buffer);
list_inithead(&cmd_buffer->cmds);
- cmd_buffer->status = VAL_CMD_BUFFER_STATUS_INITIAL;
+ cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
return VK_SUCCESS;
}
-VkResult val_AllocateCommandBuffers(
+VkResult lvp_AllocateCommandBuffers(
VkDevice _device,
const VkCommandBufferAllocateInfo* pAllocateInfo,
VkCommandBuffer* pCommandBuffers)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_cmd_pool, pool, pAllocateInfo->commandPool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_cmd_pool, pool, pAllocateInfo->commandPool);
VkResult result = VK_SUCCESS;
uint32_t i;
@@ -88,17 +88,17 @@ VkResult val_AllocateCommandBuffers(
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
if (!list_is_empty(&pool->free_cmd_buffers)) {
- struct val_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct val_cmd_buffer, pool_link);
+ struct lvp_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct lvp_cmd_buffer, pool_link);
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
- result = val_reset_cmd_buffer(cmd_buffer);
+ result = lvp_reset_cmd_buffer(cmd_buffer);
cmd_buffer->level = pAllocateInfo->level;
- pCommandBuffers[i] = val_cmd_buffer_to_handle(cmd_buffer);
+ pCommandBuffers[i] = lvp_cmd_buffer_to_handle(cmd_buffer);
} else {
- result = val_create_cmd_buffer(device, pool, pAllocateInfo->level,
+ result = lvp_create_cmd_buffer(device, pool, pAllocateInfo->level,
&pCommandBuffers[i]);
if (result != VK_SUCCESS)
break;
@@ -106,7 +106,7 @@ VkResult val_AllocateCommandBuffers(
}
if (result != VK_SUCCESS) {
- val_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
+ lvp_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
i, pCommandBuffers);
memset(pCommandBuffers, 0,
sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
@@ -116,73 +116,73 @@ VkResult val_AllocateCommandBuffers(
}
static void
-val_cmd_buffer_destroy(struct val_cmd_buffer *cmd_buffer)
+lvp_cmd_buffer_destroy(struct lvp_cmd_buffer *cmd_buffer)
{
- val_cmd_buffer_free_all_cmds(cmd_buffer);
+ lvp_cmd_buffer_free_all_cmds(cmd_buffer);
list_del(&cmd_buffer->pool_link);
vk_object_base_finish(&cmd_buffer->base);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
-void val_FreeCommandBuffers(
+void lvp_FreeCommandBuffers(
VkDevice device,
VkCommandPool commandPool,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers)
{
for (uint32_t i = 0; i < commandBufferCount; i++) {
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
if (cmd_buffer) {
if (cmd_buffer->pool) {
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
} else
- val_cmd_buffer_destroy(cmd_buffer);
+ lvp_cmd_buffer_destroy(cmd_buffer);
}
}
}
-VkResult val_ResetCommandBuffer(
+VkResult lvp_ResetCommandBuffer(
VkCommandBuffer commandBuffer,
VkCommandBufferResetFlags flags)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
- return val_reset_cmd_buffer(cmd_buffer);
+ return lvp_reset_cmd_buffer(cmd_buffer);
}
-VkResult val_BeginCommandBuffer(
+VkResult lvp_BeginCommandBuffer(
VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo* pBeginInfo)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
VkResult result;
- if (cmd_buffer->status != VAL_CMD_BUFFER_STATUS_INITIAL) {
- result = val_reset_cmd_buffer(cmd_buffer);
+ if (cmd_buffer->status != LVP_CMD_BUFFER_STATUS_INITIAL) {
+ result = lvp_reset_cmd_buffer(cmd_buffer);
if (result != VK_SUCCESS)
return result;
}
- cmd_buffer->status = VAL_CMD_BUFFER_STATUS_RECORDING;
+ cmd_buffer->status = LVP_CMD_BUFFER_STATUS_RECORDING;
return VK_SUCCESS;
}
-VkResult val_EndCommandBuffer(
+VkResult lvp_EndCommandBuffer(
VkCommandBuffer commandBuffer)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->status = VAL_CMD_BUFFER_STATUS_EXECUTABLE;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ cmd_buffer->status = LVP_CMD_BUFFER_STATUS_EXECUTABLE;
return VK_SUCCESS;
}
-VkResult val_CreateCommandPool(
+VkResult lvp_CreateCommandPool(
VkDevice _device,
const VkCommandPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkCommandPool* pCmdPool)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_cmd_pool *pool;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_cmd_pool *pool;
pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -199,74 +199,74 @@ VkResult val_CreateCommandPool(
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
- *pCmdPool = val_cmd_pool_to_handle(pool);
+ *pCmdPool = lvp_cmd_pool_to_handle(pool);
return VK_SUCCESS;
}
-void val_DestroyCommandPool(
+void lvp_DestroyCommandPool(
VkDevice _device,
VkCommandPool commandPool,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_cmd_pool, pool, commandPool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
if (!pool)
return;
- list_for_each_entry_safe(struct val_cmd_buffer, cmd_buffer,
+ list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
&pool->cmd_buffers, pool_link) {
- val_cmd_buffer_destroy(cmd_buffer);
+ lvp_cmd_buffer_destroy(cmd_buffer);
}
- list_for_each_entry_safe(struct val_cmd_buffer, cmd_buffer,
+ list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
&pool->free_cmd_buffers, pool_link) {
- val_cmd_buffer_destroy(cmd_buffer);
+ lvp_cmd_buffer_destroy(cmd_buffer);
}
vk_object_base_finish(&pool->base);
vk_free2(&device->alloc, pAllocator, pool);
}
-VkResult val_ResetCommandPool(
+VkResult lvp_ResetCommandPool(
VkDevice device,
VkCommandPool commandPool,
VkCommandPoolResetFlags flags)
{
- VAL_FROM_HANDLE(val_cmd_pool, pool, commandPool);
+ LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
VkResult result;
- list_for_each_entry(struct val_cmd_buffer, cmd_buffer,
+ list_for_each_entry(struct lvp_cmd_buffer, cmd_buffer,
&pool->cmd_buffers, pool_link) {
- result = val_reset_cmd_buffer(cmd_buffer);
+ result = lvp_reset_cmd_buffer(cmd_buffer);
if (result != VK_SUCCESS)
return result;
}
return VK_SUCCESS;
}
-void val_TrimCommandPool(
+void lvp_TrimCommandPool(
VkDevice device,
VkCommandPool commandPool,
VkCommandPoolTrimFlags flags)
{
- VAL_FROM_HANDLE(val_cmd_pool, pool, commandPool);
+ LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
if (!pool)
return;
- list_for_each_entry_safe(struct val_cmd_buffer, cmd_buffer,
+ list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
&pool->free_cmd_buffers, pool_link) {
- val_cmd_buffer_destroy(cmd_buffer);
+ lvp_cmd_buffer_destroy(cmd_buffer);
}
}
-static struct val_cmd_buffer_entry *cmd_buf_entry_alloc_size(struct val_cmd_buffer *cmd_buffer,
+static struct lvp_cmd_buffer_entry *cmd_buf_entry_alloc_size(struct lvp_cmd_buffer *cmd_buffer,
uint32_t extra_size,
- enum val_cmds type)
+ enum lvp_cmds type)
{
- struct val_cmd_buffer_entry *cmd;
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = sizeof(*cmd) + extra_size;
cmd = vk_alloc(&cmd_buffer->pool->alloc,
cmd_size,
@@ -278,25 +278,25 @@ static struct val_cmd_buffer_entry *cmd_buf_entry_alloc_size(struct val_cmd_buff
return cmd;
}
-static struct val_cmd_buffer_entry *cmd_buf_entry_alloc(struct val_cmd_buffer *cmd_buffer,
- enum val_cmds type)
+static struct lvp_cmd_buffer_entry *cmd_buf_entry_alloc(struct lvp_cmd_buffer *cmd_buffer,
+ enum lvp_cmds type)
{
return cmd_buf_entry_alloc_size(cmd_buffer, 0, type);
}
-static void cmd_buf_queue(struct val_cmd_buffer *cmd_buffer,
- struct val_cmd_buffer_entry *cmd)
+static void cmd_buf_queue(struct lvp_cmd_buffer *cmd_buffer,
+ struct lvp_cmd_buffer_entry *cmd)
{
list_addtail(&cmd->cmd_link, &cmd_buffer->cmds);
}
static void
-state_setup_attachments(struct val_attachment_state *attachments,
- struct val_render_pass *pass,
+state_setup_attachments(struct lvp_attachment_state *attachments,
+ struct lvp_render_pass *pass,
const VkClearValue *clear_values)
{
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
- struct val_render_pass_attachment *att = &pass->attachments[i];
+ struct lvp_render_pass_attachment *att = &pass->attachments[i];
VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
VkImageAspectFlags clear_aspects = 0;
if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
@@ -324,18 +324,18 @@ state_setup_attachments(struct val_attachment_state *attachments,
}
}
-void val_CmdBeginRenderPass(
+void lvp_CmdBeginRenderPass(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
VkSubpassContents contents)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_render_pass, pass, pRenderPassBegin->renderPass);
- VAL_FROM_HANDLE(val_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
- struct val_cmd_buffer_entry *cmd;
- uint32_t cmd_size = pass->attachment_count * sizeof(struct val_attachment_state);
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_render_pass, pass, pRenderPassBegin->renderPass);
+ LVP_FROM_HANDLE(lvp_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
+ struct lvp_cmd_buffer_entry *cmd;
+ uint32_t cmd_size = pass->attachment_count * sizeof(struct lvp_attachment_state);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BEGIN_RENDER_PASS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BEGIN_RENDER_PASS);
if (!cmd)
return;
@@ -343,20 +343,20 @@ void val_CmdBeginRenderPass(
cmd->u.begin_render_pass.framebuffer = framebuffer;
cmd->u.begin_render_pass.render_area = pRenderPassBegin->renderArea;
- cmd->u.begin_render_pass.attachments = (struct val_attachment_state *)(cmd + 1);
+ cmd->u.begin_render_pass.attachments = (struct lvp_attachment_state *)(cmd + 1);
state_setup_attachments(cmd->u.begin_render_pass.attachments, pass, pRenderPassBegin->pClearValues);
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdNextSubpass(
+void lvp_CmdNextSubpass(
VkCommandBuffer commandBuffer,
VkSubpassContents contents)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_NEXT_SUBPASS);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_NEXT_SUBPASS);
if (!cmd)
return;
@@ -365,31 +365,31 @@ void val_CmdNextSubpass(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBindVertexBuffers(
+void lvp_CmdBindVertexBuffers(
VkCommandBuffer commandBuffer,
uint32_t firstBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
const VkDeviceSize* pOffsets)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
- struct val_buffer **buffers;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
+ struct lvp_buffer **buffers;
VkDeviceSize *offsets;
int i;
- uint32_t cmd_size = bindingCount * sizeof(struct val_buffer *) + bindingCount * sizeof(VkDeviceSize);
+ uint32_t cmd_size = bindingCount * sizeof(struct lvp_buffer *) + bindingCount * sizeof(VkDeviceSize);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BIND_VERTEX_BUFFERS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_VERTEX_BUFFERS);
if (!cmd)
return;
cmd->u.vertex_buffers.first = firstBinding;
cmd->u.vertex_buffers.binding_count = bindingCount;
- buffers = (struct val_buffer **)(cmd + 1);
+ buffers = (struct lvp_buffer **)(cmd + 1);
offsets = (VkDeviceSize *)(buffers + bindingCount);
for (i = 0; i < bindingCount; i++) {
- buffers[i] = val_buffer_from_handle(pBuffers[i]);
+ buffers[i] = lvp_buffer_from_handle(pBuffers[i]);
offsets[i] = pOffsets[i];
}
cmd->u.vertex_buffers.buffers = buffers;
@@ -398,16 +398,16 @@ void val_CmdBindVertexBuffers(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBindPipeline(
+void lvp_CmdBindPipeline(
VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_pipeline, pipeline, _pipeline);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_BIND_PIPELINE);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BIND_PIPELINE);
if (!cmd)
return;
@@ -417,7 +417,7 @@ void val_CmdBindPipeline(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBindDescriptorSets(
+void lvp_CmdBindDescriptorSets(
VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout _layout,
@@ -427,15 +427,15 @@ void val_CmdBindDescriptorSets(
uint32_t dynamicOffsetCount,
const uint32_t* pDynamicOffsets)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_pipeline_layout, layout, _layout);
- struct val_cmd_buffer_entry *cmd;
- struct val_descriptor_set **sets;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);
+ struct lvp_cmd_buffer_entry *cmd;
+ struct lvp_descriptor_set **sets;
uint32_t *offsets;
int i;
- uint32_t cmd_size = descriptorSetCount * sizeof(struct val_descriptor_set *) + dynamicOffsetCount * sizeof(uint32_t);
+ uint32_t cmd_size = descriptorSetCount * sizeof(struct lvp_descriptor_set *) + dynamicOffsetCount * sizeof(uint32_t);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BIND_DESCRIPTOR_SETS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_DESCRIPTOR_SETS);
if (!cmd)
return;
@@ -444,9 +444,9 @@ void val_CmdBindDescriptorSets(
cmd->u.descriptor_sets.first = firstSet;
cmd->u.descriptor_sets.count = descriptorSetCount;
- sets = (struct val_descriptor_set **)(cmd + 1);
+ sets = (struct lvp_descriptor_set **)(cmd + 1);
for (i = 0; i < descriptorSetCount; i++) {
- sets[i] = val_descriptor_set_from_handle(pDescriptorSets[i]);
+ sets[i] = lvp_descriptor_set_from_handle(pDescriptorSets[i]);
}
cmd->u.descriptor_sets.sets = sets;
@@ -459,17 +459,17 @@ void val_CmdBindDescriptorSets(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdDraw(
+void lvp_CmdDraw(
VkCommandBuffer commandBuffer,
uint32_t vertexCount,
uint32_t instanceCount,
uint32_t firstVertex,
uint32_t firstInstance)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW);
if (!cmd)
return;
@@ -481,30 +481,30 @@ void val_CmdDraw(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdEndRenderPass(
+void lvp_CmdEndRenderPass(
VkCommandBuffer commandBuffer)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_END_RENDER_PASS);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_RENDER_PASS);
if (!cmd)
return;
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetViewport(
+void lvp_CmdSetViewport(
VkCommandBuffer commandBuffer,
uint32_t firstViewport,
uint32_t viewportCount,
const VkViewport* pViewports)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
int i;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_VIEWPORT);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_VIEWPORT);
if (!cmd)
return;
@@ -516,17 +516,17 @@ void val_CmdSetViewport(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetScissor(
+void lvp_CmdSetScissor(
VkCommandBuffer commandBuffer,
uint32_t firstScissor,
uint32_t scissorCount,
const VkRect2D* pScissors)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
int i;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_SCISSOR);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_SCISSOR);
if (!cmd)
return;
@@ -538,14 +538,14 @@ void val_CmdSetScissor(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetLineWidth(
+void lvp_CmdSetLineWidth(
VkCommandBuffer commandBuffer,
float lineWidth)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_LINE_WIDTH);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_LINE_WIDTH);
if (!cmd)
return;
@@ -554,16 +554,16 @@ void val_CmdSetLineWidth(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetDepthBias(
+void lvp_CmdSetDepthBias(
VkCommandBuffer commandBuffer,
float depthBiasConstantFactor,
float depthBiasClamp,
float depthBiasSlopeFactor)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_DEPTH_BIAS);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BIAS);
if (!cmd)
return;
@@ -574,14 +574,14 @@ void val_CmdSetDepthBias(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetBlendConstants(
+void lvp_CmdSetBlendConstants(
VkCommandBuffer commandBuffer,
const float blendConstants[4])
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_BLEND_CONSTANTS);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_BLEND_CONSTANTS);
if (!cmd)
return;
@@ -590,15 +590,15 @@ void val_CmdSetBlendConstants(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetDepthBounds(
+void lvp_CmdSetDepthBounds(
VkCommandBuffer commandBuffer,
float minDepthBounds,
float maxDepthBounds)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_DEPTH_BOUNDS);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BOUNDS);
if (!cmd)
return;
@@ -608,15 +608,15 @@ void val_CmdSetDepthBounds(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetStencilCompareMask(
+void lvp_CmdSetStencilCompareMask(
VkCommandBuffer commandBuffer,
VkStencilFaceFlags faceMask,
uint32_t compareMask)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_STENCIL_COMPARE_MASK);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_COMPARE_MASK);
if (!cmd)
return;
@@ -626,15 +626,15 @@ void val_CmdSetStencilCompareMask(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetStencilWriteMask(
+void lvp_CmdSetStencilWriteMask(
VkCommandBuffer commandBuffer,
VkStencilFaceFlags faceMask,
uint32_t writeMask)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_STENCIL_WRITE_MASK);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_WRITE_MASK);
if (!cmd)
return;
@@ -645,15 +645,15 @@ void val_CmdSetStencilWriteMask(
}
-void val_CmdSetStencilReference(
+void lvp_CmdSetStencilReference(
VkCommandBuffer commandBuffer,
VkStencilFaceFlags faceMask,
uint32_t reference)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_STENCIL_REFERENCE);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_REFERENCE);
if (!cmd)
return;
@@ -663,7 +663,7 @@ void val_CmdSetStencilReference(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdPushConstants(
+void lvp_CmdPushConstants(
VkCommandBuffer commandBuffer,
VkPipelineLayout layout,
VkShaderStageFlags stageFlags,
@@ -671,10 +671,10 @@ void val_CmdPushConstants(
uint32_t size,
const void* pValues)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, (size - 4), VAL_CMD_PUSH_CONSTANTS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, (size - 4), LVP_CMD_PUSH_CONSTANTS);
if (!cmd)
return;
@@ -686,17 +686,17 @@ void val_CmdPushConstants(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBindIndexBuffer(
+void lvp_CmdBindIndexBuffer(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
VkIndexType indexType)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_BIND_INDEX_BUFFER);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BIND_INDEX_BUFFER);
if (!cmd)
return;
@@ -707,7 +707,7 @@ void val_CmdBindIndexBuffer(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdDrawIndexed(
+void lvp_CmdDrawIndexed(
VkCommandBuffer commandBuffer,
uint32_t indexCount,
uint32_t instanceCount,
@@ -715,10 +715,10 @@ void val_CmdDrawIndexed(
int32_t vertexOffset,
uint32_t firstInstance)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW_INDEXED);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDEXED);
if (!cmd)
return;
@@ -731,18 +731,18 @@ void val_CmdDrawIndexed(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdDrawIndirect(
+void lvp_CmdDrawIndirect(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t drawCount,
uint32_t stride)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, buf, _buffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, buf, _buffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW_INDIRECT);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDIRECT);
if (!cmd)
return;
@@ -754,18 +754,18 @@ void val_CmdDrawIndirect(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdDrawIndexedIndirect(
+void lvp_CmdDrawIndexedIndirect(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t drawCount,
uint32_t stride)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, buf, _buffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, buf, _buffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DRAW_INDEXED_INDIRECT);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDEXED_INDIRECT);
if (!cmd)
return;
@@ -777,16 +777,16 @@ void val_CmdDrawIndexedIndirect(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdDispatch(
+void lvp_CmdDispatch(
VkCommandBuffer commandBuffer,
uint32_t x,
uint32_t y,
uint32_t z)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DISPATCH);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH);
if (!cmd)
return;
@@ -797,53 +797,53 @@ void val_CmdDispatch(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdDispatchIndirect(
+void lvp_CmdDispatchIndirect(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_DISPATCH_INDIRECT);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH_INDIRECT);
if (!cmd)
return;
- cmd->u.dispatch_indirect.buffer = val_buffer_from_handle(_buffer);
+ cmd->u.dispatch_indirect.buffer = lvp_buffer_from_handle(_buffer);
cmd->u.dispatch_indirect.offset = offset;
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdExecuteCommands(
+void lvp_CmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
const VkCommandBuffer* pCmdBuffers)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
- uint32_t cmd_size = commandBufferCount * sizeof(struct val_cmd_buffer *);
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
+ uint32_t cmd_size = commandBufferCount * sizeof(struct lvp_cmd_buffer *);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_EXECUTE_COMMANDS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_EXECUTE_COMMANDS);
if (!cmd)
return;
cmd->u.execute_commands.command_buffer_count = commandBufferCount;
for (unsigned i = 0; i < commandBufferCount; i++)
- cmd->u.execute_commands.cmd_buffers[i] = val_cmd_buffer_from_handle(pCmdBuffers[i]);
+ cmd->u.execute_commands.cmd_buffers[i] = lvp_cmd_buffer_from_handle(pCmdBuffers[i]);
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdSetEvent(VkCommandBuffer commandBuffer,
+void lvp_CmdSetEvent(VkCommandBuffer commandBuffer,
VkEvent _event,
VkPipelineStageFlags stageMask)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_event, event, _event);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_event, event, _event);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_EVENT);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_EVENT);
if (!cmd)
return;
@@ -854,15 +854,15 @@ void val_CmdSetEvent(VkCommandBuffer commandBuffer,
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdResetEvent(VkCommandBuffer commandBuffer,
+void lvp_CmdResetEvent(VkCommandBuffer commandBuffer,
VkEvent _event,
VkPipelineStageFlags stageMask)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_event, event, _event);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_event, event, _event);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_SET_EVENT);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_EVENT);
if (!cmd)
return;
@@ -874,7 +874,7 @@ void val_CmdResetEvent(VkCommandBuffer commandBuffer,
}
-void val_CmdWaitEvents(VkCommandBuffer commandBuffer,
+void lvp_CmdWaitEvents(VkCommandBuffer commandBuffer,
uint32_t eventCount,
const VkEvent* pEvents,
VkPipelineStageFlags srcStageMask,
@@ -886,25 +886,25 @@ void val_CmdWaitEvents(VkCommandBuffer commandBuffer,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = 0;
- cmd_size += eventCount * sizeof(struct val_event *);
+ cmd_size += eventCount * sizeof(struct lvp_event *);
cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_WAIT_EVENTS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_WAIT_EVENTS);
if (!cmd)
return;
cmd->u.wait_events.src_stage_mask = srcStageMask;
cmd->u.wait_events.dst_stage_mask = dstStageMask;
cmd->u.wait_events.event_count = eventCount;
- cmd->u.wait_events.events = (struct val_event **)(cmd + 1);
+ cmd->u.wait_events.events = (struct lvp_event **)(cmd + 1);
for (unsigned i = 0; i < eventCount; i++)
- cmd->u.wait_events.events[i] = val_event_from_handle(pEvents[i]);
+ cmd->u.wait_events.events[i] = lvp_event_from_handle(pEvents[i]);
cmd->u.wait_events.memory_barrier_count = memoryBarrierCount;
cmd->u.wait_events.buffer_memory_barrier_count = bufferMemoryBarrierCount;
cmd->u.wait_events.image_memory_barrier_count = imageMemoryBarrierCount;
@@ -914,7 +914,7 @@ void val_CmdWaitEvents(VkCommandBuffer commandBuffer,
}
-void val_CmdCopyBufferToImage(
+void lvp_CmdCopyBufferToImage(
VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkImage destImage,
@@ -922,13 +922,13 @@ void val_CmdCopyBufferToImage(
uint32_t regionCount,
const VkBufferImageCopy* pRegions)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, src_buffer, srcBuffer);
- VAL_FROM_HANDLE(val_image, dst_image, destImage);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, src_buffer, srcBuffer);
+ LVP_FROM_HANDLE(lvp_image, dst_image, destImage);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = regionCount * sizeof(VkBufferImageCopy);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_BUFFER_TO_IMAGE);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_BUFFER_TO_IMAGE);
if (!cmd)
return;
@@ -948,7 +948,7 @@ void val_CmdCopyBufferToImage(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdCopyImageToBuffer(
+void lvp_CmdCopyImageToBuffer(
VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
@@ -956,13 +956,13 @@ void val_CmdCopyImageToBuffer(
uint32_t regionCount,
const VkBufferImageCopy* pRegions)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_image, src_image, srcImage);
- VAL_FROM_HANDLE(val_buffer, dst_buffer, destBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
+ LVP_FROM_HANDLE(lvp_buffer, dst_buffer, destBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = regionCount * sizeof(VkBufferImageCopy);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_IMAGE_TO_BUFFER);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_IMAGE_TO_BUFFER);
if (!cmd)
return;
@@ -982,7 +982,7 @@ void val_CmdCopyImageToBuffer(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdCopyImage(
+void lvp_CmdCopyImage(
VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
@@ -991,13 +991,13 @@ void val_CmdCopyImage(
uint32_t regionCount,
const VkImageCopy* pRegions)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_image, src_image, srcImage);
- VAL_FROM_HANDLE(val_image, dest_image, destImage);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
+ LVP_FROM_HANDLE(lvp_image, dest_image, destImage);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = regionCount * sizeof(VkImageCopy);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_IMAGE);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_IMAGE);
if (!cmd)
return;
@@ -1019,20 +1019,20 @@ void val_CmdCopyImage(
}
-void val_CmdCopyBuffer(
+void lvp_CmdCopyBuffer(
VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkBuffer destBuffer,
uint32_t regionCount,
const VkBufferCopy* pRegions)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, src_buffer, srcBuffer);
- VAL_FROM_HANDLE(val_buffer, dest_buffer, destBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, src_buffer, srcBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, dest_buffer, destBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = regionCount * sizeof(VkBufferCopy);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_COPY_BUFFER);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_COPY_BUFFER);
if (!cmd)
return;
@@ -1051,7 +1051,7 @@ void val_CmdCopyBuffer(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBlitImage(
+void lvp_CmdBlitImage(
VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
@@ -1061,13 +1061,13 @@ void val_CmdBlitImage(
const VkImageBlit* pRegions,
VkFilter filter)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_image, src_image, srcImage);
- VAL_FROM_HANDLE(val_image, dest_image, destImage);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
+ LVP_FROM_HANDLE(lvp_image, dest_image, destImage);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = regionCount * sizeof(VkImageBlit);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_BLIT_IMAGE);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BLIT_IMAGE);
if (!cmd)
return;
@@ -1089,18 +1089,18 @@ void val_CmdBlitImage(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdClearAttachments(
+void lvp_CmdClearAttachments(
VkCommandBuffer commandBuffer,
uint32_t attachmentCount,
const VkClearAttachment* pAttachments,
uint32_t rectCount,
const VkClearRect* pRects)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = attachmentCount * sizeof(VkClearAttachment) + rectCount * sizeof(VkClearRect);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_CLEAR_ATTACHMENTS);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_CLEAR_ATTACHMENTS);
if (!cmd)
return;
@@ -1116,18 +1116,18 @@ void val_CmdClearAttachments(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdFillBuffer(
+void lvp_CmdFillBuffer(
VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize fillSize,
uint32_t data)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, dst_buffer, dstBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, dst_buffer, dstBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_FILL_BUFFER);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_FILL_BUFFER);
if (!cmd)
return;
@@ -1139,18 +1139,18 @@ void val_CmdFillBuffer(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdUpdateBuffer(
+void lvp_CmdUpdateBuffer(
VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize dataSize,
const void* pData)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_buffer, dst_buffer, dstBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_buffer, dst_buffer, dstBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, dataSize, VAL_CMD_UPDATE_BUFFER);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, dataSize, LVP_CMD_UPDATE_BUFFER);
if (!cmd)
return;
@@ -1162,7 +1162,7 @@ void val_CmdUpdateBuffer(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdClearColorImage(
+void lvp_CmdClearColorImage(
VkCommandBuffer commandBuffer,
VkImage image_h,
VkImageLayout imageLayout,
@@ -1170,12 +1170,12 @@ void val_CmdClearColorImage(
uint32_t rangeCount,
const VkImageSubresourceRange* pRanges)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_image, image, image_h);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_image, image, image_h);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = rangeCount * sizeof(VkImageSubresourceRange);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_CLEAR_COLOR_IMAGE);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_CLEAR_COLOR_IMAGE);
if (!cmd)
return;
@@ -1190,7 +1190,7 @@ void val_CmdClearColorImage(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdClearDepthStencilImage(
+void lvp_CmdClearDepthStencilImage(
VkCommandBuffer commandBuffer,
VkImage image_h,
VkImageLayout imageLayout,
@@ -1198,12 +1198,12 @@ void val_CmdClearDepthStencilImage(
uint32_t rangeCount,
const VkImageSubresourceRange* pRanges)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_image, image, image_h);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_image, image, image_h);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = rangeCount * sizeof(VkImageSubresourceRange);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_CLEAR_DEPTH_STENCIL_IMAGE);
if (!cmd)
return;
@@ -1219,7 +1219,7 @@ void val_CmdClearDepthStencilImage(
}
-void val_CmdResolveImage(
+void lvp_CmdResolveImage(
VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
@@ -1228,13 +1228,13 @@ void val_CmdResolveImage(
uint32_t regionCount,
const VkImageResolve* regions)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_image, src_image, srcImage);
- VAL_FROM_HANDLE(val_image, dst_image, destImage);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
+ LVP_FROM_HANDLE(lvp_image, dst_image, destImage);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = regionCount * sizeof(VkImageResolve);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_RESOLVE_IMAGE);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_RESOLVE_IMAGE);
if (!cmd)
return;
@@ -1250,17 +1250,17 @@ void val_CmdResolveImage(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdResetQueryPool(
+void lvp_CmdResetQueryPool(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t firstQuery,
uint32_t queryCount)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_RESET_QUERY_POOL);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_RESET_QUERY_POOL);
if (!cmd)
return;
@@ -1271,18 +1271,18 @@ void val_CmdResetQueryPool(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBeginQueryIndexedEXT(
+void lvp_CmdBeginQueryIndexedEXT(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query,
VkQueryControlFlags flags,
uint32_t index)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_BEGIN_QUERY);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BEGIN_QUERY);
if (!cmd)
return;
@@ -1294,26 +1294,26 @@ void val_CmdBeginQueryIndexedEXT(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdBeginQuery(
+void lvp_CmdBeginQuery(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query,
VkQueryControlFlags flags)
{
- val_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
+ lvp_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}
-void val_CmdEndQueryIndexedEXT(
+void lvp_CmdEndQueryIndexedEXT(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query,
uint32_t index)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_END_QUERY);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_QUERY);
if (!cmd)
return;
@@ -1324,25 +1324,25 @@ void val_CmdEndQueryIndexedEXT(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdEndQuery(
+void lvp_CmdEndQuery(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t query)
{
- val_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
+ lvp_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}
-void val_CmdWriteTimestamp(
+void lvp_CmdWriteTimestamp(
VkCommandBuffer commandBuffer,
VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool,
uint32_t query)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_WRITE_TIMESTAMP);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_WRITE_TIMESTAMP);
if (!cmd)
return;
@@ -1353,7 +1353,7 @@ void val_CmdWriteTimestamp(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdCopyQueryPoolResults(
+void lvp_CmdCopyQueryPoolResults(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
uint32_t firstQuery,
@@ -1363,12 +1363,12 @@ void val_CmdCopyQueryPoolResults(
VkDeviceSize stride,
VkQueryResultFlags flags)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- VAL_FROM_HANDLE(val_query_pool, query_pool, queryPool);
- VAL_FROM_HANDLE(val_buffer, buffer, dstBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, dstBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
- cmd = cmd_buf_entry_alloc(cmd_buffer, VAL_CMD_COPY_QUERY_POOL_RESULTS);
+ cmd = cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_COPY_QUERY_POOL_RESULTS);
if (!cmd)
return;
@@ -1383,7 +1383,7 @@ void val_CmdCopyQueryPoolResults(
cmd_buf_queue(cmd_buffer, cmd);
}
-void val_CmdPipelineBarrier(
+void lvp_CmdPipelineBarrier(
VkCommandBuffer commandBuffer,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags destStageMask,
@@ -1395,15 +1395,15 @@ void val_CmdPipelineBarrier(
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
- VAL_FROM_HANDLE(val_cmd_buffer, cmd_buffer, commandBuffer);
- struct val_cmd_buffer_entry *cmd;
+ LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+ struct lvp_cmd_buffer_entry *cmd;
uint32_t cmd_size = 0;
cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);
- cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, VAL_CMD_PIPELINE_BARRIER);
+ cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_PIPELINE_BARRIER);
if (!cmd)
return;
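
Aside from the val/lvp substitution, the logic in lvp_cmd_buffer.c is untouched, and every entry point above follows one record-and-defer pattern: allocate a type-tagged lvp_cmd_buffer_entry, optionally with trailing bytes for variable-length arrays (reached via the (cmd + 1) pointer arithmetic), fill in its union member, and append it to cmd_buffer->cmds for later replay (handled in lvp_execute.c). Below is a self-contained toy sketch of that pattern with simplified stand-in types; it is not the real lvp_private.h definitions.

/* Toy illustration (not Mesa code) of the record-and-defer pattern. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum cmd_type { CMD_DRAW, CMD_END_RENDER_PASS };

struct cmd_entry {
   struct cmd_entry *next;     /* Mesa uses an intrusive list_head */
   enum cmd_type type;         /* tag selecting the union member */
   union {
      struct { uint32_t vertex_count, instance_count; } draw;
   } u;
   /* extra_size bytes of payload follow the struct: (void *)(cmd + 1) */
};

struct cmd_buffer { struct cmd_entry *head, **tail; };

/* Mirrors cmd_buf_entry_alloc_size(): one allocation holds the entry
 * plus any variable-length arrays the command needs. */
static struct cmd_entry *entry_alloc(size_t extra_size, enum cmd_type type)
{
   struct cmd_entry *cmd = calloc(1, sizeof(*cmd) + extra_size);
   if (cmd)
      cmd->type = type;
   return cmd;
}

/* Mirrors cmd_buf_queue(): append, preserving recording order. */
static void cmd_queue(struct cmd_buffer *cb, struct cmd_entry *cmd)
{
   *cb->tail = cmd;
   cb->tail = &cmd->next;
}

int main(void)
{
   struct cmd_buffer cb = { .head = NULL, .tail = &cb.head };

   struct cmd_entry *cmd = entry_alloc(0, CMD_DRAW); /* as in lvp_CmdDraw */
   if (!cmd)
      return 1;
   cmd->u.draw.vertex_count = 3;
   cmd->u.draw.instance_count = 1;
   cmd_queue(&cb, cmd);

   /* Replay phase: walk the list and dispatch on the tag. */
   for (struct cmd_entry *c = cb.head; c; c = c->next)
      if (c->type == CMD_DRAW)
         printf("draw %u vertices\n", (unsigned)c->u.draw.vertex_count);
   return 0;
}

The single allocation per command keeps recording cheap: payload arrays (vertex buffers, descriptor sets, barriers) live in the same block as the entry, which is why each entry point sums their sizes into cmd_size before calling cmd_buf_entry_alloc_size().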
diff --git a/src/gallium/frontends/vallium/val_conv.h b/src/gallium/frontends/lavapipe/lvp_conv.h
index 5efc0784972..5efc0784972 100644
--- a/src/gallium/frontends/vallium/val_conv.h
+++ b/src/gallium/frontends/lavapipe/lvp_conv.h
diff --git a/src/gallium/frontends/vallium/val_descriptor_set.c b/src/gallium/frontends/lavapipe/lvp_descriptor_set.c
index 82c112929f9..58eeac335d2 100644
--- a/src/gallium/frontends/vallium/val_descriptor_set.c
+++ b/src/gallium/frontends/lavapipe/lvp_descriptor_set.c
@@ -21,18 +21,18 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "vk_util.h"
#include "u_math.h"
-VkResult val_CreateDescriptorSetLayout(
+VkResult lvp_CreateDescriptorSetLayout(
VkDevice _device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorSetLayout* pSetLayout)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_descriptor_set_layout *set_layout;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
uint32_t max_binding = 0;
@@ -43,9 +43,9 @@ VkResult val_CreateDescriptorSetLayout(
immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
}
- size_t size = sizeof(struct val_descriptor_set_layout) +
+ size_t size = sizeof(struct lvp_descriptor_set_layout) +
(max_binding + 1) * sizeof(set_layout->binding[0]) +
- immutable_sampler_count * sizeof(struct val_sampler *);
+ immutable_sampler_count * sizeof(struct lvp_sampler *);
set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -55,8 +55,8 @@ VkResult val_CreateDescriptorSetLayout(
vk_object_base_init(&device->vk, &set_layout->base,
VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
/* We just allocate all the samplers at the end of the struct */
- struct val_sampler **samplers =
- (struct val_sampler **)&set_layout->binding[max_binding + 1];
+ struct lvp_sampler **samplers =
+ (struct lvp_sampler **)&set_layout->binding[max_binding + 1];
set_layout->binding_count = max_binding + 1;
set_layout->shader_stages = 0;
@@ -90,7 +90,7 @@ VkResult val_CreateDescriptorSetLayout(
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- val_foreach_stage(s, binding->stageFlags) {
+ lvp_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].sampler_index = set_layout->stage[s].sampler_count;
set_layout->stage[s].sampler_count += binding->descriptorCount;
}
@@ -102,14 +102,14 @@ VkResult val_CreateDescriptorSetLayout(
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- val_foreach_stage(s, binding->stageFlags) {
+ lvp_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].const_buffer_index = set_layout->stage[s].const_buffer_count;
set_layout->stage[s].const_buffer_count += binding->descriptorCount;
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- val_foreach_stage(s, binding->stageFlags) {
+ lvp_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].shader_buffer_index = set_layout->stage[s].shader_buffer_count;
set_layout->stage[s].shader_buffer_count += binding->descriptorCount;
}
@@ -118,7 +118,7 @@ VkResult val_CreateDescriptorSetLayout(
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- val_foreach_stage(s, binding->stageFlags) {
+ lvp_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].image_index = set_layout->stage[s].image_count;
set_layout->stage[s].image_count += binding->descriptorCount;
}
@@ -126,7 +126,7 @@ VkResult val_CreateDescriptorSetLayout(
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- val_foreach_stage(s, binding->stageFlags) {
+ lvp_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].sampler_view_index = set_layout->stage[s].sampler_view_count;
set_layout->stage[s].sampler_view_count += binding->descriptorCount;
}
@@ -141,7 +141,7 @@ VkResult val_CreateDescriptorSetLayout(
for (uint32_t i = 0; i < binding->descriptorCount; i++)
set_layout->binding[b].immutable_samplers[i] =
- val_sampler_from_handle(binding->pImmutableSamplers[i]);
+ lvp_sampler_from_handle(binding->pImmutableSamplers[i]);
} else {
set_layout->binding[b].immutable_samplers = NULL;
}
@@ -151,18 +151,18 @@ VkResult val_CreateDescriptorSetLayout(
set_layout->dynamic_offset_count = dynamic_offset_count;
- *pSetLayout = val_descriptor_set_layout_to_handle(set_layout);
+ *pSetLayout = lvp_descriptor_set_layout_to_handle(set_layout);
return VK_SUCCESS;
}
-void val_DestroyDescriptorSetLayout(
+void lvp_DestroyDescriptorSetLayout(
VkDevice _device,
VkDescriptorSetLayout _set_layout,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout, _set_layout);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout, _set_layout);
if (!_set_layout)
return;
@@ -170,14 +170,14 @@ void val_DestroyDescriptorSetLayout(
vk_free2(&device->alloc, pAllocator, set_layout);
}
-VkResult val_CreatePipelineLayout(
+VkResult lvp_CreatePipelineLayout(
VkDevice _device,
const VkPipelineLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPipelineLayout* pPipelineLayout)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_pipeline_layout *layout;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_pipeline_layout *layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
@@ -191,7 +191,7 @@ VkResult val_CreatePipelineLayout(
layout->num_sets = pCreateInfo->setLayoutCount;
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
- VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout,
+ LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]);
layout->set[set].layout = set_layout;
}
@@ -203,18 +203,18 @@ VkResult val_CreatePipelineLayout(
range->offset + range->size);
}
layout->push_constant_size = align(layout->push_constant_size, 16);
- *pPipelineLayout = val_pipeline_layout_to_handle(layout);
+ *pPipelineLayout = lvp_pipeline_layout_to_handle(layout);
return VK_SUCCESS;
}
-void val_DestroyPipelineLayout(
+void lvp_DestroyPipelineLayout(
VkDevice _device,
VkPipelineLayout _pipelineLayout,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_pipeline_layout, pipeline_layout, _pipelineLayout);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_pipeline_layout, pipeline_layout, _pipelineLayout);
if (!_pipelineLayout)
return;
@@ -223,11 +223,11 @@ void val_DestroyPipelineLayout(
}
VkResult
-val_descriptor_set_create(struct val_device *device,
- const struct val_descriptor_set_layout *layout,
- struct val_descriptor_set **out_set)
+lvp_descriptor_set_create(struct lvp_device *device,
+ const struct lvp_descriptor_set_layout *layout,
+ struct lvp_descriptor_set **out_set)
{
- struct val_descriptor_set *set;
+ struct lvp_descriptor_set *set;
size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
set = vk_alloc(&device->alloc /* XXX: Use the pool */, size, 8,
@@ -245,7 +245,7 @@ val_descriptor_set_create(struct val_device *device,
set->layout = layout;
/* Go through and fill out immutable samplers if we have any */
- struct val_descriptor *desc = set->descriptors;
+ struct lvp_descriptor *desc = set->descriptors;
for (uint32_t b = 0; b < layout->binding_count; b++) {
if (layout->binding[b].immutable_samplers) {
for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
@@ -260,62 +260,62 @@ val_descriptor_set_create(struct val_device *device,
}
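The set is created with a single allocation: the header plus layout->size descriptor slots, so set->descriptors addresses the trailing array directly. A minimal sketch of that layout using a C flexible array member, with calloc standing in for vk_alloc:

    /* Sketch of the one-allocation set: header plus N trailing slots. */
    #include <stdlib.h>

    struct demo_descriptor { int type; };

    struct demo_set {
       unsigned size;
       struct demo_descriptor descriptors[];  /* trailing slot array */
    };

    static struct demo_set *demo_create_set(unsigned slots)
    {
       struct demo_set *set =
          calloc(1, sizeof(*set) + slots * sizeof(set->descriptors[0]));
       if (set)
          set->size = slots;
       return set;
    }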
void
-val_descriptor_set_destroy(struct val_device *device,
- struct val_descriptor_set *set)
+lvp_descriptor_set_destroy(struct lvp_device *device,
+ struct lvp_descriptor_set *set)
{
vk_object_base_finish(&set->base);
vk_free(&device->alloc, set);
}
-VkResult val_AllocateDescriptorSets(
+VkResult lvp_AllocateDescriptorSets(
VkDevice _device,
const VkDescriptorSetAllocateInfo* pAllocateInfo,
VkDescriptorSet* pDescriptorSets)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result = VK_SUCCESS;
- struct val_descriptor_set *set;
+ struct lvp_descriptor_set *set;
uint32_t i;
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
- VAL_FROM_HANDLE(val_descriptor_set_layout, layout,
+ LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
- result = val_descriptor_set_create(device, layout, &set);
+ result = lvp_descriptor_set_create(device, layout, &set);
if (result != VK_SUCCESS)
break;
list_addtail(&set->link, &pool->sets);
- pDescriptorSets[i] = val_descriptor_set_to_handle(set);
+ pDescriptorSets[i] = lvp_descriptor_set_to_handle(set);
}
if (result != VK_SUCCESS)
- val_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
+ lvp_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
i, pDescriptorSets);
return result;
}
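On a mid-loop failure, the sets created so far are released by calling lvp_FreeDescriptorSets with the partial count i, so the caller never receives a half-populated array. The same rollback shape, sketched with plain malloc/free:

    /* Sketch of the partial-failure rollback: free exactly the i
     * objects that were created before the failure. */
    #include <stdlib.h>

    static int demo_alloc_many(void **out, unsigned n)
    {
       unsigned i;
       for (i = 0; i < n; i++) {
          out[i] = malloc(16);
          if (!out[i])
             break;            /* mirrors "if (result != VK_SUCCESS) break" */
       }
       if (i < n) {
          while (i--)          /* undo the allocations that did succeed */
             free(out[i]);
          return -1;
       }
       return 0;
    }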
-VkResult val_FreeDescriptorSets(
+VkResult lvp_FreeDescriptorSets(
VkDevice _device,
VkDescriptorPool descriptorPool,
uint32_t count,
const VkDescriptorSet* pDescriptorSets)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
for (uint32_t i = 0; i < count; i++) {
- VAL_FROM_HANDLE(val_descriptor_set, set, pDescriptorSets[i]);
+ LVP_FROM_HANDLE(lvp_descriptor_set, set, pDescriptorSets[i]);
if (!set)
continue;
list_del(&set->link);
- val_descriptor_set_destroy(device, set);
+ lvp_descriptor_set_destroy(device, set);
}
return VK_SUCCESS;
}
-void val_UpdateDescriptorSets(
+void lvp_UpdateDescriptorSets(
VkDevice _device,
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet* pDescriptorWrites,
@@ -324,20 +324,20 @@ void val_UpdateDescriptorSets(
{
for (uint32_t i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
- VAL_FROM_HANDLE(val_descriptor_set, set, write->dstSet);
- const struct val_descriptor_set_binding_layout *bind_layout =
+ LVP_FROM_HANDLE(lvp_descriptor_set, set, write->dstSet);
+ const struct lvp_descriptor_set_binding_layout *bind_layout =
&set->layout->binding[write->dstBinding];
- struct val_descriptor *desc =
+ struct lvp_descriptor *desc =
&set->descriptors[bind_layout->descriptor_index];
desc += write->dstArrayElement;
switch (write->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- VAL_FROM_HANDLE(val_sampler, sampler,
+ LVP_FROM_HANDLE(lvp_sampler, sampler,
write->pImageInfo[j].sampler);
- desc[j] = (struct val_descriptor) {
+ desc[j] = (struct lvp_descriptor) {
.type = VK_DESCRIPTOR_TYPE_SAMPLER,
.sampler = sampler,
};
@@ -346,9 +346,9 @@ void val_UpdateDescriptorSets(
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- VAL_FROM_HANDLE(val_image_view, iview,
+ LVP_FROM_HANDLE(lvp_image_view, iview,
write->pImageInfo[j].imageView);
- VAL_FROM_HANDLE(val_sampler, sampler,
+ LVP_FROM_HANDLE(lvp_sampler, sampler,
write->pImageInfo[j].sampler);
desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
@@ -366,10 +366,10 @@ void val_UpdateDescriptorSets(
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- VAL_FROM_HANDLE(val_image_view, iview,
+ LVP_FROM_HANDLE(lvp_image_view, iview,
write->pImageInfo[j].imageView);
- desc[j] = (struct val_descriptor) {
+ desc[j] = (struct lvp_descriptor) {
.type = write->descriptorType,
.image_view = iview,
};
@@ -379,10 +379,10 @@ void val_UpdateDescriptorSets(
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- VAL_FROM_HANDLE(val_buffer_view, bview,
+ LVP_FROM_HANDLE(lvp_buffer_view, bview,
write->pTexelBufferView[j]);
- desc[j] = (struct val_descriptor) {
+ desc[j] = (struct lvp_descriptor) {
.type = write->descriptorType,
.buffer_view = bview,
};
@@ -395,9 +395,9 @@ void val_UpdateDescriptorSets(
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
assert(write->pBufferInfo[j].buffer);
- VAL_FROM_HANDLE(val_buffer, buffer, write->pBufferInfo[j].buffer);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
assert(buffer);
- desc[j] = (struct val_descriptor) {
+ desc[j] = (struct lvp_descriptor) {
.type = write->descriptorType,
.buf.offset = write->pBufferInfo[j].offset,
.buf.buffer = buffer,
@@ -413,18 +413,18 @@ void val_UpdateDescriptorSets(
for (uint32_t i = 0; i < descriptorCopyCount; i++) {
const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
- VAL_FROM_HANDLE(val_descriptor_set, src, copy->srcSet);
- VAL_FROM_HANDLE(val_descriptor_set, dst, copy->dstSet);
+ LVP_FROM_HANDLE(lvp_descriptor_set, src, copy->srcSet);
+ LVP_FROM_HANDLE(lvp_descriptor_set, dst, copy->dstSet);
- const struct val_descriptor_set_binding_layout *src_layout =
+ const struct lvp_descriptor_set_binding_layout *src_layout =
&src->layout->binding[copy->srcBinding];
- struct val_descriptor *src_desc =
+ struct lvp_descriptor *src_desc =
&src->descriptors[src_layout->descriptor_index];
src_desc += copy->srcArrayElement;
- const struct val_descriptor_set_binding_layout *dst_layout =
+ const struct lvp_descriptor_set_binding_layout *dst_layout =
&dst->layout->binding[copy->dstBinding];
- struct val_descriptor *dst_desc =
+ struct lvp_descriptor *dst_desc =
&dst->descriptors[dst_layout->descriptor_index];
dst_desc += copy->dstArrayElement;
@@ -433,15 +433,15 @@ void val_UpdateDescriptorSets(
}
}
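Both the write and copy paths address descriptors the same way: each binding owns a contiguous run in the set's flat descriptor array starting at descriptor_index, and dstArrayElement/srcArrayElement offsets within that run. A one-function sketch of that addressing, with a simplified descriptor struct:

    /* Sketch of the slot addressing used by writes and copies. */
    struct demo_descriptor { int type; const void *payload; };

    static struct demo_descriptor *
    demo_slot(struct demo_descriptor *descriptors,  /* set's flat array   */
              unsigned descriptor_index,            /* binding's first slot */
              unsigned array_element)               /* dst/srcArrayElement  */
    {
       return &descriptors[descriptor_index + array_element];
    }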
-VkResult val_CreateDescriptorPool(
+VkResult lvp_CreateDescriptorPool(
VkDevice _device,
const VkDescriptorPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorPool* pDescriptorPool)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_descriptor_pool *pool;
- size_t size = sizeof(struct val_descriptor_pool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_descriptor_pool *pool;
+ size_t size = sizeof(struct lvp_descriptor_pool);
pool = vk_zalloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pool)
@@ -451,49 +451,49 @@ VkResult val_CreateDescriptorPool(
VK_OBJECT_TYPE_DESCRIPTOR_POOL);
pool->flags = pCreateInfo->flags;
list_inithead(&pool->sets);
- *pDescriptorPool = val_descriptor_pool_to_handle(pool);
+ *pDescriptorPool = lvp_descriptor_pool_to_handle(pool);
return VK_SUCCESS;
}
-static void val_reset_descriptor_pool(struct val_device *device,
- struct val_descriptor_pool *pool)
+static void lvp_reset_descriptor_pool(struct lvp_device *device,
+ struct lvp_descriptor_pool *pool)
{
- struct val_descriptor_set *set, *tmp;
+ struct lvp_descriptor_set *set, *tmp;
LIST_FOR_EACH_ENTRY_SAFE(set, tmp, &pool->sets, link) {
list_del(&set->link);
vk_free(&device->alloc, set);
}
}
-void val_DestroyDescriptorPool(
+void lvp_DestroyDescriptorPool(
VkDevice _device,
VkDescriptorPool _pool,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
if (!_pool)
return;
- val_reset_descriptor_pool(device, pool);
+ lvp_reset_descriptor_pool(device, pool);
vk_object_base_finish(&pool->base);
vk_free2(&device->alloc, pAllocator, pool);
}
-VkResult val_ResetDescriptorPool(
+VkResult lvp_ResetDescriptorPool(
VkDevice _device,
VkDescriptorPool _pool,
VkDescriptorPoolResetFlags flags)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
- val_reset_descriptor_pool(device, pool);
+ lvp_reset_descriptor_pool(device, pool);
return VK_SUCCESS;
}
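The pool keeps no per-set bookkeeping beyond an intrusive list: every allocated set is appended to pool->sets, so both reset and destroy reduce to walking the list and freeing each entry. The same idea sketched with a plain singly-linked list in place of Mesa's list.h:

    /* Sketch of pool reset: free while the list is non-empty. */
    #include <stdlib.h>

    struct demo_set  { struct demo_set *next; };
    struct demo_pool { struct demo_set *sets; };

    static void demo_reset_pool(struct demo_pool *pool)
    {
       while (pool->sets) {
          struct demo_set *s = pool->sets;
          pool->sets = s->next;   /* list_del() equivalent */
          free(s);
       }
    }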
-void val_GetDescriptorSetLayoutSupport(VkDevice device,
+void lvp_GetDescriptorSetLayoutSupport(VkDevice device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
VkDescriptorSetLayoutSupport* pSupport)
{
diff --git a/src/gallium/frontends/vallium/val_device.c b/src/gallium/frontends/lavapipe/lvp_device.c
index b04a36980fc..86dcb1f1b1b 100644
--- a/src/gallium/frontends/vallium/val_device.c
+++ b/src/gallium/frontends/lavapipe/lvp_device.c
@@ -21,7 +21,7 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "pipe-loader/pipe_loader.h"
#include "git_sha1.h"
@@ -38,8 +38,8 @@
#include "util/timespec.h"
static VkResult
-val_physical_device_init(struct val_physical_device *device,
- struct val_instance *instance,
+lvp_physical_device_init(struct lvp_physical_device *device,
+ struct lvp_instance *instance,
struct pipe_loader_device *pld)
{
VkResult result;
@@ -51,11 +51,11 @@ val_physical_device_init(struct val_physical_device *device,
if (!device->pscreen)
return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- fprintf(stderr, "WARNING: vallium/llvmpipe is not a conformant vulkan implementation, testing use only.\n");
+ fprintf(stderr, "WARNING: lavapipe is not a conformant vulkan implementation, testing use only.\n");
device->max_images = device->pscreen->get_shader_param(device->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
- val_physical_device_get_supported_extensions(device, &device->supported_extensions);
- result = val_init_wsi(device);
+ lvp_physical_device_get_supported_extensions(device, &device->supported_extensions);
+ result = lvp_init_wsi(device);
if (result != VK_SUCCESS) {
vk_error(instance, result);
goto fail;
@@ -67,9 +67,9 @@ val_physical_device_init(struct val_physical_device *device,
}
static void
-val_physical_device_finish(struct val_physical_device *device)
+lvp_physical_device_finish(struct lvp_physical_device *device)
{
- val_finish_wsi(device);
+ lvp_finish_wsi(device);
device->pscreen->destroy(device->pscreen);
}
@@ -100,12 +100,12 @@ static const VkAllocationCallbacks default_alloc = {
.pfnFree = default_free_func,
};
-VkResult val_CreateInstance(
+VkResult lvp_CreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkInstance* pInstance)
{
- struct val_instance *instance;
+ struct lvp_instance *instance;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
@@ -134,32 +134,32 @@ VkResult val_CreateInstance(
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
int idx;
- for (idx = 0; idx < VAL_INSTANCE_EXTENSION_COUNT; idx++) {
+ for (idx = 0; idx < LVP_INSTANCE_EXTENSION_COUNT; idx++) {
if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- val_instance_extensions[idx].extensionName))
+ lvp_instance_extensions[idx].extensionName))
break;
}
- if (idx >= VAL_INSTANCE_EXTENSION_COUNT ||
- !val_instance_extensions_supported.extensions[idx]) {
+ if (idx >= LVP_INSTANCE_EXTENSION_COUNT ||
+ !lvp_instance_extensions_supported.extensions[idx]) {
vk_free2(&default_alloc, pAllocator, instance);
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
}
instance->enabled_extensions.extensions[idx] = true;
}
- bool unchecked = instance->debug_flags & VAL_DEBUG_ALL_ENTRYPOINTS;
+ bool unchecked = instance->debug_flags & LVP_DEBUG_ALL_ENTRYPOINTS;
for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
/* Vulkan requires that entrypoints for extensions which have
* not been enabled must not be advertised.
*/
if (!unchecked &&
- !val_instance_entrypoint_is_enabled(i, instance->apiVersion,
+ !lvp_instance_entrypoint_is_enabled(i, instance->apiVersion,
&instance->enabled_extensions)) {
instance->dispatch.entrypoints[i] = NULL;
} else {
instance->dispatch.entrypoints[i] =
- val_instance_dispatch_table.entrypoints[i];
+ lvp_instance_dispatch_table.entrypoints[i];
}
}
@@ -168,12 +168,12 @@ VkResult val_CreateInstance(
* not been enabled must not be advertised.
*/
if (!unchecked &&
- !val_physical_device_entrypoint_is_enabled(i, instance->apiVersion,
+ !lvp_physical_device_entrypoint_is_enabled(i, instance->apiVersion,
&instance->enabled_extensions)) {
instance->physical_device_dispatch.entrypoints[i] = NULL;
} else {
instance->physical_device_dispatch.entrypoints[i] =
- val_physical_device_dispatch_table.entrypoints[i];
+ lvp_physical_device_dispatch_table.entrypoints[i];
}
}
@@ -182,12 +182,12 @@ VkResult val_CreateInstance(
* not been enabled must not be advertised.
*/
if (!unchecked &&
- !val_device_entrypoint_is_enabled(i, instance->apiVersion,
+ !lvp_device_entrypoint_is_enabled(i, instance->apiVersion,
&instance->enabled_extensions, NULL)) {
instance->device_dispatch.entrypoints[i] = NULL;
} else {
instance->device_dispatch.entrypoints[i] =
- val_device_dispatch_table.entrypoints[i];
+ lvp_device_dispatch_table.entrypoints[i];
}
}
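All three dispatch tables (instance, physical-device, device) are filtered the same way: an entrypoint whose extension is not enabled resolves to NULL, unless the LVP_DEBUG_ALL_ENTRYPOINTS debug flag forces everything through. A compact sketch of that filtering step, demo-sized:

    /* Sketch of the entrypoint filtering (demo_* names are illustrative). */
    #include <stdbool.h>
    #include <stddef.h>

    #define DEMO_ENTRYPOINTS 4

    static void demo_filter_dispatch(void *out[DEMO_ENTRYPOINTS],
                                     void *const full[DEMO_ENTRYPOINTS],
                                     const bool enabled[DEMO_ENTRYPOINTS],
                                     bool debug_all_entrypoints)
    {
       for (unsigned i = 0; i < DEMO_ENTRYPOINTS; i++)
          out[i] = (debug_all_entrypoints || enabled[i]) ? full[i] : NULL;
    }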
@@ -195,22 +195,22 @@ VkResult val_CreateInstance(
glsl_type_singleton_init_or_ref();
// VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
- *pInstance = val_instance_to_handle(instance);
+ *pInstance = lvp_instance_to_handle(instance);
return VK_SUCCESS;
}
-void val_DestroyInstance(
+void lvp_DestroyInstance(
VkInstance _instance,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
if (!instance)
return;
glsl_type_singleton_decref();
if (instance->physicalDeviceCount > 0)
- val_physical_device_finish(&instance->physicalDevice);
+ lvp_physical_device_finish(&instance->physicalDevice);
// _mesa_locale_fini();
pipe_loader_release(&instance->devs, instance->num_devices);
@@ -219,38 +219,38 @@ void val_DestroyInstance(
vk_free(&instance->alloc, instance);
}
-static void val_get_image(struct dri_drawable *dri_drawable,
+static void lvp_get_image(struct dri_drawable *dri_drawable,
int x, int y, unsigned width, unsigned height, unsigned stride,
void *data)
{
}
-static void val_put_image(struct dri_drawable *dri_drawable,
+static void lvp_put_image(struct dri_drawable *dri_drawable,
void *data, unsigned width, unsigned height)
{
fprintf(stderr, "put image %dx%d\n", width, height);
}
-static void val_put_image2(struct dri_drawable *dri_drawable,
+static void lvp_put_image2(struct dri_drawable *dri_drawable,
void *data, int x, int y, unsigned width, unsigned height,
unsigned stride)
{
fprintf(stderr, "put image 2 %d,%d %dx%d\n", x, y, width, height);
}
-static struct drisw_loader_funcs val_sw_lf = {
- .get_image = val_get_image,
- .put_image = val_put_image,
- .put_image2 = val_put_image2,
+static struct drisw_loader_funcs lvp_sw_lf = {
+ .get_image = lvp_get_image,
+ .put_image = lvp_put_image,
+ .put_image2 = lvp_put_image2,
};
-VkResult val_EnumeratePhysicalDevices(
+VkResult lvp_EnumeratePhysicalDevices(
VkInstance _instance,
uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
VkResult result;
if (instance->physicalDeviceCount < 0) {
@@ -260,10 +260,10 @@ VkResult val_EnumeratePhysicalDevices(
assert(instance->num_devices == 1);
- pipe_loader_sw_probe_dri(&instance->devs, &val_sw_lf);
+ pipe_loader_sw_probe_dri(&instance->devs, &lvp_sw_lf);
- result = val_physical_device_init(&instance->physicalDevice,
+ result = lvp_physical_device_init(&instance->physicalDevice,
instance, &instance->devs[0]);
if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
instance->physicalDeviceCount = 0;
@@ -277,7 +277,7 @@ VkResult val_EnumeratePhysicalDevices(
if (!pPhysicalDevices) {
*pPhysicalDeviceCount = instance->physicalDeviceCount;
} else if (*pPhysicalDeviceCount >= 1) {
- pPhysicalDevices[0] = val_physical_device_to_handle(&instance->physicalDevice);
+ pPhysicalDevices[0] = lvp_physical_device_to_handle(&instance->physicalDevice);
*pPhysicalDeviceCount = 1;
} else {
*pPhysicalDeviceCount = 0;
@@ -286,11 +286,11 @@ VkResult val_EnumeratePhysicalDevices(
return VK_SUCCESS;
}
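This follows the standard Vulkan two-call idiom: a NULL output array means "just report the count", otherwise the result is clamped to the caller's capacity. Since lavapipe exposes exactly one physical device, the logic collapses to the sketch below (return codes simplified; 1 stands in for the VK_INCOMPLETE-style case):

    /* Sketch of the two-call idiom for a driver with one physical device. */
    #include <stddef.h>
    #include <stdint.h>

    static int demo_enumerate(uint32_t *count, void **devices, void *the_device)
    {
       if (devices == NULL) {   /* first call: report how many exist */
          *count = 1;
          return 0;
       }
       if (*count >= 1) {       /* second call: fill as much as fits */
          devices[0] = the_device;
          *count = 1;
          return 0;
       }
       *count = 0;
       return 1;                /* caller's array was too small */
    }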
-void val_GetPhysicalDeviceFeatures(
+void lvp_GetPhysicalDeviceFeatures(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceFeatures* pFeatures)
{
- VAL_FROM_HANDLE(val_physical_device, pdevice, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, pdevice, physicalDevice);
bool indirect = false; /* pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_GLSL_FEATURE_LEVEL) >= 400 */
memset(pFeatures, 0, sizeof(*pFeatures));
*pFeatures = (VkPhysicalDeviceFeatures) {
@@ -342,11 +342,11 @@ void val_GetPhysicalDeviceFeatures(
};
}
-void val_GetPhysicalDeviceFeatures2(
+void lvp_GetPhysicalDeviceFeatures2(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceFeatures2 *pFeatures)
{
- val_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
+ lvp_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
vk_foreach_struct(ext, pFeatures->pNext) {
switch (ext->sType) {
@@ -372,16 +372,16 @@ void val_GetPhysicalDeviceFeatures2(
}
void
-val_device_get_cache_uuid(void *uuid)
+lvp_device_get_cache_uuid(void *uuid)
{
memset(uuid, 0, VK_UUID_SIZE);
snprintf(uuid, VK_UUID_SIZE, "val-%s", MESA_GIT_SHA1 + 4);
}
-void val_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
+void lvp_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties *pProperties)
{
- VAL_FROM_HANDLE(val_physical_device, pdevice, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, pdevice, physicalDevice);
VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;
@@ -519,15 +519,15 @@ void val_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
};
strcpy(pProperties->deviceName, pdevice->pscreen->get_name(pdevice->pscreen));
- val_device_get_cache_uuid(pProperties->pipelineCacheUUID);
+ lvp_device_get_cache_uuid(pProperties->pipelineCacheUUID);
}
-void val_GetPhysicalDeviceProperties2(
+void lvp_GetPhysicalDeviceProperties2(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties2 *pProperties)
{
- val_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+ lvp_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
vk_foreach_struct(ext, pProperties->pNext) {
switch (ext->sType) {
@@ -568,7 +568,7 @@ void val_GetPhysicalDeviceProperties2(
}
}
-void val_GetPhysicalDeviceQueueFamilyProperties(
+void lvp_GetPhysicalDeviceQueueFamilyProperties(
VkPhysicalDevice physicalDevice,
uint32_t* pCount,
VkQueueFamilyProperties* pQueueFamilyProperties)
@@ -590,7 +590,7 @@ void val_GetPhysicalDeviceQueueFamilyProperties(
};
}
-void val_GetPhysicalDeviceMemoryProperties(
+void lvp_GetPhysicalDeviceMemoryProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
@@ -610,11 +610,11 @@ void val_GetPhysicalDeviceMemoryProperties(
};
}
-PFN_vkVoidFunction val_GetInstanceProcAddr(
+PFN_vkVoidFunction lvp_GetInstanceProcAddr(
VkInstance _instance,
const char* pName)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
/* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
* when we have to return valid function pointers, NULL, or it's left
@@ -623,34 +623,34 @@ PFN_vkVoidFunction val_GetInstanceProcAddr(
if (pName == NULL)
return NULL;
-#define LOOKUP_VAL_ENTRYPOINT(entrypoint) \
+#define LOOKUP_LVP_ENTRYPOINT(entrypoint) \
if (strcmp(pName, "vk" #entrypoint) == 0) \
- return (PFN_vkVoidFunction)val_##entrypoint
+ return (PFN_vkVoidFunction)lvp_##entrypoint
- LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceExtensionProperties);
- LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceLayerProperties);
- LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceVersion);
- LOOKUP_VAL_ENTRYPOINT(CreateInstance);
+ LOOKUP_LVP_ENTRYPOINT(EnumerateInstanceExtensionProperties);
+ LOOKUP_LVP_ENTRYPOINT(EnumerateInstanceLayerProperties);
+ LOOKUP_LVP_ENTRYPOINT(EnumerateInstanceVersion);
+ LOOKUP_LVP_ENTRYPOINT(CreateInstance);
/* GetInstanceProcAddr() can also be called with a NULL instance.
* See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
*/
- LOOKUP_VAL_ENTRYPOINT(GetInstanceProcAddr);
+ LOOKUP_LVP_ENTRYPOINT(GetInstanceProcAddr);
-#undef LOOKUP_VAL_ENTRYPOINT
+#undef LOOKUP_LVP_ENTRYPOINT
if (instance == NULL)
return NULL;
- int idx = val_get_instance_entrypoint_index(pName);
+ int idx = lvp_get_instance_entrypoint_index(pName);
if (idx >= 0)
return instance->dispatch.entrypoints[idx];
- idx = val_get_physical_device_entrypoint_index(pName);
+ idx = lvp_get_physical_device_entrypoint_index(pName);
if (idx >= 0)
return instance->physical_device_dispatch.entrypoints[idx];
- idx = val_get_device_entrypoint_index(pName);
+ idx = lvp_get_device_entrypoint_index(pName);
if (idx >= 0)
return instance->device_dispatch.entrypoints[idx];
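Lookup proceeds in two phases: the pre-instance entrypoints are matched by the LOOKUP_LVP_ENTRYPOINT macro before any instance exists, and everything else goes through the generated string maps into the per-instance dispatch arrays. A self-contained sketch of what one macro match expands to, with demo_* names:

    /* Sketch of one LOOKUP_*_ENTRYPOINT expansion (demo_* names assumed). */
    #include <stdio.h>
    #include <string.h>

    typedef void (*demo_pfn)(void);

    static void demo_CreateInstance(void) { puts("CreateInstance"); }

    #define LOOKUP_DEMO_ENTRYPOINT(entrypoint)       \
       if (strcmp(pName, "vk" #entrypoint) == 0)     \
          return (demo_pfn)demo_##entrypoint

    static demo_pfn demo_lookup(const char *pName)
    {
       LOOKUP_DEMO_ENTRYPOINT(CreateInstance);
       return NULL;              /* fall through to the string maps */
    }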
@@ -670,7 +670,7 @@ VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
VkInstance instance,
const char* pName)
{
- return val_GetInstanceProcAddr(instance, pName);
+ return lvp_GetInstanceProcAddr(instance, pName);
}
PUBLIC
@@ -683,27 +683,27 @@ VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
VkInstance _instance,
const char* pName)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
if (!pName || !instance)
return NULL;
- int idx = val_get_physical_device_entrypoint_index(pName);
+ int idx = lvp_get_physical_device_entrypoint_index(pName);
if (idx < 0)
return NULL;
return instance->physical_device_dispatch.entrypoints[idx];
}
-PFN_vkVoidFunction val_GetDeviceProcAddr(
+PFN_vkVoidFunction lvp_GetDeviceProcAddr(
VkDevice _device,
const char* pName)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
if (!device || !pName)
return NULL;
- int idx = val_get_device_entrypoint_index(pName);
+ int idx = lvp_get_device_entrypoint_index(pName);
if (idx < 0)
return NULL;
@@ -712,24 +712,24 @@ PFN_vkVoidFunction val_GetDeviceProcAddr(
static int queue_thread(void *data)
{
- struct val_queue *queue = data;
+ struct lvp_queue *queue = data;
mtx_lock(&queue->m);
while (!queue->shutdown) {
- struct val_queue_work *task;
+ struct lvp_queue_work *task;
while (list_is_empty(&queue->workqueue) && !queue->shutdown)
cnd_wait(&queue->new_work, &queue->m);
if (queue->shutdown)
break;
- task = list_first_entry(&queue->workqueue, struct val_queue_work,
+ task = list_first_entry(&queue->workqueue, struct lvp_queue_work,
list);
mtx_unlock(&queue->m);
/* execute: replay each of this task's command buffers on the queue */
for (unsigned i = 0; i < task->cmd_buffer_count; i++) {
- val_execute_cmds(queue->device, queue, task->fence, task->cmd_buffers[i]);
+ lvp_execute_cmds(queue->device, queue, task->fence, task->cmd_buffers[i]);
}
if (!task->cmd_buffer_count && task->fence)
task->fence->signaled = true;
@@ -743,7 +743,7 @@ static int queue_thread(void *data)
}
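queue_thread is a classic single-consumer worker: it sleeps on a condition variable until work arrives or shutdown is requested, pops one task under the mutex, and executes it with the lock dropped. A runnable sketch of that loop shape using C11 <threads.h>, which is what Mesa's mtx and cnd wrappers correspond to:

    /* Sketch of the worker loop: wait, pop one task, run it unlocked. */
    #include <threads.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct demo_task { struct demo_task *next; };

    struct demo_queue {
       mtx_t m;
       cnd_t new_work;
       bool shutdown;
       struct demo_task *work;
    };

    static int demo_queue_thread(void *data)
    {
       struct demo_queue *q = data;
       mtx_lock(&q->m);
       while (!q->shutdown) {
          while (q->work == NULL && !q->shutdown)
             cnd_wait(&q->new_work, &q->m);   /* sleep until signalled */
          if (q->shutdown)
             break;
          struct demo_task *task = q->work;
          q->work = task->next;
          mtx_unlock(&q->m);
          /* execute the task here, outside the lock */
          mtx_lock(&q->m);
       }
       mtx_unlock(&q->m);
       return 0;
    }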
static VkResult
-val_queue_init(struct val_device *device, struct val_queue *queue)
+lvp_queue_init(struct lvp_device *device, struct lvp_queue *queue)
{
queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
queue->device = device;
@@ -759,7 +759,7 @@ val_queue_init(struct val_device *device, struct val_queue *queue)
}
static void
-val_queue_finish(struct val_queue *queue)
+lvp_queue_finish(struct lvp_queue *queue)
{
mtx_lock(&queue->m);
queue->shutdown = true;
@@ -773,28 +773,28 @@ val_queue_finish(struct val_queue *queue)
queue->ctx->destroy(queue->ctx);
}
-static int val_get_device_extension_index(const char *name)
+static int lvp_get_device_extension_index(const char *name)
{
- for (unsigned i = 0; i < VAL_DEVICE_EXTENSION_COUNT; ++i) {
- if (strcmp(name, val_device_extensions[i].extensionName) == 0)
+ for (unsigned i = 0; i < LVP_DEVICE_EXTENSION_COUNT; ++i) {
+ if (strcmp(name, lvp_device_extensions[i].extensionName) == 0)
return i;
}
return -1;
}
static void
-val_device_init_dispatch(struct val_device *device)
+lvp_device_init_dispatch(struct lvp_device *device)
{
- const struct val_instance *instance = device->physical_device->instance;
- const struct val_device_dispatch_table *dispatch_table_layer = NULL;
- bool unchecked = instance->debug_flags & VAL_DEBUG_ALL_ENTRYPOINTS;
+ const struct lvp_instance *instance = device->physical_device->instance;
+ const struct lvp_device_dispatch_table *dispatch_table_layer = NULL;
+ bool unchecked = instance->debug_flags & LVP_DEBUG_ALL_ENTRYPOINTS;
for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
/* Vulkan requires that entrypoints for extensions which have not been
* enabled must not be advertised.
*/
if (!unchecked &&
- !val_device_entrypoint_is_enabled(i, instance->apiVersion,
+ !lvp_device_entrypoint_is_enabled(i, instance->apiVersion,
&instance->enabled_extensions,
&device->enabled_extensions)) {
device->dispatch.entrypoints[i] = NULL;
@@ -804,26 +804,26 @@ val_device_init_dispatch(struct val_device *device)
dispatch_table_layer->entrypoints[i];
} else {
device->dispatch.entrypoints[i] =
- val_device_dispatch_table.entrypoints[i];
+ lvp_device_dispatch_table.entrypoints[i];
}
}
}
-VkResult val_CreateDevice(
+VkResult lvp_CreateDevice(
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDevice* pDevice)
{
- VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
- struct val_device *device;
+ LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+ struct lvp_device *device;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
/* Check enabled features */
if (pCreateInfo->pEnabledFeatures) {
VkPhysicalDeviceFeatures supported_features;
- val_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
+ lvp_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
VkBool32 *supported_feature = (VkBool32 *)&supported_features;
VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
@@ -849,7 +849,7 @@ VkResult val_CreateDevice(
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
- int index = val_get_device_extension_index(ext_name);
+ int index = lvp_get_device_extension_index(ext_name);
if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
vk_free(&device->alloc, device);
return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
@@ -857,40 +857,40 @@ VkResult val_CreateDevice(
device->enabled_extensions.extensions[index] = true;
}
- val_device_init_dispatch(device);
+ lvp_device_init_dispatch(device);
mtx_init(&device->fence_lock, mtx_plain);
device->pscreen = physical_device->pscreen;
- val_queue_init(device, &device->queue);
+ lvp_queue_init(device, &device->queue);
- *pDevice = val_device_to_handle(device);
+ *pDevice = lvp_device_to_handle(device);
return VK_SUCCESS;
}
-void val_DestroyDevice(
+void lvp_DestroyDevice(
VkDevice _device,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
- val_queue_finish(&device->queue);
+ lvp_queue_finish(&device->queue);
vk_free(&device->alloc, device);
}
-VkResult val_EnumerateInstanceExtensionProperties(
+VkResult lvp_EnumerateInstanceExtensionProperties(
const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties)
{
VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
- for (int i = 0; i < VAL_INSTANCE_EXTENSION_COUNT; i++) {
- if (val_instance_extensions_supported.extensions[i]) {
+ for (int i = 0; i < LVP_INSTANCE_EXTENSION_COUNT; i++) {
+ if (lvp_instance_extensions_supported.extensions[i]) {
vk_outarray_append(&out, prop) {
- *prop = val_instance_extensions[i];
+ *prop = lvp_instance_extensions[i];
}
}
}
@@ -898,26 +898,26 @@ VkResult val_EnumerateInstanceExtensionProperties(
return vk_outarray_status(&out);
}
-VkResult val_EnumerateDeviceExtensionProperties(
+VkResult lvp_EnumerateDeviceExtensionProperties(
VkPhysicalDevice physicalDevice,
const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
- for (int i = 0; i < VAL_DEVICE_EXTENSION_COUNT; i++) {
+ for (int i = 0; i < LVP_DEVICE_EXTENSION_COUNT; i++) {
if (device->supported_extensions.extensions[i]) {
vk_outarray_append(&out, prop) {
- *prop = val_device_extensions[i];
+ *prop = lvp_device_extensions[i];
}
}
}
return vk_outarray_status(&out);
}
-VkResult val_EnumerateInstanceLayerProperties(
+VkResult lvp_EnumerateInstanceLayerProperties(
uint32_t* pPropertyCount,
VkLayerProperties* pProperties)
{
@@ -930,7 +930,7 @@ VkResult val_EnumerateInstanceLayerProperties(
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}
-VkResult val_EnumerateDeviceLayerProperties(
+VkResult lvp_EnumerateDeviceLayerProperties(
VkPhysicalDevice physicalDevice,
uint32_t* pPropertyCount,
VkLayerProperties* pProperties)
@@ -944,13 +944,13 @@ VkResult val_EnumerateDeviceLayerProperties(
return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}
-void val_GetDeviceQueue2(
+void lvp_GetDeviceQueue2(
VkDevice _device,
const VkDeviceQueueInfo2* pQueueInfo,
VkQueue* pQueue)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_queue *queue;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_queue *queue;
queue = &device->queue;
if (pQueueInfo->flags != queue->flags) {
@@ -966,11 +966,11 @@ void val_GetDeviceQueue2(
return;
}
- *pQueue = val_queue_to_handle(queue);
+ *pQueue = lvp_queue_to_handle(queue);
}
-void val_GetDeviceQueue(
+void lvp_GetDeviceQueue(
VkDevice _device,
uint32_t queueFamilyIndex,
uint32_t queueIndex,
@@ -982,30 +982,30 @@ void val_GetDeviceQueue(
.queueIndex = queueIndex
};
- val_GetDeviceQueue2(_device, &info, pQueue);
+ lvp_GetDeviceQueue2(_device, &info, pQueue);
}
-VkResult val_QueueSubmit(
+VkResult lvp_QueueSubmit(
VkQueue _queue,
uint32_t submitCount,
const VkSubmitInfo* pSubmits,
VkFence _fence)
{
- VAL_FROM_HANDLE(val_queue, queue, _queue);
- VAL_FROM_HANDLE(val_fence, fence, _fence);
+ LVP_FROM_HANDLE(lvp_queue, queue, _queue);
+ LVP_FROM_HANDLE(lvp_fence, fence, _fence);
if (submitCount == 0)
goto just_signal_fence;
for (uint32_t i = 0; i < submitCount; i++) {
- uint32_t task_size = sizeof(struct val_queue_work) + pSubmits[i].commandBufferCount * sizeof(struct val_cmd_buffer *);
- struct val_queue_work *task = malloc(task_size);
+ uint32_t task_size = sizeof(struct lvp_queue_work) + pSubmits[i].commandBufferCount * sizeof(struct lvp_cmd_buffer *);
+ struct lvp_queue_work *task = malloc(task_size);
task->cmd_buffer_count = pSubmits[i].commandBufferCount;
task->fence = fence;
- task->cmd_buffers = (struct val_cmd_buffer **)(task + 1);
+ task->cmd_buffers = (struct lvp_cmd_buffer **)(task + 1);
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
- task->cmd_buffers[j] = val_cmd_buffer_from_handle(pSubmits[i].pCommandBuffers[j]);
+ task->cmd_buffers[j] = lvp_cmd_buffer_from_handle(pSubmits[i].pCommandBuffers[j]);
}
mtx_lock(&queue->m);
@@ -1020,7 +1020,7 @@ VkResult val_QueueSubmit(
return VK_SUCCESS;
}
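Each submit packs the task header and its command-buffer pointer array into one malloc; (task + 1) points just past the header, which is where the array lives. A sketch of that single-allocation layout:

    /* Sketch of the single-malloc task: header plus trailing pointers. */
    #include <stdint.h>
    #include <stdlib.h>

    struct demo_work {
       uint32_t cmd_buffer_count;
       void **cmd_buffers;
    };

    static struct demo_work *demo_make_task(uint32_t count)
    {
       struct demo_work *task =
          malloc(sizeof(*task) + count * sizeof(void *));
       if (!task)
          return NULL;
       task->cmd_buffer_count = count;
       task->cmd_buffers = (void **)(task + 1);   /* trailing array */
       return task;
    }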
-static VkResult queue_wait_idle(struct val_queue *queue, uint64_t timeout)
+static VkResult queue_wait_idle(struct lvp_queue *queue, uint64_t timeout)
{
if (timeout == 0)
return p_atomic_read(&queue->count) == 0 ? VK_SUCCESS : VK_TIMEOUT;
@@ -1040,30 +1040,30 @@ static VkResult queue_wait_idle(struct val_queue *queue, uint64_t timeout)
return VK_SUCCESS;
}
-VkResult val_QueueWaitIdle(
+VkResult lvp_QueueWaitIdle(
VkQueue _queue)
{
- VAL_FROM_HANDLE(val_queue, queue, _queue);
+ LVP_FROM_HANDLE(lvp_queue, queue, _queue);
return queue_wait_idle(queue, UINT64_MAX);
}
-VkResult val_DeviceWaitIdle(
+VkResult lvp_DeviceWaitIdle(
VkDevice _device)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
return queue_wait_idle(&device->queue, UINT64_MAX);
}
-VkResult val_AllocateMemory(
+VkResult lvp_AllocateMemory(
VkDevice _device,
const VkMemoryAllocateInfo* pAllocateInfo,
const VkAllocationCallbacks* pAllocator,
VkDeviceMemory* pMem)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_device_memory *mem;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_device_memory *mem;
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
if (pAllocateInfo->allocationSize == 0) {
@@ -1087,18 +1087,18 @@ VkResult val_AllocateMemory(
mem->type_index = pAllocateInfo->memoryTypeIndex;
- *pMem = val_device_memory_to_handle(mem);
+ *pMem = lvp_device_memory_to_handle(mem);
return VK_SUCCESS;
}
-void val_FreeMemory(
+void lvp_FreeMemory(
VkDevice _device,
VkDeviceMemory _mem,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_device_memory, mem, _mem);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, _mem);
if (mem == NULL)
return;
@@ -1109,7 +1109,7 @@ void val_FreeMemory(
}
-VkResult val_MapMemory(
+VkResult lvp_MapMemory(
VkDevice _device,
VkDeviceMemory _memory,
VkDeviceSize offset,
@@ -1117,8 +1117,8 @@ VkResult val_MapMemory(
VkMemoryMapFlags flags,
void** ppData)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_device_memory, mem, _memory);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
void *map;
if (mem == NULL) {
*ppData = NULL;
@@ -1131,12 +1131,12 @@ VkResult val_MapMemory(
return VK_SUCCESS;
}
-void val_UnmapMemory(
+void lvp_UnmapMemory(
VkDevice _device,
VkDeviceMemory _memory)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_device_memory, mem, _memory);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
if (mem == NULL)
return;
@@ -1144,14 +1144,14 @@ void val_UnmapMemory(
device->pscreen->unmap_memory(device->pscreen, mem->pmem);
}
-VkResult val_FlushMappedMemoryRanges(
+VkResult lvp_FlushMappedMemoryRanges(
VkDevice _device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges)
{
return VK_SUCCESS;
}
-VkResult val_InvalidateMappedMemoryRanges(
+VkResult lvp_InvalidateMappedMemoryRanges(
VkDevice _device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges)
@@ -1159,12 +1159,12 @@ VkResult val_InvalidateMappedMemoryRanges(
return VK_SUCCESS;
}
-void val_GetBufferMemoryRequirements(
+void lvp_GetBufferMemoryRequirements(
VkDevice device,
VkBuffer _buffer,
VkMemoryRequirements* pMemoryRequirements)
{
- VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
/* The Vulkan spec (git aaed022) says:
*
@@ -1181,12 +1181,12 @@ void val_GetBufferMemoryRequirements(
pMemoryRequirements->alignment = 64;
}
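With only one exposed memory type, memoryTypeBits is always bit 0; the fixed 64-byte alignment comes straight from the code above, and is assumed here to cover the backend's worst-case requirement. Sketched:

    /* Sketch of the requirements reported above; the values mirror the
     * diff, the rationale for 64 is an assumption. */
    #include <stdint.h>

    struct demo_reqs { uint32_t memoryTypeBits; uint64_t size, alignment; };

    static void demo_buffer_reqs(struct demo_reqs *r, uint64_t total_size)
    {
       r->memoryTypeBits = 1u << 0;   /* the only exposed memory type */
       r->size = total_size;
       r->alignment = 64;
    }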
-void val_GetBufferMemoryRequirements2(
+void lvp_GetBufferMemoryRequirements2(
VkDevice device,
const VkBufferMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
- val_GetBufferMemoryRequirements(device, pInfo->buffer,
+ lvp_GetBufferMemoryRequirements(device, pInfo->buffer,
&pMemoryRequirements->memoryRequirements);
vk_foreach_struct(ext, pMemoryRequirements->pNext) {
switch (ext->sType) {
@@ -1203,24 +1203,24 @@ void val_GetBufferMemoryRequirements2(
}
}
-void val_GetImageMemoryRequirements(
+void lvp_GetImageMemoryRequirements(
VkDevice device,
VkImage _image,
VkMemoryRequirements* pMemoryRequirements)
{
- VAL_FROM_HANDLE(val_image, image, _image);
+ LVP_FROM_HANDLE(lvp_image, image, _image);
pMemoryRequirements->memoryTypeBits = 1;
pMemoryRequirements->size = image->size;
pMemoryRequirements->alignment = image->alignment;
}
-void val_GetImageMemoryRequirements2(
+void lvp_GetImageMemoryRequirements2(
VkDevice device,
const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
- val_GetImageMemoryRequirements(device, pInfo->image,
+ lvp_GetImageMemoryRequirements(device, pInfo->image,
&pMemoryRequirements->memoryRequirements);
vk_foreach_struct(ext, pMemoryRequirements->pNext) {
@@ -1238,7 +1238,7 @@ void val_GetImageMemoryRequirements2(
}
}
-void val_GetImageSparseMemoryRequirements(
+void lvp_GetImageSparseMemoryRequirements(
VkDevice device,
VkImage image,
uint32_t* pSparseMemoryRequirementCount,
@@ -1247,7 +1247,7 @@ void val_GetImageSparseMemoryRequirements(
stub();
}
-void val_GetImageSparseMemoryRequirements2(
+void lvp_GetImageSparseMemoryRequirements2(
VkDevice device,
const VkImageSparseMemoryRequirementsInfo2* pInfo,
uint32_t* pSparseMemoryRequirementCount,
@@ -1256,7 +1256,7 @@ void val_GetImageSparseMemoryRequirements2(
stub();
}
-void val_GetDeviceMemoryCommitment(
+void lvp_GetDeviceMemoryCommitment(
VkDevice device,
VkDeviceMemory memory,
VkDeviceSize* pCommittedMemoryInBytes)
@@ -1264,14 +1264,14 @@ void val_GetDeviceMemoryCommitment(
*pCommittedMemoryInBytes = 0;
}
-VkResult val_BindBufferMemory2(VkDevice _device,
+VkResult lvp_BindBufferMemory2(VkDevice _device,
uint32_t bindInfoCount,
const VkBindBufferMemoryInfo *pBindInfos)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
for (uint32_t i = 0; i < bindInfoCount; ++i) {
- VAL_FROM_HANDLE(val_device_memory, mem, pBindInfos[i].memory);
- VAL_FROM_HANDLE(val_buffer, buffer, pBindInfos[i].buffer);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, pBindInfos[i].memory);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, pBindInfos[i].buffer);
device->pscreen->resource_bind_backing(device->pscreen,
buffer->bo,
@@ -1281,15 +1281,15 @@ VkResult val_BindBufferMemory2(VkDevice _device,
return VK_SUCCESS;
}
-VkResult val_BindBufferMemory(
+VkResult lvp_BindBufferMemory(
VkDevice _device,
VkBuffer _buffer,
VkDeviceMemory _memory,
VkDeviceSize memoryOffset)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_device_memory, mem, _memory);
- VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
device->pscreen->resource_bind_backing(device->pscreen,
buffer->bo,
@@ -1298,14 +1298,14 @@ VkResult val_BindBufferMemory(
return VK_SUCCESS;
}
-VkResult val_BindImageMemory2(VkDevice _device,
+VkResult lvp_BindImageMemory2(VkDevice _device,
uint32_t bindInfoCount,
const VkBindImageMemoryInfo *pBindInfos)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
for (uint32_t i = 0; i < bindInfoCount; ++i) {
- VAL_FROM_HANDLE(val_device_memory, mem, pBindInfos[i].memory);
- VAL_FROM_HANDLE(val_image, image, pBindInfos[i].image);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, pBindInfos[i].memory);
+ LVP_FROM_HANDLE(lvp_image, image, pBindInfos[i].image);
device->pscreen->resource_bind_backing(device->pscreen,
image->bo,
@@ -1315,15 +1315,15 @@ VkResult val_BindImageMemory2(VkDevice _device,
return VK_SUCCESS;
}
-VkResult val_BindImageMemory(
+VkResult lvp_BindImageMemory(
VkDevice _device,
VkImage _image,
VkDeviceMemory _memory,
VkDeviceSize memoryOffset)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_device_memory, mem, _memory);
- VAL_FROM_HANDLE(val_image, image, _image);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
+ LVP_FROM_HANDLE(lvp_image, image, _image);
device->pscreen->resource_bind_backing(device->pscreen,
image->bo,
@@ -1332,7 +1332,7 @@ VkResult val_BindImageMemory(
return VK_SUCCESS;
}
-VkResult val_QueueBindSparse(
+VkResult lvp_QueueBindSparse(
VkQueue queue,
uint32_t bindInfoCount,
const VkBindSparseInfo* pBindInfo,
@@ -1342,14 +1342,14 @@ VkResult val_QueueBindSparse(
}
-VkResult val_CreateFence(
+VkResult lvp_CreateFence(
VkDevice _device,
const VkFenceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkFence* pFence)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_fence *fence;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_fence *fence;
fence = vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -1360,18 +1360,18 @@ VkResult val_CreateFence(
fence->signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;
fence->handle = NULL;
- *pFence = val_fence_to_handle(fence);
+ *pFence = lvp_fence_to_handle(fence);
return VK_SUCCESS;
}
-void val_DestroyFence(
+void lvp_DestroyFence(
VkDevice _device,
VkFence _fence,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_fence, fence, _fence);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_fence, fence, _fence);
if (!_fence)
return;
@@ -1382,14 +1382,14 @@ void val_DestroyFence(
vk_free2(&device->alloc, pAllocator, fence);
}
-VkResult val_ResetFences(
+VkResult lvp_ResetFences(
VkDevice _device,
uint32_t fenceCount,
const VkFence* pFences)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
for (unsigned i = 0; i < fenceCount; i++) {
- struct val_fence *fence = val_fence_from_handle(pFences[i]);
+ struct lvp_fence *fence = lvp_fence_from_handle(pFences[i]);
fence->signaled = false;
@@ -1401,12 +1401,12 @@ VkResult val_ResetFences(
return VK_SUCCESS;
}
-VkResult val_GetFenceStatus(
+VkResult lvp_GetFenceStatus(
VkDevice _device,
VkFence _fence)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_fence, fence, _fence);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_fence, fence, _fence);
if (fence->signaled)
return VK_SUCCESS;
@@ -1429,19 +1429,19 @@ VkResult val_GetFenceStatus(
return VK_NOT_READY;
}
-VkResult val_CreateFramebuffer(
+VkResult lvp_CreateFramebuffer(
VkDevice _device,
const VkFramebufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkFramebuffer* pFramebuffer)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_framebuffer *framebuffer;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_framebuffer *framebuffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
size_t size = sizeof(*framebuffer) +
- sizeof(struct val_image_view *) * pCreateInfo->attachmentCount;
+ sizeof(struct lvp_image_view *) * pCreateInfo->attachmentCount;
framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (framebuffer == NULL)
@@ -1452,25 +1452,25 @@ VkResult val_CreateFramebuffer(
framebuffer->attachment_count = pCreateInfo->attachmentCount;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
VkImageView _iview = pCreateInfo->pAttachments[i];
- framebuffer->attachments[i] = val_image_view_from_handle(_iview);
+ framebuffer->attachments[i] = lvp_image_view_from_handle(_iview);
}
framebuffer->width = pCreateInfo->width;
framebuffer->height = pCreateInfo->height;
framebuffer->layers = pCreateInfo->layers;
- *pFramebuffer = val_framebuffer_to_handle(framebuffer);
+ *pFramebuffer = lvp_framebuffer_to_handle(framebuffer);
return VK_SUCCESS;
}
-void val_DestroyFramebuffer(
+void lvp_DestroyFramebuffer(
VkDevice _device,
VkFramebuffer _fb,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_framebuffer, fb, _fb);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_framebuffer, fb, _fb);
if (!fb)
return;
@@ -1478,14 +1478,14 @@ void val_DestroyFramebuffer(
vk_free2(&device->alloc, pAllocator, fb);
}
-VkResult val_WaitForFences(
+VkResult lvp_WaitForFences(
VkDevice _device,
uint32_t fenceCount,
const VkFence* pFences,
VkBool32 waitAll,
uint64_t timeout)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
VkResult qret = queue_wait_idle(&device->queue, timeout);
bool timeout_status = false;
@@ -1494,7 +1494,7 @@ VkResult val_WaitForFences(
mtx_lock(&device->fence_lock);
for (unsigned i = 0; i < fenceCount; i++) {
- struct val_fence *fence = val_fence_from_handle(pFences[i]);
+ struct lvp_fence *fence = lvp_fence_from_handle(pFences[i]);
if (fence->signaled)
continue;
@@ -1518,15 +1518,15 @@ VkResult val_WaitForFences(
return timeout_status ? VK_TIMEOUT : VK_SUCCESS;
}
-VkResult val_CreateSemaphore(
+VkResult lvp_CreateSemaphore(
VkDevice _device,
const VkSemaphoreCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
- struct val_semaphore *sema = vk_alloc2(&device->alloc, pAllocator,
+ struct lvp_semaphore *sema = vk_alloc2(&device->alloc, pAllocator,
sizeof(*sema), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -1534,18 +1534,18 @@ VkResult val_CreateSemaphore(
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &sema->base,
VK_OBJECT_TYPE_SEMAPHORE);
- *pSemaphore = val_semaphore_to_handle(sema);
+ *pSemaphore = lvp_semaphore_to_handle(sema);
return VK_SUCCESS;
}
-void val_DestroySemaphore(
+void lvp_DestroySemaphore(
VkDevice _device,
VkSemaphore _semaphore,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_semaphore, semaphore, _semaphore);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_semaphore, semaphore, _semaphore);
if (!_semaphore)
return;
@@ -1553,14 +1553,14 @@ void val_DestroySemaphore(
vk_free2(&device->alloc, pAllocator, semaphore);
}
-VkResult val_CreateEvent(
+VkResult lvp_CreateEvent(
VkDevice _device,
const VkEventCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkEvent* pEvent)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_event *event = vk_alloc2(&device->alloc, pAllocator,
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_event *event = vk_alloc2(&device->alloc, pAllocator,
sizeof(*event), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -1568,18 +1568,18 @@ VkResult val_CreateEvent(
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
- *pEvent = val_event_to_handle(event);
+ *pEvent = lvp_event_to_handle(event);
return VK_SUCCESS;
}
-void val_DestroyEvent(
+void lvp_DestroyEvent(
VkDevice _device,
VkEvent _event,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_event, event, _event);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_event, event, _event);
if (!event)
return;
@@ -1588,44 +1588,44 @@ void val_DestroyEvent(
vk_free2(&device->alloc, pAllocator, event);
}
-VkResult val_GetEventStatus(
+VkResult lvp_GetEventStatus(
VkDevice _device,
VkEvent _event)
{
- VAL_FROM_HANDLE(val_event, event, _event);
+ LVP_FROM_HANDLE(lvp_event, event, _event);
if (event->event_storage == 1)
return VK_EVENT_SET;
return VK_EVENT_RESET;
}
-VkResult val_SetEvent(
+VkResult lvp_SetEvent(
VkDevice _device,
VkEvent _event)
{
- VAL_FROM_HANDLE(val_event, event, _event);
+ LVP_FROM_HANDLE(lvp_event, event, _event);
event->event_storage = 1;
return VK_SUCCESS;
}
-VkResult val_ResetEvent(
+VkResult lvp_ResetEvent(
VkDevice _device,
VkEvent _event)
{
- VAL_FROM_HANDLE(val_event, event, _event);
+ LVP_FROM_HANDLE(lvp_event, event, _event);
event->event_storage = 0;
return VK_SUCCESS;
}
-VkResult val_CreateSampler(
+VkResult lvp_CreateSampler(
VkDevice _device,
const VkSamplerCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSampler* pSampler)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_sampler *sampler;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_sampler *sampler;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
@@ -1637,18 +1637,18 @@ VkResult val_CreateSampler(
vk_object_base_init(&device->vk, &sampler->base,
VK_OBJECT_TYPE_SAMPLER);
sampler->create_info = *pCreateInfo;
- *pSampler = val_sampler_to_handle(sampler);
+ *pSampler = lvp_sampler_to_handle(sampler);
return VK_SUCCESS;
}
-void val_DestroySampler(
+void lvp_DestroySampler(
VkDevice _device,
VkSampler _sampler,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_sampler, sampler, _sampler);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_sampler, sampler, _sampler);
if (!_sampler)
return;
@@ -1656,47 +1656,47 @@ void val_DestroySampler(
vk_free2(&device->alloc, pAllocator, sampler);
}
-VkResult val_CreatePrivateDataSlotEXT(
+VkResult lvp_CreatePrivateDataSlotEXT(
VkDevice _device,
const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPrivateDataSlotEXT* pPrivateDataSlot)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
return vk_private_data_slot_create(&device->vk, pCreateInfo, pAllocator,
pPrivateDataSlot);
}
-void val_DestroyPrivateDataSlotEXT(
+void lvp_DestroyPrivateDataSlotEXT(
VkDevice _device,
VkPrivateDataSlotEXT privateDataSlot,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
}
-VkResult val_SetPrivateDataEXT(
+VkResult lvp_SetPrivateDataEXT(
VkDevice _device,
VkObjectType objectType,
uint64_t objectHandle,
VkPrivateDataSlotEXT privateDataSlot,
uint64_t data)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
return vk_object_base_set_private_data(&device->vk, objectType,
objectHandle, privateDataSlot,
data);
}
-void val_GetPrivateDataEXT(
+void lvp_GetPrivateDataEXT(
VkDevice _device,
VkObjectType objectType,
uint64_t objectHandle,
VkPrivateDataSlotEXT privateDataSlot,
uint64_t* pData)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
vk_object_base_get_private_data(&device->vk, objectType, objectHandle,
privateDataSlot, pData);
}
diff --git a/src/gallium/frontends/vallium/val_entrypoints_gen.py b/src/gallium/frontends/lavapipe/lvp_entrypoints_gen.py
index c44ac6f5208..84ccd906478 100644
--- a/src/gallium/frontends/vallium/val_entrypoints_gen.py
+++ b/src/gallium/frontends/lavapipe/lvp_entrypoints_gen.py
@@ -31,23 +31,23 @@ import xml.etree.ElementTree as et
from collections import OrderedDict, namedtuple
from mako.template import Template
-from val_extensions import *
+from lvp_extensions import *
# We generate a static hash table for entry point lookup
# (vkGetProcAddress). We use a linear congruential generator for our hash
# function and a power-of-two size table. The prime numbers are determined
# experimentally.
-# We currently don't use layers in val, but keeping the ability for anv
+# We currently don't use layers in lvp, but keeping the ability for anv
# anyway, so we can use it for device groups.
LAYERS = [
- 'val'
+ 'lvp'
]
TEMPLATE_H = Template("""\
/* This file generated from ${filename}, don't edit directly. */
-struct val_instance_dispatch_table {
+struct lvp_instance_dispatch_table {
union {
void *entrypoints[${len(instance_entrypoints)}];
struct {
@@ -66,7 +66,7 @@ struct val_instance_dispatch_table {
};
};
-struct val_physical_device_dispatch_table {
+struct lvp_physical_device_dispatch_table {
union {
void *entrypoints[${len(physical_device_entrypoints)}];
struct {
@@ -85,7 +85,7 @@ struct val_physical_device_dispatch_table {
};
};
-struct val_device_dispatch_table {
+struct lvp_device_dispatch_table {
union {
void *entrypoints[${len(device_entrypoints)}];
struct {
@@ -104,12 +104,12 @@ struct val_device_dispatch_table {
};
};
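Each generated dispatch table is a union of a flat entrypoints[] array and an anonymous struct of named members, so the same storage can be indexed numerically (by the string-map index) or accessed by name. A compilable illustration of that aliasing, demo-sized:

    /* Illustration of the array/struct aliasing in the generated tables. */
    #include <assert.h>

    struct demo_dispatch_table {
       union {
          void *entrypoints[2];
          struct {
             void *CreateInstance;
             void *DestroyInstance;
          };
       };
    };

    int main(void)
    {
       struct demo_dispatch_table t = {
          .entrypoints = { (void *)1, (void *)2 }
       };
       assert(t.DestroyInstance == t.entrypoints[1]);   /* same storage */
       return 0;
    }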
-extern const struct val_instance_dispatch_table val_instance_dispatch_table;
+extern const struct lvp_instance_dispatch_table lvp_instance_dispatch_table;
%for layer in LAYERS:
-extern const struct val_physical_device_dispatch_table ${layer}_physical_device_dispatch_table;
+extern const struct lvp_physical_device_dispatch_table ${layer}_physical_device_dispatch_table;
%endfor
%for layer in LAYERS:
-extern const struct val_device_dispatch_table ${layer}_device_dispatch_table;
+extern const struct lvp_device_dispatch_table ${layer}_device_dispatch_table;
%endfor
% for e in instance_entrypoints:
@@ -119,7 +119,7 @@ extern const struct val_device_dispatch_table ${layer}_device_dispatch_table;
% if e.guard is not None:
#ifdef ${e.guard}
% endif
- ${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()});
+ ${e.return_type} ${e.prefixed_name('lvp')}(${e.decl_params()});
% if e.guard is not None:
#endif // ${e.guard}
% endif
@@ -182,7 +182,7 @@ TEMPLATE_C = Template(u"""\
/* This file generated from ${filename}, don't edit directly. */
-#include "val_private.h"
+#include "lvp_private.h"
#include "util/macros.h"
struct string_map_entry {
@@ -279,18 +279,18 @@ ${strmap(device_strmap, 'device')}
% if e.guard is not None:
#ifdef ${e.guard}
% endif
- ${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
+ ${e.return_type} ${e.prefixed_name('lvp')}(${e.decl_params()}) __attribute__ ((weak));
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
-const struct val_instance_dispatch_table val_instance_dispatch_table = {
+const struct lvp_instance_dispatch_table lvp_instance_dispatch_table = {
% for e in instance_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
% endif
- .${e.name} = ${e.prefixed_name('val')},
+ .${e.name} = ${e.prefixed_name('lvp')},
% if e.guard is not None:
#endif // ${e.guard}
% endif
@@ -304,18 +304,18 @@ const struct val_instance_dispatch_table val_instance_dispatch_table = {
% if e.guard is not None:
#ifdef ${e.guard}
% endif
- ${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
+ ${e.return_type} ${e.prefixed_name('lvp')}(${e.decl_params()}) __attribute__ ((weak));
% if e.guard is not None:
#endif // ${e.guard}
% endif
% endfor
-const struct val_physical_device_dispatch_table val_physical_device_dispatch_table = {
+const struct lvp_physical_device_dispatch_table lvp_physical_device_dispatch_table = {
% for e in physical_device_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
% endif
- .${e.name} = ${e.prefixed_name('val')},
+ .${e.name} = ${e.prefixed_name('lvp')},
% if e.guard is not None:
#endif // ${e.guard}
% endif
@@ -331,19 +331,19 @@ const struct val_physical_device_dispatch_table val_physical_device_dispatch_tab
% if e.guard is not None:
#ifdef ${e.guard}
% endif
- % if layer == 'val':
+ % if layer == 'lvp':
${e.return_type} __attribute__ ((weak))
- ${e.prefixed_name('val')}(${e.decl_params()})
+ ${e.prefixed_name('lvp')}(${e.decl_params()})
{
% if e.params[0].type == 'VkDevice':
- VAL_FROM_HANDLE(val_device, val_device, ${e.params[0].name});
- return val_device->dispatch.${e.name}(${e.call_params()});
+ LVP_FROM_HANDLE(lvp_device, lvp_device, ${e.params[0].name});
+ return lvp_device->dispatch.${e.name}(${e.call_params()});
% elif e.params[0].type == 'VkCommandBuffer':
- VAL_FROM_HANDLE(val_cmd_buffer, val_cmd_buffer, ${e.params[0].name});
- return val_cmd_buffer->device->dispatch.${e.name}(${e.call_params()});
+ LVP_FROM_HANDLE(lvp_cmd_buffer, lvp_cmd_buffer, ${e.params[0].name});
+ return lvp_cmd_buffer->device->dispatch.${e.name}(${e.call_params()});
% elif e.params[0].type == 'VkQueue':
- VAL_FROM_HANDLE(val_queue, val_queue, ${e.params[0].name});
- return val_queue->device->dispatch.${e.name}(${e.call_params()});
+ LVP_FROM_HANDLE(lvp_queue, lvp_queue, ${e.params[0].name});
+ return lvp_queue->device->dispatch.${e.name}(${e.call_params()});
% else:
assert(!"Unhandled device child trampoline case: ${e.params[0].type}");
% endif
@@ -356,7 +356,7 @@ const struct val_physical_device_dispatch_table val_physical_device_dispatch_tab
% endif
% endfor
- const struct val_device_dispatch_table ${layer}_device_dispatch_table = {
+ const struct lvp_device_dispatch_table ${layer}_device_dispatch_table = {
% for e in device_entrypoints:
% if e.guard is not None:
#ifdef ${e.guard}
@@ -376,8 +376,8 @@ const struct val_physical_device_dispatch_table val_physical_device_dispatch_tab
* If device is NULL, all device extensions are considered enabled.
*/
bool
-val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct val_instance_extension_table *instance)
+lvp_instance_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct lvp_instance_extension_table *instance)
{
switch (index) {
% for e in instance_entrypoints:
@@ -410,8 +410,8 @@ val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
* If device is NULL, all device extensions are considered enabled.
*/
bool
-val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct val_instance_extension_table *instance)
+lvp_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct lvp_instance_extension_table *instance)
{
switch (index) {
% for e in physical_device_entrypoints:
@@ -444,9 +444,9 @@ val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
* If device is NULL, all device extensions are considered enabled.
*/
bool
-val_device_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct val_instance_extension_table *instance,
- const struct val_device_extension_table *device)
+lvp_device_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct lvp_instance_extension_table *instance,
+ const struct lvp_device_extension_table *device)
{
switch (index) {
% for e in device_entrypoints:
@@ -473,61 +473,61 @@ val_device_entrypoint_is_enabled(int index, uint32_t core_version,
}
int
-val_get_instance_entrypoint_index(const char *name)
+lvp_get_instance_entrypoint_index(const char *name)
{
return instance_string_map_lookup(name);
}
int
-val_get_physical_device_entrypoint_index(const char *name)
+lvp_get_physical_device_entrypoint_index(const char *name)
{
return physical_device_string_map_lookup(name);
}
int
-val_get_device_entrypoint_index(const char *name)
+lvp_get_device_entrypoint_index(const char *name)
{
return device_string_map_lookup(name);
}
const char *
-val_get_instance_entry_name(int index)
+lvp_get_instance_entry_name(int index)
{
return instance_entry_name(index);
}
const char *
-val_get_physical_device_entry_name(int index)
+lvp_get_physical_device_entry_name(int index)
{
return physical_device_entry_name(index);
}
const char *
-val_get_device_entry_name(int index)
+lvp_get_device_entry_name(int index)
{
return device_entry_name(index);
}
static void * __attribute__ ((noinline))
-val_resolve_device_entrypoint(uint32_t index)
+lvp_resolve_device_entrypoint(uint32_t index)
{
- return val_device_dispatch_table.entrypoints[index];
+ return lvp_device_dispatch_table.entrypoints[index];
}
void *
-val_lookup_entrypoint(const char *name)
+lvp_lookup_entrypoint(const char *name)
{
- int idx = val_get_instance_entrypoint_index(name);
+ int idx = lvp_get_instance_entrypoint_index(name);
if (idx >= 0)
- return val_instance_dispatch_table.entrypoints[idx];
+ return lvp_instance_dispatch_table.entrypoints[idx];
- idx = val_get_physical_device_entrypoint_index(name);
+ idx = lvp_get_physical_device_entrypoint_index(name);
if (idx >= 0)
- return val_physical_device_dispatch_table.entrypoints[idx];
+ return lvp_physical_device_dispatch_table.entrypoints[idx];
- idx = val_get_device_entrypoint_index(name);
+ idx = lvp_get_device_entrypoint_index(name);
if (idx >= 0)
- return val_resolve_device_entrypoint(idx);
+ return lvp_resolve_device_entrypoint(idx);
return NULL;
}""", output_encoding='utf-8')
@@ -781,16 +781,16 @@ def main():
e.num = num
instance_strmap.bake()
- # For outputting entrypoints.h we generate a val_EntryPoint() prototype
+ # For outputting entrypoints.h we generate a lvp_EntryPoint() prototype
# per entry point.
try:
- with open(os.path.join(args.outdir, 'val_entrypoints.h'), 'wb') as f:
+ with open(os.path.join(args.outdir, 'lvp_entrypoints.h'), 'wb') as f:
f.write(TEMPLATE_H.render(instance_entrypoints=instance_entrypoints,
physical_device_entrypoints=physical_device_entrypoints,
device_entrypoints=device_entrypoints,
LAYERS=LAYERS,
filename=os.path.basename(__file__)))
- with open(os.path.join(args.outdir, 'val_entrypoints.c'), 'wb') as f:
+ with open(os.path.join(args.outdir, 'lvp_entrypoints.c'), 'wb') as f:
f.write(TEMPLATE_C.render(instance_entrypoints=instance_entrypoints,
physical_device_entrypoints=physical_device_entrypoints,
device_entrypoints=device_entrypoints,
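Putting the renamed pieces together, the device trampoline in TEMPLATE_C expands to code of roughly this shape for a VkDevice-dispatched call (a hedged sketch; the entry point chosen is arbitrary and the generated file may differ in guards and spacing):

/* Plausible expansion for an entry point whose first parameter is a
 * VkDevice, e.g. vkDeviceWaitIdle; illustrative only. */
VkResult __attribute__ ((weak))
lvp_DeviceWaitIdle(VkDevice device)
{
   LVP_FROM_HANDLE(lvp_device, lvp_device, device);
   return lvp_device->dispatch.DeviceWaitIdle(device);
}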
diff --git a/src/gallium/frontends/vallium/val_execute.c b/src/gallium/frontends/lavapipe/lvp_execute.c
index d7f3cad96b1..419e6319874 100644
--- a/src/gallium/frontends/vallium/val_execute.c
+++ b/src/gallium/frontends/lavapipe/lvp_execute.c
@@ -23,11 +23,11 @@
/* use a gallium context to execute a command buffer */
-#include "val_private.h"
+#include "lvp_private.h"
#include "pipe/p_context.h"
#include "pipe/p_state.h"
-#include "val_conv.h"
+#include "lvp_conv.h"
#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_text.h"
@@ -111,15 +111,15 @@ struct rendering_state {
uint8_t push_constants[128 * 4];
- struct val_render_pass *pass;
+ struct lvp_render_pass *pass;
uint32_t subpass;
- struct val_framebuffer *vk_framebuffer;
+ struct lvp_framebuffer *vk_framebuffer;
VkRect2D render_area;
uint32_t sample_mask;
unsigned min_samples;
- struct val_attachment_state *attachments;
+ struct lvp_attachment_state *attachments;
};
static void emit_compute_state(struct rendering_state *state)
@@ -312,10 +312,10 @@ static void emit_state(struct rendering_state *state)
}
}
-static void handle_compute_pipeline(struct val_cmd_buffer_entry *cmd,
+static void handle_compute_pipeline(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
+ struct lvp_pipeline *pipeline = cmd->u.pipeline.pipeline;
state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[0];
state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[1];
@@ -343,10 +343,10 @@ get_viewport_xform(const VkViewport *viewport,
translate[2] = n;
}
-static void handle_graphics_pipeline(struct val_cmd_buffer_entry *cmd,
+static void handle_graphics_pipeline(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
+ struct lvp_pipeline *pipeline = cmd->u.pipeline.pipeline;
bool dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE+1];
unsigned fb_samples = 0;
@@ -618,21 +618,21 @@ static void handle_graphics_pipeline(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_pipeline(struct val_cmd_buffer_entry *cmd,
+static void handle_pipeline(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
+ struct lvp_pipeline *pipeline = cmd->u.pipeline.pipeline;
if (pipeline->is_compute_pipeline)
handle_compute_pipeline(cmd, state);
else
handle_graphics_pipeline(cmd, state);
}
-static void handle_vertex_buffers(struct val_cmd_buffer_entry *cmd,
+static void handle_vertex_buffers(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_bind_vertex_buffers *vcb = &cmd->u.vertex_buffers;
+ struct lvp_cmd_bind_vertex_buffers *vcb = &cmd->u.vertex_buffers;
for (i = 0; i < vcb->binding_count; i++) {
int idx = i + vcb->first;
@@ -661,7 +661,7 @@ struct dyn_info {
};
static void fill_sampler(struct pipe_sampler_state *ss,
- struct val_sampler *samp)
+ struct lvp_sampler *samp)
{
ss->wrap_s = vk_conv_wrap_mode(samp->create_info.addressModeU);
ss->wrap_t = vk_conv_wrap_mode(samp->create_info.addressModeV);
@@ -708,8 +708,8 @@ static void fill_sampler_stage(struct rendering_state *state,
gl_shader_stage stage,
enum pipe_shader_type p_stage,
int array_idx,
- const struct val_descriptor *descriptor,
- const struct val_descriptor_set_binding_layout *binding)
+ const struct lvp_descriptor *descriptor,
+ const struct lvp_descriptor_set_binding_layout *binding)
{
int ss_idx = binding->stage[stage].sampler_index;
if (ss_idx == -1)
@@ -727,15 +727,15 @@ static void fill_sampler_view_stage(struct rendering_state *state,
gl_shader_stage stage,
enum pipe_shader_type p_stage,
int array_idx,
- const struct val_descriptor *descriptor,
- const struct val_descriptor_set_binding_layout *binding)
+ const struct lvp_descriptor *descriptor,
+ const struct lvp_descriptor_set_binding_layout *binding)
{
int sv_idx = binding->stage[stage].sampler_view_index;
if (sv_idx == -1)
return;
sv_idx += array_idx;
sv_idx += dyn_info->stage[stage].sampler_view_count;
- struct val_image_view *iv = descriptor->image_view;
+ struct lvp_image_view *iv = descriptor->image_view;
struct pipe_sampler_view templ;
enum pipe_format pformat;
@@ -755,9 +755,9 @@ static void fill_sampler_view_stage(struct rendering_state *state,
if (iv->view_type == VK_IMAGE_VIEW_TYPE_CUBE)
templ.target = PIPE_TEXTURE_CUBE;
templ.u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
- templ.u.tex.last_layer = iv->subresourceRange.baseArrayLayer + val_get_layerCount(iv->image, &iv->subresourceRange) - 1;
+ templ.u.tex.last_layer = iv->subresourceRange.baseArrayLayer + lvp_get_layerCount(iv->image, &iv->subresourceRange) - 1;
templ.u.tex.first_level = iv->subresourceRange.baseMipLevel;
- templ.u.tex.last_level = iv->subresourceRange.baseMipLevel + val_get_levelCount(iv->image, &iv->subresourceRange) - 1;
+ templ.u.tex.last_level = iv->subresourceRange.baseMipLevel + lvp_get_levelCount(iv->image, &iv->subresourceRange) - 1;
if (iv->components.r != VK_COMPONENT_SWIZZLE_IDENTITY)
templ.swizzle_r = vk_conv_swizzle(iv->components.r);
if (iv->components.g != VK_COMPONENT_SWIZZLE_IDENTITY)
@@ -786,15 +786,15 @@ static void fill_sampler_buffer_view_stage(struct rendering_state *state,
gl_shader_stage stage,
enum pipe_shader_type p_stage,
int array_idx,
- const struct val_descriptor *descriptor,
- const struct val_descriptor_set_binding_layout *binding)
+ const struct lvp_descriptor *descriptor,
+ const struct lvp_descriptor_set_binding_layout *binding)
{
int sv_idx = binding->stage[stage].sampler_view_index;
if (sv_idx == -1)
return;
sv_idx += array_idx;
sv_idx += dyn_info->stage[stage].sampler_view_count;
- struct val_buffer_view *bv = descriptor->buffer_view;
+ struct lvp_buffer_view *bv = descriptor->buffer_view;
struct pipe_sampler_view templ;
memset(&templ, 0, sizeof(templ));
templ.target = PIPE_BUFFER;
@@ -821,10 +821,10 @@ static void fill_image_view_stage(struct rendering_state *state,
gl_shader_stage stage,
enum pipe_shader_type p_stage,
int array_idx,
- const struct val_descriptor *descriptor,
- const struct val_descriptor_set_binding_layout *binding)
+ const struct lvp_descriptor *descriptor,
+ const struct lvp_descriptor_set_binding_layout *binding)
{
- struct val_image_view *iv = descriptor->image_view;
+ struct lvp_image_view *iv = descriptor->image_view;
int idx = binding->stage[stage].image_index;
if (idx == -1)
return;
@@ -843,7 +843,7 @@ static void fill_image_view_stage(struct rendering_state *state,
state->iv[p_stage][idx].u.tex.last_layer = u_minify(iv->image->bo->depth0, iv->subresourceRange.baseMipLevel) - 1;
} else {
state->iv[p_stage][idx].u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
- state->iv[p_stage][idx].u.tex.last_layer = iv->subresourceRange.baseArrayLayer + val_get_layerCount(iv->image, &iv->subresourceRange) - 1;
+ state->iv[p_stage][idx].u.tex.last_layer = iv->subresourceRange.baseArrayLayer + lvp_get_layerCount(iv->image, &iv->subresourceRange) - 1;
}
state->iv[p_stage][idx].u.tex.level = iv->subresourceRange.baseMipLevel;
if (state->num_shader_images[p_stage] <= idx)
@@ -856,10 +856,10 @@ static void fill_image_buffer_view_stage(struct rendering_state *state,
gl_shader_stage stage,
enum pipe_shader_type p_stage,
int array_idx,
- const struct val_descriptor *descriptor,
- const struct val_descriptor_set_binding_layout *binding)
+ const struct lvp_descriptor *descriptor,
+ const struct lvp_descriptor_set_binding_layout *binding)
{
- struct val_buffer_view *bv = descriptor->buffer_view;
+ struct lvp_buffer_view *bv = descriptor->buffer_view;
int idx = binding->stage[stage].image_index;
if (idx == -1)
return;
@@ -876,11 +876,11 @@ static void fill_image_buffer_view_stage(struct rendering_state *state,
static void handle_descriptor(struct rendering_state *state,
struct dyn_info *dyn_info,
- const struct val_descriptor_set_binding_layout *binding,
+ const struct lvp_descriptor_set_binding_layout *binding,
gl_shader_stage stage,
enum pipe_shader_type p_stage,
int array_idx,
- const struct val_descriptor *descriptor)
+ const struct lvp_descriptor *descriptor)
{
bool is_dynamic = descriptor->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
descriptor->type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
@@ -961,14 +961,14 @@ static void handle_descriptor(struct rendering_state *state,
static void handle_set_stage(struct rendering_state *state,
struct dyn_info *dyn_info,
- const struct val_descriptor_set *set,
+ const struct lvp_descriptor_set *set,
gl_shader_stage stage,
enum pipe_shader_type p_stage)
{
int j;
for (j = 0; j < set->layout->binding_count; j++) {
- const struct val_descriptor_set_binding_layout *binding;
- const struct val_descriptor *descriptor;
+ const struct lvp_descriptor_set_binding_layout *binding;
+ const struct lvp_descriptor *descriptor;
binding = &set->layout->binding[j];
if (binding->valid) {
@@ -981,7 +981,7 @@ static void handle_set_stage(struct rendering_state *state,
}
static void increment_dyn_info(struct dyn_info *dyn_info,
- struct val_descriptor_set_layout *layout, bool inc_dyn)
+ struct lvp_descriptor_set_layout *layout, bool inc_dyn)
{
for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
@@ -994,18 +994,18 @@ static void increment_dyn_info(struct dyn_info *dyn_info,
dyn_info->dyn_index += layout->dynamic_offset_count;
}
-static void handle_compute_descriptor_sets(struct val_cmd_buffer_entry *cmd,
+static void handle_compute_descriptor_sets(struct lvp_cmd_buffer_entry *cmd,
struct dyn_info *dyn_info,
struct rendering_state *state)
{
- struct val_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
+ struct lvp_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
int i;
for (i = 0; i < bds->first; i++) {
increment_dyn_info(dyn_info, bds->layout->set[i].layout, false);
}
for (i = 0; i < bds->count; i++) {
- const struct val_descriptor_set *set = bds->sets[i];
+ const struct lvp_descriptor_set *set = bds->sets[i];
if (set->layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT)
handle_set_stage(state, dyn_info, set, MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE);
@@ -1013,10 +1013,10 @@ static void handle_compute_descriptor_sets(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_descriptor_sets(struct val_cmd_buffer_entry *cmd,
+static void handle_descriptor_sets(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
+ struct lvp_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
int i;
struct dyn_info dyn_info;
@@ -1035,7 +1035,7 @@ static void handle_descriptor_sets(struct val_cmd_buffer_entry *cmd,
}
for (i = 0; i < bds->count; i++) {
- const struct val_descriptor_set *set = bds->sets[i];
+ const struct lvp_descriptor_set *set = bds->sets[i];
if (set->layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
handle_set_stage(state, &dyn_info, set, MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX);
@@ -1056,7 +1056,7 @@ static void handle_descriptor_sets(struct val_cmd_buffer_entry *cmd,
}
static void add_img_view_surface(struct rendering_state *state,
- struct val_image_view *imgv, VkFormat format, int width, int height)
+ struct lvp_image_view *imgv, VkFormat format, int width, int height)
{
if (!imgv->surface) {
struct pipe_surface template;
@@ -1067,7 +1067,7 @@ static void add_img_view_surface(struct rendering_state *state,
template.width = width;
template.height = height;
template.u.tex.first_layer = imgv->subresourceRange.baseArrayLayer;
- template.u.tex.last_layer = imgv->subresourceRange.baseArrayLayer + val_get_layerCount(imgv->image, &imgv->subresourceRange) - 1;
+ template.u.tex.last_layer = imgv->subresourceRange.baseArrayLayer + lvp_get_layerCount(imgv->image, &imgv->subresourceRange) - 1;
template.u.tex.level = imgv->subresourceRange.baseMipLevel;
if (template.format == PIPE_FORMAT_NONE)
@@ -1089,7 +1089,7 @@ static bool
subpass_needs_clear(struct rendering_state *state)
{
uint32_t a;
- struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+ struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
for (uint32_t i = 0; i < subpass->color_count; i++) {
a = subpass->color_attachments[i].attachment;
if (attachment_needs_clear(state, a))
@@ -1105,7 +1105,7 @@ subpass_needs_clear(struct rendering_state *state)
static void render_subpass_clear(struct rendering_state *state)
{
- struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+ struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
if (!subpass_needs_clear(state))
return;
@@ -1116,8 +1116,8 @@ static void render_subpass_clear(struct rendering_state *state)
if (!attachment_needs_clear(state, a))
continue;
- struct val_render_pass_attachment *att = &state->pass->attachments[a];
- struct val_image_view *imgv = state->vk_framebuffer->attachments[a];
+ struct lvp_render_pass_attachment *att = &state->pass->attachments[a];
+ struct lvp_image_view *imgv = state->vk_framebuffer->attachments[a];
add_img_view_surface(state, imgv, att->format, state->framebuffer.width, state->framebuffer.height);
@@ -1143,8 +1143,8 @@ static void render_subpass_clear(struct rendering_state *state)
if (!attachment_needs_clear(state, ds))
return;
- struct val_render_pass_attachment *att = &state->pass->attachments[ds];
- struct val_image_view *imgv = state->vk_framebuffer->attachments[ds];
+ struct lvp_render_pass_attachment *att = &state->pass->attachments[ds];
+ struct lvp_image_view *imgv = state->vk_framebuffer->attachments[ds];
add_img_view_surface(state, imgv, att->format, state->framebuffer.width, state->framebuffer.height);
@@ -1179,18 +1179,18 @@ static void render_subpass_clear(struct rendering_state *state)
static void render_pass_resolve(struct rendering_state *state)
{
- struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
+ struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
if (!subpass->has_color_resolve)
return;
for (uint32_t i = 0; i < subpass->color_count; i++) {
- struct val_subpass_attachment src_att = subpass->color_attachments[i];
- struct val_subpass_attachment dst_att = subpass->resolve_attachments[i];
+ struct lvp_subpass_attachment src_att = subpass->color_attachments[i];
+ struct lvp_subpass_attachment dst_att = subpass->resolve_attachments[i];
if (dst_att.attachment == VK_ATTACHMENT_UNUSED)
continue;
- struct val_image_view *src_imgv = state->vk_framebuffer->attachments[src_att.attachment];
- struct val_image_view *dst_imgv = state->vk_framebuffer->attachments[dst_att.attachment];
+ struct lvp_image_view *src_imgv = state->vk_framebuffer->attachments[src_att.attachment];
+ struct lvp_image_view *dst_imgv = state->vk_framebuffer->attachments[dst_att.attachment];
struct pipe_blit_info info;
memset(&info, 0, sizeof(info));
@@ -1222,11 +1222,11 @@ static void begin_render_subpass(struct rendering_state *state,
state->framebuffer.nr_cbufs = 0;
- struct val_subpass *subpass = &state->pass->subpasses[subpass_idx];
+ struct lvp_subpass *subpass = &state->pass->subpasses[subpass_idx];
for (unsigned i = 0; i < subpass->color_count; i++) {
- struct val_subpass_attachment *color_att = &subpass->color_attachments[i];
+ struct lvp_subpass_attachment *color_att = &subpass->color_attachments[i];
if (color_att->attachment != VK_ATTACHMENT_UNUSED) {
- struct val_image_view *imgv = state->vk_framebuffer->attachments[color_att->attachment];
+ struct lvp_image_view *imgv = state->vk_framebuffer->attachments[color_att->attachment];
add_img_view_surface(state, imgv, state->pass->attachments[color_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = imgv->surface;
@@ -1236,10 +1236,10 @@ static void begin_render_subpass(struct rendering_state *state,
}
if (subpass->depth_stencil_attachment) {
- struct val_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
+ struct lvp_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
if (ds_att->attachment != VK_ATTACHMENT_UNUSED) {
- struct val_image_view *imgv = state->vk_framebuffer->attachments[ds_att->attachment];
+ struct lvp_image_view *imgv = state->vk_framebuffer->attachments[ds_att->attachment];
add_img_view_surface(state, imgv, state->pass->attachments[ds_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
state->framebuffer.zsbuf = imgv->surface;
}
@@ -1249,7 +1249,7 @@ static void begin_render_subpass(struct rendering_state *state,
&state->framebuffer);
}
-static void handle_begin_render_pass(struct val_cmd_buffer_entry *cmd,
+static void handle_begin_render_pass(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->pass = cmd->u.begin_render_pass.render_pass;
@@ -1265,7 +1265,7 @@ static void handle_begin_render_pass(struct val_cmd_buffer_entry *cmd,
begin_render_subpass(state, 0);
}
-static void handle_end_render_pass(struct val_cmd_buffer_entry *cmd,
+static void handle_end_render_pass(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->pctx->flush(state->pctx, NULL, 0);
@@ -1277,7 +1277,7 @@ static void handle_end_render_pass(struct val_cmd_buffer_entry *cmd,
state->subpass = 0;
}
-static void handle_next_subpass(struct val_cmd_buffer_entry *cmd,
+static void handle_next_subpass(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->pctx->flush(state->pctx, NULL, 0);
@@ -1286,7 +1286,7 @@ static void handle_next_subpass(struct val_cmd_buffer_entry *cmd,
begin_render_subpass(state, state->subpass);
}
-static void handle_draw(struct val_cmd_buffer_entry *cmd,
+static void handle_draw(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->info.index_size = 0;
@@ -1299,7 +1299,7 @@ static void handle_draw(struct val_cmd_buffer_entry *cmd,
state->pctx->draw_vbo(state->pctx, &state->info);
}
-static void handle_set_viewport(struct val_cmd_buffer_entry *cmd,
+static void handle_set_viewport(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
@@ -1312,7 +1312,7 @@ static void handle_set_viewport(struct val_cmd_buffer_entry *cmd,
state->vp_dirty = true;
}
-static void handle_set_scissor(struct val_cmd_buffer_entry *cmd,
+static void handle_set_scissor(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
@@ -1328,14 +1328,14 @@ static void handle_set_scissor(struct val_cmd_buffer_entry *cmd,
state->scissor_dirty = true;
}
-static void handle_set_line_width(struct val_cmd_buffer_entry *cmd,
+static void handle_set_line_width(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->rs_state.line_width = cmd->u.set_line_width.line_width;
state->rs_dirty = true;
}
-static void handle_set_depth_bias(struct val_cmd_buffer_entry *cmd,
+static void handle_set_depth_bias(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->rs_state.offset_units = cmd->u.set_depth_bias.constant_factor;
@@ -1344,14 +1344,14 @@ static void handle_set_depth_bias(struct val_cmd_buffer_entry *cmd,
state->rs_dirty = true;
}
-static void handle_set_blend_constants(struct val_cmd_buffer_entry *cmd,
+static void handle_set_blend_constants(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
memcpy(state->blend_color.color, cmd->u.set_blend_constants.blend_constants, 4 * sizeof(float));
state->blend_color_dirty = true;
}
-static void handle_set_depth_bounds(struct val_cmd_buffer_entry *cmd,
+static void handle_set_depth_bounds(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->dsa_state.depth.bounds_min = cmd->u.set_depth_bounds.min_depth;
@@ -1359,7 +1359,7 @@ static void handle_set_depth_bounds(struct val_cmd_buffer_entry *cmd,
state->dsa_dirty = true;
}
-static void handle_set_stencil_compare_mask(struct val_cmd_buffer_entry *cmd,
+static void handle_set_stencil_compare_mask(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
@@ -1369,7 +1369,7 @@ static void handle_set_stencil_compare_mask(struct val_cmd_buffer_entry *cmd,
state->dsa_dirty = true;
}
-static void handle_set_stencil_write_mask(struct val_cmd_buffer_entry *cmd,
+static void handle_set_stencil_write_mask(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
@@ -1379,7 +1379,7 @@ static void handle_set_stencil_write_mask(struct val_cmd_buffer_entry *cmd,
state->dsa_dirty = true;
}
-static void handle_set_stencil_reference(struct val_cmd_buffer_entry *cmd,
+static void handle_set_stencil_reference(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
@@ -1499,11 +1499,11 @@ copy_depth_box(ubyte *dst,
}
}
-static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry *cmd,
+static void handle_copy_image_to_buffer(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_copy_image_to_buffer *copycmd = &cmd->u.img_to_buffer;
+ struct lvp_cmd_copy_image_to_buffer *copycmd = &cmd->u.img_to_buffer;
struct pipe_box box, dbox;
struct pipe_transfer *src_t, *dst_t;
ubyte *src_data, *dst_data;
@@ -1578,11 +1578,11 @@ static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry *cmd,
+static void handle_copy_buffer_to_image(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_copy_buffer_to_image *copycmd = &cmd->u.buffer_to_img;
+ struct lvp_cmd_copy_buffer_to_image *copycmd = &cmd->u.buffer_to_img;
struct pipe_box box, sbox;
struct pipe_transfer *src_t, *dst_t;
void *src_data, *dst_data;
@@ -1660,11 +1660,11 @@ static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_copy_image(struct val_cmd_buffer_entry *cmd,
+static void handle_copy_image(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_copy_image *copycmd = &cmd->u.copy_image;
+ struct lvp_cmd_copy_image *copycmd = &cmd->u.copy_image;
state->pctx->flush(state->pctx, NULL, 0);
@@ -1688,11 +1688,11 @@ static void handle_copy_image(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_copy_buffer(struct val_cmd_buffer_entry *cmd,
+static void handle_copy_buffer(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_copy_buffer *copycmd = &cmd->u.copy_buffer;
+ struct lvp_cmd_copy_buffer *copycmd = &cmd->u.copy_buffer;
for (i = 0; i < copycmd->region_count; i++) {
struct pipe_box box = { 0 };
@@ -1703,11 +1703,11 @@ static void handle_copy_buffer(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_blit_image(struct val_cmd_buffer_entry *cmd,
+static void handle_blit_image(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_blit_image *blitcmd = &cmd->u.blit_image;
+ struct lvp_cmd_blit_image *blitcmd = &cmd->u.blit_image;
struct pipe_blit_info info;
memset(&info, 0, sizeof(info));
@@ -1773,10 +1773,10 @@ static void handle_blit_image(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_fill_buffer(struct val_cmd_buffer_entry *cmd,
+static void handle_fill_buffer(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_fill_buffer *fillcmd = &cmd->u.fill_buffer;
+ struct lvp_cmd_fill_buffer *fillcmd = &cmd->u.fill_buffer;
uint32_t *dst;
struct pipe_transfer *dst_t;
struct pipe_box box;
@@ -1798,10 +1798,10 @@ static void handle_fill_buffer(struct val_cmd_buffer_entry *cmd,
state->pctx->transfer_unmap(state->pctx, dst_t);
}
-static void handle_update_buffer(struct val_cmd_buffer_entry *cmd,
+static void handle_update_buffer(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_update_buffer *updcmd = &cmd->u.update_buffer;
+ struct lvp_cmd_update_buffer *updcmd = &cmd->u.update_buffer;
uint32_t *dst;
struct pipe_transfer *dst_t;
struct pipe_box box;
@@ -1818,7 +1818,7 @@ static void handle_update_buffer(struct val_cmd_buffer_entry *cmd,
state->pctx->transfer_unmap(state->pctx, dst_t);
}
-static void handle_draw_indexed(struct val_cmd_buffer_entry *cmd,
+static void handle_draw_indexed(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->info.indirect = NULL;
@@ -1842,7 +1842,7 @@ static void handle_draw_indexed(struct val_cmd_buffer_entry *cmd,
state->pctx->draw_vbo(state->pctx, &state->info);
}
-static void handle_draw_indirect(struct val_cmd_buffer_entry *cmd,
+static void handle_draw_indirect(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state, bool indexed)
{
if (indexed) {
@@ -1859,10 +1859,10 @@ static void handle_draw_indirect(struct val_cmd_buffer_entry *cmd,
state->pctx->draw_vbo(state->pctx, &state->info);
}
-static void handle_index_buffer(struct val_cmd_buffer_entry *cmd,
+static void handle_index_buffer(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_bind_index_buffer *ib = &cmd->u.index_buffer;
+ struct lvp_cmd_bind_index_buffer *ib = &cmd->u.index_buffer;
switch (ib->index_type) {
case VK_INDEX_TYPE_UINT16:
state->index_size = 2;
@@ -1882,7 +1882,7 @@ static void handle_index_buffer(struct val_cmd_buffer_entry *cmd,
state->ib_dirty = true;
}
-static void handle_dispatch(struct val_cmd_buffer_entry *cmd,
+static void handle_dispatch(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->dispatch_info.grid[0] = cmd->u.dispatch.x;
@@ -1892,7 +1892,7 @@ static void handle_dispatch(struct val_cmd_buffer_entry *cmd,
state->pctx->launch_grid(state->pctx, &state->dispatch_info);
}
-static void handle_dispatch_indirect(struct val_cmd_buffer_entry *cmd,
+static void handle_dispatch_indirect(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
state->dispatch_info.indirect = cmd->u.dispatch_indirect.buffer->bo;
@@ -1900,7 +1900,7 @@ static void handle_dispatch_indirect(struct val_cmd_buffer_entry *cmd,
state->pctx->launch_grid(state->pctx, &state->dispatch_info);
}
-static void handle_push_constants(struct val_cmd_buffer_entry *cmd,
+static void handle_push_constants(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
memcpy(state->push_constants + cmd->u.push_constants.offset, cmd->u.push_constants.val, cmd->u.push_constants.size);
@@ -1931,50 +1931,50 @@ static void handle_push_constants(struct val_cmd_buffer_entry *cmd,
state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = true;
}
-static void val_execute_cmd_buffer(struct val_cmd_buffer *cmd_buffer,
+static void lvp_execute_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer,
struct rendering_state *state);
-static void handle_execute_commands(struct val_cmd_buffer_entry *cmd,
+static void handle_execute_commands(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
for (unsigned i = 0; i < cmd->u.execute_commands.command_buffer_count; i++) {
- struct val_cmd_buffer *secondary_buf = cmd->u.execute_commands.cmd_buffers[i];
- val_execute_cmd_buffer(secondary_buf, state);
+ struct lvp_cmd_buffer *secondary_buf = cmd->u.execute_commands.cmd_buffers[i];
+ lvp_execute_cmd_buffer(secondary_buf, state);
}
}
-static void handle_event_set(struct val_cmd_buffer_entry *cmd,
+static void handle_event_set(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_event *event = cmd->u.event_set.event;
+ struct lvp_event *event = cmd->u.event_set.event;
if (cmd->u.event_set.flush)
state->pctx->flush(state->pctx, NULL, 0);
event->event_storage = (cmd->u.event_set.value == true) ? 1 : 0;
}
-static void handle_wait_events(struct val_cmd_buffer_entry *cmd,
+static void handle_wait_events(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
for (unsigned i = 0; i < cmd->u.wait_events.event_count; i++) {
- struct val_event *event = cmd->u.wait_events.events[i];
+ struct lvp_event *event = cmd->u.wait_events.events[i];
while (event->event_storage != true);
}
}
-static void handle_pipeline_barrier(struct val_cmd_buffer_entry *cmd,
+static void handle_pipeline_barrier(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
/* why hello nail, I'm a hammer. - TODO */
state->pctx->flush(state->pctx, NULL, 0);
}
-static void handle_begin_query(struct val_cmd_buffer_entry *cmd,
+static void handle_begin_query(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_query_cmd *qcmd = &cmd->u.query;
- struct val_query_pool *pool = qcmd->pool;
+ struct lvp_cmd_query_cmd *qcmd = &cmd->u.query;
+ struct lvp_query_pool *pool = qcmd->pool;
if (!pool->queries[qcmd->query]) {
enum pipe_query_type qtype = pool->base_type;
@@ -1987,21 +1987,21 @@ static void handle_begin_query(struct val_cmd_buffer_entry *cmd,
state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
}
-static void handle_end_query(struct val_cmd_buffer_entry *cmd,
+static void handle_end_query(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_query_cmd *qcmd = &cmd->u.query;
- struct val_query_pool *pool = qcmd->pool;
+ struct lvp_cmd_query_cmd *qcmd = &cmd->u.query;
+ struct lvp_query_pool *pool = qcmd->pool;
assert(pool->queries[qcmd->query]);
state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
}
-static void handle_reset_query_pool(struct val_cmd_buffer_entry *cmd,
+static void handle_reset_query_pool(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_query_cmd *qcmd = &cmd->u.query;
- struct val_query_pool *pool = qcmd->pool;
+ struct lvp_cmd_query_cmd *qcmd = &cmd->u.query;
+ struct lvp_query_pool *pool = qcmd->pool;
for (unsigned i = qcmd->query; i < qcmd->query + qcmd->index; i++) {
if (pool->queries[i]) {
state->pctx->destroy_query(state->pctx, pool->queries[i]);
@@ -2010,11 +2010,11 @@ static void handle_reset_query_pool(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_write_timestamp(struct val_cmd_buffer_entry *cmd,
+static void handle_write_timestamp(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_query_cmd *qcmd = &cmd->u.query;
- struct val_query_pool *pool = qcmd->pool;
+ struct lvp_cmd_query_cmd *qcmd = &cmd->u.query;
+ struct lvp_query_pool *pool = qcmd->pool;
if (!pool->queries[qcmd->query]) {
pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
PIPE_QUERY_TIMESTAMP, 0);
@@ -2026,11 +2026,11 @@ static void handle_write_timestamp(struct val_cmd_buffer_entry *cmd,
}
-static void handle_copy_query_pool_results(struct val_cmd_buffer_entry *cmd,
+static void handle_copy_query_pool_results(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
- struct val_query_pool *pool = copycmd->pool;
+ struct lvp_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
+ struct lvp_query_pool *pool = copycmd->pool;
for (unsigned i = copycmd->first_query; i < copycmd->first_query + copycmd->query_count; i++) {
unsigned offset = copycmd->dst->offset + (copycmd->stride * (i - copycmd->first_query));
@@ -2102,10 +2102,10 @@ static void pack_clear_color(enum pipe_format pformat, VkClearColorValue *in_val
}
}
-static void handle_clear_color_image(struct val_cmd_buffer_entry *cmd,
+static void handle_clear_color_image(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_image *image = cmd->u.clear_color_image.image;
+ struct lvp_image *image = cmd->u.clear_color_image.image;
uint32_t col_val[4];
pack_clear_color(image->bo->format, &cmd->u.clear_color_image.clear_val, col_val);
for (unsigned i = 0; i < cmd->u.clear_color_image.range_count; i++) {
@@ -2115,7 +2115,7 @@ static void handle_clear_color_image(struct val_cmd_buffer_entry *cmd,
box.y = 0;
box.z = 0;
- uint32_t level_count = val_get_levelCount(image, range);
+ uint32_t level_count = lvp_get_levelCount(image, range);
for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
box.width = u_minify(image->bo->width0, j);
box.height = u_minify(image->bo->height0, j);
@@ -2124,11 +2124,11 @@ static void handle_clear_color_image(struct val_cmd_buffer_entry *cmd,
box.depth = u_minify(image->bo->depth0, j);
else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
box.y = range->baseArrayLayer;
- box.height = val_get_layerCount(image, range);
+ box.height = lvp_get_layerCount(image, range);
box.depth = 1;
} else {
box.z = range->baseArrayLayer;
- box.depth = val_get_layerCount(image, range);
+ box.depth = lvp_get_layerCount(image, range);
}
state->pctx->clear_texture(state->pctx, image->bo,
@@ -2137,10 +2137,10 @@ static void handle_clear_color_image(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_clear_ds_image(struct val_cmd_buffer_entry *cmd,
+static void handle_clear_ds_image(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
- struct val_image *image = cmd->u.clear_ds_image.image;
+ struct lvp_image *image = cmd->u.clear_ds_image.image;
uint64_t col_val;
col_val = util_pack64_z_stencil(image->bo->format, cmd->u.clear_ds_image.clear_val.depth, cmd->u.clear_ds_image.clear_val.stencil);
for (unsigned i = 0; i < cmd->u.clear_ds_image.range_count; i++) {
@@ -2150,7 +2150,7 @@ static void handle_clear_ds_image(struct val_cmd_buffer_entry *cmd,
box.y = 0;
box.z = 0;
- uint32_t level_count = val_get_levelCount(image, range);
+ uint32_t level_count = lvp_get_levelCount(image, range);
for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
box.width = u_minify(image->bo->width0, j);
box.height = u_minify(image->bo->height0, j);
@@ -2159,11 +2159,11 @@ static void handle_clear_ds_image(struct val_cmd_buffer_entry *cmd,
box.depth = u_minify(image->bo->depth0, j);
else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
box.y = range->baseArrayLayer;
- box.height = val_get_layerCount(image, range);
+ box.height = lvp_get_layerCount(image, range);
box.depth = 1;
} else {
box.z = range->baseArrayLayer;
- box.depth = val_get_layerCount(image, range);
+ box.depth = lvp_get_layerCount(image, range);
}
state->pctx->clear_texture(state->pctx, image->bo,
@@ -2172,21 +2172,21 @@ static void handle_clear_ds_image(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_clear_attachments(struct val_cmd_buffer_entry *cmd,
+static void handle_clear_attachments(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
for (uint32_t a = 0; a < cmd->u.clear_attachments.attachment_count; a++) {
VkClearAttachment *att = &cmd->u.clear_attachments.attachments[a];
- struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
- struct val_image_view *imgv;
+ struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
+ struct lvp_image_view *imgv;
if (att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
- struct val_subpass_attachment *color_att = &subpass->color_attachments[att->colorAttachment];
+ struct lvp_subpass_attachment *color_att = &subpass->color_attachments[att->colorAttachment];
if (!color_att || color_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
imgv = state->vk_framebuffer->attachments[color_att->attachment];
} else {
- struct val_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
+ struct lvp_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
if (!ds_att || ds_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
imgv = state->vk_framebuffer->attachments[ds_att->attachment];
@@ -2214,11 +2214,11 @@ static void handle_clear_attachments(struct val_cmd_buffer_entry *cmd,
}
}
-static void handle_resolve_image(struct val_cmd_buffer_entry *cmd,
+static void handle_resolve_image(struct lvp_cmd_buffer_entry *cmd,
struct rendering_state *state)
{
int i;
- struct val_cmd_resolve_image *resolvecmd = &cmd->u.resolve_image;
+ struct lvp_cmd_resolve_image *resolvecmd = &cmd->u.resolve_image;
struct pipe_blit_info info;
memset(&info, 0, sizeof(info));
@@ -2263,157 +2263,157 @@ static void handle_resolve_image(struct val_cmd_buffer_entry *cmd,
}
}
-static void val_execute_cmd_buffer(struct val_cmd_buffer *cmd_buffer,
+static void lvp_execute_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer,
struct rendering_state *state)
{
- struct val_cmd_buffer_entry *cmd;
+ struct lvp_cmd_buffer_entry *cmd;
LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->cmds, cmd_link) {
switch (cmd->cmd_type) {
- case VAL_CMD_BIND_PIPELINE:
+ case LVP_CMD_BIND_PIPELINE:
handle_pipeline(cmd, state);
break;
- case VAL_CMD_SET_VIEWPORT:
+ case LVP_CMD_SET_VIEWPORT:
handle_set_viewport(cmd, state);
break;
- case VAL_CMD_SET_SCISSOR:
+ case LVP_CMD_SET_SCISSOR:
handle_set_scissor(cmd, state);
break;
- case VAL_CMD_SET_LINE_WIDTH:
+ case LVP_CMD_SET_LINE_WIDTH:
handle_set_line_width(cmd, state);
break;
- case VAL_CMD_SET_DEPTH_BIAS:
+ case LVP_CMD_SET_DEPTH_BIAS:
handle_set_depth_bias(cmd, state);
break;
- case VAL_CMD_SET_BLEND_CONSTANTS:
+ case LVP_CMD_SET_BLEND_CONSTANTS:
handle_set_blend_constants(cmd, state);
break;
- case VAL_CMD_SET_DEPTH_BOUNDS:
+ case LVP_CMD_SET_DEPTH_BOUNDS:
handle_set_depth_bounds(cmd, state);
break;
- case VAL_CMD_SET_STENCIL_COMPARE_MASK:
+ case LVP_CMD_SET_STENCIL_COMPARE_MASK:
handle_set_stencil_compare_mask(cmd, state);
break;
- case VAL_CMD_SET_STENCIL_WRITE_MASK:
+ case LVP_CMD_SET_STENCIL_WRITE_MASK:
handle_set_stencil_write_mask(cmd, state);
break;
- case VAL_CMD_SET_STENCIL_REFERENCE:
+ case LVP_CMD_SET_STENCIL_REFERENCE:
handle_set_stencil_reference(cmd, state);
break;
- case VAL_CMD_BIND_DESCRIPTOR_SETS:
+ case LVP_CMD_BIND_DESCRIPTOR_SETS:
handle_descriptor_sets(cmd, state);
break;
- case VAL_CMD_BIND_INDEX_BUFFER:
+ case LVP_CMD_BIND_INDEX_BUFFER:
handle_index_buffer(cmd, state);
break;
- case VAL_CMD_BIND_VERTEX_BUFFERS:
+ case LVP_CMD_BIND_VERTEX_BUFFERS:
handle_vertex_buffers(cmd, state);
break;
- case VAL_CMD_DRAW:
+ case LVP_CMD_DRAW:
emit_state(state);
handle_draw(cmd, state);
break;
- case VAL_CMD_DRAW_INDEXED:
+ case LVP_CMD_DRAW_INDEXED:
emit_state(state);
handle_draw_indexed(cmd, state);
break;
- case VAL_CMD_DRAW_INDIRECT:
+ case LVP_CMD_DRAW_INDIRECT:
emit_state(state);
handle_draw_indirect(cmd, state, false);
break;
- case VAL_CMD_DRAW_INDEXED_INDIRECT:
+ case LVP_CMD_DRAW_INDEXED_INDIRECT:
emit_state(state);
handle_draw_indirect(cmd, state, true);
break;
- case VAL_CMD_DISPATCH:
+ case LVP_CMD_DISPATCH:
emit_compute_state(state);
handle_dispatch(cmd, state);
break;
- case VAL_CMD_DISPATCH_INDIRECT:
+ case LVP_CMD_DISPATCH_INDIRECT:
emit_compute_state(state);
handle_dispatch_indirect(cmd, state);
break;
- case VAL_CMD_COPY_BUFFER:
+ case LVP_CMD_COPY_BUFFER:
handle_copy_buffer(cmd, state);
break;
- case VAL_CMD_COPY_IMAGE:
+ case LVP_CMD_COPY_IMAGE:
handle_copy_image(cmd, state);
break;
- case VAL_CMD_BLIT_IMAGE:
+ case LVP_CMD_BLIT_IMAGE:
handle_blit_image(cmd, state);
break;
- case VAL_CMD_COPY_BUFFER_TO_IMAGE:
+ case LVP_CMD_COPY_BUFFER_TO_IMAGE:
handle_copy_buffer_to_image(cmd, state);
break;
- case VAL_CMD_COPY_IMAGE_TO_BUFFER:
+ case LVP_CMD_COPY_IMAGE_TO_BUFFER:
handle_copy_image_to_buffer(cmd, state);
break;
- case VAL_CMD_UPDATE_BUFFER:
+ case LVP_CMD_UPDATE_BUFFER:
handle_update_buffer(cmd, state);
break;
- case VAL_CMD_FILL_BUFFER:
+ case LVP_CMD_FILL_BUFFER:
handle_fill_buffer(cmd, state);
break;
- case VAL_CMD_CLEAR_COLOR_IMAGE:
+ case LVP_CMD_CLEAR_COLOR_IMAGE:
handle_clear_color_image(cmd, state);
break;
- case VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE:
+ case LVP_CMD_CLEAR_DEPTH_STENCIL_IMAGE:
handle_clear_ds_image(cmd, state);
break;
- case VAL_CMD_CLEAR_ATTACHMENTS:
+ case LVP_CMD_CLEAR_ATTACHMENTS:
handle_clear_attachments(cmd, state);
break;
- case VAL_CMD_RESOLVE_IMAGE:
+ case LVP_CMD_RESOLVE_IMAGE:
handle_resolve_image(cmd, state);
break;
- case VAL_CMD_SET_EVENT:
- case VAL_CMD_RESET_EVENT:
+ case LVP_CMD_SET_EVENT:
+ case LVP_CMD_RESET_EVENT:
handle_event_set(cmd, state);
break;
- case VAL_CMD_WAIT_EVENTS:
+ case LVP_CMD_WAIT_EVENTS:
handle_wait_events(cmd, state);
break;
- case VAL_CMD_PIPELINE_BARRIER:
+ case LVP_CMD_PIPELINE_BARRIER:
handle_pipeline_barrier(cmd, state);
break;
- case VAL_CMD_BEGIN_QUERY:
+ case LVP_CMD_BEGIN_QUERY:
handle_begin_query(cmd, state);
break;
- case VAL_CMD_END_QUERY:
+ case LVP_CMD_END_QUERY:
handle_end_query(cmd, state);
break;
- case VAL_CMD_RESET_QUERY_POOL:
+ case LVP_CMD_RESET_QUERY_POOL:
handle_reset_query_pool(cmd, state);
break;
- case VAL_CMD_WRITE_TIMESTAMP:
+ case LVP_CMD_WRITE_TIMESTAMP:
handle_write_timestamp(cmd, state);
break;
- case VAL_CMD_COPY_QUERY_POOL_RESULTS:
+ case LVP_CMD_COPY_QUERY_POOL_RESULTS:
handle_copy_query_pool_results(cmd, state);
break;
- case VAL_CMD_PUSH_CONSTANTS:
+ case LVP_CMD_PUSH_CONSTANTS:
handle_push_constants(cmd, state);
break;
- case VAL_CMD_BEGIN_RENDER_PASS:
+ case LVP_CMD_BEGIN_RENDER_PASS:
handle_begin_render_pass(cmd, state);
break;
- case VAL_CMD_NEXT_SUBPASS:
+ case LVP_CMD_NEXT_SUBPASS:
handle_next_subpass(cmd, state);
break;
- case VAL_CMD_END_RENDER_PASS:
+ case LVP_CMD_END_RENDER_PASS:
handle_end_render_pass(cmd, state);
break;
- case VAL_CMD_EXECUTE_COMMANDS:
+ case LVP_CMD_EXECUTE_COMMANDS:
handle_execute_commands(cmd, state);
break;
}
}
}
-VkResult val_execute_cmds(struct val_device *device,
- struct val_queue *queue,
- struct val_fence *fence,
- struct val_cmd_buffer *cmd_buffer)
+VkResult lvp_execute_cmds(struct lvp_device *device,
+ struct lvp_queue *queue,
+ struct lvp_fence *fence,
+ struct lvp_cmd_buffer *cmd_buffer)
{
struct rendering_state state;
struct pipe_fence_handle *handle = NULL;
@@ -2423,7 +2423,7 @@ VkResult val_execute_cmds(struct val_device *device,
state.dsa_dirty = true;
state.rs_dirty = true;
/* create a gallium context */
- val_execute_cmd_buffer(cmd_buffer, &state);
+ lvp_execute_cmd_buffer(cmd_buffer, &state);
state.pctx->flush(state.pctx, fence ? &handle : NULL, 0);
if (fence) {
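Most of the image paths in this file lean on lvp_get_layerCount()/lvp_get_levelCount() to turn a VkImageSubresourceRange into inclusive first/last indices. Those helpers live in lvp_private.h and are not part of this diff; a sketch of the usual Mesa semantics, with the image->bo field names assumed from gallium's pipe_resource:

/* Sketch only: resolves VK_REMAINING_ARRAY_LAYERS against the image's
 * actual layer count; lvp_get_levelCount() does the same for mip
 * levels. Field names are assumptions, not quotes from lvp_private.h. */
static inline uint32_t
lvp_get_layerCount(const struct lvp_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
      image->bo->array_size - range->baseArrayLayer : range->layerCount;
}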
diff --git a/src/gallium/frontends/vallium/val_extensions.py b/src/gallium/frontends/lavapipe/lvp_extensions.py
index bb6cacf7466..ca0441c8e1b 100644
--- a/src/gallium/frontends/vallium/val_extensions.py
+++ b/src/gallium/frontends/lavapipe/lvp_extensions.py
@@ -79,10 +79,10 @@ EXTENSIONS = [
Extension('VK_KHR_get_display_properties2', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_KHR_get_memory_requirements2', 1, True),
Extension('VK_KHR_get_physical_device_properties2', 1, True),
- Extension('VK_KHR_get_surface_capabilities2', 1, 'VAL_HAS_SURFACE'),
+ Extension('VK_KHR_get_surface_capabilities2', 1, 'LVP_HAS_SURFACE'),
Extension('VK_KHR_image_format_list', 1, False),
Extension('VK_KHR_imageless_framebuffer', 1, False),
- Extension('VK_KHR_incremental_present', 1, 'VAL_HAS_SURFACE'),
+ Extension('VK_KHR_incremental_present', 1, 'LVP_HAS_SURFACE'),
Extension('VK_KHR_maintenance1', 1, True),
Extension('VK_KHR_maintenance2', 1, False),
Extension('VK_KHR_maintenance3', 1, False),
@@ -95,9 +95,9 @@ EXTENSIONS = [
Extension('VK_KHR_shader_draw_parameters', 1, False),
Extension('VK_KHR_shader_float16_int8', 1, False),
Extension('VK_KHR_storage_buffer_storage_class', 1, True),
- Extension('VK_KHR_surface', 25, 'VAL_HAS_SURFACE'),
- Extension('VK_KHR_surface_protected_capabilities', 1, 'VAL_HAS_SURFACE'),
- Extension('VK_KHR_swapchain', 68, 'VAL_HAS_SURFACE'),
+ Extension('VK_KHR_surface', 25, 'LVP_HAS_SURFACE'),
+ Extension('VK_KHR_surface_protected_capabilities', 1, 'LVP_HAS_SURFACE'),
+ Extension('VK_KHR_swapchain', 68, 'LVP_HAS_SURFACE'),
Extension('VK_KHR_uniform_buffer_standard_layout', 1, False),
Extension('VK_KHR_variable_pointers', 1, False),
Extension('VK_KHR_wayland_surface', 6, 'VK_USE_PLATFORM_WAYLAND_KHR'),
@@ -163,4 +163,4 @@ if __name__ == '__main__':
dest='xml_files')
args = parser.parse_args()
- gen_extensions('val', args.xml_files, API_VERSIONS, MAX_API_VERSION, EXTENSIONS, args.out_c, args.out_h)
+ gen_extensions('lvp', args.xml_files, API_VERSIONS, MAX_API_VERSION, EXTENSIONS, args.out_c, args.out_h)
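Extensions whose third argument is a string rather than a boolean become preprocessor guards in the generated code, which is what the `% if e.guard is not None:` blocks in the entrypoint templates consume. A hedged sketch of the guarded shape this produces:

/* Illustrative only: the exact table layout comes from
 * gen_extensions(), which is not shown in this patch. */
#ifdef LVP_HAS_SURFACE
   { .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME, .specVersion = 68 },
#endif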
diff --git a/src/gallium/frontends/vallium/val_formats.c b/src/gallium/frontends/lavapipe/lvp_formats.c
index f0f96206059..c05863dec4e 100644
--- a/src/gallium/frontends/vallium/val_formats.c
+++ b/src/gallium/frontends/lavapipe/lvp_formats.c
@@ -21,7 +21,7 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#define COMMON_NAME(x) [VK_FORMAT_##x] = PIPE_FORMAT_##x
@@ -148,7 +148,7 @@ enum pipe_format vk_format_to_pipe(VkFormat format)
}
static void
-val_physical_device_get_format_properties(struct val_physical_device *physical_device,
+lvp_physical_device_get_format_properties(struct lvp_physical_device *physical_device,
VkFormat format,
VkFormatProperties *out_properties)
{
@@ -231,30 +231,30 @@ val_physical_device_get_format_properties(struct val_physical_device *physical_d
return;
}
-void val_GetPhysicalDeviceFormatProperties(
+void lvp_GetPhysicalDeviceFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties* pFormatProperties)
{
- VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
- val_physical_device_get_format_properties(physical_device,
+ lvp_physical_device_get_format_properties(physical_device,
format,
pFormatProperties);
}
-void val_GetPhysicalDeviceFormatProperties2(
+void lvp_GetPhysicalDeviceFormatProperties2(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties2* pFormatProperties)
{
- VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
- val_physical_device_get_format_properties(physical_device,
+ lvp_physical_device_get_format_properties(physical_device,
format,
&pFormatProperties->formatProperties);
}
-static VkResult val_get_image_format_properties(struct val_physical_device *physical_device,
+static VkResult lvp_get_image_format_properties(struct lvp_physical_device *physical_device,
const VkPhysicalDeviceImageFormatInfo2 *info,
VkImageFormatProperties *pImageFormatProperties)
{
@@ -265,7 +265,7 @@ static VkResult val_get_image_format_properties(struct val_physical_device *phys
uint32_t maxArraySize;
VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
enum pipe_format pformat = vk_format_to_pipe(info->format);
- val_physical_device_get_format_properties(physical_device, info->format,
+ lvp_physical_device_get_format_properties(physical_device, info->format,
&format_props);
if (info->tiling == VK_IMAGE_TILING_LINEAR) {
format_feature_flags = format_props.linearTilingFeatures;
@@ -377,7 +377,7 @@ static VkResult val_get_image_format_properties(struct val_physical_device *phys
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
-VkResult val_GetPhysicalDeviceImageFormatProperties(
+VkResult lvp_GetPhysicalDeviceImageFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
@@ -386,7 +386,7 @@ VkResult val_GetPhysicalDeviceImageFormatProperties(
VkImageCreateFlags createFlags,
VkImageFormatProperties* pImageFormatProperties)
{
- VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
const VkPhysicalDeviceImageFormatInfo2 info = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
@@ -398,18 +398,18 @@ VkResult val_GetPhysicalDeviceImageFormatProperties(
.flags = createFlags,
};
- return val_get_image_format_properties(physical_device, &info,
+ return lvp_get_image_format_properties(physical_device, &info,
pImageFormatProperties);
}
-VkResult val_GetPhysicalDeviceImageFormatProperties2(
+VkResult lvp_GetPhysicalDeviceImageFormatProperties2(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *base_info,
VkImageFormatProperties2 *base_props)
{
- VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
VkResult result;
- result = val_get_image_format_properties(physical_device, base_info,
+ result = lvp_get_image_format_properties(physical_device, base_info,
&base_props->imageFormatProperties);
if (result != VK_SUCCESS)
return result;
@@ -417,7 +417,7 @@ VkResult val_GetPhysicalDeviceImageFormatProperties2(
return VK_SUCCESS;
}
-void val_GetPhysicalDeviceSparseImageFormatProperties(
+void lvp_GetPhysicalDeviceSparseImageFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageType type,
@@ -431,7 +431,7 @@ void val_GetPhysicalDeviceSparseImageFormatProperties(
*pNumProperties = 0;
}
-void val_GetPhysicalDeviceSparseImageFormatProperties2(
+void lvp_GetPhysicalDeviceSparseImageFormatProperties2(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo,
uint32_t *pPropertyCount,
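Every query in this file funnels through vk_format_to_pipe(): the VkFormat is mapped onto a gallium pipe_format first, and in the image-properties path a format with no gallium equivalent ends in VK_ERROR_FORMAT_NOT_SUPPORTED. The recurring pattern, as a minimal sketch:

/* Minimal sketch of the mapping check applied before any format
 * feature is advertised. */
enum pipe_format pformat = vk_format_to_pipe(format);
if (pformat == PIPE_FORMAT_NONE)
   return VK_ERROR_FORMAT_NOT_SUPPORTED;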
diff --git a/src/gallium/frontends/vallium/val_image.c b/src/gallium/frontends/lavapipe/lvp_image.c
index b40e5b9dccb..39c63aa1b05 100644
--- a/src/gallium/frontends/vallium/val_image.c
+++ b/src/gallium/frontends/lavapipe/lvp_image.c
@@ -21,20 +21,20 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "pipe/p_state.h"
VkResult
-val_image_create(VkDevice _device,
- const struct val_image_create_info *create_info,
+lvp_image_create(VkDevice _device,
+ const struct lvp_image_create_info *create_info,
const VkAllocationCallbacks* alloc,
VkImage *pImage)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
- struct val_image *image;
+ struct lvp_image *image;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
@@ -81,19 +81,19 @@ val_image_create(VkDevice _device,
&template,
&image->size);
}
- *pImage = val_image_to_handle(image);
+ *pImage = lvp_image_to_handle(image);
return VK_SUCCESS;
}
VkResult
-val_CreateImage(VkDevice device,
+lvp_CreateImage(VkDevice device,
const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkImage *pImage)
{
- return val_image_create(device,
- &(struct val_image_create_info) {
+ return lvp_image_create(device,
+ &(struct lvp_image_create_info) {
.vk_info = pCreateInfo,
.bind_flags = 0,
},
@@ -102,11 +102,11 @@ val_CreateImage(VkDevice device,
}
void
-val_DestroyImage(VkDevice _device, VkImage _image,
+lvp_DestroyImage(VkDevice _device, VkImage _image,
const VkAllocationCallbacks *pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_image, image, _image);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_image, image, _image);
if (!_image)
return;
@@ -116,14 +116,14 @@ val_DestroyImage(VkDevice _device, VkImage _image,
}
VkResult
-val_CreateImageView(VkDevice _device,
+lvp_CreateImageView(VkDevice _device,
const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkImageView *pView)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_image, image, pCreateInfo->image);
- struct val_image_view *view;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_image, image, pCreateInfo->image);
+ struct lvp_image_view *view;
view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -139,17 +139,17 @@ val_CreateImageView(VkDevice _device,
view->subresourceRange = pCreateInfo->subresourceRange;
view->image = image;
view->surface = NULL;
- *pView = val_image_view_to_handle(view);
+ *pView = lvp_image_view_to_handle(view);
return VK_SUCCESS;
}
void
-val_DestroyImageView(VkDevice _device, VkImageView _iview,
+lvp_DestroyImageView(VkDevice _device, VkImageView _iview,
const VkAllocationCallbacks *pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_image_view, iview, _iview);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_image_view, iview, _iview);
if (!_iview)
return;
@@ -159,14 +159,14 @@ val_DestroyImageView(VkDevice _device, VkImageView _iview,
vk_free2(&device->alloc, pAllocator, iview);
}
-void val_GetImageSubresourceLayout(
+void lvp_GetImageSubresourceLayout(
VkDevice _device,
VkImage _image,
const VkImageSubresource* pSubresource,
VkSubresourceLayout* pLayout)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_image, image, _image);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_image, image, _image);
uint32_t stride, offset;
device->pscreen->resource_get_info(device->pscreen,
image->bo,
@@ -187,14 +187,14 @@ void val_GetImageSubresourceLayout(
}
}
-VkResult val_CreateBuffer(
+VkResult lvp_CreateBuffer(
VkDevice _device,
const VkBufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkBuffer* pBuffer)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_buffer *buffer;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_buffer *buffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
@@ -231,18 +231,18 @@ VkResult val_CreateBuffer(
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
- *pBuffer = val_buffer_to_handle(buffer);
+ *pBuffer = lvp_buffer_to_handle(buffer);
return VK_SUCCESS;
}
-void val_DestroyBuffer(
+void lvp_DestroyBuffer(
VkDevice _device,
VkBuffer _buffer,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
if (!_buffer)
return;
@@ -253,14 +253,14 @@ void val_DestroyBuffer(
}
VkResult
-val_CreateBufferView(VkDevice _device,
+lvp_CreateBufferView(VkDevice _device,
const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkBufferView *pView)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_buffer, buffer, pCreateInfo->buffer);
- struct val_buffer_view *view;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_buffer, buffer, pCreateInfo->buffer);
+ struct lvp_buffer_view *view;
view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!view)
@@ -273,17 +273,17 @@ val_CreateBufferView(VkDevice _device,
view->pformat = vk_format_to_pipe(pCreateInfo->format);
view->offset = pCreateInfo->offset;
view->range = pCreateInfo->range;
- *pView = val_buffer_view_to_handle(view);
+ *pView = lvp_buffer_view_to_handle(view);
return VK_SUCCESS;
}
void
-val_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
+lvp_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_buffer_view, view, bufferView);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_buffer_view, view, bufferView);
if (!bufferView)
return;
diff --git a/src/gallium/frontends/vallium/val_lower_input_attachments.c b/src/gallium/frontends/lavapipe/lvp_lower_input_attachments.c
index 532f71df29c..98d4e2a1204 100644
--- a/src/gallium/frontends/vallium/val_lower_input_attachments.c
+++ b/src/gallium/frontends/lavapipe/lvp_lower_input_attachments.c
@@ -23,7 +23,7 @@
#include "nir.h"
#include "nir_builder.h"
-#include "val_lower_vulkan_resource.h"
+#include "lvp_lower_vulkan_resource.h"
static nir_ssa_def *
load_frag_coord(nir_builder *b)
@@ -80,7 +80,7 @@ try_lower_input_load(nir_function_impl *impl, nir_intrinsic_instr *load,
}
bool
-val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval)
+lvp_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
bool progress = false;
diff --git a/src/gallium/frontends/vallium/val_lower_vulkan_resource.c b/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c
index 131aa9ce7ff..221e84135a3 100644
--- a/src/gallium/frontends/vallium/val_lower_vulkan_resource.c
+++ b/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c
@@ -21,10 +21,10 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "nir.h"
#include "nir_builder.h"
-#include "val_lower_vulkan_resource.h"
+#include "lvp_lower_vulkan_resource.h"
static bool
lower_vulkan_resource_index(const nir_instr *instr, const void *data_cb)
@@ -53,8 +53,8 @@ static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
unsigned desc_set_idx = nir_intrinsic_desc_set(intrin);
unsigned binding_idx = nir_intrinsic_binding(intrin);
- struct val_pipeline_layout *layout = data_cb;
- struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+ struct lvp_pipeline_layout *layout = data_cb;
+ struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
int value = 0;
bool is_ubo = (binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
@@ -103,7 +103,7 @@ static nir_ssa_def *lower_vri_intrin_lvd(struct nir_builder *b,
static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
nir_tex_src_type deref_src_type,
gl_shader_stage stage,
- struct val_pipeline_layout *layout)
+ struct lvp_pipeline_layout *layout)
{
int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
@@ -115,7 +115,7 @@ static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
unsigned desc_set_idx = var->data.descriptor_set;
unsigned binding_idx = var->data.binding;
int value = 0;
- struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+ struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
nir_tex_instr_remove_src(tex, deref_src_idx);
for (unsigned s = 0; s < desc_set_idx; s++) {
if (deref_src_type == nir_tex_src_sampler_deref)
@@ -148,7 +148,7 @@ static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
static void lower_vri_instr_tex(struct nir_builder *b,
nir_tex_instr *tex, void *data_cb)
{
- struct val_pipeline_layout *layout = data_cb;
+ struct lvp_pipeline_layout *layout = data_cb;
int tex_value = 0;
lower_vri_instr_tex_deref(tex, nir_tex_src_sampler_deref, b->shader->info.stage, layout);
@@ -192,8 +192,8 @@ static nir_ssa_def *lower_vri_instr(struct nir_builder *b,
return NULL;
}
-void val_lower_pipeline_layout(const struct val_device *device,
- struct val_pipeline_layout *layout,
+void lvp_lower_pipeline_layout(const struct lvp_device *device,
+ struct lvp_pipeline_layout *layout,
nir_shader *shader)
{
nir_shader_lower_instructions(shader, lower_vulkan_resource_index, lower_vri_instr, layout);
@@ -203,7 +203,7 @@ void val_lower_pipeline_layout(const struct val_device *device,
glsl_get_base_type(glsl_without_array(type));
unsigned desc_set_idx = var->data.descriptor_set;
unsigned binding_idx = var->data.binding;
- struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+ struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
int value = 0;
var->data.descriptor_set = 0;
if (base_type == GLSL_TYPE_SAMPLER) {
diff --git a/src/gallium/frontends/vallium/val_lower_vulkan_resource.h b/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.h
index d325d7c1b5b..af3aab72eec 100644
--- a/src/gallium/frontends/vallium/val_lower_vulkan_resource.h
+++ b/src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.h
@@ -21,16 +21,16 @@
* IN THE SOFTWARE.
*/
-#ifndef VAL_LOWER_VULKAN_RESOURCE_H
-#define VAL_LOWER_VULKAN_RESOURCE_H
+#ifndef LVP_LOWER_VULKAN_RESOURCE_H
+#define LVP_LOWER_VULKAN_RESOURCE_H
-struct val_pipeline_layout;
-struct val_device;
-void val_lower_pipeline_layout(const struct val_device *device,
- struct val_pipeline_layout *layout,
+struct lvp_pipeline_layout;
+struct lvp_device;
+void lvp_lower_pipeline_layout(const struct lvp_device *device,
+ struct lvp_pipeline_layout *layout,
nir_shader *shader);
bool
-val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
+lvp_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
#endif
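/* A minimal sketch (not part of this patch): both entry points declared above
 * are driven from lvp_shader_compile_to_ir() in lvp_pipeline.c, as the hunks
 * further below show:
 *
 *    if (stage == MESA_SHADER_FRAGMENT)
 *       lvp_lower_input_attachments(nir, false);
 *    ...
 *    lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
 */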
diff --git a/src/gallium/frontends/vallium/val_pass.c b/src/gallium/frontends/lavapipe/lvp_pass.c
index a2f9cb4d108..83f671d575a 100644
--- a/src/gallium/frontends/vallium/val_pass.c
+++ b/src/gallium/frontends/lavapipe/lvp_pass.c
@@ -21,21 +21,21 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
static void
-val_render_pass_compile(struct val_render_pass *pass)
+lvp_render_pass_compile(struct lvp_render_pass *pass)
{
for (uint32_t i = 0; i < pass->subpass_count; i++) {
- struct val_subpass *subpass = &pass->subpasses[i];
+ struct lvp_subpass *subpass = &pass->subpasses[i];
for (uint32_t j = 0; j < subpass->attachment_count; j++) {
- struct val_subpass_attachment *subpass_att =
+ struct lvp_subpass_attachment *subpass_att =
&subpass->attachments[j];
if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
- struct val_render_pass_attachment *pass_att =
+ struct lvp_render_pass_attachment *pass_att =
&pass->attachments[subpass_att->attachment];
pass_att->first_subpass_idx = UINT32_MAX;
@@ -43,7 +43,7 @@ val_render_pass_compile(struct val_render_pass *pass)
}
for (uint32_t i = 0; i < pass->subpass_count; i++) {
- struct val_subpass *subpass = &pass->subpasses[i];
+ struct lvp_subpass *subpass = &pass->subpasses[i];
uint32_t color_sample_count = 1, depth_sample_count = 1;
/* We don't allow depth_stencil_attachment to be non-NULL and
@@ -60,12 +60,12 @@ val_render_pass_compile(struct val_render_pass *pass)
subpass->ds_resolve_attachment = NULL;
for (uint32_t j = 0; j < subpass->attachment_count; j++) {
- struct val_subpass_attachment *subpass_att =
+ struct lvp_subpass_attachment *subpass_att =
&subpass->attachments[j];
if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
- struct val_render_pass_attachment *pass_att =
+ struct lvp_render_pass_attachment *pass_att =
&pass->attachments[subpass_att->attachment];
if (i < pass_att->first_subpass_idx)
@@ -75,14 +75,14 @@ val_render_pass_compile(struct val_render_pass *pass)
subpass->has_color_att = false;
for (uint32_t j = 0; j < subpass->color_count; j++) {
- struct val_subpass_attachment *subpass_att =
+ struct lvp_subpass_attachment *subpass_att =
&subpass->color_attachments[j];
if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
subpass->has_color_att = true;
- struct val_render_pass_attachment *pass_att =
+ struct lvp_render_pass_attachment *pass_att =
&pass->attachments[subpass_att->attachment];
color_sample_count = pass_att->samples;
@@ -91,7 +91,7 @@ val_render_pass_compile(struct val_render_pass *pass)
if (subpass->depth_stencil_attachment) {
const uint32_t a =
subpass->depth_stencil_attachment->attachment;
- struct val_render_pass_attachment *pass_att =
+ struct lvp_render_pass_attachment *pass_att =
&pass->attachments[a];
depth_sample_count = pass_att->samples;
}
@@ -103,7 +103,7 @@ val_render_pass_compile(struct val_render_pass *pass)
subpass->has_color_resolve = false;
if (subpass->resolve_attachments) {
for (uint32_t j = 0; j < subpass->color_count; j++) {
- struct val_subpass_attachment *resolve_att =
+ struct lvp_subpass_attachment *resolve_att =
&subpass->resolve_attachments[j];
if (resolve_att->attachment == VK_ATTACHMENT_UNUSED)
@@ -134,7 +134,7 @@ val_render_pass_compile(struct val_render_pass *pass)
}
static unsigned
-val_num_subpass_attachments(const VkSubpassDescription *desc)
+lvp_num_subpass_attachments(const VkSubpassDescription *desc)
{
return desc->inputAttachmentCount +
desc->colorAttachmentCount +
@@ -142,14 +142,14 @@ val_num_subpass_attachments(const VkSubpassDescription *desc)
(desc->pDepthStencilAttachment != NULL);
}
-VkResult val_CreateRenderPass(
+VkResult lvp_CreateRenderPass(
VkDevice _device,
const VkRenderPassCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkRenderPass* pRenderPass)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_render_pass *pass;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_render_pass *pass;
size_t size;
size_t attachments_offset;
@@ -166,7 +166,7 @@ VkResult val_CreateRenderPass(
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   /* Clear the subpasses along with the parent pass. This is required because
- * each array member of val_subpass must be a valid pointer if not NULL.
+ * each array member of lvp_subpass must be a valid pointer if not NULL.
*/
memset(pass, 0, size);
@@ -177,7 +177,7 @@ VkResult val_CreateRenderPass(
pass->attachments = (void *) pass + attachments_offset;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
- struct val_render_pass_attachment *att = &pass->attachments[i];
+ struct lvp_render_pass_attachment *att = &pass->attachments[i];
att->format = pCreateInfo->pAttachments[i].format;
att->samples = pCreateInfo->pAttachments[i].samples;
@@ -189,13 +189,13 @@ VkResult val_CreateRenderPass(
uint32_t subpass_attachment_count = 0;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
- subpass_attachment_count += val_num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
+ subpass_attachment_count += lvp_num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
}
if (subpass_attachment_count) {
pass->subpass_attachments =
vk_alloc2(&device->alloc, pAllocator,
- subpass_attachment_count * sizeof(struct val_subpass_attachment), 8,
+ subpass_attachment_count * sizeof(struct lvp_subpass_attachment), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
vk_free2(&device->alloc, pAllocator, pass);
@@ -204,14 +204,14 @@ VkResult val_CreateRenderPass(
} else
pass->subpass_attachments = NULL;
- struct val_subpass_attachment *p = pass->subpass_attachments;
+ struct lvp_subpass_attachment *p = pass->subpass_attachments;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
- struct val_subpass *subpass = &pass->subpasses[i];
+ struct lvp_subpass *subpass = &pass->subpasses[i];
subpass->input_count = desc->inputAttachmentCount;
subpass->color_count = desc->colorAttachmentCount;
- subpass->attachment_count = val_num_subpass_attachments(desc);
+ subpass->attachment_count = lvp_num_subpass_attachments(desc);
subpass->attachments = p;
if (desc->inputAttachmentCount > 0) {
@@ -219,7 +219,7 @@ VkResult val_CreateRenderPass(
p += desc->inputAttachmentCount;
for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
- subpass->input_attachments[j] = (struct val_subpass_attachment) {
+ subpass->input_attachments[j] = (struct lvp_subpass_attachment) {
.attachment = desc->pInputAttachments[j].attachment,
.layout = desc->pInputAttachments[j].layout,
};
@@ -231,7 +231,7 @@ VkResult val_CreateRenderPass(
p += desc->colorAttachmentCount;
for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
- subpass->color_attachments[j] = (struct val_subpass_attachment) {
+ subpass->color_attachments[j] = (struct lvp_subpass_attachment) {
.attachment = desc->pColorAttachments[j].attachment,
.layout = desc->pColorAttachments[j].layout,
};
@@ -243,7 +243,7 @@ VkResult val_CreateRenderPass(
p += desc->colorAttachmentCount;
for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
- subpass->resolve_attachments[j] = (struct val_subpass_attachment) {
+ subpass->resolve_attachments[j] = (struct lvp_subpass_attachment) {
.attachment = desc->pResolveAttachments[j].attachment,
.layout = desc->pResolveAttachments[j].layout,
};
@@ -253,26 +253,26 @@ VkResult val_CreateRenderPass(
if (desc->pDepthStencilAttachment) {
subpass->depth_stencil_attachment = p++;
- *subpass->depth_stencil_attachment = (struct val_subpass_attachment) {
+ *subpass->depth_stencil_attachment = (struct lvp_subpass_attachment) {
.attachment = desc->pDepthStencilAttachment->attachment,
.layout = desc->pDepthStencilAttachment->layout,
};
}
}
- val_render_pass_compile(pass);
- *pRenderPass = val_render_pass_to_handle(pass);
+ lvp_render_pass_compile(pass);
+ *pRenderPass = lvp_render_pass_to_handle(pass);
return VK_SUCCESS;
}
-void val_DestroyRenderPass(
+void lvp_DestroyRenderPass(
VkDevice _device,
VkRenderPass _pass,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_render_pass, pass, _pass);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_render_pass, pass, _pass);
if (!_pass)
return;
@@ -281,7 +281,7 @@ void val_DestroyRenderPass(
vk_free2(&device->alloc, pAllocator, pass);
}
-void val_GetRenderAreaGranularity(
+void lvp_GetRenderAreaGranularity(
VkDevice device,
VkRenderPass renderPass,
VkExtent2D* pGranularity)
diff --git a/src/gallium/frontends/vallium/val_pipeline.c b/src/gallium/frontends/lavapipe/lvp_pipeline.c
index dc0465dd895..fb9a8918a4b 100644
--- a/src/gallium/frontends/vallium/val_pipeline.c
+++ b/src/gallium/frontends/lavapipe/lvp_pipeline.c
@@ -21,25 +21,25 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "glsl_types.h"
#include "spirv/nir_spirv.h"
#include "nir/nir_builder.h"
-#include "val_lower_vulkan_resource.h"
+#include "lvp_lower_vulkan_resource.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#define SPIR_V_MAGIC_NUMBER 0x07230203
-VkResult val_CreateShaderModule(
+VkResult lvp_CreateShaderModule(
VkDevice _device,
const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkShaderModule* pShaderModule)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_shader_module *module;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_shader_module *module;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
@@ -55,19 +55,19 @@ VkResult val_CreateShaderModule(
module->size = pCreateInfo->codeSize;
memcpy(module->data, pCreateInfo->pCode, module->size);
- *pShaderModule = val_shader_module_to_handle(module);
+ *pShaderModule = lvp_shader_module_to_handle(module);
return VK_SUCCESS;
}
-void val_DestroyShaderModule(
+void lvp_DestroyShaderModule(
VkDevice _device,
VkShaderModule _module,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_shader_module, module, _module);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_shader_module, module, _module);
if (!_module)
return;
@@ -75,13 +75,13 @@ void val_DestroyShaderModule(
vk_free2(&device->alloc, pAllocator, module);
}
-void val_DestroyPipeline(
+void lvp_DestroyPipeline(
VkDevice _device,
VkPipeline _pipeline,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_pipeline, pipeline, _pipeline);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
if (!_pipeline)
return;
@@ -463,8 +463,8 @@ shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
})
static void
-val_shader_compile_to_ir(struct val_pipeline *pipeline,
- struct val_shader_module *module,
+lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
+ struct lvp_shader_module *module,
const char *entrypoint_name,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
@@ -508,7 +508,7 @@ val_shader_compile_to_ir(struct val_pipeline *pipeline,
}
}
}
- struct val_device *pdevice = pipeline->device;
+ struct lvp_device *pdevice = pipeline->device;
const struct spirv_to_nir_options spirv_options = {
.environment = NIR_SPIRV_VULKAN,
.caps = {
@@ -559,14 +559,14 @@ val_shader_compile_to_ir(struct val_pipeline *pipeline,
nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);
if (stage == MESA_SHADER_FRAGMENT)
- val_lower_input_attachments(nir, false);
+ lvp_lower_input_attachments(nir, false);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
nir_remove_dead_variables(nir, nir_var_uniform, NULL);
- val_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
+ lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_split_var_copies);
@@ -633,7 +633,7 @@ val_shader_compile_to_ir(struct val_pipeline *pipeline,
pipeline->pipeline_nir[stage] = nir;
}
-static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct val_pipeline *pipeline)
+static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct lvp_pipeline *pipeline)
{
state->type = PIPE_SHADER_IR_NIR;
state->ir.nir = pipeline->pipeline_nir[stage];
@@ -679,7 +679,7 @@ merge_tess_info(struct shader_info *tes_info,
}
static gl_shader_stage
-val_shader_stage(VkShaderStageFlagBits stage)
+lvp_shader_stage(VkShaderStageFlagBits stage)
{
switch (stage) {
case VK_SHADER_STAGE_VERTEX_BIT:
@@ -701,10 +701,10 @@ val_shader_stage(VkShaderStageFlagBits stage)
}
static VkResult
-val_pipeline_compile(struct val_pipeline *pipeline,
+lvp_pipeline_compile(struct lvp_pipeline *pipeline,
gl_shader_stage stage)
{
- struct val_device *device = pipeline->device;
+ struct lvp_device *device = pipeline->device;
device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage], true);
if (stage == MESA_SHADER_COMPUTE) {
struct pipe_compute_state shstate = {};
@@ -740,16 +740,16 @@ val_pipeline_compile(struct val_pipeline *pipeline,
}
static VkResult
-val_graphics_pipeline_init(struct val_pipeline *pipeline,
- struct val_device *device,
- struct val_pipeline_cache *cache,
+lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
+ struct lvp_device *device,
+ struct lvp_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc)
{
if (alloc == NULL)
alloc = &device->alloc;
pipeline->device = device;
- pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
+ pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
pipeline->force_min_sample = false;
   /* deep-copy the create info */
@@ -757,10 +757,10 @@ val_graphics_pipeline_init(struct val_pipeline *pipeline,
pipeline->is_compute_pipeline = false;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
- VAL_FROM_HANDLE(val_shader_module, module,
+ LVP_FROM_HANDLE(lvp_shader_module, module,
pCreateInfo->pStages[i].module);
- gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
- val_shader_compile_to_ir(pipeline, module,
+ gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
+ lvp_shader_compile_to_ir(pipeline, module,
pCreateInfo->pStages[i].pName,
stage,
pCreateInfo->pStages[i].pSpecializationInfo);
@@ -781,8 +781,8 @@ val_graphics_pipeline_init(struct val_pipeline *pipeline,
bool has_fragment_shader = false;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
- gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
- val_pipeline_compile(pipeline, stage);
+ gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
+ lvp_pipeline_compile(pipeline, stage);
if (stage == MESA_SHADER_FRAGMENT)
has_fragment_shader = true;
}
@@ -804,16 +804,16 @@ val_graphics_pipeline_init(struct val_pipeline *pipeline,
}
static VkResult
-val_graphics_pipeline_create(
+lvp_graphics_pipeline_create(
VkDevice _device,
VkPipelineCache _cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
- struct val_pipeline *pipeline;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+ struct lvp_pipeline *pipeline;
VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
@@ -825,19 +825,19 @@ val_graphics_pipeline_create(
vk_object_base_init(&device->vk, &pipeline->base,
VK_OBJECT_TYPE_PIPELINE);
- result = val_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
+ result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
pAllocator);
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, pipeline);
return result;
}
- *pPipeline = val_pipeline_to_handle(pipeline);
+ *pPipeline = lvp_pipeline_to_handle(pipeline);
return VK_SUCCESS;
}
-VkResult val_CreateGraphicsPipelines(
+VkResult lvp_CreateGraphicsPipelines(
VkDevice _device,
VkPipelineCache pipelineCache,
uint32_t count,
@@ -850,7 +850,7 @@ VkResult val_CreateGraphicsPipelines(
for (; i < count; i++) {
VkResult r;
- r = val_graphics_pipeline_create(_device,
+ r = lvp_graphics_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
pAllocator, &pPipelines[i]);
@@ -864,42 +864,42 @@ VkResult val_CreateGraphicsPipelines(
}
static VkResult
-val_compute_pipeline_init(struct val_pipeline *pipeline,
- struct val_device *device,
- struct val_pipeline_cache *cache,
+lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
+ struct lvp_device *device,
+ struct lvp_pipeline_cache *cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc)
{
- VAL_FROM_HANDLE(val_shader_module, module,
+ LVP_FROM_HANDLE(lvp_shader_module, module,
pCreateInfo->stage.module);
if (alloc == NULL)
alloc = &device->alloc;
pipeline->device = device;
- pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
+ pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
pipeline->force_min_sample = false;
deep_copy_compute_create_info(&pipeline->compute_create_info, pCreateInfo);
pipeline->is_compute_pipeline = true;
- val_shader_compile_to_ir(pipeline, module,
+ lvp_shader_compile_to_ir(pipeline, module,
pCreateInfo->stage.pName,
MESA_SHADER_COMPUTE,
pCreateInfo->stage.pSpecializationInfo);
- val_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
+ lvp_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
return VK_SUCCESS;
}
static VkResult
-val_compute_pipeline_create(
+lvp_compute_pipeline_create(
VkDevice _device,
VkPipelineCache _cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
- struct val_pipeline *pipeline;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+ struct lvp_pipeline *pipeline;
VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
@@ -911,19 +911,19 @@ val_compute_pipeline_create(
vk_object_base_init(&device->vk, &pipeline->base,
VK_OBJECT_TYPE_PIPELINE);
- result = val_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
+ result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
pAllocator);
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, pipeline);
return result;
}
- *pPipeline = val_pipeline_to_handle(pipeline);
+ *pPipeline = lvp_pipeline_to_handle(pipeline);
return VK_SUCCESS;
}
-VkResult val_CreateComputePipelines(
+VkResult lvp_CreateComputePipelines(
VkDevice _device,
VkPipelineCache pipelineCache,
uint32_t count,
@@ -936,7 +936,7 @@ VkResult val_CreateComputePipelines(
for (; i < count; i++) {
VkResult r;
- r = val_compute_pipeline_create(_device,
+ r = lvp_compute_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
pAllocator, &pPipelines[i]);
diff --git a/src/gallium/frontends/vallium/val_pipeline_cache.c b/src/gallium/frontends/lavapipe/lvp_pipeline_cache.c
index b0e519fdf8b..1f48186ff46 100644
--- a/src/gallium/frontends/vallium/val_pipeline_cache.c
+++ b/src/gallium/frontends/lavapipe/lvp_pipeline_cache.c
@@ -21,16 +21,16 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
-VkResult val_CreatePipelineCache(
+VkResult lvp_CreatePipelineCache(
VkDevice _device,
const VkPipelineCacheCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPipelineCache* pPipelineCache)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_pipeline_cache *cache;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_pipeline_cache *cache;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
@@ -49,27 +49,27 @@ VkResult val_CreatePipelineCache(
cache->alloc = device->alloc;
cache->device = device;
- *pPipelineCache = val_pipeline_cache_to_handle(cache);
+ *pPipelineCache = lvp_pipeline_cache_to_handle(cache);
return VK_SUCCESS;
}
-void val_DestroyPipelineCache(
+void lvp_DestroyPipelineCache(
VkDevice _device,
VkPipelineCache _cache,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
if (!_cache)
return;
-// val_pipeline_cache_finish(cache);
+// lvp_pipeline_cache_finish(cache);
vk_object_base_finish(&cache->base);
vk_free2(&device->alloc, pAllocator, cache);
}
-VkResult val_GetPipelineCacheData(
+VkResult lvp_GetPipelineCacheData(
VkDevice _device,
VkPipelineCache _cache,
size_t* pDataSize,
@@ -86,14 +86,14 @@ VkResult val_GetPipelineCacheData(
hdr[1] = 1;
hdr[2] = VK_VENDOR_ID_MESA;
hdr[3] = 0;
- val_device_get_cache_uuid(&hdr[4]);
+ lvp_device_get_cache_uuid(&hdr[4]);
}
} else
*pDataSize = 32;
return result;
}
-VkResult val_MergePipelineCaches(
+VkResult lvp_MergePipelineCaches(
VkDevice _device,
VkPipelineCache destCache,
uint32_t srcCacheCount,
diff --git a/src/gallium/frontends/vallium/val_private.h b/src/gallium/frontends/lavapipe/lvp_private.h
index 7a41c9f3818..ff1edcddd4a 100644
--- a/src/gallium/frontends/vallium/val_private.h
+++ b/src/gallium/frontends/lavapipe/lvp_private.h
@@ -50,8 +50,8 @@ typedef uint32_t xcb_window_t;
#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>
-#include "val_extensions.h"
-#include "val_entrypoints.h"
+#include "lvp_extensions.h"
+#include "lvp_entrypoints.h"
#include "vk_object.h"
#include "wsi_common.h"
@@ -64,119 +64,119 @@ extern "C" {
#define MAX_SETS 8
#define MAX_PUSH_CONSTANTS_SIZE 128
-#define val_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
+#define lvp_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
#define typed_memcpy(dest, src, count) ({ \
memcpy((dest), (src), (count) * sizeof(*(src))); \
})
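/* A minimal sketch (not part of this patch): typed_memcpy sizes the copy from
 * the source element type, so the call site never needs a hand-written sizeof.
 * Assuming a hypothetical destination array dest_offsets:
 *
 *    typed_memcpy(dest_offsets, pDynamicOffsets, dynamicOffsetCount);
 *    expands to
 *    memcpy(dest_offsets, pDynamicOffsets,
 *           dynamicOffsetCount * sizeof(*pDynamicOffsets));
 */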
-int val_get_instance_entrypoint_index(const char *name);
-int val_get_device_entrypoint_index(const char *name);
-int val_get_physical_device_entrypoint_index(const char *name);
+int lvp_get_instance_entrypoint_index(const char *name);
+int lvp_get_device_entrypoint_index(const char *name);
+int lvp_get_physical_device_entrypoint_index(const char *name);
-const char *val_get_instance_entry_name(int index);
-const char *val_get_physical_device_entry_name(int index);
-const char *val_get_device_entry_name(int index);
+const char *lvp_get_instance_entry_name(int index);
+const char *lvp_get_physical_device_entry_name(int index);
+const char *lvp_get_device_entry_name(int index);
-bool val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct val_instance_extension_table *instance);
-bool val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct val_instance_extension_table *instance);
-bool val_device_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct val_instance_extension_table *instance,
- const struct val_device_extension_table *device);
+bool lvp_instance_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct lvp_instance_extension_table *instance);
+bool lvp_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct lvp_instance_extension_table *instance);
+bool lvp_device_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct lvp_instance_extension_table *instance,
+ const struct lvp_device_extension_table *device);
-void *val_lookup_entrypoint(const char *name);
+void *lvp_lookup_entrypoint(const char *name);
-#define VAL_DEFINE_HANDLE_CASTS(__val_type, __VkType) \
+#define LVP_DEFINE_HANDLE_CASTS(__lvp_type, __VkType) \
\
- static inline struct __val_type * \
- __val_type ## _from_handle(__VkType _handle) \
+ static inline struct __lvp_type * \
+ __lvp_type ## _from_handle(__VkType _handle) \
{ \
- return (struct __val_type *) _handle; \
+ return (struct __lvp_type *) _handle; \
} \
\
static inline __VkType \
- __val_type ## _to_handle(struct __val_type *_obj) \
+ __lvp_type ## _to_handle(struct __lvp_type *_obj) \
{ \
return (__VkType) _obj; \
}
-#define VAL_DEFINE_NONDISP_HANDLE_CASTS(__val_type, __VkType) \
+#define LVP_DEFINE_NONDISP_HANDLE_CASTS(__lvp_type, __VkType) \
\
- static inline struct __val_type * \
- __val_type ## _from_handle(__VkType _handle) \
+ static inline struct __lvp_type * \
+ __lvp_type ## _from_handle(__VkType _handle) \
{ \
- return (struct __val_type *)(uintptr_t) _handle; \
+ return (struct __lvp_type *)(uintptr_t) _handle; \
} \
\
static inline __VkType \
- __val_type ## _to_handle(struct __val_type *_obj) \
+ __lvp_type ## _to_handle(struct __lvp_type *_obj) \
{ \
return (__VkType)(uintptr_t) _obj; \
}
-#define VAL_FROM_HANDLE(__val_type, __name, __handle) \
- struct __val_type *__name = __val_type ## _from_handle(__handle)
-
-VAL_DEFINE_HANDLE_CASTS(val_cmd_buffer, VkCommandBuffer)
-VAL_DEFINE_HANDLE_CASTS(val_device, VkDevice)
-VAL_DEFINE_HANDLE_CASTS(val_instance, VkInstance)
-VAL_DEFINE_HANDLE_CASTS(val_physical_device, VkPhysicalDevice)
-VAL_DEFINE_HANDLE_CASTS(val_queue, VkQueue)
-
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_cmd_pool, VkCommandPool)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_buffer, VkBuffer)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_buffer_view, VkBufferView)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_pool, VkDescriptorPool)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_set, VkDescriptorSet)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_descriptor_set_layout, VkDescriptorSetLayout)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_device_memory, VkDeviceMemory)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_event, VkEvent)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_framebuffer, VkFramebuffer)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_image, VkImage)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_image_view, VkImageView);
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline_cache, VkPipelineCache)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline, VkPipeline)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_pipeline_layout, VkPipelineLayout)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_query_pool, VkQueryPool)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_render_pass, VkRenderPass)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_sampler, VkSampler)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_shader_module, VkShaderModule)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_fence, VkFence);
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_semaphore, VkSemaphore);
+#define LVP_FROM_HANDLE(__lvp_type, __name, __handle) \
+ struct __lvp_type *__name = __lvp_type ## _from_handle(__handle)
+
+LVP_DEFINE_HANDLE_CASTS(lvp_cmd_buffer, VkCommandBuffer)
+LVP_DEFINE_HANDLE_CASTS(lvp_device, VkDevice)
+LVP_DEFINE_HANDLE_CASTS(lvp_instance, VkInstance)
+LVP_DEFINE_HANDLE_CASTS(lvp_physical_device, VkPhysicalDevice)
+LVP_DEFINE_HANDLE_CASTS(lvp_queue, VkQueue)
+
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_cmd_pool, VkCommandPool)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer, VkBuffer)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer_view, VkBufferView)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_pool, VkDescriptorPool)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_set, VkDescriptorSet)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_set_layout, VkDescriptorSetLayout)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_device_memory, VkDeviceMemory)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_event, VkEvent)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_framebuffer, VkFramebuffer)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_image, VkImage)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_image_view, VkImageView);
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_cache, VkPipelineCache)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline, VkPipeline)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_layout, VkPipelineLayout)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_query_pool, VkQueryPool)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_render_pass, VkRenderPass)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_sampler, VkSampler)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_shader_module, VkShaderModule)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_fence, VkFence);
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_semaphore, VkSemaphore);
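/* A minimal sketch (not part of this patch) of what the renamed cast machinery
 * yields: LVP_DEFINE_HANDLE_CASTS(lvp_device, VkDevice) generates
 * lvp_device_from_handle() and lvp_device_to_handle(), and LVP_FROM_HANDLE then
 * declares an unwrapped local in one line, as the entry points above do:
 *
 *    LVP_FROM_HANDLE(lvp_device, device, _device);
 *    is equivalent to
 *    struct lvp_device *device = lvp_device_from_handle(_device);
 *
 * The non-dispatchable variant differs only in the uintptr_t round-trip,
 * which keeps the cast well-defined where those handles are plain 64-bit
 * integers rather than pointers.
 */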
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
*/
-VkResult __vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...);
+VkResult __vk_errorf(struct lvp_instance *instance, VkResult error, const char *file, int line, const char *format, ...);
-#define VAL_DEBUG_ALL_ENTRYPOINTS (1 << 0)
+#define LVP_DEBUG_ALL_ENTRYPOINTS (1 << 0)
#define vk_error(instance, error) __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
#define vk_errorf(instance, error, format, ...) __vk_errorf(instance, error, __FILE__, __LINE__, format, ## __VA_ARGS__);
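/* A minimal sketch (not part of this patch): a typical error site, matching
 * the pattern in the hunks above — funnelling through __vk_errorf lets a
 * debugger break where the error is first generated:
 *
 *    if (module == NULL)
 *       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 */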
-void __val_finishme(const char *file, int line, const char *format, ...)
- val_printflike(3, 4);
+void __lvp_finishme(const char *file, int line, const char *format, ...)
+ lvp_printflike(3, 4);
-#define val_finishme(format, ...) \
- __val_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
+#define lvp_finishme(format, ...) \
+ __lvp_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
#define stub_return(v) \
do { \
- val_finishme("stub %s", __func__); \
+ lvp_finishme("stub %s", __func__); \
return (v); \
} while (0)
#define stub() \
do { \
- val_finishme("stub %s", __func__); \
+ lvp_finishme("stub %s", __func__); \
return; \
} while (0)
-struct val_shader_module {
+struct lvp_shader_module {
struct vk_object_base base;
uint32_t size;
char data[0];
@@ -195,58 +195,58 @@ mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
return (1 << mesa_stage);
}
-#define VAL_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+#define LVP_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
-#define val_foreach_stage(stage, stage_bits) \
+#define lvp_foreach_stage(stage, stage_bits) \
for (gl_shader_stage stage, \
- __tmp = (gl_shader_stage)((stage_bits) & VAL_STAGE_MASK); \
+ __tmp = (gl_shader_stage)((stage_bits) & LVP_STAGE_MASK); \
stage = __builtin_ffs(__tmp) - 1, __tmp; \
__tmp &= ~(1 << (stage)))
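/* A minimal sketch (not part of this patch): the macro walks the set bits of
 * a VkShaderStageFlags mask, yielding each gl_shader_stage exactly once, e.g.
 *
 *    lvp_foreach_stage(stage, stageFlags)
 *       counts[stage]++;
 *
 * (counts[] is a hypothetical per-stage tally.)
 */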
-struct val_physical_device {
+struct lvp_physical_device {
VK_LOADER_DATA _loader_data;
- struct val_instance * instance;
+ struct lvp_instance * instance;
struct pipe_loader_device *pld;
struct pipe_screen *pscreen;
uint32_t max_images;
struct wsi_device wsi_device;
- struct val_device_extension_table supported_extensions;
+ struct lvp_device_extension_table supported_extensions;
};
-struct val_instance {
+struct lvp_instance {
struct vk_object_base base;
VkAllocationCallbacks alloc;
uint32_t apiVersion;
int physicalDeviceCount;
- struct val_physical_device physicalDevice;
+ struct lvp_physical_device physicalDevice;
uint64_t debug_flags;
struct pipe_loader_device *devs;
int num_devices;
- struct val_instance_extension_table enabled_extensions;
- struct val_instance_dispatch_table dispatch;
- struct val_physical_device_dispatch_table physical_device_dispatch;
- struct val_device_dispatch_table device_dispatch;
+ struct lvp_instance_extension_table enabled_extensions;
+ struct lvp_instance_dispatch_table dispatch;
+ struct lvp_physical_device_dispatch_table physical_device_dispatch;
+ struct lvp_device_dispatch_table device_dispatch;
};
-VkResult val_init_wsi(struct val_physical_device *physical_device);
-void val_finish_wsi(struct val_physical_device *physical_device);
+VkResult lvp_init_wsi(struct lvp_physical_device *physical_device);
+void lvp_finish_wsi(struct lvp_physical_device *physical_device);
-bool val_instance_extension_supported(const char *name);
-uint32_t val_physical_device_api_version(struct val_physical_device *dev);
-bool val_physical_device_extension_supported(struct val_physical_device *dev,
+bool lvp_instance_extension_supported(const char *name);
+uint32_t lvp_physical_device_api_version(struct lvp_physical_device *dev);
+bool lvp_physical_device_extension_supported(struct lvp_physical_device *dev,
const char *name);
-struct val_queue {
+struct lvp_queue {
VK_LOADER_DATA _loader_data;
VkDeviceQueueCreateFlags flags;
- struct val_device * device;
+ struct lvp_device * device;
struct pipe_context *ctx;
bool shutdown;
thrd_t exec_thread;
@@ -256,37 +256,37 @@ struct val_queue {
uint32_t count;
};
-struct val_queue_work {
+struct lvp_queue_work {
struct list_head list;
uint32_t cmd_buffer_count;
- struct val_cmd_buffer **cmd_buffers;
- struct val_fence *fence;
+ struct lvp_cmd_buffer **cmd_buffers;
+ struct lvp_fence *fence;
};
-struct val_pipeline_cache {
+struct lvp_pipeline_cache {
struct vk_object_base base;
- struct val_device * device;
+ struct lvp_device * device;
VkAllocationCallbacks alloc;
};
-struct val_device {
+struct lvp_device {
struct vk_device vk;
VkAllocationCallbacks alloc;
- struct val_queue queue;
- struct val_instance * instance;
- struct val_physical_device *physical_device;
+ struct lvp_queue queue;
+ struct lvp_instance * instance;
+ struct lvp_physical_device *physical_device;
struct pipe_screen *pscreen;
mtx_t fence_lock;
- struct val_device_extension_table enabled_extensions;
- struct val_device_dispatch_table dispatch;
+ struct lvp_device_extension_table enabled_extensions;
+ struct lvp_device_dispatch_table dispatch;
};
-void val_device_get_cache_uuid(void *uuid);
+void lvp_device_get_cache_uuid(void *uuid);
-struct val_device_memory {
+struct lvp_device_memory {
struct vk_object_base base;
struct pipe_memory_allocation *pmem;
uint32_t type_index;
@@ -294,7 +294,7 @@ struct val_device_memory {
void * map;
};
-struct val_image {
+struct lvp_image {
struct vk_object_base base;
VkImageType type;
VkFormat vk_format;
@@ -304,7 +304,7 @@ struct val_image {
};
static inline uint32_t
-val_get_layerCount(const struct val_image *image,
+lvp_get_layerCount(const struct lvp_image *image,
const VkImageSubresourceRange *range)
{
return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
@@ -312,28 +312,28 @@ val_get_layerCount(const struct val_image *image,
}
static inline uint32_t
-val_get_levelCount(const struct val_image *image,
+lvp_get_levelCount(const struct lvp_image *image,
const VkImageSubresourceRange *range)
{
return range->levelCount == VK_REMAINING_MIP_LEVELS ?
(image->bo->last_level + 1) - range->baseMipLevel : range->levelCount;
}
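/* A worked example (not part of this patch): for a view created with
 * levelCount == VK_REMAINING_MIP_LEVELS and baseMipLevel == 2, over an image
 * whose bo->last_level is 9, lvp_get_levelCount() returns (9 + 1) - 2 = 8;
 * an explicit levelCount is passed through unchanged.
 */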
-struct val_image_create_info {
+struct lvp_image_create_info {
const VkImageCreateInfo *vk_info;
uint32_t bind_flags;
uint32_t stride;
};
VkResult
-val_image_create(VkDevice _device,
- const struct val_image_create_info *create_info,
+lvp_image_create(VkDevice _device,
+ const struct lvp_image_create_info *create_info,
const VkAllocationCallbacks* alloc,
VkImage *pImage);
-struct val_image_view {
+struct lvp_image_view {
struct vk_object_base base;
- const struct val_image *image; /**< VkImageViewCreateInfo::image */
+ const struct lvp_image *image; /**< VkImageViewCreateInfo::image */
VkImageViewType view_type;
VkFormat format;
@@ -344,23 +344,23 @@ struct val_image_view {
struct pipe_surface *surface; /* have we created a pipe surface for this? */
};
-struct val_subpass_attachment {
+struct lvp_subpass_attachment {
uint32_t attachment;
VkImageLayout layout;
bool in_render_loop;
};
-struct val_subpass {
+struct lvp_subpass {
uint32_t attachment_count;
- struct val_subpass_attachment * attachments;
+ struct lvp_subpass_attachment * attachments;
uint32_t input_count;
uint32_t color_count;
- struct val_subpass_attachment * input_attachments;
- struct val_subpass_attachment * color_attachments;
- struct val_subpass_attachment * resolve_attachments;
- struct val_subpass_attachment * depth_stencil_attachment;
- struct val_subpass_attachment * ds_resolve_attachment;
+ struct lvp_subpass_attachment * input_attachments;
+ struct lvp_subpass_attachment * color_attachments;
+ struct lvp_subpass_attachment * resolve_attachments;
+ struct lvp_subpass_attachment * depth_stencil_attachment;
+ struct lvp_subpass_attachment * ds_resolve_attachment;
/** Subpass has at least one color resolve attachment */
bool has_color_resolve;
@@ -371,7 +371,7 @@ struct val_subpass {
VkSampleCountFlagBits max_sample_count;
};
-struct val_render_pass_attachment {
+struct lvp_render_pass_attachment {
VkFormat format;
uint32_t samples;
VkAttachmentLoadOp load_op;
@@ -384,32 +384,32 @@ struct val_render_pass_attachment {
uint32_t last_subpass_idx;
};
-struct val_render_pass {
+struct lvp_render_pass {
struct vk_object_base base;
uint32_t attachment_count;
uint32_t subpass_count;
- struct val_subpass_attachment * subpass_attachments;
- struct val_render_pass_attachment * attachments;
- struct val_subpass subpasses[0];
+ struct lvp_subpass_attachment * subpass_attachments;
+ struct lvp_render_pass_attachment * attachments;
+ struct lvp_subpass subpasses[0];
};
-struct val_sampler {
+struct lvp_sampler {
struct vk_object_base base;
VkSamplerCreateInfo create_info;
uint32_t state[4];
};
-struct val_framebuffer {
+struct lvp_framebuffer {
struct vk_object_base base;
uint32_t width;
uint32_t height;
uint32_t layers;
uint32_t attachment_count;
- struct val_image_view * attachments[0];
+ struct lvp_image_view * attachments[0];
};
-struct val_descriptor_set_binding_layout {
+struct lvp_descriptor_set_binding_layout {
uint16_t descriptor_index;
/* Number of array elements in this binding */
VkDescriptorType type;
@@ -426,10 +426,10 @@ struct val_descriptor_set_binding_layout {
} stage[MESA_SHADER_STAGES];
/* Immutable samplers (or NULL if no immutable samplers) */
- struct val_sampler **immutable_samplers;
+ struct lvp_sampler **immutable_samplers;
};
-struct val_descriptor_set_layout {
+struct lvp_descriptor_set_layout {
struct vk_object_base base;
/* Number of bindings in this descriptor set */
uint16_t binding_count;
@@ -452,34 +452,34 @@ struct val_descriptor_set_layout {
uint16_t dynamic_offset_count;
/* Bindings in this descriptor set */
- struct val_descriptor_set_binding_layout binding[0];
+ struct lvp_descriptor_set_binding_layout binding[0];
};
-struct val_descriptor {
+struct lvp_descriptor {
VkDescriptorType type;
union {
struct {
- struct val_image_view *image_view;
- struct val_sampler *sampler;
+ struct lvp_image_view *image_view;
+ struct lvp_sampler *sampler;
};
struct {
uint64_t offset;
uint64_t range;
- struct val_buffer *buffer;
+ struct lvp_buffer *buffer;
} buf;
- struct val_buffer_view *buffer_view;
+ struct lvp_buffer_view *buffer_view;
};
};
-struct val_descriptor_set {
+struct lvp_descriptor_set {
struct vk_object_base base;
- const struct val_descriptor_set_layout *layout;
+ const struct lvp_descriptor_set_layout *layout;
struct list_head link;
- struct val_descriptor descriptors[0];
+ struct lvp_descriptor descriptors[0];
};
-struct val_descriptor_pool {
+struct lvp_descriptor_pool {
struct vk_object_base base;
VkDescriptorPoolCreateFlags flags;
uint32_t max_sets;
@@ -488,18 +488,18 @@ struct val_descriptor_pool {
};
VkResult
-val_descriptor_set_create(struct val_device *device,
- const struct val_descriptor_set_layout *layout,
- struct val_descriptor_set **out_set);
+lvp_descriptor_set_create(struct lvp_device *device,
+ const struct lvp_descriptor_set_layout *layout,
+ struct lvp_descriptor_set **out_set);
void
-val_descriptor_set_destroy(struct val_device *device,
- struct val_descriptor_set *set);
+lvp_descriptor_set_destroy(struct lvp_device *device,
+ struct lvp_descriptor_set *set);
-struct val_pipeline_layout {
+struct lvp_pipeline_layout {
struct vk_object_base base;
struct {
- struct val_descriptor_set_layout *layout;
+ struct lvp_descriptor_set_layout *layout;
uint32_t dynamic_offset_start;
} set[MAX_SETS];
@@ -510,10 +510,10 @@ struct val_pipeline_layout {
} stage[MESA_SHADER_STAGES];
};
-struct val_pipeline {
+struct lvp_pipeline {
struct vk_object_base base;
- struct val_device * device;
- struct val_pipeline_layout * layout;
+ struct lvp_device * device;
+ struct lvp_pipeline_layout * layout;
bool is_compute_pipeline;
bool force_min_sample;
@@ -523,25 +523,25 @@ struct val_pipeline {
VkComputePipelineCreateInfo compute_create_info;
};
-struct val_event {
+struct lvp_event {
struct vk_object_base base;
uint64_t event_storage;
};
-struct val_fence {
+struct lvp_fence {
struct vk_object_base base;
bool signaled;
struct pipe_fence_handle *handle;
};
-struct val_semaphore {
+struct lvp_semaphore {
struct vk_object_base base;
bool dummy;
};
-struct val_buffer {
+struct lvp_buffer {
struct vk_object_base base;
- struct val_device * device;
+ struct lvp_device * device;
VkDeviceSize size;
VkBufferUsageFlags usage;
@@ -551,16 +551,16 @@ struct val_buffer {
uint64_t total_size;
};
-struct val_buffer_view {
+struct lvp_buffer_view {
struct vk_object_base base;
VkFormat format;
enum pipe_format pformat;
- struct val_buffer *buffer;
+ struct lvp_buffer *buffer;
uint32_t offset;
uint64_t range;
};
-struct val_query_pool {
+struct lvp_query_pool {
struct vk_object_base base;
VkQueryType type;
uint32_t count;
@@ -568,7 +568,7 @@ struct val_query_pool {
struct pipe_query *queries[0];
};
-struct val_cmd_pool {
+struct lvp_cmd_pool {
struct vk_object_base base;
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
@@ -576,22 +576,22 @@ struct val_cmd_pool {
};
-enum val_cmd_buffer_status {
- VAL_CMD_BUFFER_STATUS_INVALID,
- VAL_CMD_BUFFER_STATUS_INITIAL,
- VAL_CMD_BUFFER_STATUS_RECORDING,
- VAL_CMD_BUFFER_STATUS_EXECUTABLE,
- VAL_CMD_BUFFER_STATUS_PENDING,
+enum lvp_cmd_buffer_status {
+ LVP_CMD_BUFFER_STATUS_INVALID,
+ LVP_CMD_BUFFER_STATUS_INITIAL,
+ LVP_CMD_BUFFER_STATUS_RECORDING,
+ LVP_CMD_BUFFER_STATUS_EXECUTABLE,
+ LVP_CMD_BUFFER_STATUS_PENDING,
};
-struct val_cmd_buffer {
+struct lvp_cmd_buffer {
struct vk_object_base base;
- struct val_device * device;
+ struct lvp_device * device;
VkCommandBufferLevel level;
- enum val_cmd_buffer_status status;
- struct val_cmd_pool * pool;
+ enum lvp_cmd_buffer_status status;
+ struct lvp_cmd_pool * pool;
struct list_head pool_link;
struct list_head cmds;
@@ -600,125 +600,125 @@ struct val_cmd_buffer {
};
/* in the same order as the buffer-building commands in the spec. */
-enum val_cmds {
- VAL_CMD_BIND_PIPELINE,
- VAL_CMD_SET_VIEWPORT,
- VAL_CMD_SET_SCISSOR,
- VAL_CMD_SET_LINE_WIDTH,
- VAL_CMD_SET_DEPTH_BIAS,
- VAL_CMD_SET_BLEND_CONSTANTS,
- VAL_CMD_SET_DEPTH_BOUNDS,
- VAL_CMD_SET_STENCIL_COMPARE_MASK,
- VAL_CMD_SET_STENCIL_WRITE_MASK,
- VAL_CMD_SET_STENCIL_REFERENCE,
- VAL_CMD_BIND_DESCRIPTOR_SETS,
- VAL_CMD_BIND_INDEX_BUFFER,
- VAL_CMD_BIND_VERTEX_BUFFERS,
- VAL_CMD_DRAW,
- VAL_CMD_DRAW_INDEXED,
- VAL_CMD_DRAW_INDIRECT,
- VAL_CMD_DRAW_INDEXED_INDIRECT,
- VAL_CMD_DISPATCH,
- VAL_CMD_DISPATCH_INDIRECT,
- VAL_CMD_COPY_BUFFER,
- VAL_CMD_COPY_IMAGE,
- VAL_CMD_BLIT_IMAGE,
- VAL_CMD_COPY_BUFFER_TO_IMAGE,
- VAL_CMD_COPY_IMAGE_TO_BUFFER,
- VAL_CMD_UPDATE_BUFFER,
- VAL_CMD_FILL_BUFFER,
- VAL_CMD_CLEAR_COLOR_IMAGE,
- VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE,
- VAL_CMD_CLEAR_ATTACHMENTS,
- VAL_CMD_RESOLVE_IMAGE,
- VAL_CMD_SET_EVENT,
- VAL_CMD_RESET_EVENT,
- VAL_CMD_WAIT_EVENTS,
- VAL_CMD_PIPELINE_BARRIER,
- VAL_CMD_BEGIN_QUERY,
- VAL_CMD_END_QUERY,
- VAL_CMD_RESET_QUERY_POOL,
- VAL_CMD_WRITE_TIMESTAMP,
- VAL_CMD_COPY_QUERY_POOL_RESULTS,
- VAL_CMD_PUSH_CONSTANTS,
- VAL_CMD_BEGIN_RENDER_PASS,
- VAL_CMD_NEXT_SUBPASS,
- VAL_CMD_END_RENDER_PASS,
- VAL_CMD_EXECUTE_COMMANDS,
-};
-
-struct val_cmd_bind_pipeline {
+enum lvp_cmds {
+ LVP_CMD_BIND_PIPELINE,
+ LVP_CMD_SET_VIEWPORT,
+ LVP_CMD_SET_SCISSOR,
+ LVP_CMD_SET_LINE_WIDTH,
+ LVP_CMD_SET_DEPTH_BIAS,
+ LVP_CMD_SET_BLEND_CONSTANTS,
+ LVP_CMD_SET_DEPTH_BOUNDS,
+ LVP_CMD_SET_STENCIL_COMPARE_MASK,
+ LVP_CMD_SET_STENCIL_WRITE_MASK,
+ LVP_CMD_SET_STENCIL_REFERENCE,
+ LVP_CMD_BIND_DESCRIPTOR_SETS,
+ LVP_CMD_BIND_INDEX_BUFFER,
+ LVP_CMD_BIND_VERTEX_BUFFERS,
+ LVP_CMD_DRAW,
+ LVP_CMD_DRAW_INDEXED,
+ LVP_CMD_DRAW_INDIRECT,
+ LVP_CMD_DRAW_INDEXED_INDIRECT,
+ LVP_CMD_DISPATCH,
+ LVP_CMD_DISPATCH_INDIRECT,
+ LVP_CMD_COPY_BUFFER,
+ LVP_CMD_COPY_IMAGE,
+ LVP_CMD_BLIT_IMAGE,
+ LVP_CMD_COPY_BUFFER_TO_IMAGE,
+ LVP_CMD_COPY_IMAGE_TO_BUFFER,
+ LVP_CMD_UPDATE_BUFFER,
+ LVP_CMD_FILL_BUFFER,
+ LVP_CMD_CLEAR_COLOR_IMAGE,
+ LVP_CMD_CLEAR_DEPTH_STENCIL_IMAGE,
+ LVP_CMD_CLEAR_ATTACHMENTS,
+ LVP_CMD_RESOLVE_IMAGE,
+ LVP_CMD_SET_EVENT,
+ LVP_CMD_RESET_EVENT,
+ LVP_CMD_WAIT_EVENTS,
+ LVP_CMD_PIPELINE_BARRIER,
+ LVP_CMD_BEGIN_QUERY,
+ LVP_CMD_END_QUERY,
+ LVP_CMD_RESET_QUERY_POOL,
+ LVP_CMD_WRITE_TIMESTAMP,
+ LVP_CMD_COPY_QUERY_POOL_RESULTS,
+ LVP_CMD_PUSH_CONSTANTS,
+ LVP_CMD_BEGIN_RENDER_PASS,
+ LVP_CMD_NEXT_SUBPASS,
+ LVP_CMD_END_RENDER_PASS,
+ LVP_CMD_EXECUTE_COMMANDS,
+};
+
+struct lvp_cmd_bind_pipeline {
VkPipelineBindPoint bind_point;
- struct val_pipeline *pipeline;
+ struct lvp_pipeline *pipeline;
};
-struct val_cmd_set_viewport {
+struct lvp_cmd_set_viewport {
uint32_t first_viewport;
uint32_t viewport_count;
VkViewport viewports[16];
};
-struct val_cmd_set_scissor {
+struct lvp_cmd_set_scissor {
uint32_t first_scissor;
uint32_t scissor_count;
VkRect2D scissors[16];
};
-struct val_cmd_set_line_width {
+struct lvp_cmd_set_line_width {
float line_width;
};
-struct val_cmd_set_depth_bias {
+struct lvp_cmd_set_depth_bias {
float constant_factor;
float clamp;
float slope_factor;
};
-struct val_cmd_set_blend_constants {
+struct lvp_cmd_set_blend_constants {
float blend_constants[4];
};
-struct val_cmd_set_depth_bounds {
+struct lvp_cmd_set_depth_bounds {
float min_depth;
float max_depth;
};
-struct val_cmd_set_stencil_vals {
+struct lvp_cmd_set_stencil_vals {
VkStencilFaceFlags face_mask;
uint32_t value;
};
-struct val_cmd_bind_descriptor_sets {
+struct lvp_cmd_bind_descriptor_sets {
VkPipelineBindPoint bind_point;
- struct val_pipeline_layout *layout;
+ struct lvp_pipeline_layout *layout;
uint32_t first;
uint32_t count;
- struct val_descriptor_set **sets;
+ struct lvp_descriptor_set **sets;
uint32_t dynamic_offset_count;
const uint32_t *dynamic_offsets;
};
-struct val_cmd_bind_index_buffer {
- const struct val_buffer *buffer;
+struct lvp_cmd_bind_index_buffer {
+ const struct lvp_buffer *buffer;
VkDeviceSize offset;
VkIndexType index_type;
};
-struct val_cmd_bind_vertex_buffers {
+struct lvp_cmd_bind_vertex_buffers {
uint32_t first;
uint32_t binding_count;
- struct val_buffer **buffers;
+ struct lvp_buffer **buffers;
const VkDeviceSize *offsets;
};
-struct val_cmd_draw {
+struct lvp_cmd_draw {
uint32_t vertex_count;
uint32_t instance_count;
uint32_t first_vertex;
uint32_t first_instance;
};
-struct val_cmd_draw_indexed {
+struct lvp_cmd_draw_indexed {
uint32_t index_count;
uint32_t instance_count;
uint32_t first_index;
@@ -726,43 +726,43 @@ struct val_cmd_draw_indexed {
uint32_t first_instance;
};
-struct val_cmd_draw_indirect {
+struct lvp_cmd_draw_indirect {
VkDeviceSize offset;
- struct val_buffer *buffer;
+ struct lvp_buffer *buffer;
uint32_t draw_count;
uint32_t stride;
};
-struct val_cmd_dispatch {
+struct lvp_cmd_dispatch {
uint32_t x;
uint32_t y;
uint32_t z;
};
-struct val_cmd_dispatch_indirect {
- const struct val_buffer *buffer;
+struct lvp_cmd_dispatch_indirect {
+ const struct lvp_buffer *buffer;
VkDeviceSize offset;
};
-struct val_cmd_copy_buffer {
- struct val_buffer *src;
- struct val_buffer *dst;
+struct lvp_cmd_copy_buffer {
+ struct lvp_buffer *src;
+ struct lvp_buffer *dst;
uint32_t region_count;
const VkBufferCopy *regions;
};
-struct val_cmd_copy_image {
- struct val_image *src;
- struct val_image *dst;
+struct lvp_cmd_copy_image {
+ struct lvp_image *src;
+ struct lvp_image *dst;
VkImageLayout src_layout;
VkImageLayout dst_layout;
uint32_t region_count;
const VkImageCopy *regions;
};
-struct val_cmd_blit_image {
- struct val_image *src;
- struct val_image *dst;
+struct lvp_cmd_blit_image {
+ struct lvp_image *src;
+ struct lvp_image *dst;
VkImageLayout src_layout;
VkImageLayout dst_layout;
uint32_t region_count;
@@ -770,77 +770,77 @@ struct val_cmd_blit_image {
VkFilter filter;
};
-struct val_cmd_copy_buffer_to_image {
- struct val_buffer *src;
- struct val_image *dst;
+struct lvp_cmd_copy_buffer_to_image {
+ struct lvp_buffer *src;
+ struct lvp_image *dst;
VkImageLayout dst_layout;
uint32_t region_count;
const VkBufferImageCopy *regions;
};
-struct val_cmd_copy_image_to_buffer {
- struct val_image *src;
- struct val_buffer *dst;
+struct lvp_cmd_copy_image_to_buffer {
+ struct lvp_image *src;
+ struct lvp_buffer *dst;
VkImageLayout src_layout;
uint32_t region_count;
const VkBufferImageCopy *regions;
};
-struct val_cmd_update_buffer {
- struct val_buffer *buffer;
+struct lvp_cmd_update_buffer {
+ struct lvp_buffer *buffer;
VkDeviceSize offset;
VkDeviceSize data_size;
char data[0];
};
-struct val_cmd_fill_buffer {
- struct val_buffer *buffer;
+struct lvp_cmd_fill_buffer {
+ struct lvp_buffer *buffer;
VkDeviceSize offset;
VkDeviceSize fill_size;
uint32_t data;
};
-struct val_cmd_clear_color_image {
- struct val_image *image;
+struct lvp_cmd_clear_color_image {
+ struct lvp_image *image;
VkImageLayout layout;
VkClearColorValue clear_val;
uint32_t range_count;
VkImageSubresourceRange *ranges;
};
-struct val_cmd_clear_ds_image {
- struct val_image *image;
+struct lvp_cmd_clear_ds_image {
+ struct lvp_image *image;
VkImageLayout layout;
VkClearDepthStencilValue clear_val;
uint32_t range_count;
VkImageSubresourceRange *ranges;
};
-struct val_cmd_clear_attachments {
+struct lvp_cmd_clear_attachments {
uint32_t attachment_count;
VkClearAttachment *attachments;
uint32_t rect_count;
VkClearRect *rects;
};
-struct val_cmd_resolve_image {
- struct val_image *src;
- struct val_image *dst;
+struct lvp_cmd_resolve_image {
+ struct lvp_image *src;
+ struct lvp_image *dst;
VkImageLayout src_layout;
VkImageLayout dst_layout;
uint32_t region_count;
VkImageResolve *regions;
};
-struct val_cmd_event_set {
- struct val_event *event;
+struct lvp_cmd_event_set {
+ struct lvp_event *event;
bool value;
bool flush;
};
-struct val_cmd_wait_events {
+struct lvp_cmd_wait_events {
uint32_t event_count;
- struct val_event **events;
+ struct lvp_event **events;
VkPipelineStageFlags src_stage_mask;
VkPipelineStageFlags dst_stage_mask;
uint32_t memory_barrier_count;
@@ -851,7 +851,7 @@ struct val_cmd_wait_events {
VkImageMemoryBarrier *image_memory_barriers;
};
-struct val_cmd_pipeline_barrier {
+struct lvp_cmd_pipeline_barrier {
VkPipelineStageFlags src_stage_mask;
VkPipelineStageFlags dst_stage_mask;
bool by_region;
@@ -863,99 +863,99 @@ struct val_cmd_pipeline_barrier {
VkImageMemoryBarrier *image_memory_barriers;
};
-struct val_cmd_query_cmd {
- struct val_query_pool *pool;
+struct lvp_cmd_query_cmd {
+ struct lvp_query_pool *pool;
uint32_t query;
uint32_t index;
bool precise;
bool flush;
};
-struct val_cmd_copy_query_pool_results {
- struct val_query_pool *pool;
+struct lvp_cmd_copy_query_pool_results {
+ struct lvp_query_pool *pool;
uint32_t first_query;
uint32_t query_count;
- struct val_buffer *dst;
+ struct lvp_buffer *dst;
VkDeviceSize dst_offset;
VkDeviceSize stride;
VkQueryResultFlags flags;
};
-struct val_cmd_push_constants {
+struct lvp_cmd_push_constants {
VkShaderStageFlags stage;
uint32_t offset;
uint32_t size;
uint32_t val[1];
};
-struct val_attachment_state {
+struct lvp_attachment_state {
VkImageAspectFlags pending_clear_aspects;
VkClearValue clear_value;
};
-struct val_cmd_begin_render_pass {
- struct val_framebuffer *framebuffer;
- struct val_render_pass *render_pass;
+struct lvp_cmd_begin_render_pass {
+ struct lvp_framebuffer *framebuffer;
+ struct lvp_render_pass *render_pass;
VkRect2D render_area;
- struct val_attachment_state *attachments;
+ struct lvp_attachment_state *attachments;
};
-struct val_cmd_next_subpass {
+struct lvp_cmd_next_subpass {
VkSubpassContents contents;
};
-struct val_cmd_execute_commands {
+struct lvp_cmd_execute_commands {
uint32_t command_buffer_count;
- struct val_cmd_buffer *cmd_buffers[0];
+ struct lvp_cmd_buffer *cmd_buffers[0];
};
-struct val_cmd_buffer_entry {
+struct lvp_cmd_buffer_entry {
struct list_head cmd_link;
uint32_t cmd_type;
union {
- struct val_cmd_bind_pipeline pipeline;
- struct val_cmd_set_viewport set_viewport;
- struct val_cmd_set_scissor set_scissor;
- struct val_cmd_set_line_width set_line_width;
- struct val_cmd_set_depth_bias set_depth_bias;
- struct val_cmd_set_blend_constants set_blend_constants;
- struct val_cmd_set_depth_bounds set_depth_bounds;
- struct val_cmd_set_stencil_vals stencil_vals;
- struct val_cmd_bind_descriptor_sets descriptor_sets;
- struct val_cmd_bind_vertex_buffers vertex_buffers;
- struct val_cmd_bind_index_buffer index_buffer;
- struct val_cmd_draw draw;
- struct val_cmd_draw_indexed draw_indexed;
- struct val_cmd_draw_indirect draw_indirect;
- struct val_cmd_dispatch dispatch;
- struct val_cmd_dispatch_indirect dispatch_indirect;
- struct val_cmd_copy_buffer copy_buffer;
- struct val_cmd_copy_image copy_image;
- struct val_cmd_blit_image blit_image;
- struct val_cmd_copy_buffer_to_image buffer_to_img;
- struct val_cmd_copy_image_to_buffer img_to_buffer;
- struct val_cmd_update_buffer update_buffer;
- struct val_cmd_fill_buffer fill_buffer;
- struct val_cmd_clear_color_image clear_color_image;
- struct val_cmd_clear_ds_image clear_ds_image;
- struct val_cmd_clear_attachments clear_attachments;
- struct val_cmd_resolve_image resolve_image;
- struct val_cmd_event_set event_set;
- struct val_cmd_wait_events wait_events;
- struct val_cmd_pipeline_barrier pipeline_barrier;
- struct val_cmd_query_cmd query;
- struct val_cmd_copy_query_pool_results copy_query_pool_results;
- struct val_cmd_push_constants push_constants;
- struct val_cmd_begin_render_pass begin_render_pass;
- struct val_cmd_next_subpass next_subpass;
- struct val_cmd_execute_commands execute_commands;
+ struct lvp_cmd_bind_pipeline pipeline;
+ struct lvp_cmd_set_viewport set_viewport;
+ struct lvp_cmd_set_scissor set_scissor;
+ struct lvp_cmd_set_line_width set_line_width;
+ struct lvp_cmd_set_depth_bias set_depth_bias;
+ struct lvp_cmd_set_blend_constants set_blend_constants;
+ struct lvp_cmd_set_depth_bounds set_depth_bounds;
+ struct lvp_cmd_set_stencil_vals stencil_vals;
+ struct lvp_cmd_bind_descriptor_sets descriptor_sets;
+ struct lvp_cmd_bind_vertex_buffers vertex_buffers;
+ struct lvp_cmd_bind_index_buffer index_buffer;
+ struct lvp_cmd_draw draw;
+ struct lvp_cmd_draw_indexed draw_indexed;
+ struct lvp_cmd_draw_indirect draw_indirect;
+ struct lvp_cmd_dispatch dispatch;
+ struct lvp_cmd_dispatch_indirect dispatch_indirect;
+ struct lvp_cmd_copy_buffer copy_buffer;
+ struct lvp_cmd_copy_image copy_image;
+ struct lvp_cmd_blit_image blit_image;
+ struct lvp_cmd_copy_buffer_to_image buffer_to_img;
+ struct lvp_cmd_copy_image_to_buffer img_to_buffer;
+ struct lvp_cmd_update_buffer update_buffer;
+ struct lvp_cmd_fill_buffer fill_buffer;
+ struct lvp_cmd_clear_color_image clear_color_image;
+ struct lvp_cmd_clear_ds_image clear_ds_image;
+ struct lvp_cmd_clear_attachments clear_attachments;
+ struct lvp_cmd_resolve_image resolve_image;
+ struct lvp_cmd_event_set event_set;
+ struct lvp_cmd_wait_events wait_events;
+ struct lvp_cmd_pipeline_barrier pipeline_barrier;
+ struct lvp_cmd_query_cmd query;
+ struct lvp_cmd_copy_query_pool_results copy_query_pool_results;
+ struct lvp_cmd_push_constants push_constants;
+ struct lvp_cmd_begin_render_pass begin_render_pass;
+ struct lvp_cmd_next_subpass next_subpass;
+ struct lvp_cmd_execute_commands execute_commands;
} u;
};
-VkResult val_execute_cmds(struct val_device *device,
- struct val_queue *queue,
- struct val_fence *fence,
- struct val_cmd_buffer *cmd_buffer);
+VkResult lvp_execute_cmds(struct lvp_device *device,
+ struct lvp_queue *queue,
+ struct lvp_fence *fence,
+ struct lvp_cmd_buffer *cmd_buffer);
enum pipe_format vk_format_to_pipe(VkFormat format);
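For reference, the command-buffer entries declared above are drained at submit time by lvp_execute_cmds(); a minimal sketch of that dispatch loop follows. The helper name and the "cmds" list field are assumptions for illustration, not code from this patch.

   /* Hypothetical sketch (assumes lvp_private.h context): walk the recorded
    * command list and switch on cmd_type to reach the matching union member.
    * LIST_FOR_EACH_ENTRY is Mesa's util/list.h iterator; the cmds field name
    * and LVP_CMD_DRAW enum value are assumed from the structures above. */
   static void replay_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer)
   {
      struct lvp_cmd_buffer_entry *cmd;

      LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->cmds, cmd_link) {
         switch (cmd->cmd_type) {
         case LVP_CMD_DRAW:
            /* cmd->u.draw carries vertex_count, instance_count, ... */
            break;
         case LVP_CMD_PIPELINE_BARRIER:
            /* lavapipe executes synchronously, so barriers need little work */
            break;
         default:
            break;
         }
      }
   }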
diff --git a/src/gallium/frontends/vallium/val_query.c b/src/gallium/frontends/lavapipe/lvp_query.c
index d0a4859f5a0..bdcd2a069d7 100644
--- a/src/gallium/frontends/vallium/val_query.c
+++ b/src/gallium/frontends/lavapipe/lvp_query.c
@@ -21,16 +21,16 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "pipe/p_context.h"
-VkResult val_CreateQueryPool(
+VkResult lvp_CreateQueryPool(
VkDevice _device,
const VkQueryPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkQueryPool* pQueryPool)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
enum pipe_query_type pipeq;
switch (pCreateInfo->queryType) {
@@ -43,7 +43,7 @@ VkResult val_CreateQueryPool(
default:
return VK_ERROR_FEATURE_NOT_PRESENT;
}
- struct val_query_pool *pool;
+ struct lvp_query_pool *pool;
uint32_t pool_size = sizeof(*pool) + pCreateInfo->queryCount * sizeof(struct pipe_query *);
pool = vk_zalloc2(&device->alloc, pAllocator,
@@ -58,17 +58,17 @@ VkResult val_CreateQueryPool(
pool->count = pCreateInfo->queryCount;
pool->base_type = pipeq;
- *pQueryPool = val_query_pool_to_handle(pool);
+ *pQueryPool = lvp_query_pool_to_handle(pool);
return VK_SUCCESS;
}
-void val_DestroyQueryPool(
+void lvp_DestroyQueryPool(
VkDevice _device,
VkQueryPool _pool,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_query_pool, pool, _pool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_query_pool, pool, _pool);
if (!pool)
return;
@@ -80,7 +80,7 @@ void val_DestroyQueryPool(
vk_free2(&device->alloc, pAllocator, pool);
}
-VkResult val_GetQueryPoolResults(
+VkResult lvp_GetQueryPoolResults(
VkDevice _device,
VkQueryPool queryPool,
uint32_t firstQuery,
@@ -90,11 +90,11 @@ VkResult val_GetQueryPoolResults(
VkDeviceSize stride,
VkQueryResultFlags flags)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- VAL_FROM_HANDLE(val_query_pool, pool, queryPool);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ LVP_FROM_HANDLE(lvp_query_pool, pool, queryPool);
VkResult vk_result = VK_SUCCESS;
- val_DeviceWaitIdle(_device);
+ lvp_DeviceWaitIdle(_device);
for (unsigned i = firstQuery; i < firstQuery + queryCount; i++) {
uint8_t *dptr = (uint8_t *)((char *)pData + (stride * (i - firstQuery)));
diff --git a/src/gallium/frontends/vallium/val_util.c b/src/gallium/frontends/lavapipe/lvp_util.c
index 9e920885410..1c473879349 100644
--- a/src/gallium/frontends/vallium/val_util.c
+++ b/src/gallium/frontends/lavapipe/lvp_util.c
@@ -21,10 +21,10 @@
* IN THE SOFTWARE.
*/
-#include "val_private.h"
+#include "lvp_private.h"
#include "vk_enum_to_str.h"
-void val_printflike(3, 4)
-__val_finishme(const char *file, int line, const char *format, ...)
+void lvp_printflike(3, 4)
+__lvp_finishme(const char *file, int line, const char *format, ...)
{
va_list ap;
char buffer[256];
@@ -37,7 +37,7 @@ __val_finishme(const char *file, int line, const char *format, ...)
}
VkResult
-__vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...)
+__vk_errorf(struct lvp_instance *instance, VkResult error, const char *file, int line, const char *format, ...)
{
va_list ap;
char buffer[256];
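__lvp_finishme() above is normally reached through a wrapper macro that captures the call site, following the pattern shared by Mesa's Vulkan drivers (anv, radv). A sketch of that wrapper; the real definition lives in lvp_private.h and the macro name here is assumed to match:

   /* Sketch of the usual call-site wrapper: __FILE__/__LINE__ are supplied
    * automatically so callers just write lvp_finishme("..."). */
   #define lvp_finishme(format, ...) \
      __lvp_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)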
diff --git a/src/gallium/frontends/vallium/val_wsi.c b/src/gallium/frontends/lavapipe/lvp_wsi.c
index aa56654b9e0..5ed131bc171 100644
--- a/src/gallium/frontends/vallium/val_wsi.c
+++ b/src/gallium/frontends/lavapipe/lvp_wsi.c
@@ -21,49 +21,49 @@
* IN THE SOFTWARE.
*/
-#include "val_wsi.h"
+#include "lvp_wsi.h"
static PFN_vkVoidFunction
-val_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
+lvp_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
- return val_lookup_entrypoint(pName);
+ return lvp_lookup_entrypoint(pName);
}
VkResult
-val_init_wsi(struct val_physical_device *physical_device)
+lvp_init_wsi(struct lvp_physical_device *physical_device)
{
return wsi_device_init(&physical_device->wsi_device,
- val_physical_device_to_handle(physical_device),
- val_wsi_proc_addr,
+ lvp_physical_device_to_handle(physical_device),
+ lvp_wsi_proc_addr,
&physical_device->instance->alloc,
-1, NULL, true);
}
void
-val_finish_wsi(struct val_physical_device *physical_device)
+lvp_finish_wsi(struct lvp_physical_device *physical_device)
{
wsi_device_finish(&physical_device->wsi_device,
&physical_device->instance->alloc);
}
-void val_DestroySurfaceKHR(
+void lvp_DestroySurfaceKHR(
VkInstance _instance,
VkSurfaceKHR _surface,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
vk_free2(&instance->alloc, pAllocator, surface);
}
-VkResult val_GetPhysicalDeviceSurfaceSupportKHR(
+VkResult lvp_GetPhysicalDeviceSurfaceSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32* pSupported)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_surface_support(&device->wsi_device,
queueFamilyIndex,
@@ -71,62 +71,62 @@ VkResult val_GetPhysicalDeviceSurfaceSupportKHR(
pSupported);
}
-VkResult val_GetPhysicalDeviceSurfaceCapabilitiesKHR(
+VkResult lvp_GetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities(&device->wsi_device,
surface,
pSurfaceCapabilities);
}
-VkResult val_GetPhysicalDeviceSurfaceCapabilities2KHR(
+VkResult lvp_GetPhysicalDeviceSurfaceCapabilities2KHR(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities2(&device->wsi_device,
pSurfaceInfo,
pSurfaceCapabilities);
}
-VkResult val_GetPhysicalDeviceSurfaceCapabilities2EXT(
+VkResult lvp_GetPhysicalDeviceSurfaceCapabilities2EXT(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_surface_capabilities2ext(&device->wsi_device,
surface,
pSurfaceCapabilities);
}
-VkResult val_GetPhysicalDeviceSurfaceFormatsKHR(
+VkResult lvp_GetPhysicalDeviceSurfaceFormatsKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_surface_formats(&device->wsi_device,
surface,
pSurfaceFormatCount,
pSurfaceFormats);
}
-VkResult val_GetPhysicalDeviceSurfacePresentModesKHR(
+VkResult lvp_GetPhysicalDeviceSurfacePresentModesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_surface_present_modes(&device->wsi_device,
surface,
@@ -134,13 +134,13 @@ VkResult val_GetPhysicalDeviceSurfacePresentModesKHR(
pPresentModes);
}
-VkResult val_CreateSwapchainKHR(
+VkResult lvp_CreateSwapchainKHR(
VkDevice _device,
const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSwapchainKHR* pSwapchain)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
const VkAllocationCallbacks *alloc;
if (pAllocator)
alloc = pAllocator;
@@ -148,18 +148,18 @@ VkResult val_CreateSwapchainKHR(
alloc = &device->alloc;
return wsi_common_create_swapchain(&device->physical_device->wsi_device,
- val_device_to_handle(device),
+ lvp_device_to_handle(device),
pCreateInfo,
alloc,
pSwapchain);
}
-void val_DestroySwapchainKHR(
+void lvp_DestroySwapchainKHR(
VkDevice _device,
VkSwapchainKHR swapchain,
const VkAllocationCallbacks* pAllocator)
{
- VAL_FROM_HANDLE(val_device, device, _device);
+ LVP_FROM_HANDLE(lvp_device, device, _device);
const VkAllocationCallbacks *alloc;
if (pAllocator)
@@ -170,7 +170,7 @@ void val_DestroySwapchainKHR(
wsi_common_destroy_swapchain(_device, swapchain, alloc);
}
-VkResult val_GetSwapchainImagesKHR(
+VkResult lvp_GetSwapchainImagesKHR(
VkDevice device,
VkSwapchainKHR swapchain,
uint32_t* pSwapchainImageCount,
@@ -181,7 +181,7 @@ VkResult val_GetSwapchainImagesKHR(
pSwapchainImages);
}
-VkResult val_AcquireNextImageKHR(
+VkResult lvp_AcquireNextImageKHR(
VkDevice device,
VkSwapchainKHR swapchain,
uint64_t timeout,
@@ -198,23 +198,23 @@ VkResult val_AcquireNextImageKHR(
.deviceMask = 0,
};
- return val_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
+ return lvp_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
}
-VkResult val_AcquireNextImage2KHR(
+VkResult lvp_AcquireNextImage2KHR(
VkDevice _device,
const VkAcquireNextImageInfoKHR* pAcquireInfo,
uint32_t* pImageIndex)
{
- VAL_FROM_HANDLE(val_device, device, _device);
- struct val_physical_device *pdevice = device->physical_device;
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+ struct lvp_physical_device *pdevice = device->physical_device;
VkResult result = wsi_common_acquire_next_image2(&pdevice->wsi_device,
_device,
pAcquireInfo,
pImageIndex);
#if 0
- VAL_FROM_HANDLE(val_fence, fence, pAcquireInfo->fence);
+ LVP_FROM_HANDLE(lvp_fence, fence, pAcquireInfo->fence);
if (fence && (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR)) {
if (fence->fence)
@@ -229,19 +229,19 @@ VkResult val_AcquireNextImage2KHR(
return result;
}
-VkResult val_QueuePresentKHR(
+VkResult lvp_QueuePresentKHR(
VkQueue _queue,
const VkPresentInfoKHR* pPresentInfo)
{
- VAL_FROM_HANDLE(val_queue, queue, _queue);
+ LVP_FROM_HANDLE(lvp_queue, queue, _queue);
return wsi_common_queue_present(&queue->device->physical_device->wsi_device,
- val_device_to_handle(queue->device),
+ lvp_device_to_handle(queue->device),
_queue, 0,
pPresentInfo);
}
-VkResult val_GetDeviceGroupPresentCapabilitiesKHR(
+VkResult lvp_GetDeviceGroupPresentCapabilitiesKHR(
VkDevice device,
VkDeviceGroupPresentCapabilitiesKHR* pCapabilities)
{
@@ -253,7 +253,7 @@ VkResult val_GetDeviceGroupPresentCapabilitiesKHR(
return VK_SUCCESS;
}
-VkResult val_GetDeviceGroupSurfacePresentModesKHR(
+VkResult lvp_GetDeviceGroupSurfacePresentModesKHR(
VkDevice device,
VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR* pModes)
@@ -263,13 +263,13 @@ VkResult val_GetDeviceGroupSurfacePresentModesKHR(
return VK_SUCCESS;
}
-VkResult val_GetPhysicalDevicePresentRectanglesKHR(
+VkResult lvp_GetPhysicalDevicePresentRectanglesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pRectCount,
VkRect2D* pRects)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_common_get_present_rectangles(&device->wsi_device,
surface,
diff --git a/src/gallium/frontends/vallium/val_wsi.h b/src/gallium/frontends/lavapipe/lvp_wsi.h
index 26fceb00b04..b3530e4c179 100644
--- a/src/gallium/frontends/vallium/val_wsi.h
+++ b/src/gallium/frontends/lavapipe/lvp_wsi.h
@@ -23,52 +23,52 @@
#pragma once
-#include "val_private.h"
+#include "lvp_private.h"
-struct val_swapchain;
+struct lvp_swapchain;
-struct val_wsi_interface {
+struct lvp_wsi_interface {
VkResult (*get_support)(VkIcdSurfaceBase *surface,
- struct val_physical_device *device,
+ struct lvp_physical_device *device,
uint32_t queueFamilyIndex,
VkBool32* pSupported);
VkResult (*get_capabilities)(VkIcdSurfaceBase *surface,
- struct val_physical_device *device,
+ struct lvp_physical_device *device,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
VkResult (*get_formats)(VkIcdSurfaceBase *surface,
- struct val_physical_device *device,
+ struct lvp_physical_device *device,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats);
VkResult (*get_present_modes)(VkIcdSurfaceBase *surface,
- struct val_physical_device *device,
+ struct lvp_physical_device *device,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes);
VkResult (*create_swapchain)(VkIcdSurfaceBase *surface,
- struct val_device *device,
+ struct lvp_device *device,
const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- struct val_swapchain **swapchain);
+ struct lvp_swapchain **swapchain);
};
-struct val_swapchain {
- struct val_device *device;
+struct lvp_swapchain {
+ struct lvp_device *device;
- VkResult (*destroy)(struct val_swapchain *swapchain,
+ VkResult (*destroy)(struct lvp_swapchain *swapchain,
const VkAllocationCallbacks *pAllocator);
- VkResult (*get_images)(struct val_swapchain *swapchain,
+ VkResult (*get_images)(struct lvp_swapchain *swapchain,
uint32_t *pCount, VkImage *pSwapchainImages);
- VkResult (*acquire_next_image)(struct val_swapchain *swap_chain,
+ VkResult (*acquire_next_image)(struct lvp_swapchain *swap_chain,
uint64_t timeout, VkSemaphore semaphore,
uint32_t *image_index);
- VkResult (*queue_present)(struct val_swapchain *swap_chain,
- struct val_queue *queue,
+ VkResult (*queue_present)(struct lvp_swapchain *swap_chain,
+ struct lvp_queue *queue,
uint32_t image_index);
};
-VAL_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_swapchain, VkSwapchainKHR)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_swapchain, VkSwapchainKHR)
-VkResult val_x11_init_wsi(struct val_instance *instance);
-void val_x11_finish_wsi(struct val_instance *instance);
-VkResult val_wl_init_wsi(struct val_instance *instance);
-void val_wl_finish_wsi(struct val_instance *instance);
+VkResult lvp_x11_init_wsi(struct lvp_instance *instance);
+void lvp_x11_finish_wsi(struct lvp_instance *instance);
+VkResult lvp_wl_init_wsi(struct lvp_instance *instance);
+void lvp_wl_finish_wsi(struct lvp_instance *instance);
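The LVP_DEFINE_NONDISP_HANDLE_CASTS macro used above follows Mesa's standard non-dispatchable-handle pattern; each invocation expands to a pair of inline casts roughly like the sketch below (an illustrative expansion, not the macro's literal text):

   /* Approximate expansion of LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_swapchain,
    * VkSwapchainKHR): non-dispatchable Vulkan handles are driver pointers
    * round-tripped through uintptr_t in both directions. */
   static inline struct lvp_swapchain *
   lvp_swapchain_from_handle(VkSwapchainKHR _handle)
   {
      return (struct lvp_swapchain *)(uintptr_t)_handle;
   }

   static inline VkSwapchainKHR
   lvp_swapchain_to_handle(struct lvp_swapchain *_obj)
   {
      return (VkSwapchainKHR)(uintptr_t)_obj;
   }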
diff --git a/src/gallium/frontends/vallium/val_wsi_wayland.c b/src/gallium/frontends/lavapipe/lvp_wsi_wayland.c
index 2fb14a6beda..13f5d8787d6 100644
--- a/src/gallium/frontends/vallium/val_wsi_wayland.c
+++ b/src/gallium/frontends/lavapipe/lvp_wsi_wayland.c
@@ -24,25 +24,25 @@
*/
#include "wsi_common_wayland.h"
-#include "val_private.h"
+#include "lvp_private.h"
-VkBool32 val_GetPhysicalDeviceWaylandPresentationSupportKHR(
+VkBool32 lvp_GetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display* display)
{
- VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
return wsi_wl_get_presentation_support(&physical_device->wsi_device, display);
}
-VkResult val_CreateWaylandSurfaceKHR(
+VkResult lvp_CreateWaylandSurfaceKHR(
VkInstance _instance,
const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
diff --git a/src/gallium/frontends/vallium/val_wsi_x11.c b/src/gallium/frontends/lavapipe/lvp_wsi_x11.c
index 26a3203f8ea..9ba99d61c59 100644
--- a/src/gallium/frontends/vallium/val_wsi_x11.c
+++ b/src/gallium/frontends/lavapipe/lvp_wsi_x11.c
@@ -25,15 +25,15 @@
#include <xcb/xcb.h>
#include "wsi_common_x11.h"
-#include "val_private.h"
+#include "lvp_private.h"
-VkBool32 val_GetPhysicalDeviceXcbPresentationSupportKHR(
+VkBool32 lvp_GetPhysicalDeviceXcbPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
xcb_connection_t* connection,
xcb_visualid_t visual_id)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_get_physical_device_xcb_presentation_support(
&device->wsi_device,
@@ -41,13 +41,13 @@ VkBool32 val_GetPhysicalDeviceXcbPresentationSupportKHR(
connection, visual_id);
}
-VkBool32 val_GetPhysicalDeviceXlibPresentationSupportKHR(
+VkBool32 lvp_GetPhysicalDeviceXlibPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
Display* dpy,
VisualID visualID)
{
- VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+ LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
return wsi_get_physical_device_xcb_presentation_support(
&device->wsi_device,
@@ -55,13 +55,13 @@ VkBool32 val_GetPhysicalDeviceXlibPresentationSupportKHR(
XGetXCBConnection(dpy), visualID);
}
-VkResult val_CreateXcbSurfaceKHR(
+VkResult lvp_CreateXcbSurfaceKHR(
VkInstance _instance,
const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
@@ -73,13 +73,13 @@ VkResult val_CreateXcbSurfaceKHR(
return wsi_create_xcb_surface(alloc, pCreateInfo, pSurface);
}
-VkResult val_CreateXlibSurfaceKHR(
+VkResult lvp_CreateXlibSurfaceKHR(
VkInstance _instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
- VAL_FROM_HANDLE(val_instance, instance, _instance);
+ LVP_FROM_HANDLE(lvp_instance, instance, _instance);
const VkAllocationCallbacks *alloc;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
diff --git a/src/gallium/frontends/lavapipe/meson.build b/src/gallium/frontends/lavapipe/meson.build
new file mode 100644
index 00000000000..972735fcff9
--- /dev/null
+++ b/src/gallium/frontends/lavapipe/meson.build
@@ -0,0 +1,66 @@
+
+lvp_entrypoints = custom_target(
+ 'lvp_entrypoints.[ch]',
+ input : ['lvp_entrypoints_gen.py', vk_api_xml],
+ output : ['lvp_entrypoints.h', 'lvp_entrypoints.c'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
+ meson.current_build_dir()
+ ],
+ depend_files : files('lvp_extensions.py'),
+)
+
+lvp_extensions_c = custom_target(
+ 'lvp_extensions.c',
+ input : ['lvp_extensions.py', vk_api_xml],
+ output : ['lvp_extensions.c', 'lvp_extensions.h'],
+ command : [
+ prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
+ '--out-h', '@OUTPUT1@'
+ ],
+)
+
+liblvp_files = files(
+ 'lvp_device.c',
+ 'lvp_cmd_buffer.c',
+ 'lvp_descriptor_set.c',
+ 'lvp_execute.c',
+ 'lvp_util.c',
+ 'lvp_image.c',
+ 'lvp_formats.c',
+ 'lvp_lower_vulkan_resource.c',
+ 'lvp_lower_vulkan_resource.h',
+ 'lvp_lower_input_attachments.c',
+ 'lvp_pass.c',
+ 'lvp_pipeline.c',
+ 'lvp_pipeline_cache.c',
+ 'lvp_query.c',
+ 'lvp_wsi.c')
+
+lvp_deps = []
+lvp_flags = []
+
+if with_platform_x11
+ lvp_deps += dep_xcb_dri3
+ lvp_flags += [
+ '-DVK_USE_PLATFORM_XCB_KHR',
+ '-DVK_USE_PLATFORM_XLIB_KHR',
+ ]
+ liblvp_files += files('lvp_wsi_x11.c')
+endif
+
+if with_platform_wayland
+ lvp_deps += dep_wayland_client
+ lvp_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
+ liblvp_files += files('lvp_wsi_wayland.c')
+endif
+
+liblavapipe_st = static_library(
+ 'lavapipe_st',
+ [liblvp_files, lvp_entrypoints, lvp_extensions_c ],
+ link_with : [ libvulkan_wsi ],
+ c_args : [ lvp_flags ],
+ gnu_symbol_visibility : 'hidden',
+ include_directories : [ inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi ],
+ dependencies : [ idep_nir, idep_mesautil, idep_vulkan_util ]
+)
diff --git a/src/gallium/frontends/vallium/meson.build b/src/gallium/frontends/vallium/meson.build
deleted file mode 100644
index f0afa899eb1..00000000000
--- a/src/gallium/frontends/vallium/meson.build
+++ /dev/null
@@ -1,66 +0,0 @@
-
-val_entrypoints = custom_target(
- 'val_entrypoints.[ch]',
- input : ['val_entrypoints_gen.py', vk_api_xml],
- output : ['val_entrypoints.h', 'val_entrypoints.c'],
- command : [
- prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
- meson.current_build_dir()
- ],
- depend_files : files('val_extensions.py'),
-)
-
-val_extensions_c = custom_target(
- 'val_extensions.c',
- input : ['val_extensions.py', vk_api_xml],
- output : ['val_extensions.c', 'val_extensions.h'],
- command : [
- prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
- '--out-h', '@OUTPUT1@'
- ],
-)
-
-libval_files = files(
- 'val_device.c',
- 'val_cmd_buffer.c',
- 'val_descriptor_set.c',
- 'val_execute.c',
- 'val_util.c',
- 'val_image.c',
- 'val_formats.c',
- 'val_lower_vulkan_resource.c',
- 'val_lower_vulkan_resource.h',
- 'val_lower_input_attachments.c',
- 'val_pass.c',
- 'val_pipeline.c',
- 'val_pipeline_cache.c',
- 'val_query.c',
- 'val_wsi.c')
-
-val_deps = []
-val_flags = []
-
-if with_platform_x11
- val_deps += dep_xcb_dri3
- val_flags += [
- '-DVK_USE_PLATFORM_XCB_KHR',
- '-DVK_USE_PLATFORM_XLIB_KHR',
- ]
- libval_files += files('val_wsi_x11.c')
-endif
-
-if with_platform_wayland
- val_deps += dep_wayland_client
- val_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
- libval_files += files('val_wsi_wayland.c')
-endif
-
-libvallium_st = static_library(
- 'vallium_st',
- [libval_files, val_entrypoints, val_extensions_c ],
- link_with : [ libvulkan_wsi ],
- c_args : [ val_flags ],
- gnu_symbol_visibility : 'hidden',
- include_directories : [ inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi ],
- dependencies : [ idep_nir, idep_mesautil, idep_vulkan_util ]
-)
diff --git a/src/gallium/meson.build b/src/gallium/meson.build
index 1681fcd0734..7d3b6c3230d 100644
--- a/src/gallium/meson.build
+++ b/src/gallium/meson.build
@@ -226,6 +226,6 @@ if with_tests
subdir('tests')
endif
if with_swrast_vk
- subdir('frontends/vallium')
- subdir('targets/vallium')
+ subdir('frontends/lavapipe')
+ subdir('targets/lavapipe')
endif
diff --git a/src/gallium/targets/vallium/val_icd.py b/src/gallium/targets/lavapipe/lvp_icd.py
index 80c7d337cc7..6084058ca2b 100644
--- a/src/gallium/targets/vallium/val_icd.py
+++ b/src/gallium/targets/lavapipe/lvp_icd.py
@@ -28,10 +28,10 @@ import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--out', help='Output json file.', required=True)
- parser.add_argument('--lib-path', help='Path to libvulkan_val.so')
+ parser.add_argument('--lib-path', help='Path to libvulkan_lvp.so')
args = parser.parse_args()
- path = 'libvulkan_val.so'
+ path = 'libvulkan_lvp.so'
if args.lib_path:
path = os.path.join(args.lib_path, path)
diff --git a/src/gallium/targets/vallium/meson.build b/src/gallium/targets/lavapipe/meson.build
index 38efd26fd0c..b34498ef929 100644
--- a/src/gallium/targets/vallium/meson.build
+++ b/src/gallium/targets/lavapipe/meson.build
@@ -1,8 +1,8 @@
-libvulkan_val = shared_library(
- 'vulkan_val',
+libvulkan_lvp = shared_library(
+ 'vulkan_lvp',
[ 'target.c' ],
include_directories : [ inc_src, inc_util, inc_include, inc_gallium, inc_gallium_aux, inc_gallium_winsys, inc_gallium_drivers ],
- link_whole : [ libvallium_st ],
+ link_whole : [ liblavapipe_st ],
link_with : [libpipe_loader_static, libmegadriver_stub, libdri, libdricommon ,libgallium, libwsw, libswdri, libws_null, libswkmsdri ],
gnu_symbol_visibility : 'hidden',
link_args : [ld_args_bsymbolic, ld_args_gc_sections],
@@ -11,16 +11,16 @@ libvulkan_val = shared_library(
name_suffix : 'so',
)
-val_icd = custom_target(
- 'val_icd',
- input : 'val_icd.py',
- output : 'val_icd.@0@.json'.format(host_machine.cpu()),
+lvp_icd = custom_target(
+ 'lvp_icd',
+ input : 'lvp_icd.py',
+ output : 'lvp_icd.@0@.json'.format(host_machine.cpu()),
command : [
prog_python, '@INPUT@',
'--lib-path', join_paths(get_option('prefix'), get_option('libdir')),
'--out', '@OUTPUT@',
],
- depend_files : files('../../frontends/vallium/val_extensions.py'),
+ depend_files : files('../../frontends/lavapipe/lvp_extensions.py'),
build_by_default : true,
install_dir : with_vulkan_icd_dir,
install : true,
diff --git a/src/gallium/targets/vallium/target.c b/src/gallium/targets/lavapipe/target.c
index 7ca11854032..7ca11854032 100644
--- a/src/gallium/targets/vallium/target.c
+++ b/src/gallium/targets/lavapipe/target.c