-rw-r--r--  src/broadcom/vulkan/meson.build            |    4
-rw-r--r--  src/broadcom/vulkan/v3dv_cmd_buffer.c      | 2193
-rw-r--r--  src/broadcom/vulkan/v3dv_image.c           |    2
-rw-r--r--  src/broadcom/vulkan/v3dv_meta_clear.c      |  389
-rw-r--r--  src/broadcom/vulkan/v3dv_meta_copy.c       | 1539
-rw-r--r--  src/broadcom/vulkan/v3dv_meta_copy.h       |   75
-rw-r--r--  src/broadcom/vulkan/v3dv_private.h         |   82
-rw-r--r--  src/broadcom/vulkan/v3dvx_cmd_buffer.c     | 2081
-rw-r--r--  src/broadcom/vulkan/v3dvx_device.c         |   55
-rw-r--r--  src/broadcom/vulkan/v3dvx_formats.c        |   61
-rw-r--r--  src/broadcom/vulkan/v3dvx_meta_clear.c     |  403
-rw-r--r--  src/broadcom/vulkan/v3dvx_meta_copy.c      | 1353
-rw-r--r--  src/broadcom/vulkan/v3dvx_private.h        |  219
-rw-r--r--  src/broadcom/vulkan/v3dvx_queue.c          |    2
14 files changed, 4395 insertions, 4063 deletions
diff --git a/src/broadcom/vulkan/meson.build b/src/broadcom/vulkan/meson.build
index 2f4063ed71a..c523fb713c2 100644
--- a/src/broadcom/vulkan/meson.build
+++ b/src/broadcom/vulkan/meson.build
@@ -54,10 +54,14 @@ libv3dv_files = files(
 )
 files_per_version = files(
+  'v3dvx_cmd_buffer.c',
   'v3dvx_device.c',
   'v3dvx_formats.c',
   'v3dvx_image.c',
+  'v3dvx_meta_clear.c',
+  'v3dvx_meta_copy.c',
+  'v3dvx_pipeline.c',
   'v3dvx_queue.c',
 )
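Each entry in files_per_version is compiled once per supported V3D hardware generation, with a different V3D_VERSION define for each build, so adding v3dvx_cmd_buffer.c, v3dvx_meta_clear.c and v3dvx_meta_copy.c here is what turns the moved code into per-version objects. The symbol prefixing that makes this possible follows the same v3dX()/V3DX() name-mangling convention as the other v3dvx_*.c files; a minimal sketch of that convention (illustrative only, not the verbatim header contents):

    /* Selected by the build system via -DV3D_VERSION=<ver> for each
     * per-version compilation of the same source file.
     */
    #if (V3D_VERSION == 33)
    #  define v3dX(x) v3d33_##x
    #elif (V3D_VERSION == 42)
    #  define v3dX(x) v3d42_##x
    #else
    #  error "Missing v3dX() prefix for this V3D version"
    #endif

    /* For example, a function defined in v3dvx_cmd_buffer.c as
     * v3dX(job_emit_binning_flush)() becomes v3d42_job_emit_binning_flush()
     * in the V3D 4.2 build.
     */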
diff --git a/src/broadcom/vulkan/v3dv_cmd_buffer.c b/src/broadcom/vulkan/v3dv_cmd_buffer.c
index e7aac250279..0e4b124cb25 100644
--- a/src/broadcom/vulkan/v3dv_cmd_buffer.c
+++ b/src/broadcom/vulkan/v3dv_cmd_buffer.c
@@ -22,8 +22,6 @@
*/
#include "v3dv_private.h"
-#include "broadcom/cle/v3dx_pack.h"
-#include "util/half_float.h"
#include "util/u_pack_color.h"
#include "vk_format_info.h"
#include "vk_util.h"
@@ -84,9 +82,6 @@ v3dv_job_add_bo_unchecked(struct v3dv_job *job, struct v3dv_bo *bo)
job->bo_handle_mask |= bo->handle_bit;
}
-static void
-cmd_buffer_emit_render_pass_rcl(struct v3dv_cmd_buffer *cmd_buffer);
-
VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateCommandPool(VkDevice _device,
const VkCommandPoolCreateInfo *pCreateInfo,
@@ -347,17 +342,6 @@ cmd_buffer_destroy(struct v3dv_cmd_buffer *cmd_buffer)
vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
}
-void
-v3dv_job_emit_binning_flush(struct v3dv_job *job)
-{
- assert(job);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(FLUSH));
- v3dv_return_if_oom(NULL, job);
-
- cl_emit(&job->bcl, FLUSH, flush);
-}
-
static bool
attachment_list_is_subset(struct v3dv_subpass_attachment *l1, uint32_t l1_count,
struct v3dv_subpass_attachment *l2, uint32_t l2_count)
@@ -524,35 +508,6 @@ job_compute_frame_tiling(struct v3dv_job *job,
return tiling;
}
-static void
-job_emit_binning_prolog(struct v3dv_job *job,
- const struct v3dv_frame_tiling *tiling,
- uint32_t layers)
-{
- /* This must go before the binning mode configuration. It is
- * required for layered framebuffers to work.
- */
- cl_emit(&job->bcl, NUMBER_OF_LAYERS, config) {
- config.number_of_layers = layers;
- }
-
- cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
- config.width_in_pixels = tiling->width;
- config.height_in_pixels = tiling->height;
- config.number_of_render_targets = MAX2(tiling->render_target_count, 1);
- config.multisample_mode_4x = tiling->msaa;
- config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
- }
-
- /* There's definitely nothing in the VCD cache we want. */
- cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);
-
- /* "Binning mode lists must have a Start Tile Binning item (6) after
- * any prefix state data before the binning list proper starts."
- */
- cl_emit(&job->bcl, START_TILE_BINNING, bin);
-}
-
void
v3dv_job_start_frame(struct v3dv_job *job,
uint32_t width,
@@ -617,7 +572,7 @@ v3dv_job_start_frame(struct v3dv_job *job,
v3dv_job_add_bo_unchecked(job, job->tile_state);
- job_emit_binning_prolog(job, tiling, layers);
+ v3dv_X(job->device, job_emit_binning_prolog)(job, tiling, layers);
job->ez_state = V3D_EZ_UNDECIDED;
job->first_ez_state = V3D_EZ_UNDECIDED;
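With the emission helpers moved to v3dvx_cmd_buffer.c, call sites like the one above no longer invoke job_emit_binning_prolog() directly; they go through v3dv_X(device, name), which selects the per-version symbol at run time from the device's hardware generation. A minimal sketch of what such a dispatch macro can look like (assuming only a v3d42_ build; the actual definition lives in v3dv_private.h and may differ):

    /* GNU-C statement expression that evaluates to a pointer to the
     * per-version implementation; unreachable() is Mesa's util/macros.h
     * helper.
     */
    #define v3dv_X(device, thing) ({                              \
       __typeof(&v3d42_##thing) v3d_X_thing;                      \
       switch ((device)->devinfo.ver) {                           \
       case 42:                                                   \
          v3d_X_thing = &v3d42_##thing;                           \
          break;                                                  \
       default:                                                   \
          unreachable("Unsupported hardware generation");         \
       }                                                          \
       v3d_X_thing;                                               \
    })

Under that assumption, v3dv_X(job->device, job_emit_binning_prolog)(job, tiling, layers) resolves to v3d42_job_emit_binning_prolog(job, tiling, layers) on V3D 4.2 hardware.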
@@ -637,19 +592,9 @@ cmd_buffer_end_render_pass_frame(struct v3dv_cmd_buffer *cmd_buffer)
* any RCL commands of its own.
*/
if (v3dv_cl_offset(&cmd_buffer->state.job->rcl) == 0)
- cmd_buffer_emit_render_pass_rcl(cmd_buffer);
-
- v3dv_job_emit_binning_flush(cmd_buffer->state.job);
-}
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_render_pass_rcl)(cmd_buffer);
-static void
-cmd_buffer_end_render_pass_secondary(struct v3dv_cmd_buffer *cmd_buffer)
-{
- assert(cmd_buffer->state.job);
- v3dv_cl_ensure_space_with_branch(&cmd_buffer->state.job->bcl,
- cl_packet_length(RETURN_FROM_SUB_LIST));
- v3dv_return_if_oom(cmd_buffer, NULL);
- cl_emit(&cmd_buffer->state.job->bcl, RETURN_FROM_SUB_LIST, ret);
+ v3dv_X(cmd_buffer->device, job_emit_binning_flush)(cmd_buffer->state.job);
}
struct v3dv_job *
@@ -736,7 +681,7 @@ v3dv_cmd_buffer_finish_job(struct v3dv_cmd_buffer *cmd_buffer)
cmd_buffer_end_render_pass_frame(cmd_buffer);
} else {
assert(job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
- cmd_buffer_end_render_pass_secondary(cmd_buffer);
+ v3dv_X(cmd_buffer->device, cmd_buffer_end_render_pass_secondary)(cmd_buffer);
}
}
@@ -1198,22 +1143,6 @@ v3dv_ResetCommandPool(VkDevice device,
}
static void
-emit_clip_window(struct v3dv_job *job, const VkRect2D *rect)
-{
- assert(job);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CLIP_WINDOW));
- v3dv_return_if_oom(NULL, job);
-
- cl_emit(&job->bcl, CLIP_WINDOW, clip) {
- clip.clip_window_left_pixel_coordinate = rect->offset.x;
- clip.clip_window_bottom_pixel_coordinate = rect->offset.y;
- clip.clip_window_width_in_pixels = rect->extent.width;
- clip.clip_window_height_in_pixels = rect->extent.height;
- }
-}
-
-static void
cmd_buffer_update_tile_alignment(struct v3dv_cmd_buffer *cmd_buffer)
{
/* Render areas and scissor/viewport are only relevant inside render passes,
@@ -1240,42 +1169,6 @@ cmd_buffer_update_tile_alignment(struct v3dv_cmd_buffer *cmd_buffer)
}
}
-void
-v3dv_get_hw_clear_color(const VkClearColorValue *color,
- uint32_t internal_type,
- uint32_t internal_size,
- uint32_t *hw_color)
-{
- union util_color uc;
- switch (internal_type) {
- case V3D_INTERNAL_TYPE_8:
- util_pack_color(color->float32, PIPE_FORMAT_R8G8B8A8_UNORM, &uc);
- memcpy(hw_color, uc.ui, internal_size);
- break;
- case V3D_INTERNAL_TYPE_8I:
- case V3D_INTERNAL_TYPE_8UI:
- hw_color[0] = ((color->uint32[0] & 0xff) |
- (color->uint32[1] & 0xff) << 8 |
- (color->uint32[2] & 0xff) << 16 |
- (color->uint32[3] & 0xff) << 24);
- break;
- case V3D_INTERNAL_TYPE_16F:
- util_pack_color(color->float32, PIPE_FORMAT_R16G16B16A16_FLOAT, &uc);
- memcpy(hw_color, uc.ui, internal_size);
- break;
- case V3D_INTERNAL_TYPE_16I:
- case V3D_INTERNAL_TYPE_16UI:
- hw_color[0] = ((color->uint32[0] & 0xffff) | color->uint32[1] << 16);
- hw_color[1] = ((color->uint32[2] & 0xffff) | color->uint32[3] << 16);
- break;
- case V3D_INTERNAL_TYPE_32F:
- case V3D_INTERNAL_TYPE_32I:
- case V3D_INTERNAL_TYPE_32UI:
- memcpy(hw_color, color->uint32, internal_size);
- break;
- }
-}
-
static void
cmd_buffer_state_set_attachment_clear_color(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t attachment_idx,
@@ -1298,8 +1191,8 @@ cmd_buffer_state_set_attachment_clear_color(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_cmd_buffer_attachment_state *attachment_state =
&cmd_buffer->state.attachments[attachment_idx];
- v3dv_get_hw_clear_color(color, internal_type, internal_size,
- &attachment_state->clear_value.color[0]);
+ v3dv_X(cmd_buffer->device, get_hw_clear_color)
+ (color, internal_type, internal_size, &attachment_state->clear_value.color[0]);
attachment_state->vk_clear_value.color = *color;
}
@@ -1449,889 +1342,6 @@ v3dv_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
v3dv_cmd_buffer_subpass_start(cmd_buffer, state->subpass_idx + 1);
}
-void
-v3dv_render_pass_setup_render_target(struct v3dv_cmd_buffer *cmd_buffer,
- int rt,
- uint32_t *rt_bpp,
- uint32_t *rt_type,
- uint32_t *rt_clamp)
-{
- const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
-
- assert(state->subpass_idx < state->pass->subpass_count);
- const struct v3dv_subpass *subpass =
- &state->pass->subpasses[state->subpass_idx];
-
- if (rt >= subpass->color_count)
- return;
-
- struct v3dv_subpass_attachment *attachment = &subpass->color_attachments[rt];
- const uint32_t attachment_idx = attachment->attachment;
- if (attachment_idx == VK_ATTACHMENT_UNUSED)
- return;
-
- const struct v3dv_framebuffer *framebuffer = state->framebuffer;
- assert(attachment_idx < framebuffer->attachment_count);
- struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
- assert(iview->aspects & VK_IMAGE_ASPECT_COLOR_BIT);
-
- *rt_bpp = iview->internal_bpp;
- *rt_type = iview->internal_type;
-
- if (vk_format_is_int(iview->vk_format))
- *rt_clamp = V3D_RENDER_TARGET_CLAMP_INT;
- else if (vk_format_is_srgb(iview->vk_format))
- *rt_clamp = V3D_RENDER_TARGET_CLAMP_NORM;
- else
- *rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
-}
-
-static void
-cmd_buffer_render_pass_emit_load(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_cl *cl,
- struct v3dv_image_view *iview,
- uint32_t layer,
- uint32_t buffer)
-{
- const struct v3dv_image *image = iview->image;
- const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
- uint32_t layer_offset = v3dv_layer_offset(image,
- iview->base_level,
- iview->first_layer + layer);
-
- cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
- load.buffer_to_load = buffer;
- load.address = v3dv_cl_address(image->mem->bo, layer_offset);
-
- load.input_image_format = iview->format->rt_type;
- load.r_b_swap = iview->swap_rb;
- load.memory_format = slice->tiling;
-
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- load.height_in_ub_or_stride =
- slice->padded_height_of_output_image_in_uif_blocks;
- } else if (slice->tiling == V3D_TILING_RASTER) {
- load.height_in_ub_or_stride = slice->stride;
- }
-
- if (image->samples > VK_SAMPLE_COUNT_1_BIT)
- load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
- else
- load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static bool
-check_needs_load(const struct v3dv_cmd_buffer_state *state,
- VkImageAspectFlags aspect,
- uint32_t att_first_subpass_idx,
- VkAttachmentLoadOp load_op)
-{
- /* We call this with image->aspects & aspect, so 0 means the aspect we are
- * testing does not exist in the image.
- */
- if (!aspect)
- return false;
-
- /* Attachment load operations apply on the first subpass that uses the
- * attachment, otherwise we always need to load.
- */
- if (state->job->first_subpass > att_first_subpass_idx)
- return true;
-
- /* If the job is continuing a subpass started in another job, we always
- * need to load.
- */
- if (state->job->is_subpass_continue)
- return true;
-
- /* If the area is not aligned to tile boundaries, we always need to load */
- if (!state->tile_aligned_render_area)
- return true;
-
- /* The attachment load operations must be LOAD */
- return load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
-}
-
-static bool
-check_needs_clear(const struct v3dv_cmd_buffer_state *state,
- VkImageAspectFlags aspect,
- uint32_t att_first_subpass_idx,
- VkAttachmentLoadOp load_op,
- bool do_clear_with_draw)
-{
- /* We call this with image->aspects & aspect, so 0 means the aspect we are
- * testing does not exist in the image.
- */
- if (!aspect)
- return false;
-
- /* If the aspect needs to be cleared with a draw call then we won't emit
- * the clear here.
- */
- if (do_clear_with_draw)
- return false;
-
- /* If this is resuming a subpass started with another job, then attachment
- * load operations don't apply.
- */
- if (state->job->is_subpass_continue)
- return false;
-
- /* If the render area is not aligned to tile boundaries we can't use the
- * TLB for a clear.
- */
- if (!state->tile_aligned_render_area)
- return false;
-
- /* If this job is running in a subpass other than the first subpass in
- * which this attachment is used then attachment load operations don't apply.
- */
- if (state->job->first_subpass != att_first_subpass_idx)
- return false;
-
- /* The attachment load operation must be CLEAR */
- return load_op == VK_ATTACHMENT_LOAD_OP_CLEAR;
-}
-
-static bool
-check_needs_store(const struct v3dv_cmd_buffer_state *state,
- VkImageAspectFlags aspect,
- uint32_t att_last_subpass_idx,
- VkAttachmentStoreOp store_op)
-{
- /* We call this with image->aspects & aspect, so 0 means the aspect we are
- * testing does not exist in the image.
- */
- if (!aspect)
- return false;
-
- /* Attachment store operations only apply on the last subpass where the
- * attachment is used, in other subpasses we always need to store.
- */
- if (state->subpass_idx < att_last_subpass_idx)
- return true;
-
- /* Attachment store operations only apply on the last job we emit on the
- * last subpass where the attachment is used, otherwise we always need to
- * store.
- */
- if (!state->job->is_subpass_finish)
- return true;
-
- /* The attachment store operation must be STORE */
- return store_op == VK_ATTACHMENT_STORE_OP_STORE;
-}
-
-static void
-cmd_buffer_render_pass_emit_loads(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_cl *cl,
- uint32_t layer)
-{
- const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- const struct v3dv_framebuffer *framebuffer = state->framebuffer;
- const struct v3dv_render_pass *pass = state->pass;
- const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
-
- for (uint32_t i = 0; i < subpass->color_count; i++) {
- uint32_t attachment_idx = subpass->color_attachments[i].attachment;
-
- if (attachment_idx == VK_ATTACHMENT_UNUSED)
- continue;
-
- const struct v3dv_render_pass_attachment *attachment =
- &state->pass->attachments[attachment_idx];
-
- /* According to the Vulkan spec:
- *
- * "The load operation for each sample in an attachment happens before
- * any recorded command which accesses the sample in the first subpass
- * where the attachment is used."
- *
- * If the load operation is CLEAR, we must only clear once on the first
- * subpass that uses the attachment (and in that case we don't LOAD).
- * After that, we always want to load so we don't lose any rendering done
- * by a previous subpass to the same attachment. We also want to load
- * if the current job is continuing subpass work started by a previous
- * job, for the same reason.
- *
- * If the render area is not aligned to tile boundaries then we have
- * tiles which are partially covered by it. In this case, we need to
- * load the tiles so we can preserve the pixels that are outside the
- * render area for any such tiles.
- */
- bool needs_load = check_needs_load(state,
- VK_IMAGE_ASPECT_COLOR_BIT,
- attachment->first_subpass,
- attachment->desc.loadOp);
- if (needs_load) {
- struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
- cmd_buffer_render_pass_emit_load(cmd_buffer, cl, iview,
- layer, RENDER_TARGET_0 + i);
- }
- }
-
- uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
- if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
- const struct v3dv_render_pass_attachment *ds_attachment =
- &state->pass->attachments[ds_attachment_idx];
-
- const VkImageAspectFlags ds_aspects =
- vk_format_aspects(ds_attachment->desc.format);
-
- const bool needs_depth_load =
- check_needs_load(state,
- ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.loadOp);
-
- const bool needs_stencil_load =
- check_needs_load(state,
- ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.stencilLoadOp);
-
- if (needs_depth_load || needs_stencil_load) {
- struct v3dv_image_view *iview =
- framebuffer->attachments[ds_attachment_idx];
- /* From the Vulkan spec:
- *
- * "When an image view of a depth/stencil image is used as a
- * depth/stencil framebuffer attachment, the aspectMask is ignored
- * and both depth and stencil image subresources are used."
- *
- * So we ignore the aspects from the subresource range of the image
- * view for the depth/stencil attachment, but we still need to restrict
- * to the aspects compatible with the render pass and the image.
- */
- const uint32_t zs_buffer =
- v3dv_zs_buffer(needs_depth_load, needs_stencil_load);
- cmd_buffer_render_pass_emit_load(cmd_buffer, cl,
- iview, layer, zs_buffer);
- }
- }
-
- cl_emit(cl, END_OF_LOADS, end);
-}
-
-static void
-cmd_buffer_render_pass_emit_store(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_cl *cl,
- uint32_t attachment_idx,
- uint32_t layer,
- uint32_t buffer,
- bool clear,
- bool is_multisample_resolve)
-{
- const struct v3dv_image_view *iview =
- cmd_buffer->state.framebuffer->attachments[attachment_idx];
- const struct v3dv_image *image = iview->image;
- const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
- uint32_t layer_offset = v3dv_layer_offset(image,
- iview->base_level,
- iview->first_layer + layer);
-
- cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = buffer;
- store.address = v3dv_cl_address(image->mem->bo, layer_offset);
- store.clear_buffer_being_stored = clear;
-
- store.output_image_format = iview->format->rt_type;
- store.r_b_swap = iview->swap_rb;
- store.memory_format = slice->tiling;
-
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- store.height_in_ub_or_stride =
- slice->padded_height_of_output_image_in_uif_blocks;
- } else if (slice->tiling == V3D_TILING_RASTER) {
- store.height_in_ub_or_stride = slice->stride;
- }
-
- if (image->samples > VK_SAMPLE_COUNT_1_BIT)
- store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
- else if (is_multisample_resolve)
- store.decimate_mode = V3D_DECIMATE_MODE_4X;
- else
- store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static void
-cmd_buffer_render_pass_emit_stores(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_cl *cl,
- uint32_t layer)
-{
- struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- const struct v3dv_subpass *subpass =
- &state->pass->subpasses[state->subpass_idx];
-
- bool has_stores = false;
- bool use_global_zs_clear = false;
- bool use_global_rt_clear = false;
-
- /* FIXME: separate stencil */
- uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
- if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
- const struct v3dv_render_pass_attachment *ds_attachment =
- &state->pass->attachments[ds_attachment_idx];
-
- assert(state->job->first_subpass >= ds_attachment->first_subpass);
- assert(state->subpass_idx >= ds_attachment->first_subpass);
- assert(state->subpass_idx <= ds_attachment->last_subpass);
-
- /* From the Vulkan spec, VkImageSubresourceRange:
- *
- * "When an image view of a depth/stencil image is used as a
- * depth/stencil framebuffer attachment, the aspectMask is ignored
- * and both depth and stencil image subresources are used."
- *
- * So we ignore the aspects from the subresource range of the image
- * view for the depth/stencil attachment, but we still need to restrict
- * to the aspects compatible with the render pass and the image.
- */
- const VkImageAspectFlags aspects =
- vk_format_aspects(ds_attachment->desc.format);
-
- /* Only clear once on the first subpass that uses the attachment */
- bool needs_depth_clear =
- check_needs_clear(state,
- aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.loadOp,
- subpass->do_depth_clear_with_draw);
-
- bool needs_stencil_clear =
- check_needs_clear(state,
- aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.stencilLoadOp,
- subpass->do_stencil_clear_with_draw);
-
- /* Skip the last store if it is not required */
- bool needs_depth_store =
- check_needs_store(state,
- aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
- ds_attachment->last_subpass,
- ds_attachment->desc.storeOp);
-
- bool needs_stencil_store =
- check_needs_store(state,
- aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
- ds_attachment->last_subpass,
- ds_attachment->desc.stencilStoreOp);
-
- /* GFXH-1689: The per-buffer store command's clear buffer bit is broken
- * for depth/stencil.
- *
- * There used to be some confusion regarding the Clear Tile Buffers
- * Z/S bit also being broken, but we confirmed with Broadcom that this
- * is not the case, it was just that some other hardware bugs (that we
- * need to work around, such as GFXH-1461) could cause this bit to behave
- * incorrectly.
- *
- * There used to be another issue where the RTs bit in the Clear Tile
- * Buffers packet also cleared Z/S, but Broadcom confirmed this is
- * fixed since V3D 4.1.
- *
- * So if we have to emit a clear of depth or stencil we don't use
- * the per-buffer store clear bit, even if we need to store the buffers,
- * instead we always have to use the Clear Tile Buffers Z/S bit.
- * If we have configured the job to do early Z/S clearing, then we
- * don't want to emit any Clear Tile Buffers command at all here.
- *
- * Note that GFXH-1689 is not reproduced in the simulator, where
- * using the clear buffer bit in depth/stencil stores works fine.
- */
- use_global_zs_clear = !state->job->early_zs_clear &&
- (needs_depth_clear || needs_stencil_clear);
- if (needs_depth_store || needs_stencil_store) {
- const uint32_t zs_buffer =
- v3dv_zs_buffer(needs_depth_store, needs_stencil_store);
- cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
- ds_attachment_idx, layer,
- zs_buffer, false, false);
- has_stores = true;
- }
- }
-
- for (uint32_t i = 0; i < subpass->color_count; i++) {
- uint32_t attachment_idx = subpass->color_attachments[i].attachment;
-
- if (attachment_idx == VK_ATTACHMENT_UNUSED)
- continue;
-
- const struct v3dv_render_pass_attachment *attachment =
- &state->pass->attachments[attachment_idx];
-
- assert(state->job->first_subpass >= attachment->first_subpass);
- assert(state->subpass_idx >= attachment->first_subpass);
- assert(state->subpass_idx <= attachment->last_subpass);
-
- /* Only clear once on the first subpass that uses the attachment */
- bool needs_clear =
- check_needs_clear(state,
- VK_IMAGE_ASPECT_COLOR_BIT,
- attachment->first_subpass,
- attachment->desc.loadOp,
- false);
-
- /* Skip the last store if it is not required */
- bool needs_store =
- check_needs_store(state,
- VK_IMAGE_ASPECT_COLOR_BIT,
- attachment->last_subpass,
- attachment->desc.storeOp);
-
- /* If we need to resolve this attachment emit that store first. Notice
- * that we must not request a tile buffer clear here in that case, since
- * that would clear the tile buffer before we get to emit the actual
- * color attachment store below, since the clear happens after the
- * store is completed.
- *
- * If the attachment doesn't support TLB resolves then we will have to
- * fall back to doing the resolve in a shader separately after this
- * job, so we will need to store the multisampled attachment even if that
- * wasn't requested by the client.
- */
- const bool needs_resolve =
- subpass->resolve_attachments &&
- subpass->resolve_attachments[i].attachment != VK_ATTACHMENT_UNUSED;
- if (needs_resolve && attachment->use_tlb_resolve) {
- const uint32_t resolve_attachment_idx =
- subpass->resolve_attachments[i].attachment;
- cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
- resolve_attachment_idx, layer,
- RENDER_TARGET_0 + i,
- false, true);
- has_stores = true;
- } else if (needs_resolve) {
- needs_store = true;
- }
-
- /* Emit the color attachment store if needed */
- if (needs_store) {
- cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
- attachment_idx, layer,
- RENDER_TARGET_0 + i,
- needs_clear && !use_global_rt_clear,
- false);
- has_stores = true;
- } else if (needs_clear) {
- use_global_rt_clear = true;
- }
- }
-
- /* We always need to emit at least one dummy store */
- if (!has_stores) {
- cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = NONE;
- }
- }
-
- /* If we have any depth/stencil clears we can't use the per-buffer clear
- * bit and instead we have to emit a single clear of all tile buffers.
- */
- if (use_global_zs_clear || use_global_rt_clear) {
- cl_emit(cl, CLEAR_TILE_BUFFERS, clear) {
- clear.clear_z_stencil_buffer = use_global_zs_clear;
- clear.clear_all_render_targets = use_global_rt_clear;
- }
- }
-}
-
-static void
-cmd_buffer_render_pass_emit_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t layer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- /* Emit the generic list in our indirect state -- the rcl will just
- * have pointers into it.
- */
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- cmd_buffer_render_pass_emit_loads(cmd_buffer, cl, layer);
-
- /* The binner starts out writing tiles assuming that the initial mode
- * is triangles, so make sure that's the case.
- */
- cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
- fmt.primitive_type = LIST_TRIANGLES;
- }
-
- /* PTB assumes that value to be 0, but hw will not set it. */
- cl_emit(cl, SET_INSTANCEID, set) {
- set.instance_id = 0;
- }
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- cmd_buffer_render_pass_emit_stores(cmd_buffer, cl, layer);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-cmd_buffer_emit_render_pass_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t layer)
-{
- const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
-
- struct v3dv_job *job = cmd_buffer->state.job;
- struct v3dv_cl *rcl = &job->rcl;
-
- /* If doing multicore binning, we would need to initialize each
- * core's tile list here.
- */
- const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
- const uint32_t tile_alloc_offset =
- 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
- cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
- list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
- }
-
- cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
- config.number_of_bin_tile_lists = 1;
- config.total_frame_width_in_tiles = tiling->draw_tiles_x;
- config.total_frame_height_in_tiles = tiling->draw_tiles_y;
-
- config.supertile_width_in_tiles = tiling->supertile_width;
- config.supertile_height_in_tiles = tiling->supertile_height;
-
- config.total_frame_width_in_supertiles =
- tiling->frame_width_in_supertiles;
- config.total_frame_height_in_supertiles =
- tiling->frame_height_in_supertiles;
- }
-
- /* Start by clearing the tile buffer. */
- cl_emit(rcl, TILE_COORDINATES, coords) {
- coords.tile_column_number = 0;
- coords.tile_row_number = 0;
- }
-
- /* Emit an initial clear of the tile buffers. This is necessary
- * for any buffers that should be cleared (since clearing
- * normally happens at the *end* of the generic tile list), but
- * it's also nice to clear everything so the first tile doesn't
- * inherit any contents from some previous frame.
- *
- * Also, implement the GFXH-1742 workaround. There's a race in
- * the HW between the RCL updating the TLB's internal type/size
- * and the spawning of the QPU instances using the TLB's current
- * internal type/size. To make sure the QPUs get the right
- * state, we need 1 dummy store in between internal type/size
- * changes on V3D 3.x, and 2 dummy stores on 4.x.
- */
- for (int i = 0; i < 2; i++) {
- if (i > 0)
- cl_emit(rcl, TILE_COORDINATES, coords);
- cl_emit(rcl, END_OF_LOADS, end);
- cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = NONE;
- }
- if (i == 0 && cmd_buffer->state.tile_aligned_render_area) {
- cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
- clear.clear_z_stencil_buffer = !job->early_zs_clear;
- clear.clear_all_render_targets = true;
- }
- }
- cl_emit(rcl, END_OF_TILE_MARKER, end);
- }
-
- cl_emit(rcl, FLUSH_VCD_CACHE, flush);
-
- cmd_buffer_render_pass_emit_per_tile_rcl(cmd_buffer, layer);
-
- uint32_t supertile_w_in_pixels =
- tiling->tile_width * tiling->supertile_width;
- uint32_t supertile_h_in_pixels =
- tiling->tile_height * tiling->supertile_height;
- const uint32_t min_x_supertile =
- state->render_area.offset.x / supertile_w_in_pixels;
- const uint32_t min_y_supertile =
- state->render_area.offset.y / supertile_h_in_pixels;
-
- uint32_t max_render_x = state->render_area.offset.x;
- if (state->render_area.extent.width > 0)
- max_render_x += state->render_area.extent.width - 1;
- uint32_t max_render_y = state->render_area.offset.y;
- if (state->render_area.extent.height > 0)
- max_render_y += state->render_area.extent.height - 1;
- const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
- const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;
-
- for (int y = min_y_supertile; y <= max_y_supertile; y++) {
- for (int x = min_x_supertile; x <= max_x_supertile; x++) {
- cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
- coords.column_number_in_supertiles = x;
- coords.row_number_in_supertiles = y;
- }
- }
- }
-}
-
-static void
-set_rcl_early_z_config(struct v3dv_job *job,
- bool *early_z_disable,
- uint32_t *early_z_test_and_update_direction)
-{
- /* If this is true then we have not emitted any draw calls in this job
- * and we don't get any benefits from early Z.
- */
- if (!job->decided_global_ez_enable) {
- assert(job->draw_count == 0);
- *early_z_disable = true;
- return;
- }
-
- switch (job->first_ez_state) {
- case V3D_EZ_UNDECIDED:
- case V3D_EZ_LT_LE:
- *early_z_disable = false;
- *early_z_test_and_update_direction = EARLY_Z_DIRECTION_LT_LE;
- break;
- case V3D_EZ_GT_GE:
- *early_z_disable = false;
- *early_z_test_and_update_direction = EARLY_Z_DIRECTION_GT_GE;
- break;
- case V3D_EZ_DISABLED:
- *early_z_disable = true;
- break;
- }
-}
-
-static void
-cmd_buffer_emit_render_pass_rcl(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- const struct v3dv_framebuffer *framebuffer = state->framebuffer;
-
- /* We can't emit the RCL until we have a framebuffer, which we may not have
- * if we are recording a secondary command buffer. In that case, we will
- * have to wait until vkCmdExecuteCommands is called from a primary command
- * buffer.
- */
- if (!framebuffer) {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
- return;
- }
-
- const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
-
- const uint32_t fb_layers = framebuffer->layers;
- v3dv_cl_ensure_space_with_branch(&job->rcl, 200 +
- MAX2(fb_layers, 1) * 256 *
- cl_packet_length(SUPERTILE_COORDINATES));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- assert(state->subpass_idx < state->pass->subpass_count);
- const struct v3dv_render_pass *pass = state->pass;
- const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
- struct v3dv_cl *rcl = &job->rcl;
-
- /* Common config must be the first TILE_RENDERING_MODE_CFG and
- * Z_STENCIL_CLEAR_VALUES must be last. The ones in between are optional
- * updates to the previous HW state.
- */
- bool do_early_zs_clear = false;
- const uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
- config.image_width_pixels = framebuffer->width;
- config.image_height_pixels = framebuffer->height;
- config.number_of_render_targets = MAX2(subpass->color_count, 1);
- config.multisample_mode_4x = tiling->msaa;
- config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
-
- if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
- const struct v3dv_image_view *iview =
- framebuffer->attachments[ds_attachment_idx];
- config.internal_depth_type = iview->internal_type;
-
- set_rcl_early_z_config(job,
- &config.early_z_disable,
- &config.early_z_test_and_update_direction);
-
- /* Early-Z/S clear can be enabled if the job is clearing and not
- * storing (or loading) depth. If a stencil aspect is also present
- * we have the same requirements for it, however, in this case we
- * can accept stencil loadOp DONT_CARE as well, so instead of
- * checking that stencil is cleared we check that it is not loaded.
- *
- * Early-Z/S clearing is independent of Early Z/S testing, so it is
- * possible to enable one but not the other so long as their
- * respective requirements are met.
- */
- struct v3dv_render_pass_attachment *ds_attachment =
- &pass->attachments[ds_attachment_idx];
-
- const VkImageAspectFlags ds_aspects =
- vk_format_aspects(ds_attachment->desc.format);
-
- bool needs_depth_clear =
- check_needs_clear(state,
- ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.loadOp,
- subpass->do_depth_clear_with_draw);
-
- bool needs_depth_store =
- check_needs_store(state,
- ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
- ds_attachment->last_subpass,
- ds_attachment->desc.storeOp);
-
- do_early_zs_clear = needs_depth_clear && !needs_depth_store;
- if (do_early_zs_clear &&
- vk_format_has_stencil(ds_attachment->desc.format)) {
- bool needs_stencil_load =
- check_needs_load(state,
- ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.stencilLoadOp);
-
- bool needs_stencil_store =
- check_needs_store(state,
- ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
- ds_attachment->last_subpass,
- ds_attachment->desc.stencilStoreOp);
-
- do_early_zs_clear = !needs_stencil_load && !needs_stencil_store;
- }
-
- config.early_depth_stencil_clear = do_early_zs_clear;
- } else {
- config.early_z_disable = true;
- }
- }
-
- /* If we enabled early Z/S clear, then we can't emit any "Clear Tile Buffers"
- * commands with the Z/S bit set, so keep track of whether we enabled this
- * in the job so we can skip these later.
- */
- job->early_zs_clear = do_early_zs_clear;
-
- for (uint32_t i = 0; i < subpass->color_count; i++) {
- uint32_t attachment_idx = subpass->color_attachments[i].attachment;
- if (attachment_idx == VK_ATTACHMENT_UNUSED)
- continue;
-
- struct v3dv_image_view *iview =
- state->framebuffer->attachments[attachment_idx];
-
- const struct v3dv_image *image = iview->image;
- const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
-
- const uint32_t *clear_color =
- &state->attachments[attachment_idx].clear_value.color[0];
-
- uint32_t clear_pad = 0;
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- int uif_block_height = v3d_utile_height(image->cpp) * 2;
-
- uint32_t implicit_padded_height =
- align(framebuffer->height, uif_block_height) / uif_block_height;
-
- if (slice->padded_height_of_output_image_in_uif_blocks -
- implicit_padded_height >= 15) {
- clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
- }
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
- clear.clear_color_low_32_bits = clear_color[0];
- clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
- clear.render_target_number = i;
- };
-
- if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
- clear.clear_color_mid_low_32_bits =
- ((clear_color[1] >> 24) | (clear_color[2] << 8));
- clear.clear_color_mid_high_24_bits =
- ((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
- clear.render_target_number = i;
- };
- }
-
- if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
- clear.uif_padded_height_in_uif_blocks = clear_pad;
- clear.clear_color_high_16_bits = clear_color[3] >> 16;
- clear.render_target_number = i;
- };
- }
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
- v3dv_render_pass_setup_render_target(cmd_buffer, 0,
- &rt.render_target_0_internal_bpp,
- &rt.render_target_0_internal_type,
- &rt.render_target_0_clamp);
- v3dv_render_pass_setup_render_target(cmd_buffer, 1,
- &rt.render_target_1_internal_bpp,
- &rt.render_target_1_internal_type,
- &rt.render_target_1_clamp);
- v3dv_render_pass_setup_render_target(cmd_buffer, 2,
- &rt.render_target_2_internal_bpp,
- &rt.render_target_2_internal_type,
- &rt.render_target_2_clamp);
- v3dv_render_pass_setup_render_target(cmd_buffer, 3,
- &rt.render_target_3_internal_bpp,
- &rt.render_target_3_internal_type,
- &rt.render_target_3_clamp);
- }
-
- /* Ends rendering mode config. */
- if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
- clear.z_clear_value =
- state->attachments[ds_attachment_idx].clear_value.z;
- clear.stencil_clear_value =
- state->attachments[ds_attachment_idx].clear_value.s;
- };
- } else {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
- clear.z_clear_value = 1.0f;
- clear.stencil_clear_value = 0;
- };
- }
-
- /* Always set initial block size before the first branch, which needs
- * to match the value from binning mode config.
- */
- cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
- init.use_auto_chained_tile_lists = true;
- init.size_of_first_block_in_chained_tile_lists =
- TILE_ALLOCATION_BLOCK_SIZE_64B;
- }
-
- for (int layer = 0; layer < MAX2(1, fb_layers); layer++)
- cmd_buffer_emit_render_pass_layer_rcl(cmd_buffer, layer);
-
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
static void
cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
{
@@ -2608,44 +1618,6 @@ v3dv_EndCommandBuffer(VkCommandBuffer commandBuffer)
}
static void
-emit_occlusion_query(struct v3dv_cmd_buffer *cmd_buffer);
-
-static void
-ensure_array_state(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t slot_size,
- uint32_t used_count,
- uint32_t *alloc_count,
- void **ptr);
-
-static void
-cmd_buffer_copy_secondary_end_query_state(struct v3dv_cmd_buffer *primary,
- struct v3dv_cmd_buffer *secondary)
-{
- struct v3dv_cmd_buffer_state *p_state = &primary->state;
- struct v3dv_cmd_buffer_state *s_state = &secondary->state;
-
- const uint32_t total_state_count =
- p_state->query.end.used_count + s_state->query.end.used_count;
- ensure_array_state(primary,
- sizeof(struct v3dv_end_query_cpu_job_info),
- total_state_count,
- &p_state->query.end.alloc_count,
- (void **) &p_state->query.end.states);
- v3dv_return_if_oom(primary, NULL);
-
- for (uint32_t i = 0; i < s_state->query.end.used_count; i++) {
- const struct v3dv_end_query_cpu_job_info *s_qstate =
- &secondary->state.query.end.states[i];
-
- struct v3dv_end_query_cpu_job_info *p_qstate =
- &p_state->query.end.states[p_state->query.end.used_count++];
-
- p_qstate->pool = s_qstate->pool;
- p_qstate->query = s_qstate->query;
- }
-}
-
-static void
clone_bo_list(struct v3dv_cmd_buffer *cmd_buffer,
struct list_head *dst,
struct list_head *src)
@@ -2673,9 +1645,9 @@ clone_bo_list(struct v3dv_cmd_buffer *cmd_buffer,
* for jobs recorded in secondary command buffers when we want to execute
* them in primaries.
*/
-static struct v3dv_job *
-job_clone_in_cmd_buffer(struct v3dv_job *job,
- struct v3dv_cmd_buffer *cmd_buffer)
+struct v3dv_job *
+v3dv_job_clone_in_cmd_buffer(struct v3dv_job *job,
+ struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_job *clone_job = vk_alloc(&job->device->vk.alloc,
sizeof(struct v3dv_job), 8,
@@ -2704,163 +1676,6 @@ job_clone_in_cmd_buffer(struct v3dv_job *job,
return clone_job;
}
-static struct v3dv_job *
-cmd_buffer_subpass_split_for_barrier(struct v3dv_cmd_buffer *cmd_buffer,
- bool is_bcl_barrier)
-{
- assert(cmd_buffer->state.subpass_idx != -1);
- v3dv_cmd_buffer_finish_job(cmd_buffer);
- struct v3dv_job *job =
- v3dv_cmd_buffer_subpass_resume(cmd_buffer,
- cmd_buffer->state.subpass_idx);
- if (!job)
- return NULL;
-
- job->serialize = true;
- job->needs_bcl_sync = is_bcl_barrier;
- return job;
-}
-
-static void
-cmd_buffer_execute_inside_pass(struct v3dv_cmd_buffer *primary,
- uint32_t cmd_buffer_count,
- const VkCommandBuffer *cmd_buffers)
-{
- assert(primary->state.job);
-
- /* Emit occlusion query state if needed so the draw calls inside our
- * secondaries update the counters.
- */
- bool has_occlusion_query =
- primary->state.dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY;
- if (has_occlusion_query)
- emit_occlusion_query(primary);
-
- /* FIXME: if our primary job tiling doesn't enable MSAA but any of the
- * pipelines used by the secondaries do, we need to re-start the primary
- * job to enable MSAA. See cmd_buffer_restart_job_for_msaa_if_needed.
- */
- bool pending_barrier = false;
- bool pending_bcl_barrier = false;
- for (uint32_t i = 0; i < cmd_buffer_count; i++) {
- V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
-
- assert(secondary->usage_flags &
- VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
-
- list_for_each_entry(struct v3dv_job, secondary_job,
- &secondary->jobs, list_link) {
- if (secondary_job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
- /* If the job is a CL, then we branch to it from the primary BCL.
- * In this case the secondary's BCL is finished with a
- * RETURN_FROM_SUB_LIST command to return back to the primary BCL
- * once we are done executing it.
- */
- assert(v3dv_cl_offset(&secondary_job->rcl) == 0);
- assert(secondary_job->bcl.bo);
-
- /* Sanity check that secondary BCL ends with RETURN_FROM_SUB_LIST */
- STATIC_ASSERT(cl_packet_length(RETURN_FROM_SUB_LIST) == 1);
- assert(v3dv_cl_offset(&secondary_job->bcl) >= 1);
- assert(*(((uint8_t *)secondary_job->bcl.next) - 1) ==
- V3D42_RETURN_FROM_SUB_LIST_opcode);
-
- /* If this secondary has any barriers (or we had any pending barrier
- * to apply), then we can't just branch to it from the primary, we
- * need to split the primary to create a new job that can consume
- * the barriers first.
- *
- * FIXME: in this case, maybe just copy the secondary BCL without
- * the RETURN_FROM_SUB_LIST into the primary job to skip the
- * branch?
- */
- struct v3dv_job *primary_job = primary->state.job;
- if (!primary_job || secondary_job->serialize || pending_barrier) {
- const bool needs_bcl_barrier =
- secondary_job->needs_bcl_sync || pending_bcl_barrier;
- primary_job =
- cmd_buffer_subpass_split_for_barrier(primary,
- needs_bcl_barrier);
- v3dv_return_if_oom(primary, NULL);
-
- /* Since we have created a new primary we need to re-emit
- * occlusion query state.
- */
- if (has_occlusion_query)
- emit_occlusion_query(primary);
- }
-
- /* Make sure our primary job has all required BO references */
- set_foreach(secondary_job->bos, entry) {
- struct v3dv_bo *bo = (struct v3dv_bo *)entry->key;
- v3dv_job_add_bo(primary_job, bo);
- }
-
- /* Emit required branch instructions. We expect each of these
- * to end with a corresponding 'return from sub list' item.
- */
- list_for_each_entry(struct v3dv_bo, bcl_bo,
- &secondary_job->bcl.bo_list, list_link) {
- v3dv_cl_ensure_space_with_branch(&primary_job->bcl,
- cl_packet_length(BRANCH_TO_SUB_LIST));
- v3dv_return_if_oom(primary, NULL);
- cl_emit(&primary_job->bcl, BRANCH_TO_SUB_LIST, branch) {
- branch.address = v3dv_cl_address(bcl_bo, 0);
- }
- }
-
- primary_job->tmu_dirty_rcl |= secondary_job->tmu_dirty_rcl;
- } else if (secondary_job->type == V3DV_JOB_TYPE_CPU_CLEAR_ATTACHMENTS) {
- if (pending_barrier) {
- cmd_buffer_subpass_split_for_barrier(primary, pending_bcl_barrier);
- v3dv_return_if_oom(primary, NULL);
- }
-
- const struct v3dv_clear_attachments_cpu_job_info *info =
- &secondary_job->cpu.clear_attachments;
- v3dv_CmdClearAttachments(v3dv_cmd_buffer_to_handle(primary),
- info->attachment_count,
- info->attachments,
- info->rect_count,
- info->rects);
- } else {
- /* This is a regular job (CPU or GPU), so just finish the current
- * primary job (if any) and then add the secondary job to the
- * primary's job list right after it.
- */
- v3dv_cmd_buffer_finish_job(primary);
- job_clone_in_cmd_buffer(secondary_job, primary);
- if (pending_barrier) {
- secondary_job->serialize = true;
- if (pending_bcl_barrier)
- secondary_job->needs_bcl_sync = true;
- }
- }
-
- pending_barrier = false;
- pending_bcl_barrier = false;
- }
-
- /* If the secondary has recorded any vkCmdEndQuery commands, we need to
- * copy this state to the primary so it is processed properly when the
- * current primary job is finished.
- */
- cmd_buffer_copy_secondary_end_query_state(primary, secondary);
-
- /* If this secondary had any pending barrier state we will need that
- * barrier state consumed with whatever comes next in the primary.
- */
- assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
- pending_barrier = secondary->state.has_barrier;
- pending_bcl_barrier = secondary->state.has_bcl_barrier;
- }
-
- if (pending_barrier) {
- primary->state.has_barrier = true;
- primary->state.has_bcl_barrier |= pending_bcl_barrier;
- }
-}
-
static void
cmd_buffer_execute_outside_pass(struct v3dv_cmd_buffer *primary,
uint32_t cmd_buffer_count,
@@ -2892,7 +1707,7 @@ cmd_buffer_execute_outside_pass(struct v3dv_cmd_buffer *primary,
/* These can only happen inside a render pass */
assert(secondary_job->type != V3DV_JOB_TYPE_CPU_CLEAR_ATTACHMENTS);
assert(secondary_job->type != V3DV_JOB_TYPE_GPU_CL_SECONDARY);
- struct v3dv_job *job = job_clone_in_cmd_buffer(secondary_job, primary);
+ struct v3dv_job *job = v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
if (!job)
return;
@@ -2928,8 +1743,8 @@ v3dv_CmdExecuteCommands(VkCommandBuffer commandBuffer,
V3DV_FROM_HANDLE(v3dv_cmd_buffer, primary, commandBuffer);
if (primary->state.pass != NULL) {
- cmd_buffer_execute_inside_pass(primary,
- commandBufferCount, pCommandBuffers);
+ v3dv_X(primary->device, cmd_buffer_execute_inside_pass)
+ (primary, commandBufferCount, pCommandBuffers);
} else {
cmd_buffer_execute_outside_pass(primary,
commandBufferCount, pCommandBuffers);
@@ -3026,129 +1841,6 @@ cmd_buffer_bind_pipeline_static_state(struct v3dv_cmd_buffer *cmd_buffer,
}
static void
-job_update_ez_state(struct v3dv_job *job,
- struct v3dv_pipeline *pipeline,
- struct v3dv_cmd_buffer *cmd_buffer)
-{
- /* If first_ez_state is V3D_EZ_DISABLED it means that we have already
- * determined that we should disable EZ completely for all draw calls in
- * this job. This will cause us to disable EZ for the entire job in the
- * Tile Rendering Mode RCL packet and when we do that we need to make sure
- * we never emit a draw call in the job with EZ enabled in the CFG_BITS
- * packet, so ez_state must also be V3D_EZ_DISABLED.
- */
- if (job->first_ez_state == V3D_EZ_DISABLED) {
- assert(job->ez_state == V3D_EZ_DISABLED);
- return;
- }
-
- /* This is part of the pre draw call handling, so we should be inside a
- * render pass.
- */
- assert(cmd_buffer->state.pass);
-
- /* If this is the first time we update EZ state for this job we first check
- * if there is anything that requires disabling it completely for the entire
- * job (based on state that is not related to the current draw call and
- * pipeline state).
- */
- if (!job->decided_global_ez_enable) {
- job->decided_global_ez_enable = true;
-
- struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- assert(state->subpass_idx < state->pass->subpass_count);
- struct v3dv_subpass *subpass = &state->pass->subpasses[state->subpass_idx];
- if (subpass->ds_attachment.attachment == VK_ATTACHMENT_UNUSED) {
- job->first_ez_state = V3D_EZ_DISABLED;
- job->ez_state = V3D_EZ_DISABLED;
- return;
- }
-
- /* GFXH-1918: the early-z buffer may load incorrect depth values
- * if the frame has odd width or height.
- *
- * So we need to disable EZ in this case.
- */
- const struct v3dv_render_pass_attachment *ds_attachment =
- &state->pass->attachments[subpass->ds_attachment.attachment];
-
- const VkImageAspectFlags ds_aspects =
- vk_format_aspects(ds_attachment->desc.format);
-
- bool needs_depth_load =
- check_needs_load(state,
- ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
- ds_attachment->first_subpass,
- ds_attachment->desc.loadOp);
-
- if (needs_depth_load) {
- struct v3dv_framebuffer *fb = state->framebuffer;
-
- if (!fb) {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
- perf_debug("Loading depth aspect in a secondary command buffer "
- "without framebuffer info disables early-z tests.\n");
- job->first_ez_state = V3D_EZ_DISABLED;
- job->ez_state = V3D_EZ_DISABLED;
- return;
- }
-
- if (((fb->width % 2) != 0 || (fb->height % 2) != 0)) {
- perf_debug("Loading depth aspect for framebuffer with odd width "
- "or height disables early-Z tests.\n");
- job->first_ez_state = V3D_EZ_DISABLED;
- job->ez_state = V3D_EZ_DISABLED;
- return;
- }
- }
- }
-
- /* Otherwise, we can decide to selectively enable or disable EZ for draw
- * calls using the CFG_BITS packet based on the bound pipeline state.
- */
-
- /* If the FS writes Z, then it may update against the chosen EZ direction */
- struct v3dv_shader_variant *fs_variant =
- pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
- if (fs_variant->prog_data.fs->writes_z) {
- job->ez_state = V3D_EZ_DISABLED;
- return;
- }
-
- switch (pipeline->ez_state) {
- case V3D_EZ_UNDECIDED:
- /* If the pipeline didn't pick a direction but didn't disable, then go
- * along with the current EZ state. This allows EZ optimization for Z
- * func == EQUAL or NEVER.
- */
- break;
-
- case V3D_EZ_LT_LE:
- case V3D_EZ_GT_GE:
- /* If the pipeline picked a direction, then it needs to match the current
- * direction if we've decided on one.
- */
- if (job->ez_state == V3D_EZ_UNDECIDED)
- job->ez_state = pipeline->ez_state;
- else if (job->ez_state != pipeline->ez_state)
- job->ez_state = V3D_EZ_DISABLED;
- break;
-
- case V3D_EZ_DISABLED:
- /* If the pipeline disables EZ because of a bad Z func or stencil
- * operation, then we can't do any more EZ in this frame.
- */
- job->ez_state = V3D_EZ_DISABLED;
- break;
- }
-
- if (job->first_ez_state == V3D_EZ_UNDECIDED &&
- job->ez_state != V3D_EZ_DISABLED) {
- job->first_ez_state = job->ez_state;
- }
-}
-
-static void
bind_graphics_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline)
{
@@ -3369,379 +2061,13 @@ emit_scissor(struct v3dv_cmd_buffer *cmd_buffer)
cmd_buffer->state.clip_window.extent.width = maxx - minx;
cmd_buffer->state.clip_window.extent.height = maxy - miny;
- emit_clip_window(cmd_buffer->state.job, &cmd_buffer->state.clip_window);
+ v3dv_X(cmd_buffer->device, job_emit_clip_window)
+ (cmd_buffer->state.job, &cmd_buffer->state.clip_window);
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_SCISSOR;
}
static void
-emit_viewport(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
- /* FIXME: right now we only support one viewport, so viewports[0] works
- * for now, but this would need to change if we allow multiple viewports.
- */
- float *vptranslate = dynamic->viewport.translate[0];
- float *vpscale = dynamic->viewport.scale[0];
-
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- const uint32_t required_cl_size =
- cl_packet_length(CLIPPER_XY_SCALING) +
- cl_packet_length(CLIPPER_Z_SCALE_AND_OFFSET) +
- cl_packet_length(CLIPPER_Z_MIN_MAX_CLIPPING_PLANES) +
- cl_packet_length(VIEWPORT_OFFSET);
- v3dv_cl_ensure_space_with_branch(&job->bcl, required_cl_size);
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
- clip.viewport_half_width_in_1_256th_of_pixel = vpscale[0] * 256.0f;
- clip.viewport_half_height_in_1_256th_of_pixel = vpscale[1] * 256.0f;
- }
-
- cl_emit(&job->bcl, CLIPPER_Z_SCALE_AND_OFFSET, clip) {
- clip.viewport_z_offset_zc_to_zs = vptranslate[2];
- clip.viewport_z_scale_zc_to_zs = vpscale[2];
- }
- cl_emit(&job->bcl, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES, clip) {
- /* Vulkan's Z NDC is [0..1], unlike OpenGL which is [-1, 1] */
- float z1 = vptranslate[2];
- float z2 = vptranslate[2] + vpscale[2];
- clip.minimum_zw = MIN2(z1, z2);
- clip.maximum_zw = MAX2(z1, z2);
- }
-
- cl_emit(&job->bcl, VIEWPORT_OFFSET, vp) {
- vp.viewport_centre_x_coordinate = vptranslate[0];
- vp.viewport_centre_y_coordinate = vptranslate[1];
- }
-
- cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEWPORT;
-}
-
-static void
-emit_stencil(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- struct v3dv_dynamic_state *dynamic_state = &cmd_buffer->state.dynamic;
-
- const uint32_t dynamic_stencil_states = V3DV_DYNAMIC_STENCIL_COMPARE_MASK |
- V3DV_DYNAMIC_STENCIL_WRITE_MASK |
- V3DV_DYNAMIC_STENCIL_REFERENCE;
-
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- 2 * cl_packet_length(STENCIL_CFG));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- bool emitted_stencil = false;
- for (uint32_t i = 0; i < 2; i++) {
- if (pipeline->emit_stencil_cfg[i]) {
- if (dynamic_state->mask & dynamic_stencil_states) {
- cl_emit_with_prepacked(&job->bcl, STENCIL_CFG,
- pipeline->stencil_cfg[i], config) {
- if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK) {
- config.stencil_test_mask =
- i == 0 ? dynamic_state->stencil_compare_mask.front :
- dynamic_state->stencil_compare_mask.back;
- }
- if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK) {
- config.stencil_write_mask =
- i == 0 ? dynamic_state->stencil_write_mask.front :
- dynamic_state->stencil_write_mask.back;
- }
- if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_REFERENCE) {
- config.stencil_ref_value =
- i == 0 ? dynamic_state->stencil_reference.front :
- dynamic_state->stencil_reference.back;
- }
- }
- } else {
- cl_emit_prepacked(&job->bcl, &pipeline->stencil_cfg[i]);
- }
-
- emitted_stencil = true;
- }
- }
-
- if (emitted_stencil) {
- const uint32_t dynamic_stencil_dirty_flags =
- V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
- V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
- V3DV_CMD_DIRTY_STENCIL_REFERENCE;
- cmd_buffer->state.dirty &= ~dynamic_stencil_dirty_flags;
- }
-}
-
-static void
-emit_depth_bias(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- assert(pipeline);
-
- if (!pipeline->depth_bias.enabled)
- return;
-
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(DEPTH_OFFSET));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
- cl_emit(&job->bcl, DEPTH_OFFSET, bias) {
- bias.depth_offset_factor = dynamic->depth_bias.slope_factor;
- bias.depth_offset_units = dynamic->depth_bias.constant_factor;
- if (pipeline->depth_bias.is_z16)
- bias.depth_offset_units *= 256.0f;
- bias.limit = dynamic->depth_bias.depth_bias_clamp;
- }
-
- cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_DEPTH_BIAS;
-}
-
-static void
-emit_line_width(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(LINE_WIDTH));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, LINE_WIDTH, line) {
- line.line_width = cmd_buffer->state.dynamic.line_width;
- }
-
- cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_LINE_WIDTH;
-}
-
-static void
-emit_sample_state(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- assert(pipeline);
-
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(SAMPLE_STATE));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, SAMPLE_STATE, state) {
- state.coverage = 1.0f;
- state.mask = pipeline->sample_mask;
- }
-}
-
-static void
-emit_blend(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- assert(pipeline);
-
- const uint32_t blend_packets_size =
- cl_packet_length(BLEND_ENABLES) +
- cl_packet_length(BLEND_CONSTANT_COLOR) +
- cl_packet_length(BLEND_CFG) * V3D_MAX_DRAW_BUFFERS +
- cl_packet_length(COLOR_WRITE_MASKS);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, blend_packets_size);
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
- if (pipeline->blend.enables) {
- cl_emit(&job->bcl, BLEND_ENABLES, enables) {
- enables.mask = pipeline->blend.enables;
- }
- }
-
- for (uint32_t i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
- if (pipeline->blend.enables & (1 << i))
- cl_emit_prepacked(&job->bcl, &pipeline->blend.cfg[i]);
- }
-
- cl_emit(&job->bcl, COLOR_WRITE_MASKS, mask) {
- mask.mask = pipeline->blend.color_write_masks;
- }
- }
-
- if (pipeline->blend.needs_color_constants &&
- cmd_buffer->state.dirty & V3DV_CMD_DIRTY_BLEND_CONSTANTS) {
- struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
- cl_emit(&job->bcl, BLEND_CONSTANT_COLOR, color) {
- color.red_f16 = _mesa_float_to_half(dynamic->blend_constants[0]);
- color.green_f16 = _mesa_float_to_half(dynamic->blend_constants[1]);
- color.blue_f16 = _mesa_float_to_half(dynamic->blend_constants[2]);
- color.alpha_f16 = _mesa_float_to_half(dynamic->blend_constants[3]);
- }
- cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_BLEND_CONSTANTS;
- }
-}
-
-static void
-emit_flat_shade_flags(struct v3dv_job *job,
- int varying_offset,
- uint32_t varyings,
- enum V3DX(Varying_Flags_Action) lower,
- enum V3DX(Varying_Flags_Action) higher)
-{
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- cl_packet_length(FLAT_SHADE_FLAGS));
- v3dv_return_if_oom(NULL, job);
-
- cl_emit(&job->bcl, FLAT_SHADE_FLAGS, flags) {
- flags.varying_offset_v0 = varying_offset;
- flags.flat_shade_flags_for_varyings_v024 = varyings;
- flags.action_for_flat_shade_flags_of_lower_numbered_varyings = lower;
- flags.action_for_flat_shade_flags_of_higher_numbered_varyings = higher;
- }
-}
-
-static void
-emit_noperspective_flags(struct v3dv_job *job,
- int varying_offset,
- uint32_t varyings,
- enum V3DX(Varying_Flags_Action) lower,
- enum V3DX(Varying_Flags_Action) higher)
-{
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- cl_packet_length(NON_PERSPECTIVE_FLAGS));
- v3dv_return_if_oom(NULL, job);
-
- cl_emit(&job->bcl, NON_PERSPECTIVE_FLAGS, flags) {
- flags.varying_offset_v0 = varying_offset;
- flags.non_perspective_flags_for_varyings_v024 = varyings;
- flags.action_for_non_perspective_flags_of_lower_numbered_varyings = lower;
- flags.action_for_non_perspective_flags_of_higher_numbered_varyings = higher;
- }
-}
-
-static void
-emit_centroid_flags(struct v3dv_job *job,
- int varying_offset,
- uint32_t varyings,
- enum V3DX(Varying_Flags_Action) lower,
- enum V3DX(Varying_Flags_Action) higher)
-{
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- cl_packet_length(CENTROID_FLAGS));
- v3dv_return_if_oom(NULL, job);
-
- cl_emit(&job->bcl, CENTROID_FLAGS, flags) {
- flags.varying_offset_v0 = varying_offset;
- flags.centroid_flags_for_varyings_v024 = varyings;
- flags.action_for_centroid_flags_of_lower_numbered_varyings = lower;
- flags.action_for_centroid_flags_of_higher_numbered_varyings = higher;
- }
-}
-
-static bool
-emit_varying_flags(struct v3dv_job *job,
- uint32_t num_flags,
- const uint32_t *flags,
- void (*flag_emit_callback)(struct v3dv_job *job,
- int varying_offset,
- uint32_t flags,
- enum V3DX(Varying_Flags_Action) lower,
- enum V3DX(Varying_Flags_Action) higher))
-{
- bool emitted_any = false;
- for (int i = 0; i < num_flags; i++) {
- if (!flags[i])
- continue;
-
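- /* The first packet we emit must zero the flags of the varyings it does
- * not cover so we start from a known state; later packets only update
- * their own range and leave previously emitted flags unchanged.
- */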
- if (emitted_any) {
- flag_emit_callback(job, i, flags[i],
- V3D_VARYING_FLAGS_ACTION_UNCHANGED,
- V3D_VARYING_FLAGS_ACTION_UNCHANGED);
- } else if (i == 0) {
- flag_emit_callback(job, i, flags[i],
- V3D_VARYING_FLAGS_ACTION_UNCHANGED,
- V3D_VARYING_FLAGS_ACTION_ZEROED);
- } else {
- flag_emit_callback(job, i, flags[i],
- V3D_VARYING_FLAGS_ACTION_ZEROED,
- V3D_VARYING_FLAGS_ACTION_ZEROED);
- }
-
- emitted_any = true;
- }
-
- return emitted_any;
-}
-
-static void
-emit_varyings_state(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
-
- struct v3d_fs_prog_data *prog_data_fs =
- pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
-
- const uint32_t num_flags =
- ARRAY_SIZE(prog_data_fs->flat_shade_flags);
- const uint32_t *flat_shade_flags = prog_data_fs->flat_shade_flags;
- const uint32_t *noperspective_flags = prog_data_fs->noperspective_flags;
- const uint32_t *centroid_flags = prog_data_fs->centroid_flags;
-
- if (!emit_varying_flags(job, num_flags, flat_shade_flags,
- emit_flat_shade_flags)) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(ZERO_ALL_FLAT_SHADE_FLAGS));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, ZERO_ALL_FLAT_SHADE_FLAGS, flags);
- }
-
- if (!emit_varying_flags(job, num_flags, noperspective_flags,
- emit_noperspective_flags)) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(ZERO_ALL_NON_PERSPECTIVE_FLAGS));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, ZERO_ALL_NON_PERSPECTIVE_FLAGS, flags);
- }
-
- if (!emit_varying_flags(job, num_flags, centroid_flags,
- emit_centroid_flags)) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(ZERO_ALL_CENTROID_FLAGS));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, ZERO_ALL_CENTROID_FLAGS, flags);
- }
-}
-
-static void
-emit_configuration_bits(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- assert(pipeline);
-
- job_update_ez_state(job, pipeline, cmd_buffer);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CFG_BITS));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit_with_prepacked(&job->bcl, CFG_BITS, pipeline->cfg_bits, config) {
- config.early_z_enable = job->ez_state != V3D_EZ_DISABLED;
- config.early_z_updates_enable = config.early_z_enable &&
- pipeline->z_updates_enable;
- }
-}
-
-static void
update_gfx_uniform_state(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t dirty_uniform_state)
{
@@ -3808,207 +2134,6 @@ update_gfx_uniform_state(struct v3dv_cmd_buffer *cmd_buffer,
}
}
-static void
-emit_gl_shader_state(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- struct v3dv_pipeline *pipeline = state->gfx.pipeline;
- assert(pipeline);
-
- struct v3d_vs_prog_data *prog_data_vs =
- pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX]->prog_data.vs;
- struct v3d_vs_prog_data *prog_data_vs_bin =
- pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN]->prog_data.vs;
- struct v3d_fs_prog_data *prog_data_fs =
- pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
-
- /* Update the cache dirty flag based on the shader progs data */
- job->tmu_dirty_rcl |= prog_data_vs_bin->base.tmu_dirty_rcl;
- job->tmu_dirty_rcl |= prog_data_vs->base.tmu_dirty_rcl;
- job->tmu_dirty_rcl |= prog_data_fs->base.tmu_dirty_rcl;
-
- /* See GFXH-930 workaround below */
- uint32_t num_elements_to_emit = MAX2(pipeline->va_count, 1);
-
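- /* The shader record and its attribute records must be laid out
- * contiguously in the indirect CL, so we allocate space for all of them
- * in one go, using the 32-byte alignment required for the shader record
- * address.
- */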
- uint32_t shader_rec_offset =
- v3dv_cl_ensure_space(&job->indirect,
- cl_packet_length(GL_SHADER_STATE_RECORD) +
- num_elements_to_emit *
- cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
- 32);
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- struct v3dv_shader_variant *vs_variant =
- pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
- struct v3dv_shader_variant *vs_bin_variant =
- pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
- struct v3dv_shader_variant *fs_variant =
- pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
- struct v3dv_bo *assembly_bo = pipeline->shared_data->assembly_bo;
-
- struct v3dv_bo *default_attribute_values =
- pipeline->default_attribute_values != NULL ?
- pipeline->default_attribute_values :
- pipeline->device->default_attribute_float;
-
- cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_RECORD,
- pipeline->shader_state_record, shader) {
-
-      /* FIXME: we are setting these values here and during the
-       * prepacking. This is because both cl_emit_with_prepacked and v3dvx_pack
-       * assert minimum values for these. It would be good to get
-       * v3dvx_pack to assert on the final value if possible.
- */
- shader.min_coord_shader_input_segments_required_in_play =
- pipeline->vpm_cfg_bin.As;
- shader.min_vertex_shader_input_segments_required_in_play =
- pipeline->vpm_cfg.As;
-
- shader.coordinate_shader_code_address =
- v3dv_cl_address(assembly_bo, vs_bin_variant->assembly_offset);
- shader.vertex_shader_code_address =
- v3dv_cl_address(assembly_bo, vs_variant->assembly_offset);
- shader.fragment_shader_code_address =
- v3dv_cl_address(assembly_bo, fs_variant->assembly_offset);
-
- shader.coordinate_shader_uniforms_address = cmd_buffer->state.uniforms.vs_bin;
- shader.vertex_shader_uniforms_address = cmd_buffer->state.uniforms.vs;
- shader.fragment_shader_uniforms_address = cmd_buffer->state.uniforms.fs;
-
- shader.address_of_default_attribute_values =
- v3dv_cl_address(default_attribute_values, 0);
- }
-
- /* Upload vertex element attributes (SHADER_STATE_ATTRIBUTE_RECORD) */
- bool cs_loaded_any = false;
- const bool cs_uses_builtins = prog_data_vs_bin->uses_iid ||
- prog_data_vs_bin->uses_biid ||
- prog_data_vs_bin->uses_vid;
- const uint32_t packet_length =
- cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
-
- uint32_t emitted_va_count = 0;
- for (uint32_t i = 0; emitted_va_count < pipeline->va_count; i++) {
- assert(i < MAX_VERTEX_ATTRIBS);
-
- if (pipeline->va[i].vk_format == VK_FORMAT_UNDEFINED)
- continue;
-
- const uint32_t binding = pipeline->va[i].binding;
-
- /* We store each vertex attribute in the array using its driver location
- * as index.
- */
- const uint32_t location = i;
-
- struct v3dv_vertex_binding *c_vb = &cmd_buffer->state.vertex_bindings[binding];
-
- cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD,
- &pipeline->vertex_attrs[i * packet_length], attr) {
-
- assert(c_vb->buffer->mem->bo);
- attr.address = v3dv_cl_address(c_vb->buffer->mem->bo,
- c_vb->buffer->mem_offset +
- pipeline->va[i].offset +
- c_vb->offset);
-
- attr.number_of_values_read_by_coordinate_shader =
- prog_data_vs_bin->vattr_sizes[location];
- attr.number_of_values_read_by_vertex_shader =
- prog_data_vs->vattr_sizes[location];
-
- /* GFXH-930: At least one attribute must be enabled and read by CS
- * and VS. If we have attributes being consumed by the VS but not
- * the CS, then set up a dummy load of the last attribute into the
- * CS's VPM inputs. (Since CS is just dead-code-elimination compared
- * to VS, we can't have CS loading but not VS).
- *
- * GFXH-1602: first attribute must be active if using builtins.
- */
- if (prog_data_vs_bin->vattr_sizes[location])
- cs_loaded_any = true;
-
- if (i == 0 && cs_uses_builtins && !cs_loaded_any) {
- attr.number_of_values_read_by_coordinate_shader = 1;
- cs_loaded_any = true;
- } else if (i == pipeline->va_count - 1 && !cs_loaded_any) {
- attr.number_of_values_read_by_coordinate_shader = 1;
- cs_loaded_any = true;
- }
-
- attr.maximum_index = 0xffffff;
- }
-
- emitted_va_count++;
- }
-
- if (pipeline->va_count == 0) {
- /* GFXH-930: At least one attribute must be enabled and read
- * by CS and VS. If we have no attributes being consumed by
- * the shader, set up a dummy to be loaded into the VPM.
- */
- cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
- /* Valid address of data whose value will be unused. */
- attr.address = v3dv_cl_address(job->indirect.bo, 0);
-
- attr.type = ATTRIBUTE_FLOAT;
- attr.stride = 0;
- attr.vec_size = 1;
-
- attr.number_of_values_read_by_coordinate_shader = 1;
- attr.number_of_values_read_by_vertex_shader = 1;
- }
- }
-
- if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- sizeof(pipeline->vcm_cache_size));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit_prepacked(&job->bcl, &pipeline->vcm_cache_size);
- }
-
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- cl_packet_length(GL_SHADER_STATE));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, GL_SHADER_STATE, state) {
- state.address = v3dv_cl_address(job->indirect.bo,
- shader_rec_offset);
- state.number_of_attribute_arrays = num_elements_to_emit;
- }
-
- cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_VERTEX_BUFFER |
- V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
- V3DV_CMD_DIRTY_PUSH_CONSTANTS);
- cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
- cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
-}
-
-static void
-emit_occlusion_query(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- v3dv_cl_ensure_space_with_branch(&job->bcl,
- cl_packet_length(OCCLUSION_QUERY_COUNTER));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
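- /* Leaving the counter address at zero disables occlusion counting, which
- * is what we want when there is no active query.
- */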
- cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
- if (cmd_buffer->state.query.active_query.bo) {
- counter.address =
- v3dv_cl_address(cmd_buffer->state.query.active_query.bo,
- cmd_buffer->state.query.active_query.offset);
- }
- }
-
- cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
-}
-
/* This stores command buffer state that we might be about to stomp for
* a meta operation.
*/
@@ -4145,86 +2270,6 @@ v3dv_cmd_buffer_meta_state_pop(struct v3dv_cmd_buffer *cmd_buffer,
state->meta.has_descriptor_state = false;
}
-/* FIXME: C&P from v3dx_draw. Refactor to common place? */
-static uint32_t
-v3d_hw_prim_type(enum pipe_prim_type prim_type)
-{
- switch (prim_type) {
- case PIPE_PRIM_POINTS:
- case PIPE_PRIM_LINES:
- case PIPE_PRIM_LINE_LOOP:
- case PIPE_PRIM_LINE_STRIP:
- case PIPE_PRIM_TRIANGLES:
- case PIPE_PRIM_TRIANGLE_STRIP:
- case PIPE_PRIM_TRIANGLE_FAN:
- return prim_type;
-
- case PIPE_PRIM_LINES_ADJACENCY:
- case PIPE_PRIM_LINE_STRIP_ADJACENCY:
- case PIPE_PRIM_TRIANGLES_ADJACENCY:
- case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
- return 8 + (prim_type - PIPE_PRIM_LINES_ADJACENCY);
-
- default:
- unreachable("Unsupported primitive type");
- }
-}
-
-struct v3dv_draw_info {
- uint32_t vertex_count;
- uint32_t instance_count;
- uint32_t first_vertex;
- uint32_t first_instance;
-};
-
-static void
-cmd_buffer_emit_draw(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_draw_info *info)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- struct v3dv_pipeline *pipeline = state->gfx.pipeline;
-
- assert(pipeline);
-
- uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
-
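- /* Non-indexed draws have no base vertex, so we only need to emit
- * BASE_VERTEX_BASE_INSTANCE when the base instance is non-zero; the first
- * vertex goes directly into the primitive packet.
- */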
- if (info->first_instance > 0) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
- base.base_instance = info->first_instance;
- base.base_vertex = 0;
- }
- }
-
- if (info->instance_count > 1) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(VERTEX_ARRAY_INSTANCED_PRIMS));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
- prim.mode = hw_prim_type;
- prim.index_of_first_vertex = info->first_vertex;
- prim.number_of_instances = info->instance_count;
- prim.instance_length = info->vertex_count;
- }
- } else {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(VERTEX_ARRAY_PRIMS));
- v3dv_return_if_oom(cmd_buffer, NULL);
- cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
- prim.mode = hw_prim_type;
- prim.length = info->vertex_count;
- prim.index_of_first_vertex = info->first_vertex;
- }
- }
-}
-
static struct v3dv_job *
cmd_buffer_pre_draw_split_job(struct v3dv_cmd_buffer *cmd_buffer)
{
@@ -4334,35 +2379,8 @@ cmd_buffer_restart_job_for_msaa_if_needed(struct v3dv_cmd_buffer *cmd_buffer)
v3dv_job_destroy(old_job);
}
-static void
-emit_index_buffer(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- /* We flag all state as dirty when we create a new job, so make sure we
- * have a valid index buffer before attempting to emit state for it.
- */
- struct v3dv_buffer *ibuffer =
- v3dv_buffer_from_handle(cmd_buffer->state.index_buffer.buffer);
- if (ibuffer) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(INDEX_BUFFER_SETUP));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- const uint32_t offset = cmd_buffer->state.index_buffer.offset;
- cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
- ib.address = v3dv_cl_address(ibuffer->mem->bo,
- ibuffer->mem_offset + offset);
- ib.size = ibuffer->mem->bo->size;
- }
- }
-
- cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_INDEX_BUFFER;
-}
-
-static void
-cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
+void
+v3dv_cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
{
assert(cmd_buffer->state.gfx.pipeline);
assert(!(cmd_buffer->state.gfx.pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
@@ -4403,12 +2421,14 @@ cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
if (dirty_uniform_state)
update_gfx_uniform_state(cmd_buffer, dirty_uniform_state);
+ struct v3dv_device *device = cmd_buffer->device;
+
if (dirty_uniform_state || (*dirty & V3DV_CMD_DIRTY_VERTEX_BUFFER))
- emit_gl_shader_state(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_gl_shader_state)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE)) {
- emit_configuration_bits(cmd_buffer);
- emit_varyings_state(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_configuration_bits)(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_varyings_state)(cmd_buffer);
}
if (*dirty & (V3DV_CMD_DIRTY_VIEWPORT | V3DV_CMD_DIRTY_SCISSOR)) {
@@ -4416,33 +2436,33 @@ cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
}
if (*dirty & V3DV_CMD_DIRTY_VIEWPORT) {
- emit_viewport(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_viewport)(cmd_buffer);
}
if (*dirty & V3DV_CMD_DIRTY_INDEX_BUFFER)
- emit_index_buffer(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_index_buffer)(cmd_buffer);
const uint32_t dynamic_stencil_dirty_flags =
V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
V3DV_CMD_DIRTY_STENCIL_REFERENCE;
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | dynamic_stencil_dirty_flags))
- emit_stencil(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_stencil)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_DEPTH_BIAS))
- emit_depth_bias(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_depth_bias)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_BLEND_CONSTANTS))
- emit_blend(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_blend)(cmd_buffer);
if (*dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY)
- emit_occlusion_query(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_occlusion_query)(cmd_buffer);
if (*dirty & V3DV_CMD_DIRTY_LINE_WIDTH)
- emit_line_width(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_line_width)(cmd_buffer);
if (*dirty & V3DV_CMD_DIRTY_PIPELINE)
- emit_sample_state(cmd_buffer);
+ v3dv_X(device, cmd_buffer_emit_sample_state)(cmd_buffer);
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_PIPELINE;
}
@@ -4451,8 +2471,8 @@ static void
cmd_buffer_draw(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_draw_info *info)
{
- cmd_buffer_emit_pre_draw(cmd_buffer);
- cmd_buffer_emit_draw(cmd_buffer, info);
+ v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw)(cmd_buffer, info);
}
VKAPI_ATTR void VKAPI_CALL
@@ -4475,63 +2495,6 @@ v3dv_CmdDraw(VkCommandBuffer commandBuffer,
cmd_buffer_draw(cmd_buffer, &info);
}
-static void
-cmd_buffer_emit_draw_indexed(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t vertexOffset,
- uint32_t firstInstance)
-{
- cmd_buffer_emit_pre_draw(cmd_buffer);
-
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
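- /* ffs() maps the index size in bytes (1, 2, 4) to the hardware index
- * type field values (0, 1, 2).
- */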
- uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
- uint32_t index_offset = firstIndex * cmd_buffer->state.index_buffer.index_size;
-
- if (vertexOffset != 0 || firstInstance != 0) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
- base.base_instance = firstInstance;
- base.base_vertex = vertexOffset;
- }
- }
-
- if (instanceCount == 1) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(INDEXED_PRIM_LIST));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
- prim.index_type = index_type;
- prim.length = indexCount;
- prim.index_offset = index_offset;
- prim.mode = hw_prim_type;
- prim.enable_primitive_restarts = pipeline->primitive_restart;
- }
- } else if (instanceCount > 1) {
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(INDEXED_INSTANCED_PRIM_LIST));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
- prim.index_type = index_type;
- prim.index_offset = index_offset;
- prim.mode = hw_prim_type;
- prim.enable_primitive_restarts = pipeline->primitive_restart;
- prim.number_of_instances = instanceCount;
- prim.instance_length = indexCount;
- }
- }
-}
-
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdDrawIndexed(VkCommandBuffer commandBuffer,
uint32_t indexCount,
@@ -4545,36 +2508,9 @@ v3dv_CmdDrawIndexed(VkCommandBuffer commandBuffer,
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer_emit_draw_indexed(cmd_buffer, indexCount, instanceCount,
- firstIndex, vertexOffset, firstInstance);
-}
-
-static void
-cmd_buffer_emit_draw_indirect(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_buffer *buffer,
- VkDeviceSize offset,
- uint32_t drawCount,
- uint32_t stride)
-{
- cmd_buffer_emit_pre_draw(cmd_buffer);
-
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
-
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
- prim.mode = hw_prim_type;
- prim.number_of_draw_indirect_array_records = drawCount;
- prim.stride_in_multiples_of_4_bytes = stride >> 2;
- prim.address = v3dv_cl_address(buffer->mem->bo,
- buffer->mem_offset + offset);
- }
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indexed)
+ (cmd_buffer, indexCount, instanceCount,
+ firstIndex, vertexOffset, firstInstance);
}
VKAPI_ATTR void VKAPI_CALL
@@ -4591,40 +2527,8 @@ v3dv_CmdDrawIndirect(VkCommandBuffer commandBuffer,
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
- cmd_buffer_emit_draw_indirect(cmd_buffer, buffer, offset, drawCount, stride);
-}
-
-static void
-cmd_buffer_emit_indexed_indirect(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_buffer *buffer,
- VkDeviceSize offset,
- uint32_t drawCount,
- uint32_t stride)
-
-{
- cmd_buffer_emit_pre_draw(cmd_buffer);
-
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
- uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
- uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
-
- v3dv_cl_ensure_space_with_branch(
- &job->bcl, cl_packet_length(INDIRECT_INDEXED_INSTANCED_PRIM_LIST));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
- prim.index_type = index_type;
- prim.mode = hw_prim_type;
- prim.enable_primitive_restarts = pipeline->primitive_restart;
- prim.number_of_draw_indirect_indexed_records = drawCount;
- prim.stride_in_multiples_of_4_bytes = stride >> 2;
- prim.address = v3dv_cl_address(buffer->mem->bo,
- buffer->mem_offset + offset);
- }
-
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indirect)
+ (cmd_buffer, buffer, offset, drawCount, stride);
}
VKAPI_ATTR void VKAPI_CALL
@@ -4641,7 +2545,8 @@ v3dv_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
- cmd_buffer_emit_indexed_indirect(cmd_buffer, buffer, offset, drawCount, stride);
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_indexed_indirect)
+ (cmd_buffer, buffer, offset, drawCount, stride);
}
VKAPI_ATTR void VKAPI_CALL
@@ -4951,12 +2856,12 @@ v3dv_cmd_buffer_reset_queries(struct v3dv_cmd_buffer *cmd_buffer,
list_addtail(&job->list_link, &cmd_buffer->jobs);
}
-static void
-ensure_array_state(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t slot_size,
- uint32_t used_count,
- uint32_t *alloc_count,
- void **ptr)
+void
+v3dv_cmd_buffer_ensure_array_state(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t slot_size,
+ uint32_t used_count,
+ uint32_t *alloc_count,
+ void **ptr)
{
if (used_count >= *alloc_count) {
const uint32_t prev_slot_count = *alloc_count;
@@ -5007,11 +2912,11 @@ v3dv_cmd_buffer_end_query(struct v3dv_cmd_buffer *cmd_buffer,
* render pass job in which they have been recorded.
*/
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- ensure_array_state(cmd_buffer,
- sizeof(struct v3dv_end_query_cpu_job_info),
- state->query.end.used_count,
- &state->query.end.alloc_count,
- (void **) &state->query.end.states);
+ v3dv_cmd_buffer_ensure_array_state(cmd_buffer,
+ sizeof(struct v3dv_end_query_cpu_job_info),
+ state->query.end.used_count,
+ &state->query.end.alloc_count,
+ (void **) &state->query.end.states);
v3dv_return_if_oom(cmd_buffer, NULL);
struct v3dv_end_query_cpu_job_info *info =
diff --git a/src/broadcom/vulkan/v3dv_image.c b/src/broadcom/vulkan/v3dv_image.c
index c3f3ca0dc9f..15d896a85f6 100644
--- a/src/broadcom/vulkan/v3dv_image.c
+++ b/src/broadcom/vulkan/v3dv_image.c
@@ -611,7 +611,7 @@ v3dv_CreateImageView(VkDevice _device,
assert(iview->format && iview->format->supported);
if (vk_format_is_depth_or_stencil(iview->vk_format)) {
- iview->internal_type = v3dv_get_internal_depth_type(iview->vk_format);
+ iview->internal_type = v3dv_X(device, get_internal_depth_type)(iview->vk_format);
} else {
v3dv_X(device, get_internal_type_bpp_for_output_format)
(iview->format->rt_type, &iview->internal_type, &iview->internal_bpp);
diff --git a/src/broadcom/vulkan/v3dv_meta_clear.c b/src/broadcom/vulkan/v3dv_meta_clear.c
index d95924f4b03..4fcb9a16ad2 100644
--- a/src/broadcom/vulkan/v3dv_meta_clear.c
+++ b/src/broadcom/vulkan/v3dv_meta_clear.c
@@ -23,7 +23,6 @@
#include "v3dv_private.h"
-#include "broadcom/cle/v3dx_pack.h"
#include "compiler/nir/nir_builder.h"
#include "vk_format_info.h"
#include "util/u_pack_color.h"
@@ -1147,389 +1146,6 @@ emit_subpass_ds_clear_rects(struct v3dv_cmd_buffer *cmd_buffer,
v3dv_cmd_buffer_meta_state_pop(cmd_buffer, dynamic_states, false);
}
-static void
-emit_tlb_clear_store(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_cl *cl,
- uint32_t attachment_idx,
- uint32_t layer,
- uint32_t buffer)
-{
- const struct v3dv_image_view *iview =
- cmd_buffer->state.framebuffer->attachments[attachment_idx];
- const struct v3dv_image *image = iview->image;
- const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
- uint32_t layer_offset = v3dv_layer_offset(image,
- iview->base_level,
- iview->first_layer + layer);
-
- cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = buffer;
- store.address = v3dv_cl_address(image->mem->bo, layer_offset);
- store.clear_buffer_being_stored = false;
-
- store.output_image_format = iview->format->rt_type;
- store.r_b_swap = iview->swap_rb;
- store.memory_format = slice->tiling;
-
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- store.height_in_ub_or_stride =
- slice->padded_height_of_output_image_in_uif_blocks;
- } else if (slice->tiling == V3D_TILING_RASTER) {
- store.height_in_ub_or_stride = slice->stride;
- }
-
- if (image->samples > VK_SAMPLE_COUNT_1_BIT)
- store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
- else
- store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static void
-emit_tlb_clear_stores(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_cl *cl,
- uint32_t attachment_count,
- const VkClearAttachment *attachments,
- uint32_t layer)
-{
- struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- const struct v3dv_subpass *subpass =
- &state->pass->subpasses[state->subpass_idx];
-
- bool has_stores = false;
- for (uint32_t i = 0; i < attachment_count; i++) {
- uint32_t attachment_idx;
- uint32_t buffer;
- if (attachments[i].aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT)) {
- attachment_idx = subpass->ds_attachment.attachment;
- buffer = v3dv_zs_buffer_from_aspect_bits(attachments[i].aspectMask);
- } else {
- uint32_t rt_idx = attachments[i].colorAttachment;
- attachment_idx = subpass->color_attachments[rt_idx].attachment;
- buffer = RENDER_TARGET_0 + rt_idx;
- }
-
- if (attachment_idx == VK_ATTACHMENT_UNUSED)
- continue;
-
- has_stores = true;
- emit_tlb_clear_store(cmd_buffer, cl, attachment_idx, layer, buffer);
- }
-
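- /* Each tile must have at least one store packet, so if no attachment
- * produced one we emit a dummy store to buffer NONE.
- */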
- if (!has_stores) {
- cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = NONE;
- }
- }
-}
-
-static void
-emit_tlb_clear_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t attachment_count,
- const VkClearAttachment *attachments,
- uint32_t layer)
-{
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- cl_emit(cl, END_OF_LOADS, end); /* Nothing to load */
-
- cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
- fmt.primitive_type = LIST_TRIANGLES;
- }
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- emit_tlb_clear_stores(cmd_buffer, cl, attachment_count, attachments, layer);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_tlb_clear_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t attachment_count,
- const VkClearAttachment *attachments,
- uint32_t layer)
-{
- const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- const struct v3dv_framebuffer *framebuffer = state->framebuffer;
-
- struct v3dv_job *job = cmd_buffer->state.job;
- struct v3dv_cl *rcl = &job->rcl;
-
- const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
-
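- /* Each tile's list starts in its own 64-byte allocation block, so
- * consecutive layers are 64 * draw_tiles_x * draw_tiles_y bytes apart in
- * the tile allocation BO.
- */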
- const uint32_t tile_alloc_offset =
- 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
- cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
- list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
- }
-
- cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
- config.number_of_bin_tile_lists = 1;
- config.total_frame_width_in_tiles = tiling->draw_tiles_x;
- config.total_frame_height_in_tiles = tiling->draw_tiles_y;
-
- config.supertile_width_in_tiles = tiling->supertile_width;
- config.supertile_height_in_tiles = tiling->supertile_height;
-
- config.total_frame_width_in_supertiles =
- tiling->frame_width_in_supertiles;
- config.total_frame_height_in_supertiles =
- tiling->frame_height_in_supertiles;
- }
-
- /* Emit the clear and also the workaround for GFXH-1742 */
- for (int i = 0; i < 2; i++) {
- cl_emit(rcl, TILE_COORDINATES, coords);
- cl_emit(rcl, END_OF_LOADS, end);
- cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = NONE;
- }
- if (i == 0) {
- cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
- clear.clear_z_stencil_buffer = true;
- clear.clear_all_render_targets = true;
- }
- }
- cl_emit(rcl, END_OF_TILE_MARKER, end);
- }
-
- cl_emit(rcl, FLUSH_VCD_CACHE, flush);
-
- emit_tlb_clear_per_tile_rcl(cmd_buffer, attachment_count, attachments, layer);
-
- uint32_t supertile_w_in_pixels =
- tiling->tile_width * tiling->supertile_width;
- uint32_t supertile_h_in_pixels =
- tiling->tile_height * tiling->supertile_height;
-
- const uint32_t max_render_x = framebuffer->width - 1;
- const uint32_t max_render_y = framebuffer->height - 1;
- const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
- const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;
-
- for (int y = 0; y <= max_y_supertile; y++) {
- for (int x = 0; x <= max_x_supertile; x++) {
- cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
- coords.column_number_in_supertiles = x;
- coords.row_number_in_supertiles = y;
- }
- }
- }
-}
-
-static void
-emit_tlb_clear_job(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t attachment_count,
- const VkClearAttachment *attachments,
- uint32_t base_layer,
- uint32_t layer_count)
-{
- const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
- const struct v3dv_framebuffer *framebuffer = state->framebuffer;
- const struct v3dv_subpass *subpass =
- &state->pass->subpasses[state->subpass_idx];
- struct v3dv_job *job = cmd_buffer->state.job;
- assert(job);
-
- /* Check how many color attachments we have and also if we have a
- * depth/stencil attachment.
- */
- uint32_t color_attachment_count = 0;
- VkClearAttachment color_attachments[4];
- const VkClearDepthStencilValue *ds_clear_value = NULL;
- uint8_t internal_depth_type = V3D_INTERNAL_TYPE_DEPTH_32F;
- for (uint32_t i = 0; i < attachment_count; i++) {
- if (attachments[i].aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT)) {
- assert(subpass->ds_attachment.attachment != VK_ATTACHMENT_UNUSED);
- ds_clear_value = &attachments[i].clearValue.depthStencil;
- struct v3dv_render_pass_attachment *att =
- &state->pass->attachments[subpass->ds_attachment.attachment];
- internal_depth_type = v3dv_get_internal_depth_type(att->desc.format);
- } else if (attachments[i].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
- color_attachments[color_attachment_count++] = attachments[i];
- }
- }
-
- uint8_t internal_bpp;
- bool msaa;
- v3dv_X(job->device, framebuffer_compute_internal_bpp_msaa)
- (framebuffer, subpass, &internal_bpp, &msaa);
-
- v3dv_job_start_frame(job,
- framebuffer->width,
- framebuffer->height,
- framebuffer->layers,
- color_attachment_count,
- internal_bpp, msaa);
-
- struct v3dv_cl *rcl = &job->rcl;
- v3dv_cl_ensure_space_with_branch(rcl, 200 +
- layer_count * 256 *
- cl_packet_length(SUPERTILE_COORDINATES));
- v3dv_return_if_oom(cmd_buffer, NULL);
-
- const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
- config.early_z_disable = true;
- config.image_width_pixels = framebuffer->width;
- config.image_height_pixels = framebuffer->height;
- config.number_of_render_targets = MAX2(color_attachment_count, 1);
- config.multisample_mode_4x = false; /* FIXME */
- config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
- config.internal_depth_type = internal_depth_type;
- }
-
- for (uint32_t i = 0; i < color_attachment_count; i++) {
- uint32_t rt_idx = color_attachments[i].colorAttachment;
- uint32_t attachment_idx = subpass->color_attachments[rt_idx].attachment;
- if (attachment_idx == VK_ATTACHMENT_UNUSED)
- continue;
-
- const struct v3dv_render_pass_attachment *attachment =
- &state->pass->attachments[attachment_idx];
-
- uint32_t internal_type, internal_bpp, internal_size;
- const struct v3dv_format *format =
- v3dv_X(cmd_buffer->device, get_format)(attachment->desc.format);
- v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_output_format)
- (format->rt_type, &internal_type, &internal_bpp);
-
- internal_size = 4 << internal_bpp;
-
- uint32_t clear_color[4] = { 0 };
- v3dv_get_hw_clear_color(&color_attachments[i].clearValue.color,
- internal_type,
- internal_size,
- clear_color);
-
- struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
- const struct v3dv_image *image = iview->image;
- const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
-
- uint32_t clear_pad = 0;
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- int uif_block_height = v3d_utile_height(image->cpp) * 2;
-
- uint32_t implicit_padded_height =
- align(framebuffer->height, uif_block_height) / uif_block_height;
-
- if (slice->padded_height_of_output_image_in_uif_blocks -
- implicit_padded_height >= 15) {
- clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
- }
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
- clear.clear_color_low_32_bits = clear_color[0];
- clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
- clear.render_target_number = i;
- };
-
- if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
- clear.clear_color_mid_low_32_bits =
- ((clear_color[1] >> 24) | (clear_color[2] << 8));
- clear.clear_color_mid_high_24_bits =
- ((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
- clear.render_target_number = i;
- };
- }
-
- if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
- clear.uif_padded_height_in_uif_blocks = clear_pad;
- clear.clear_color_high_16_bits = clear_color[3] >> 16;
- clear.render_target_number = i;
- };
- }
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
- v3dv_render_pass_setup_render_target(cmd_buffer, 0,
- &rt.render_target_0_internal_bpp,
- &rt.render_target_0_internal_type,
- &rt.render_target_0_clamp);
- v3dv_render_pass_setup_render_target(cmd_buffer, 1,
- &rt.render_target_1_internal_bpp,
- &rt.render_target_1_internal_type,
- &rt.render_target_1_clamp);
- v3dv_render_pass_setup_render_target(cmd_buffer, 2,
- &rt.render_target_2_internal_bpp,
- &rt.render_target_2_internal_type,
- &rt.render_target_2_clamp);
- v3dv_render_pass_setup_render_target(cmd_buffer, 3,
- &rt.render_target_3_internal_bpp,
- &rt.render_target_3_internal_type,
- &rt.render_target_3_clamp);
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
- clear.z_clear_value = ds_clear_value ? ds_clear_value->depth : 1.0f;
- clear.stencil_clear_value = ds_clear_value ? ds_clear_value->stencil : 0;
- };
-
- cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
- init.use_auto_chained_tile_lists = true;
- init.size_of_first_block_in_chained_tile_lists =
- TILE_ALLOCATION_BLOCK_SIZE_64B;
- }
-
- for (int layer = base_layer; layer < base_layer + layer_count; layer++) {
- emit_tlb_clear_layer_rcl(cmd_buffer,
- attachment_count,
- attachments,
- layer);
- }
-
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
-static void
-emit_tlb_clear(struct v3dv_cmd_buffer *cmd_buffer,
- uint32_t attachment_count,
- const VkClearAttachment *attachments,
- uint32_t base_layer,
- uint32_t layer_count)
-{
- struct v3dv_job *job =
- v3dv_cmd_buffer_start_job(cmd_buffer, cmd_buffer->state.subpass_idx,
- V3DV_JOB_TYPE_GPU_CL);
-
- if (!job)
- return;
-
- /* vkCmdClearAttachments runs inside a render pass */
- job->is_subpass_continue = true;
-
- emit_tlb_clear_job(cmd_buffer,
- attachment_count,
- attachments,
- base_layer, layer_count);
-
- v3dv_cmd_buffer_subpass_resume(cmd_buffer, cmd_buffer->state.subpass_idx);
-}
-
static bool
is_subrect(const VkRect2D *r0, const VkRect2D *r1)
{
@@ -1697,8 +1313,9 @@ v3dv_CmdClearAttachments(VkCommandBuffer commandBuffer,
* try to use the TLB to clear if possible.
*/
if (can_use_tlb_clear(cmd_buffer, rectCount, pRects)) {
- emit_tlb_clear(cmd_buffer, attachmentCount, pAttachments,
- pRects[0].baseArrayLayer, pRects[0].layerCount);
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_tlb_clear)
+ (cmd_buffer, attachmentCount, pAttachments,
+ pRects[0].baseArrayLayer, pRects[0].layerCount);
return;
}
diff --git a/src/broadcom/vulkan/v3dv_meta_copy.c b/src/broadcom/vulkan/v3dv_meta_copy.c
index 6528664e7e2..d5d56dc5af4 100644
--- a/src/broadcom/vulkan/v3dv_meta_copy.c
+++ b/src/broadcom/vulkan/v3dv_meta_copy.c
@@ -22,9 +22,9 @@
*/
#include "v3dv_private.h"
+#include "v3dv_meta_copy.h"
#include "compiler/nir/nir_builder.h"
-#include "broadcom/cle/v3dx_pack.h"
#include "vk_format_info.h"
#include "util/u_pack_color.h"
#include "vulkan/util/vk_common_entrypoints.h"
@@ -235,640 +235,6 @@ can_use_tlb(struct v3dv_image *image,
const VkOffset3D *offset,
VkFormat *compat_format);
-/**
- * Copy operations implemented in this file don't operate on a framebuffer
- * object provided by the user; however, since most of them use the TLB, we
- * still need some representation of the framebuffer. For the most part, the
- * job's frame tiling information is enough, but we also need additional
- * information, such as the internal type of our single render target, so we
- * use this auxiliary struct to pass that information around.
- */
-struct framebuffer_data {
- /* The internal type of the single render target */
- uint32_t internal_type;
-
- /* Supertile coverage */
- uint32_t min_x_supertile;
- uint32_t min_y_supertile;
- uint32_t max_x_supertile;
- uint32_t max_y_supertile;
-
- /* Format info */
- VkFormat vk_format;
- const struct v3dv_format *format;
- uint8_t internal_depth_type;
-};
-
-static void
-setup_framebuffer_data(struct v3dv_device *device,
- struct framebuffer_data *fb,
- VkFormat vk_format,
- uint32_t internal_type,
- const struct v3dv_frame_tiling *tiling)
-{
- fb->internal_type = internal_type;
-
- /* Supertile coverage always starts at 0,0 */
- uint32_t supertile_w_in_pixels =
- tiling->tile_width * tiling->supertile_width;
- uint32_t supertile_h_in_pixels =
- tiling->tile_height * tiling->supertile_height;
-
- fb->min_x_supertile = 0;
- fb->min_y_supertile = 0;
- fb->max_x_supertile = (tiling->width - 1) / supertile_w_in_pixels;
- fb->max_y_supertile = (tiling->height - 1) / supertile_h_in_pixels;
-
- fb->vk_format = vk_format;
- fb->format = v3dv_X(device, get_format)(vk_format);
-
- fb->internal_depth_type = V3D_INTERNAL_TYPE_DEPTH_32F;
- if (vk_format_is_depth_or_stencil(vk_format))
- fb->internal_depth_type = v3dv_get_internal_depth_type(vk_format);
-}
-
-/* This chooses a tile buffer format that is appropriate for the copy operation.
- * Typically, this is the image render target type; however, if we are copying
- * depth/stencil to/from a buffer, the hardware can't do raster loads/stores, so
- * we need to load and store to/from a tile color buffer using a compatible
- * color format.
- */
-static uint32_t
-choose_tlb_format(struct framebuffer_data *framebuffer,
- VkImageAspectFlags aspect,
- bool for_store,
- bool is_copy_to_buffer,
- bool is_copy_from_buffer)
-{
- if (is_copy_to_buffer || is_copy_from_buffer) {
- switch (framebuffer->vk_format) {
- case VK_FORMAT_D16_UNORM:
- return V3D_OUTPUT_IMAGE_FORMAT_R16UI;
- case VK_FORMAT_D32_SFLOAT:
- return V3D_OUTPUT_IMAGE_FORMAT_R32F;
- case VK_FORMAT_X8_D24_UNORM_PACK32:
- return V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
- case VK_FORMAT_D24_UNORM_S8_UINT:
- /* When storing the stencil aspect of a combined depth/stencil image
- * to a buffer, the Vulkan spec states that the output buffer must
- * have packed stencil values, so we choose an R8UI format for our
- * store outputs. For the load input we still want RGBA8UI since the
- * source image contains 4 channels (including the 3 channels
- * containing the 24-bit depth value).
- *
- * When loading the stencil aspect of a combined depth/stencil image
- * from a buffer, we read packed 8-bit stencil values from the buffer
- * that we need to put into the LSB of the 32-bit format (the R
- * channel), so we use R8UI. For the store, if we used R8UI then we
- * would write 8-bit stencil values consecutively over depth channels,
-          * so we need to use RGBA8UI. This will write each stencil value in
-          * its correct position, but will overwrite depth values (channels G,
-          * B, A) with undefined values. To fix this, we will have to restore
-          * the depth aspect from the Z tile buffer (which we should pre-load
-          * from the image before the store).
- */
- if (aspect & VK_IMAGE_ASPECT_DEPTH_BIT) {
- return V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
- } else {
- assert(aspect & VK_IMAGE_ASPECT_STENCIL_BIT);
- if (is_copy_to_buffer) {
- return for_store ? V3D_OUTPUT_IMAGE_FORMAT_R8UI :
- V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
- } else {
- assert(is_copy_from_buffer);
- return for_store ? V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI :
- V3D_OUTPUT_IMAGE_FORMAT_R8UI;
- }
- }
- default: /* Color formats */
- return framebuffer->format->rt_type;
- break;
- }
- } else {
- return framebuffer->format->rt_type;
- }
-}
-
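-/* If the first channel of the format swizzle reads from the third component
- * (PIPE_SWIZZLE_Z) this is a BGRA-style format and needs the TLB's R/B swap.
- */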
-static inline bool
-format_needs_rb_swap(struct v3dv_device *device, VkFormat format)
-{
- const uint8_t *swizzle = v3dv_get_format_swizzle(device, format);
- return swizzle[0] == PIPE_SWIZZLE_Z;
-}
-
-static void
-get_internal_type_bpp_for_image_aspects(struct v3dv_device *device,
- VkFormat vk_format,
- VkImageAspectFlags aspect_mask,
- uint32_t *internal_type,
- uint32_t *internal_bpp)
-{
- const VkImageAspectFlags ds_aspects = VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT;
-
- /* We can't store depth/stencil pixel formats to a raster format, so
-    * instead we load our depth/stencil aspects to a compatible color
- * format.
- */
- /* FIXME: pre-compute this at image creation time? */
- if (aspect_mask & ds_aspects) {
- switch (vk_format) {
- case VK_FORMAT_D16_UNORM:
- *internal_type = V3D_INTERNAL_TYPE_16UI;
- *internal_bpp = V3D_INTERNAL_BPP_64;
- break;
- case VK_FORMAT_D32_SFLOAT:
- *internal_type = V3D_INTERNAL_TYPE_32F;
- *internal_bpp = V3D_INTERNAL_BPP_128;
- break;
- case VK_FORMAT_X8_D24_UNORM_PACK32:
- case VK_FORMAT_D24_UNORM_S8_UINT:
- /* Use RGBA8 format so we can relocate the X/S bits in the appropriate
- * place to match Vulkan expectations. See the comment on the tile
- * load command for more details.
- */
- *internal_type = V3D_INTERNAL_TYPE_8UI;
- *internal_bpp = V3D_INTERNAL_BPP_32;
- break;
- default:
- assert(!"unsupported format");
- break;
- }
- } else {
- const struct v3dv_format *format = v3dv_X(device, get_format)(vk_format);
- v3dv_X(device, get_internal_type_bpp_for_output_format)
- (format->rt_type, internal_type, internal_bpp);
- }
-}
-
-struct rcl_clear_info {
- const union v3dv_clear_value *clear_value;
- struct v3dv_image *image;
- VkImageAspectFlags aspects;
- uint32_t layer;
- uint32_t level;
-};
-
-static struct v3dv_cl *
-emit_rcl_prologue(struct v3dv_job *job,
- struct framebuffer_data *fb,
- const struct rcl_clear_info *clear_info)
-{
- const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
-
- struct v3dv_cl *rcl = &job->rcl;
- v3dv_cl_ensure_space_with_branch(rcl, 200 +
- tiling->layers * 256 *
- cl_packet_length(SUPERTILE_COORDINATES));
- if (job->cmd_buffer->state.oom)
- return NULL;
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
- config.early_z_disable = true;
- config.image_width_pixels = tiling->width;
- config.image_height_pixels = tiling->height;
- config.number_of_render_targets = 1;
- config.multisample_mode_4x = tiling->msaa;
- config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
- config.internal_depth_type = fb->internal_depth_type;
- }
-
- if (clear_info && (clear_info->aspects & VK_IMAGE_ASPECT_COLOR_BIT)) {
- uint32_t clear_pad = 0;
- if (clear_info->image) {
- const struct v3dv_image *image = clear_info->image;
- const struct v3d_resource_slice *slice =
- &image->slices[clear_info->level];
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- int uif_block_height = v3d_utile_height(image->cpp) * 2;
-
- uint32_t implicit_padded_height =
- align(tiling->height, uif_block_height) / uif_block_height;
-
- if (slice->padded_height_of_output_image_in_uif_blocks -
- implicit_padded_height >= 15) {
- clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
- }
- }
- }
-
- const uint32_t *color = &clear_info->clear_value->color[0];
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
- clear.clear_color_low_32_bits = color[0];
- clear.clear_color_next_24_bits = color[1] & 0x00ffffff;
- clear.render_target_number = 0;
- };
-
- if (tiling->internal_bpp >= V3D_INTERNAL_BPP_64) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
- clear.clear_color_mid_low_32_bits =
- ((color[1] >> 24) | (color[2] << 8));
- clear.clear_color_mid_high_24_bits =
- ((color[2] >> 24) | ((color[3] & 0xffff) << 8));
- clear.render_target_number = 0;
- };
- }
-
- if (tiling->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
- clear.uif_padded_height_in_uif_blocks = clear_pad;
- clear.clear_color_high_16_bits = color[3] >> 16;
- clear.render_target_number = 0;
- };
- }
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
- rt.render_target_0_internal_bpp = tiling->internal_bpp;
- rt.render_target_0_internal_type = fb->internal_type;
- rt.render_target_0_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
- }
-
- cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
- clear.z_clear_value = clear_info ? clear_info->clear_value->z : 1.0f;
- clear.stencil_clear_value = clear_info ? clear_info->clear_value->s : 0;
- };
-
- cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
- init.use_auto_chained_tile_lists = true;
- init.size_of_first_block_in_chained_tile_lists =
- TILE_ALLOCATION_BLOCK_SIZE_64B;
- }
-
- return rcl;
-}
-
-static void
-emit_frame_setup(struct v3dv_job *job,
- uint32_t layer,
- const union v3dv_clear_value *clear_value)
-{
- v3dv_return_if_oom(NULL, job);
-
- const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
-
- struct v3dv_cl *rcl = &job->rcl;
-
- const uint32_t tile_alloc_offset =
- 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
- cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
- list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
- }
-
- cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
- config.number_of_bin_tile_lists = 1;
- config.total_frame_width_in_tiles = tiling->draw_tiles_x;
- config.total_frame_height_in_tiles = tiling->draw_tiles_y;
-
- config.supertile_width_in_tiles = tiling->supertile_width;
- config.supertile_height_in_tiles = tiling->supertile_height;
-
- config.total_frame_width_in_supertiles =
- tiling->frame_width_in_supertiles;
- config.total_frame_height_in_supertiles =
- tiling->frame_height_in_supertiles;
- }
-
- /* Implement GFXH-1742 workaround. Also, if we are clearing we have to do
- * it here.
- */
- for (int i = 0; i < 2; i++) {
- cl_emit(rcl, TILE_COORDINATES, coords);
- cl_emit(rcl, END_OF_LOADS, end);
- cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = NONE;
- }
- if (clear_value && i == 0) {
- cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
- clear.clear_z_stencil_buffer = true;
- clear.clear_all_render_targets = true;
- }
- }
- cl_emit(rcl, END_OF_TILE_MARKER, end);
- }
-
- cl_emit(rcl, FLUSH_VCD_CACHE, flush);
-}
-
-static void
-emit_supertile_coordinates(struct v3dv_job *job,
- struct framebuffer_data *framebuffer)
-{
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl *rcl = &job->rcl;
-
- const uint32_t min_y = framebuffer->min_y_supertile;
- const uint32_t max_y = framebuffer->max_y_supertile;
- const uint32_t min_x = framebuffer->min_x_supertile;
- const uint32_t max_x = framebuffer->max_x_supertile;
-
- for (int y = min_y; y <= max_y; y++) {
- for (int x = min_x; x <= max_x; x++) {
- cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
- coords.column_number_in_supertiles = x;
- coords.row_number_in_supertiles = y;
- }
- }
- }
-}
-
-static void
-emit_linear_load(struct v3dv_cl *cl,
- uint32_t buffer,
- struct v3dv_bo *bo,
- uint32_t offset,
- uint32_t stride,
- uint32_t format)
-{
- cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
- load.buffer_to_load = buffer;
- load.address = v3dv_cl_address(bo, offset);
- load.input_image_format = format;
- load.memory_format = V3D_TILING_RASTER;
- load.height_in_ub_or_stride = stride;
- load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static void
-emit_linear_store(struct v3dv_cl *cl,
- uint32_t buffer,
- struct v3dv_bo *bo,
- uint32_t offset,
- uint32_t stride,
- bool msaa,
- uint32_t format)
-{
- cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = RENDER_TARGET_0;
- store.address = v3dv_cl_address(bo, offset);
- store.clear_buffer_being_stored = false;
- store.output_image_format = format;
- store.memory_format = V3D_TILING_RASTER;
- store.height_in_ub_or_stride = stride;
- store.decimate_mode = msaa ? V3D_DECIMATE_MODE_ALL_SAMPLES :
- V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static void
-emit_image_load(struct v3dv_device *device,
- struct v3dv_cl *cl,
- struct framebuffer_data *framebuffer,
- struct v3dv_image *image,
- VkImageAspectFlags aspect,
- uint32_t layer,
- uint32_t mip_level,
- bool is_copy_to_buffer,
- bool is_copy_from_buffer)
-{
- uint32_t layer_offset = v3dv_layer_offset(image, mip_level, layer);
-
- /* For image to/from buffer copies we always load to and store from RT0,
- * even for depth/stencil aspects, because the hardware can't do raster
- * stores or loads from/to the depth/stencil tile buffers.
- */
- bool load_to_color_tlb = is_copy_to_buffer || is_copy_from_buffer ||
- aspect == VK_IMAGE_ASPECT_COLOR_BIT;
-
- const struct v3d_resource_slice *slice = &image->slices[mip_level];
- cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
- load.buffer_to_load = load_to_color_tlb ?
- RENDER_TARGET_0 : v3dv_zs_buffer_from_aspect_bits(aspect);
-
- load.address = v3dv_cl_address(image->mem->bo, layer_offset);
-
- load.input_image_format = choose_tlb_format(framebuffer, aspect, false,
- is_copy_to_buffer,
- is_copy_from_buffer);
- load.memory_format = slice->tiling;
-
- /* When copying depth/stencil images to a buffer, for D24 formats Vulkan
- * expects the depth value in the LSB bits of each 32-bit pixel.
- * Unfortunately, the hardware seems to put the S8/X8 bits there and the
- * depth bits on the MSB. To work around that we can reverse the channel
- * order and then swap the R/B channels to get what we want.
- *
- * NOTE: reversing and swapping only gets us the behavior we want if the
- * operations happen in that exact order, which seems to be the case when
- * done on the tile buffer load operations. On the store, it seems the
- * order is not the same. The order on the store is probably reversed so
- * that reversing and swapping on both the load and the store preserves
- * the original order of the channels in memory.
- *
- * Notice that we only need to do this when copying to a buffer, where
- * depth and stencil aspects are copied as separate regions and
- * the spec expects them to be tightly packed.
- */
- bool needs_rb_swap = false;
- bool needs_chan_reverse = false;
- if (is_copy_to_buffer &&
- (framebuffer->vk_format == VK_FORMAT_X8_D24_UNORM_PACK32 ||
- (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT &&
- (aspect & VK_IMAGE_ASPECT_DEPTH_BIT)))) {
- needs_rb_swap = true;
- needs_chan_reverse = true;
- } else if (!is_copy_from_buffer && !is_copy_to_buffer &&
- (aspect & VK_IMAGE_ASPECT_COLOR_BIT)) {
- /* This is not a raw data copy (i.e. we are clearing the image),
- * so we need to make sure we respect the format swizzle.
- */
- needs_rb_swap = format_needs_rb_swap(device, framebuffer->vk_format);
- }
-
- load.r_b_swap = needs_rb_swap;
- load.channel_reverse = needs_chan_reverse;
-
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- load.height_in_ub_or_stride =
- slice->padded_height_of_output_image_in_uif_blocks;
- } else if (slice->tiling == V3D_TILING_RASTER) {
- load.height_in_ub_or_stride = slice->stride;
- }
-
- if (image->samples > VK_SAMPLE_COUNT_1_BIT)
- load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
- else
- load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static void
-emit_image_store(struct v3dv_device *device,
- struct v3dv_cl *cl,
- struct framebuffer_data *framebuffer,
- struct v3dv_image *image,
- VkImageAspectFlags aspect,
- uint32_t layer,
- uint32_t mip_level,
- bool is_copy_to_buffer,
- bool is_copy_from_buffer,
- bool is_multisample_resolve)
-{
- uint32_t layer_offset = v3dv_layer_offset(image, mip_level, layer);
-
- bool store_from_color_tlb = is_copy_to_buffer || is_copy_from_buffer ||
- aspect == VK_IMAGE_ASPECT_COLOR_BIT;
-
- const struct v3d_resource_slice *slice = &image->slices[mip_level];
- cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
- store.buffer_to_store = store_from_color_tlb ?
- RENDER_TARGET_0 : v3dv_zs_buffer_from_aspect_bits(aspect);
-
- store.address = v3dv_cl_address(image->mem->bo, layer_offset);
- store.clear_buffer_being_stored = false;
-
- /* See rationale in emit_image_load() */
- bool needs_rb_swap = false;
- bool needs_chan_reverse = false;
- if (is_copy_from_buffer &&
- (framebuffer->vk_format == VK_FORMAT_X8_D24_UNORM_PACK32 ||
- (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT &&
- (aspect & VK_IMAGE_ASPECT_DEPTH_BIT)))) {
- needs_rb_swap = true;
- needs_chan_reverse = true;
- } else if (!is_copy_from_buffer && !is_copy_to_buffer &&
- (aspect & VK_IMAGE_ASPECT_COLOR_BIT)) {
- needs_rb_swap = format_needs_rb_swap(device, framebuffer->vk_format);
- }
-
- store.r_b_swap = needs_rb_swap;
- store.channel_reverse = needs_chan_reverse;
-
- store.output_image_format = choose_tlb_format(framebuffer, aspect, true,
- is_copy_to_buffer,
- is_copy_from_buffer);
- store.memory_format = slice->tiling;
- if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
- slice->tiling == V3D_TILING_UIF_XOR) {
- store.height_in_ub_or_stride =
- slice->padded_height_of_output_image_in_uif_blocks;
- } else if (slice->tiling == V3D_TILING_RASTER) {
- store.height_in_ub_or_stride = slice->stride;
- }
-
- if (image->samples > VK_SAMPLE_COUNT_1_BIT)
- store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
- else if (is_multisample_resolve)
- store.decimate_mode = V3D_DECIMATE_MODE_4X;
- else
- store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
- }
-}
-
-static void
-emit_copy_layer_to_buffer_per_tile_list(struct v3dv_job *job,
- struct framebuffer_data *framebuffer,
- struct v3dv_buffer *buffer,
- struct v3dv_image *image,
- uint32_t layer_offset,
- const VkBufferImageCopy2KHR *region)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- /* Load image to TLB */
- assert((image->type != VK_IMAGE_TYPE_3D &&
- layer_offset < region->imageSubresource.layerCount) ||
- layer_offset < image->extent.depth);
-
- const uint32_t image_layer = image->type != VK_IMAGE_TYPE_3D ?
- region->imageSubresource.baseArrayLayer + layer_offset :
- region->imageOffset.z + layer_offset;
-
- emit_image_load(job->device, cl, framebuffer, image,
- region->imageSubresource.aspectMask,
- image_layer,
- region->imageSubresource.mipLevel,
- true, false);
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- /* Store TLB to buffer */
- uint32_t width, height;
- if (region->bufferRowLength == 0)
- width = region->imageExtent.width;
- else
- width = region->bufferRowLength;
-
- if (region->bufferImageHeight == 0)
- height = region->imageExtent.height;
- else
- height = region->bufferImageHeight;
-
- /* Handle copy from compressed format */
- width = DIV_ROUND_UP(width, vk_format_get_blockwidth(image->vk_format));
- height = DIV_ROUND_UP(height, vk_format_get_blockheight(image->vk_format));
-
- /* If we are storing stencil from a combined depth/stencil format the
- * Vulkan spec states that the output buffer must have packed stencil
- * values, where each stencil value is 1 byte.
- */
- uint32_t cpp =
- region->imageSubresource.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT ?
- 1 : image->cpp;
- uint32_t buffer_stride = width * cpp;
- uint32_t buffer_offset = buffer->mem_offset + region->bufferOffset +
- height * buffer_stride * layer_offset;
-
- uint32_t format = choose_tlb_format(framebuffer,
- region->imageSubresource.aspectMask,
- true, true, false);
- bool msaa = image->samples > VK_SAMPLE_COUNT_1_BIT;
-
- emit_linear_store(cl, RENDER_TARGET_0, buffer->mem->bo,
- buffer_offset, buffer_stride, msaa, format);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_copy_layer_to_buffer(struct v3dv_job *job,
- struct v3dv_buffer *buffer,
- struct v3dv_image *image,
- struct framebuffer_data *framebuffer,
- uint32_t layer,
- const VkBufferImageCopy2KHR *region)
-{
- emit_frame_setup(job, layer, NULL);
- emit_copy_layer_to_buffer_per_tile_list(job, framebuffer, buffer,
- image, layer, region);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_copy_image_to_buffer_rcl(struct v3dv_job *job,
- struct v3dv_buffer *buffer,
- struct v3dv_image *image,
- struct framebuffer_data *framebuffer,
- const VkBufferImageCopy2KHR *region)
-{
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
- v3dv_return_if_oom(NULL, job);
-
- for (int layer = 0; layer < job->frame_tiling.layers; layer++)
- emit_copy_layer_to_buffer(job, buffer, image, framebuffer, layer, region);
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
/* Implements a copy using the TLB.
*
* This only works if we are copying from offset (0,0), since a TLB store for
@@ -890,9 +256,9 @@ copy_image_to_buffer_tlb(struct v3dv_cmd_buffer *cmd_buffer,
return false;
uint32_t internal_type, internal_bpp;
- get_internal_type_bpp_for_image_aspects(cmd_buffer->device, fb_format,
- region->imageSubresource.aspectMask,
- &internal_type, &internal_bpp);
+ v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_image_aspects)
+ (fb_format, region->imageSubresource.aspectMask,
+ &internal_type, &internal_bpp);
uint32_t num_layers;
if (image->type != VK_IMAGE_TYPE_3D)
@@ -915,11 +281,12 @@ copy_image_to_buffer_tlb(struct v3dv_cmd_buffer *cmd_buffer,
v3dv_job_start_frame(job, width, height, num_layers, 1, internal_bpp, false);
struct framebuffer_data framebuffer;
- setup_framebuffer_data(cmd_buffer->device, &framebuffer, fb_format,
- internal_type, &job->frame_tiling);
+ v3dv_X(job->device, setup_framebuffer_data)(&framebuffer, fb_format, internal_type,
+ &job->frame_tiling);
- v3dv_job_emit_binning_flush(job);
- emit_copy_image_to_buffer_rcl(job, buffer, image, &framebuffer, region);
+ v3dv_X(job->device, job_emit_binning_flush)(job);
+ v3dv_X(job->device, job_emit_copy_image_to_buffer_rcl)
+ (job, buffer, image, &framebuffer, region);
v3dv_cmd_buffer_finish_job(cmd_buffer);
@@ -1371,195 +738,6 @@ v3dv_CmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
}
}
-static void
-emit_copy_image_layer_per_tile_list(struct v3dv_job *job,
- struct framebuffer_data *framebuffer,
- struct v3dv_image *dst,
- struct v3dv_image *src,
- uint32_t layer_offset,
- const VkImageCopy2KHR *region)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- assert((src->type != VK_IMAGE_TYPE_3D &&
- layer_offset < region->srcSubresource.layerCount) ||
- layer_offset < src->extent.depth);
-
- const uint32_t src_layer = src->type != VK_IMAGE_TYPE_3D ?
- region->srcSubresource.baseArrayLayer + layer_offset :
- region->srcOffset.z + layer_offset;
-
- emit_image_load(job->device, cl, framebuffer, src,
- region->srcSubresource.aspectMask,
- src_layer,
- region->srcSubresource.mipLevel,
- false, false);
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- assert((dst->type != VK_IMAGE_TYPE_3D &&
- layer_offset < region->dstSubresource.layerCount) ||
- layer_offset < dst->extent.depth);
-
- const uint32_t dst_layer = dst->type != VK_IMAGE_TYPE_3D ?
- region->dstSubresource.baseArrayLayer + layer_offset :
- region->dstOffset.z + layer_offset;
-
- emit_image_store(job->device, cl, framebuffer, dst,
- region->dstSubresource.aspectMask,
- dst_layer,
- region->dstSubresource.mipLevel,
- false, false, false);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_copy_image_layer(struct v3dv_job *job,
- struct v3dv_image *dst,
- struct v3dv_image *src,
- struct framebuffer_data *framebuffer,
- uint32_t layer,
- const VkImageCopy2KHR *region)
-{
- emit_frame_setup(job, layer, NULL);
- emit_copy_image_layer_per_tile_list(job, framebuffer, dst, src, layer, region);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_copy_image_rcl(struct v3dv_job *job,
- struct v3dv_image *dst,
- struct v3dv_image *src,
- struct framebuffer_data *framebuffer,
- const VkImageCopy2KHR *region)
-{
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
- v3dv_return_if_oom(NULL, job);
-
- for (int layer = 0; layer < job->frame_tiling.layers; layer++)
- emit_copy_image_layer(job, dst, src, framebuffer, layer, region);
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
-/* Disable level 0 write, just write following mipmaps */
-#define V3D_TFU_IOA_DIMTW (1 << 0)
-#define V3D_TFU_IOA_FORMAT_SHIFT 3
-#define V3D_TFU_IOA_FORMAT_LINEARTILE 3
-#define V3D_TFU_IOA_FORMAT_UBLINEAR_1_COLUMN 4
-#define V3D_TFU_IOA_FORMAT_UBLINEAR_2_COLUMN 5
-#define V3D_TFU_IOA_FORMAT_UIF_NO_XOR 6
-#define V3D_TFU_IOA_FORMAT_UIF_XOR 7
-
-#define V3D_TFU_ICFG_NUMMM_SHIFT 5
-#define V3D_TFU_ICFG_TTYPE_SHIFT 9
-
-#define V3D_TFU_ICFG_OPAD_SHIFT 22
-
-#define V3D_TFU_ICFG_FORMAT_SHIFT 18
-#define V3D_TFU_ICFG_FORMAT_RASTER 0
-#define V3D_TFU_ICFG_FORMAT_SAND_128 1
-#define V3D_TFU_ICFG_FORMAT_SAND_256 2
-#define V3D_TFU_ICFG_FORMAT_LINEARTILE 11
-#define V3D_TFU_ICFG_FORMAT_UBLINEAR_1_COLUMN 12
-#define V3D_TFU_ICFG_FORMAT_UBLINEAR_2_COLUMN 13
-#define V3D_TFU_ICFG_FORMAT_UIF_NO_XOR 14
-#define V3D_TFU_ICFG_FORMAT_UIF_XOR 15
-
-static void
-emit_tfu_job(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_image *dst,
- uint32_t dst_mip_level,
- uint32_t dst_layer,
- struct v3dv_image *src,
- uint32_t src_mip_level,
- uint32_t src_layer,
- uint32_t width,
- uint32_t height,
- const struct v3dv_format *format)
-{
- const struct v3d_resource_slice *src_slice = &src->slices[src_mip_level];
- const struct v3d_resource_slice *dst_slice = &dst->slices[dst_mip_level];
-
- assert(dst->mem && dst->mem->bo);
- const struct v3dv_bo *dst_bo = dst->mem->bo;
-
- assert(src->mem && src->mem->bo);
- const struct v3dv_bo *src_bo = src->mem->bo;
-
- struct drm_v3d_submit_tfu tfu = {
- .ios = (height << 16) | width,
- .bo_handles = {
- dst_bo->handle,
- src_bo->handle != dst_bo->handle ? src_bo->handle : 0
- },
- };
-
- const uint32_t src_offset =
- src_bo->offset + v3dv_layer_offset(src, src_mip_level, src_layer);
- tfu.iia |= src_offset;
-
- uint32_t icfg;
- if (src_slice->tiling == V3D_TILING_RASTER) {
- icfg = V3D_TFU_ICFG_FORMAT_RASTER;
- } else {
- icfg = V3D_TFU_ICFG_FORMAT_LINEARTILE +
- (src_slice->tiling - V3D_TILING_LINEARTILE);
- }
- tfu.icfg |= icfg << V3D_TFU_ICFG_FORMAT_SHIFT;
-
- const uint32_t dst_offset =
- dst_bo->offset + v3dv_layer_offset(dst, dst_mip_level, dst_layer);
- tfu.ioa |= dst_offset;
-
- tfu.ioa |= (V3D_TFU_IOA_FORMAT_LINEARTILE +
- (dst_slice->tiling - V3D_TILING_LINEARTILE)) <<
- V3D_TFU_IOA_FORMAT_SHIFT;
- tfu.icfg |= format->tex_type << V3D_TFU_ICFG_TTYPE_SHIFT;
-
- switch (src_slice->tiling) {
- case V3D_TILING_UIF_NO_XOR:
- case V3D_TILING_UIF_XOR:
- tfu.iis |= src_slice->padded_height / (2 * v3d_utile_height(src->cpp));
- break;
- case V3D_TILING_RASTER:
- tfu.iis |= src_slice->stride / src->cpp;
- break;
- default:
- break;
- }
-
- /* If we're writing level 0 (!IOA_DIMTW), then we need to supply the
- * OPAD field for the destination (how many extra UIF blocks beyond
- * those necessary to cover the height).
- */
- if (dst_slice->tiling == V3D_TILING_UIF_NO_XOR ||
- dst_slice->tiling == V3D_TILING_UIF_XOR) {
- uint32_t uif_block_h = 2 * v3d_utile_height(dst->cpp);
- uint32_t implicit_padded_height = align(height, uif_block_h);
- uint32_t icfg =
- (dst_slice->padded_height - implicit_padded_height) / uif_block_h;
- tfu.icfg |= icfg << V3D_TFU_ICFG_OPAD_SHIFT;
- }
-
- v3dv_cmd_buffer_add_tfu_job(cmd_buffer, &tfu);
-}
-
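The OPAD value filled in at the end of emit_tfu_job is simply the count of extra UIF blocks of destination padding beyond what the copied height implies. A self-contained sketch of that arithmetic follows; the utile height of 8 in the example is illustrative only, as the driver obtains it from v3d_utile_height(dst->cpp):

#include <stdint.h>

/* Sketch: extra UIF blocks of padding in the destination slice beyond the
 * blocks needed to cover `height` rows (same arithmetic as above).
 */
static uint32_t
tfu_dst_opad(uint32_t height, uint32_t padded_height, uint32_t utile_height)
{
   const uint32_t uif_block_h = 2 * utile_height;   /* a UIF block is 2 utiles tall */
   /* align height up to a whole number of UIF blocks */
   const uint32_t implicit_padded_height =
      (height + uif_block_h - 1) / uif_block_h * uif_block_h;
   return (padded_height - implicit_padded_height) / uif_block_h;
}

/* Example: height = 100, utile_height = 8 -> uif_block_h = 16 and
 * implicit_padded_height = 112; a slice padded to 128 rows yields OPAD = 1.
 */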
/**
* Returns true if the implementation supports the requested operation (even if
* it failed to process it, for example, due to an out-of-memory error).
@@ -1660,10 +838,10 @@ copy_image_tfu(struct v3dv_cmd_buffer *cmd_buffer,
const uint32_t base_dst_layer = dst->type != VK_IMAGE_TYPE_3D ?
region->dstSubresource.baseArrayLayer : region->dstOffset.z;
for (uint32_t i = 0; i < layer_count; i++) {
- emit_tfu_job(cmd_buffer,
- dst, dst_mip_level, base_dst_layer + i,
- src, src_mip_level, base_src_layer + i,
- width, height, format);
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_tfu_job)
+ (cmd_buffer, dst, dst_mip_level, base_dst_layer + i,
+ src, src_mip_level, base_src_layer + i,
+ width, height, format);
}
return true;
@@ -1694,9 +872,9 @@ copy_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
assert(region->dstSubresource.aspectMask ==
region->srcSubresource.aspectMask);
uint32_t internal_type, internal_bpp;
- get_internal_type_bpp_for_image_aspects(cmd_buffer->device, fb_format,
- region->dstSubresource.aspectMask,
- &internal_type, &internal_bpp);
+ v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_image_aspects)
+ (fb_format, region->dstSubresource.aspectMask,
+ &internal_type, &internal_bpp);
/* From the Vulkan spec with VK_KHR_maintenance1, VkImageCopy valid usage:
*
@@ -1730,11 +908,11 @@ copy_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
src->samples > VK_SAMPLE_COUNT_1_BIT);
struct framebuffer_data framebuffer;
- setup_framebuffer_data(job->device, &framebuffer, fb_format,
- internal_type, &job->frame_tiling);
+ v3dv_X(job->device, setup_framebuffer_data)(&framebuffer, fb_format, internal_type,
+ &job->frame_tiling);
- v3dv_job_emit_binning_flush(job);
- emit_copy_image_rcl(job, dst, src, &framebuffer, region);
+ v3dv_X(job->device, job_emit_binning_flush)(job);
+ v3dv_X(job->device, job_emit_copy_image_rcl)(job, dst, src, &framebuffer, region);
v3dv_cmd_buffer_finish_job(cmd_buffer);
@@ -1963,77 +1141,8 @@ v3dv_CmdCopyImage2KHR(VkCommandBuffer commandBuffer,
}
static void
-emit_clear_image_per_tile_list(struct v3dv_job *job,
- struct framebuffer_data *framebuffer,
- struct v3dv_image *image,
- VkImageAspectFlags aspects,
- uint32_t layer,
- uint32_t level)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- emit_image_store(job->device, cl, framebuffer, image, aspects,
- layer, level, false, false, false);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_clear_image(struct v3dv_job *job,
- struct v3dv_image *image,
- struct framebuffer_data *framebuffer,
- VkImageAspectFlags aspects,
- uint32_t layer,
- uint32_t level)
-{
- emit_clear_image_per_tile_list(job, framebuffer, image, aspects, layer, level);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_clear_image_rcl(struct v3dv_job *job,
- struct v3dv_image *image,
- struct framebuffer_data *framebuffer,
- const union v3dv_clear_value *clear_value,
- VkImageAspectFlags aspects,
- uint32_t layer,
- uint32_t level)
-{
- const struct rcl_clear_info clear_info = {
- .clear_value = clear_value,
- .image = image,
- .aspects = aspects,
- .layer = layer,
- .level = level,
- };
-
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, &clear_info);
- v3dv_return_if_oom(NULL, job);
-
- emit_frame_setup(job, 0, clear_value);
- emit_clear_image(job, image, framebuffer, aspects, layer, level);
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
-static void
-get_hw_clear_color(const VkClearColorValue *color,
+get_hw_clear_color(struct v3dv_device *device,
+ const VkClearColorValue *color,
VkFormat fb_format,
VkFormat image_format,
uint32_t internal_type,
@@ -2049,7 +1158,7 @@ get_hw_clear_color(const VkClearColorValue *color,
* not the compatible format.
*/
if (fb_format == image_format) {
- v3dv_get_hw_clear_color(color, internal_type, internal_size, hw_color);
+ v3dv_X(device, get_hw_clear_color)(color, internal_type, internal_size, hw_color);
} else {
union util_color uc;
enum pipe_format pipe_image_format =
@@ -2074,14 +1183,15 @@ clear_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
return false;
uint32_t internal_type, internal_bpp;
- get_internal_type_bpp_for_image_aspects(cmd_buffer->device, fb_format,
- range->aspectMask,
- &internal_type, &internal_bpp);
+ v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_image_aspects)
+ (fb_format, range->aspectMask,
+ &internal_type, &internal_bpp);
union v3dv_clear_value hw_clear_value = { 0 };
if (range->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
- get_hw_clear_color(&clear_value->color, fb_format, image->vk_format,
- internal_type, internal_bpp, &hw_clear_value.color[0]);
+ get_hw_clear_color(cmd_buffer->device, &clear_value->color, fb_format,
+ image->vk_format, internal_type, internal_bpp,
+ &hw_clear_value.color[0]);
} else {
assert((range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
(range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT));
@@ -2130,18 +1240,19 @@ clear_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
image->samples > VK_SAMPLE_COUNT_1_BIT);
struct framebuffer_data framebuffer;
- setup_framebuffer_data(job->device, &framebuffer, fb_format,
- internal_type, &job->frame_tiling);
+ v3dv_X(job->device, setup_framebuffer_data)(&framebuffer, fb_format, internal_type,
+ &job->frame_tiling);
- v3dv_job_emit_binning_flush(job);
+ v3dv_X(job->device, job_emit_binning_flush)(job);
/* If this triggers it is an application bug: the spec requires
* that any aspects to clear are present in the image.
*/
assert(range->aspectMask & image->aspects);
- emit_clear_image_rcl(job, image, &framebuffer, &hw_clear_value,
- range->aspectMask, layer, level);
+ v3dv_X(job->device, job_emit_clear_image_rcl)
+ (job, image, &framebuffer, &hw_clear_value,
+ range->aspectMask, layer, level);
v3dv_cmd_buffer_finish_job(cmd_buffer);
}
@@ -2194,193 +1305,6 @@ v3dv_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
}
}
-static void
-emit_copy_buffer_per_tile_list(struct v3dv_job *job,
- struct v3dv_bo *dst,
- struct v3dv_bo *src,
- uint32_t dst_offset,
- uint32_t src_offset,
- uint32_t stride,
- uint32_t format)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- emit_linear_load(cl, RENDER_TARGET_0, src, src_offset, stride, format);
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- emit_linear_store(cl, RENDER_TARGET_0,
- dst, dst_offset, stride, false, format);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_copy_buffer(struct v3dv_job *job,
- struct v3dv_bo *dst,
- struct v3dv_bo *src,
- uint32_t dst_offset,
- uint32_t src_offset,
- struct framebuffer_data *framebuffer,
- uint32_t format,
- uint32_t item_size)
-{
- const uint32_t stride = job->frame_tiling.width * item_size;
- emit_copy_buffer_per_tile_list(job, dst, src,
- dst_offset, src_offset,
- stride, format);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_copy_buffer_rcl(struct v3dv_job *job,
- struct v3dv_bo *dst,
- struct v3dv_bo *src,
- uint32_t dst_offset,
- uint32_t src_offset,
- struct framebuffer_data *framebuffer,
- uint32_t format,
- uint32_t item_size)
-{
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
- v3dv_return_if_oom(NULL, job);
-
- emit_frame_setup(job, 0, NULL);
-
- emit_copy_buffer(job, dst, src, dst_offset, src_offset,
- framebuffer, format, item_size);
-
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
-/* Figure out a TLB size configuration for a number of pixels to process.
- * Beware that we can't "render" more than 4096x4096 pixels in a single job,
- * if the pixel count is larger than this, the caller might need to split
- * the job and call this function multiple times.
- */
-static void
-framebuffer_size_for_pixel_count(uint32_t num_pixels,
- uint32_t *width,
- uint32_t *height)
-{
- assert(num_pixels > 0);
-
- const uint32_t max_dim_pixels = 4096;
- const uint32_t max_pixels = max_dim_pixels * max_dim_pixels;
-
- uint32_t w, h;
- if (num_pixels > max_pixels) {
- w = max_dim_pixels;
- h = max_dim_pixels;
- } else {
- w = num_pixels;
- h = 1;
- while (w > max_dim_pixels || ((w % 2) == 0 && w > 2 * h)) {
- w >>= 1;
- h <<= 1;
- }
- }
- assert(w <= max_dim_pixels && h <= max_dim_pixels);
- assert(w * h <= num_pixels);
- assert(w > 0 && h > 0);
-
- *width = w;
- *height = h;
-}
-
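The splitting policy described in the comment above is easiest to see with concrete numbers. The traces below simply follow the halving/doubling loop in the function and are illustrative only:

uint32_t w, h;
framebuffer_size_for_pixel_count(300, &w, &h);              /* 75 x 4                     */
framebuffer_size_for_pixel_count(1u << 20, &w, &h);         /* 1024 x 1024                */
framebuffer_size_for_pixel_count(4096u * 4096 + 1, &w, &h); /* clamped to 4096 x 4096;
                                                               the caller loops for the rest */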
-static struct v3dv_job *
-copy_buffer(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_bo *dst,
- uint32_t dst_offset,
- struct v3dv_bo *src,
- uint32_t src_offset,
- const VkBufferCopy2KHR *region)
-{
- const uint32_t internal_bpp = V3D_INTERNAL_BPP_32;
- const uint32_t internal_type = V3D_INTERNAL_TYPE_8UI;
-
- /* Select appropriate pixel format for the copy operation based on the
- * size to copy and the alignment of the source and destination offsets.
- */
- src_offset += region->srcOffset;
- dst_offset += region->dstOffset;
- uint32_t item_size = 4;
- while (item_size > 1 &&
- (src_offset % item_size != 0 || dst_offset % item_size != 0)) {
- item_size /= 2;
- }
-
- while (item_size > 1 && region->size % item_size != 0)
- item_size /= 2;
-
- assert(region->size % item_size == 0);
- uint32_t num_items = region->size / item_size;
- assert(num_items > 0);
-
- uint32_t format;
- VkFormat vk_format;
- switch (item_size) {
- case 4:
- format = V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
- vk_format = VK_FORMAT_R8G8B8A8_UINT;
- break;
- case 2:
- format = V3D_OUTPUT_IMAGE_FORMAT_RG8UI;
- vk_format = VK_FORMAT_R8G8_UINT;
- break;
- default:
- format = V3D_OUTPUT_IMAGE_FORMAT_R8UI;
- vk_format = VK_FORMAT_R8_UINT;
- break;
- }
-
- struct v3dv_job *job = NULL;
- while (num_items > 0) {
- job = v3dv_cmd_buffer_start_job(cmd_buffer, -1, V3DV_JOB_TYPE_GPU_CL);
- if (!job)
- return NULL;
-
- uint32_t width, height;
- framebuffer_size_for_pixel_count(num_items, &width, &height);
-
- v3dv_job_start_frame(job, width, height, 1, 1, internal_bpp, false);
-
- struct framebuffer_data framebuffer;
- setup_framebuffer_data(job->device, &framebuffer, vk_format,
- internal_type, &job->frame_tiling);
-
- v3dv_job_emit_binning_flush(job);
-
- emit_copy_buffer_rcl(job, dst, src, dst_offset, src_offset,
- &framebuffer, format, item_size);
-
- v3dv_cmd_buffer_finish_job(cmd_buffer);
-
- const uint32_t items_copied = width * height;
- const uint32_t bytes_copied = items_copied * item_size;
- num_items -= items_copied;
- src_offset += bytes_copied;
- dst_offset += bytes_copied;
- }
-
- return job;
-}
-
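The format selection at the top of copy_buffer picks the widest item size (4, 2 or 1 bytes) that divides both offsets and the copy size, so each "pixel" of the fake framebuffer moves as many bytes as possible. A standalone sketch of that choice with example values (the helper name is made up for illustration):

#include <stdint.h>

/* Sketch of the alignment-driven item-size choice used by copy_buffer. */
static uint32_t
copy_item_size(uint32_t src_offset, uint32_t dst_offset, uint32_t size)
{
   uint32_t item_size = 4;
   while (item_size > 1 &&
          (src_offset % item_size != 0 || dst_offset % item_size != 0))
      item_size /= 2;
   while (item_size > 1 && size % item_size != 0)
      item_size /= 2;
   return item_size;   /* 4 -> RGBA8UI, 2 -> RG8UI, 1 -> R8UI */
}

/* copy_item_size(36, 128, 1000) == 4  -> 250 RGBA8UI items
 * copy_item_size(36, 130, 1000) == 2  -> 500 RG8UI items
 */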
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2KHR *pCopyBufferInfo)
@@ -2390,10 +1314,11 @@ v3dv_CmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
V3DV_FROM_HANDLE(v3dv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
for (uint32_t i = 0; i < pCopyBufferInfo->regionCount; i++) {
- copy_buffer(cmd_buffer,
- dst_buffer->mem->bo, dst_buffer->mem_offset,
- src_buffer->mem->bo, src_buffer->mem_offset,
- &pCopyBufferInfo->pRegions[i]);
+ v3dv_X(cmd_buffer->device, cmd_buffer_copy_buffer)
+ (cmd_buffer,
+ dst_buffer->mem->bo, dst_buffer->mem_offset,
+ src_buffer->mem->bo, src_buffer->mem_offset,
+ &pCopyBufferInfo->pRegions[i]);
}
}
@@ -2441,10 +1366,10 @@ v3dv_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
.size = dataSize,
};
struct v3dv_job *copy_job =
- copy_buffer(cmd_buffer,
- dst_buffer->mem->bo, dst_buffer->mem_offset,
- src_bo, 0,
- &region);
+ v3dv_X(cmd_buffer->device, cmd_buffer_copy_buffer)
+ (cmd_buffer, dst_buffer->mem->bo, dst_buffer->mem_offset,
+ src_bo, 0, &region);
+
if (!copy_job)
return;
@@ -2452,118 +1377,6 @@ v3dv_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
cmd_buffer, (uint64_t)(uintptr_t)src_bo, destroy_update_buffer_cb);
}
-static void
-emit_fill_buffer_per_tile_list(struct v3dv_job *job,
- struct v3dv_bo *bo,
- uint32_t offset,
- uint32_t stride)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- emit_linear_store(cl, RENDER_TARGET_0, bo, offset, stride, false,
- V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_fill_buffer(struct v3dv_job *job,
- struct v3dv_bo *bo,
- uint32_t offset,
- struct framebuffer_data *framebuffer)
-{
- const uint32_t stride = job->frame_tiling.width * 4;
- emit_fill_buffer_per_tile_list(job, bo, offset, stride);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_fill_buffer_rcl(struct v3dv_job *job,
- struct v3dv_bo *bo,
- uint32_t offset,
- struct framebuffer_data *framebuffer,
- uint32_t data)
-{
- const union v3dv_clear_value clear_value = {
- .color = { data, 0, 0, 0 },
- };
-
- const struct rcl_clear_info clear_info = {
- .clear_value = &clear_value,
- .image = NULL,
- .aspects = VK_IMAGE_ASPECT_COLOR_BIT,
- .layer = 0,
- .level = 0,
- };
-
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, &clear_info);
- v3dv_return_if_oom(NULL, job);
-
- emit_frame_setup(job, 0, &clear_value);
- emit_fill_buffer(job, bo, offset, framebuffer);
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
-static void
-fill_buffer(struct v3dv_cmd_buffer *cmd_buffer,
- struct v3dv_bo *bo,
- uint32_t offset,
- uint32_t size,
- uint32_t data)
-{
- assert(size > 0 && size % 4 == 0);
- assert(offset + size <= bo->size);
-
- const uint32_t internal_bpp = V3D_INTERNAL_BPP_32;
- const uint32_t internal_type = V3D_INTERNAL_TYPE_8UI;
- uint32_t num_items = size / 4;
-
- while (num_items > 0) {
- struct v3dv_job *job =
- v3dv_cmd_buffer_start_job(cmd_buffer, -1, V3DV_JOB_TYPE_GPU_CL);
- if (!job)
- return;
-
- uint32_t width, height;
- framebuffer_size_for_pixel_count(num_items, &width, &height);
-
- v3dv_job_start_frame(job, width, height, 1, 1, internal_bpp, false);
-
- struct framebuffer_data framebuffer;
- setup_framebuffer_data(cmd_buffer->device, &framebuffer,
- VK_FORMAT_R8G8B8A8_UINT, internal_type,
- &job->frame_tiling);
-
- v3dv_job_emit_binning_flush(job);
-
- emit_fill_buffer_rcl(job, bo, offset, &framebuffer, data);
-
- v3dv_cmd_buffer_finish_job(cmd_buffer);
-
- const uint32_t items_copied = width * height;
- const uint32_t bytes_copied = items_copied * 4;
- num_items -= items_copied;
- offset += bytes_copied;
- }
-}
-
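At the API level this path is reached through vkCmdFillBuffer, whose effective size must end up as a multiple of 4 (the caller below trims it); every 32-bit word of the range then becomes one RGBA8UI "pixel" of the fill. An illustrative call, with made-up handle names:

/* Fill 64 KiB at offset 256 with 0xdeadbeef; the TLB path above renders
 * this as 16384 RGBA8UI items, split across jobs if needed.
 */
vkCmdFillBuffer(cmd_buf /* VkCommandBuffer */, dst_buf /* VkBuffer */,
                256 /* dstOffset */, 65536 /* size */, 0xdeadbeef);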
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdFillBuffer(VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
@@ -2586,7 +1399,8 @@ v3dv_CmdFillBuffer(VkCommandBuffer commandBuffer,
size -= size % 4;
}
- fill_buffer(cmd_buffer, bo, dstOffset, size, data);
+ v3dv_X(cmd_buffer->device, cmd_buffer_fill_buffer)
+ (cmd_buffer, bo, dstOffset, size, data);
}
/**
@@ -2718,142 +1532,6 @@ copy_buffer_to_image_tfu(struct v3dv_cmd_buffer *cmd_buffer,
return true;
}
-static void
-emit_copy_buffer_to_layer_per_tile_list(struct v3dv_job *job,
- struct framebuffer_data *framebuffer,
- struct v3dv_image *image,
- struct v3dv_buffer *buffer,
- uint32_t layer,
- const VkBufferImageCopy2KHR *region)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- const VkImageSubresourceLayers *imgrsc = &region->imageSubresource;
- assert((image->type != VK_IMAGE_TYPE_3D && layer < imgrsc->layerCount) ||
- layer < image->extent.depth);
-
- /* Load TLB from buffer */
- uint32_t width, height;
- if (region->bufferRowLength == 0)
- width = region->imageExtent.width;
- else
- width = region->bufferRowLength;
-
- if (region->bufferImageHeight == 0)
- height = region->imageExtent.height;
- else
- height = region->bufferImageHeight;
-
- /* Handle copy to compressed format using a compatible format */
- width = DIV_ROUND_UP(width, vk_format_get_blockwidth(image->vk_format));
- height = DIV_ROUND_UP(height, vk_format_get_blockheight(image->vk_format));
-
- uint32_t cpp = imgrsc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT ?
- 1 : image->cpp;
- uint32_t buffer_stride = width * cpp;
- uint32_t buffer_offset =
- buffer->mem_offset + region->bufferOffset + height * buffer_stride * layer;
-
- uint32_t format = choose_tlb_format(framebuffer, imgrsc->aspectMask,
- false, false, true);
-
- emit_linear_load(cl, RENDER_TARGET_0, buffer->mem->bo,
- buffer_offset, buffer_stride, format);
-
- /* Because we can't do raster loads/stores of Z/S formats we need to
- * use a color tile buffer with a compatible RGBA color format instead.
- * However, when we are uploading a single aspect to a combined
- * depth/stencil image we have the problem that our tile buffer stores don't
- * allow us to mask out the other aspect, so we always write all four RGBA
- * channels to the image and we end up overwriting that other aspect with
- * undefined values. To work around that, we first load the aspect we are
- * not copying from the image memory into a proper Z/S tile buffer. Then we
- * do our store from the color buffer for the aspect we are copying, and
- * after that, we do another store from the Z/S tile buffer to restore the
- * other aspect to its original value.
- */
- if (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
- if (imgrsc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
- emit_image_load(job->device, cl, framebuffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
- imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
- false, false);
- } else {
- assert(imgrsc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
- emit_image_load(job->device, cl, framebuffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
- imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
- false, false);
- }
- }
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- /* Store TLB to image */
- emit_image_store(job->device, cl, framebuffer, image, imgrsc->aspectMask,
- imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
- false, true, false);
-
- if (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
- if (imgrsc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
- emit_image_store(job->device, cl, framebuffer, image,
- VK_IMAGE_ASPECT_STENCIL_BIT,
- imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
- false, false, false);
- } else {
- assert(imgrsc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
- emit_image_store(job->device, cl, framebuffer, image,
- VK_IMAGE_ASPECT_DEPTH_BIT,
- imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
- false, false, false);
- }
- }
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
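The depth/stencil preservation dance described in the comment inside this function is easier to read as a flat per-tile sequence. Below is a schematic of the commands the code above emits when uploading only the stencil aspect of a D24S8 image; it is a sketch of the ordering, not a literal command list:

/*   TILE_COORDINATES_IMPLICIT
 *   linear load of buffer data into RT0        (color view of the stencil data)
 *   LOAD_TILE_BUFFER_GENERAL  Z                (preserve the untouched depth aspect)
 *   END_OF_LOADS
 *   BRANCH_TO_IMPLICIT_TILE_LIST
 *   STORE_TILE_BUFFER_GENERAL RT0 -> image     (writes the stencil aspect)
 *   STORE_TILE_BUFFER_GENERAL Z   -> image     (restores the depth aspect)
 *   END_OF_TILE_MARKER
 *   RETURN_FROM_SUB_LIST
 */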
-static void
-emit_copy_buffer_to_layer(struct v3dv_job *job,
- struct v3dv_image *image,
- struct v3dv_buffer *buffer,
- struct framebuffer_data *framebuffer,
- uint32_t layer,
- const VkBufferImageCopy2KHR *region)
-{
- emit_frame_setup(job, layer, NULL);
- emit_copy_buffer_to_layer_per_tile_list(job, framebuffer, image, buffer,
- layer, region);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_copy_buffer_to_image_rcl(struct v3dv_job *job,
- struct v3dv_image *image,
- struct v3dv_buffer *buffer,
- struct framebuffer_data *framebuffer,
- const VkBufferImageCopy2KHR *region)
-{
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
- v3dv_return_if_oom(NULL, job);
-
- for (int layer = 0; layer < job->frame_tiling.layers; layer++)
- emit_copy_buffer_to_layer(job, image, buffer, framebuffer, layer, region);
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
/**
* Returns true if the implementation supports the requested operation (even if
* it failed to process it, for example, due to an out-of-memory error).
@@ -2869,9 +1547,10 @@ copy_buffer_to_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
return false;
uint32_t internal_type, internal_bpp;
- get_internal_type_bpp_for_image_aspects(cmd_buffer->device, fb_format,
- region->imageSubresource.aspectMask,
- &internal_type, &internal_bpp);
+ v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_image_aspects)
+ (fb_format, region->imageSubresource.aspectMask,
+ &internal_type, &internal_bpp);
+
uint32_t num_layers;
if (image->type != VK_IMAGE_TYPE_3D)
num_layers = region->imageSubresource.layerCount;
@@ -2893,11 +1572,12 @@ copy_buffer_to_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
v3dv_job_start_frame(job, width, height, num_layers, 1, internal_bpp, false);
struct framebuffer_data framebuffer;
- setup_framebuffer_data(job->device, &framebuffer, fb_format,
- internal_type, &job->frame_tiling);
+ v3dv_X(job->device, setup_framebuffer_data)(&framebuffer, fb_format, internal_type,
+ &job->frame_tiling);
- v3dv_job_emit_binning_flush(job);
- emit_copy_buffer_to_image_rcl(job, image, buffer, &framebuffer, region);
+ v3dv_X(job->device, job_emit_binning_flush)(job);
+ v3dv_X(job->device, job_emit_copy_buffer_to_image_rcl)
+ (job, image, buffer, &framebuffer, region);
v3dv_cmd_buffer_finish_job(cmd_buffer);
@@ -4238,10 +2918,10 @@ blit_tfu(struct v3dv_cmd_buffer *cmd_buffer,
dst_mirror_z ? max_dst_layer - i - 1: min_dst_layer + i;
const uint32_t src_layer =
src_mirror_z ? max_src_layer - i - 1: min_src_layer + i;
- emit_tfu_job(cmd_buffer,
- dst, dst_mip_level, dst_layer,
- src, src_mip_level, src_layer,
- dst_width, dst_height, format);
+ v3dv_X(cmd_buffer->device, cmd_buffer_emit_tfu_job)
+ (cmd_buffer, dst, dst_mip_level, dst_layer,
+ src, src_mip_level, src_layer,
+ dst_width, dst_height, format);
}
return true;
@@ -5566,93 +4246,6 @@ v3dv_CmdBlitImage2KHR(VkCommandBuffer commandBuffer,
}
}
-static void
-emit_resolve_image_layer_per_tile_list(struct v3dv_job *job,
- struct framebuffer_data *framebuffer,
- struct v3dv_image *dst,
- struct v3dv_image *src,
- uint32_t layer_offset,
- const VkImageResolve2KHR *region)
-{
- struct v3dv_cl *cl = &job->indirect;
- v3dv_cl_ensure_space(cl, 200, 1);
- v3dv_return_if_oom(NULL, job);
-
- struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
-
- cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
-
- assert((src->type != VK_IMAGE_TYPE_3D &&
- layer_offset < region->srcSubresource.layerCount) ||
- layer_offset < src->extent.depth);
-
- const uint32_t src_layer = src->type != VK_IMAGE_TYPE_3D ?
- region->srcSubresource.baseArrayLayer + layer_offset :
- region->srcOffset.z + layer_offset;
-
- emit_image_load(job->device, cl, framebuffer, src,
- region->srcSubresource.aspectMask,
- src_layer,
- region->srcSubresource.mipLevel,
- false, false);
-
- cl_emit(cl, END_OF_LOADS, end);
-
- cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
-
- assert((dst->type != VK_IMAGE_TYPE_3D &&
- layer_offset < region->dstSubresource.layerCount) ||
- layer_offset < dst->extent.depth);
-
- const uint32_t dst_layer = dst->type != VK_IMAGE_TYPE_3D ?
- region->dstSubresource.baseArrayLayer + layer_offset :
- region->dstOffset.z + layer_offset;
-
- emit_image_store(job->device, cl, framebuffer, dst,
- region->dstSubresource.aspectMask,
- dst_layer,
- region->dstSubresource.mipLevel,
- false, false, true);
-
- cl_emit(cl, END_OF_TILE_MARKER, end);
-
- cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
-
- cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
- branch.start = tile_list_start;
- branch.end = v3dv_cl_get_address(cl);
- }
-}
-
-static void
-emit_resolve_image_layer(struct v3dv_job *job,
- struct v3dv_image *dst,
- struct v3dv_image *src,
- struct framebuffer_data *framebuffer,
- uint32_t layer,
- const VkImageResolve2KHR *region)
-{
- emit_frame_setup(job, layer, NULL);
- emit_resolve_image_layer_per_tile_list(job, framebuffer,
- dst, src, layer, region);
- emit_supertile_coordinates(job, framebuffer);
-}
-
-static void
-emit_resolve_image_rcl(struct v3dv_job *job,
- struct v3dv_image *dst,
- struct v3dv_image *src,
- struct framebuffer_data *framebuffer,
- const VkImageResolve2KHR *region)
-{
- struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
- v3dv_return_if_oom(NULL, job);
-
- for (int layer = 0; layer < job->frame_tiling.layers; layer++)
- emit_resolve_image_layer(job, dst, src, framebuffer, layer, region);
- cl_emit(rcl, END_OF_RENDERING, end);
-}
-
static bool
resolve_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_image *dst,
@@ -5687,18 +4280,18 @@ resolve_image_tlb(struct v3dv_cmd_buffer *cmd_buffer,
const uint32_t height = DIV_ROUND_UP(region->extent.height, block_h);
uint32_t internal_type, internal_bpp;
- get_internal_type_bpp_for_image_aspects(cmd_buffer->device, fb_format,
- region->srcSubresource.aspectMask,
- &internal_type, &internal_bpp);
+ v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_image_aspects)
+ (fb_format, region->srcSubresource.aspectMask,
+ &internal_type, &internal_bpp);
v3dv_job_start_frame(job, width, height, num_layers, 1, internal_bpp, true);
struct framebuffer_data framebuffer;
- setup_framebuffer_data(job->device, &framebuffer, fb_format,
- internal_type, &job->frame_tiling);
+ v3dv_X(job->device, setup_framebuffer_data)(&framebuffer, fb_format, internal_type,
+ &job->frame_tiling);
- v3dv_job_emit_binning_flush(job);
- emit_resolve_image_rcl(job, dst, src, &framebuffer, region);
+ v3dv_X(job->device, job_emit_binning_flush)(job);
+ v3dv_X(job->device, job_emit_resolve_image_rcl)(job, dst, src, &framebuffer, region);
v3dv_cmd_buffer_finish_job(cmd_buffer);
return true;
diff --git a/src/broadcom/vulkan/v3dv_meta_copy.h b/src/broadcom/vulkan/v3dv_meta_copy.h
new file mode 100644
index 00000000000..ae2bf734f70
--- /dev/null
+++ b/src/broadcom/vulkan/v3dv_meta_copy.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright © 2021 Raspberry Pi
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef V3DV_META_COPY_H
+#define V3DV_META_COPY_H
+
+/* Disable level 0 write, just write following mipmaps */
+#define V3D_TFU_IOA_DIMTW (1 << 0)
+#define V3D_TFU_IOA_FORMAT_SHIFT 3
+#define V3D_TFU_IOA_FORMAT_LINEARTILE 3
+#define V3D_TFU_IOA_FORMAT_UBLINEAR_1_COLUMN 4
+#define V3D_TFU_IOA_FORMAT_UBLINEAR_2_COLUMN 5
+#define V3D_TFU_IOA_FORMAT_UIF_NO_XOR 6
+#define V3D_TFU_IOA_FORMAT_UIF_XOR 7
+
+#define V3D_TFU_ICFG_NUMMM_SHIFT 5
+#define V3D_TFU_ICFG_TTYPE_SHIFT 9
+
+#define V3D_TFU_ICFG_OPAD_SHIFT 22
+
+#define V3D_TFU_ICFG_FORMAT_SHIFT 18
+#define V3D_TFU_ICFG_FORMAT_RASTER 0
+#define V3D_TFU_ICFG_FORMAT_SAND_128 1
+#define V3D_TFU_ICFG_FORMAT_SAND_256 2
+#define V3D_TFU_ICFG_FORMAT_LINEARTILE 11
+#define V3D_TFU_ICFG_FORMAT_UBLINEAR_1_COLUMN 12
+#define V3D_TFU_ICFG_FORMAT_UBLINEAR_2_COLUMN 13
+#define V3D_TFU_ICFG_FORMAT_UIF_NO_XOR 14
+#define V3D_TFU_ICFG_FORMAT_UIF_XOR 15
+
+/**
+ * Copy operations implemented in this file don't operate on a framebuffer
+ * object provided by the user; however, since most of them use the TLB,
+ * we still need to have some representation of the framebuffer. For the most
+ * part, the job's frame tiling information is enough for this, but we
+ * still need additional information, such as the internal type of our single
+ * render target, so we use this auxiliary struct to pass that information
+ * around.
+ */
+struct framebuffer_data {
+ /* The internal type of the single render target */
+ uint32_t internal_type;
+
+ /* Supertile coverage */
+ uint32_t min_x_supertile;
+ uint32_t min_y_supertile;
+ uint32_t max_x_supertile;
+ uint32_t max_y_supertile;
+
+ /* Format info */
+ VkFormat vk_format;
+ const struct v3dv_format *format;
+ uint8_t internal_depth_type;
+};
+
+#endif
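Since framebuffer_data is only a software-side stand-in for a real framebuffer, populating it amounts to recording the render-target format and the supertile extent of the job's frame tiling. The sketch below is hypothetical; the actual work is done by the per-version v3dX(setup_framebuffer_data) helper (not shown in this diff), and the frame_width_in_supertiles / frame_height_in_supertiles field names are assumptions taken from the driver's frame tiling struct:

/* Hypothetical sketch only; the driver uses v3dX(setup_framebuffer_data). */
static void
example_setup_framebuffer_data(struct framebuffer_data *fb,
                               VkFormat vk_format,
                               uint32_t internal_type,
                               const struct v3dv_frame_tiling *tiling)
{
   fb->internal_type = internal_type;
   fb->vk_format = vk_format;

   /* Meta operations always cover the whole frame, so the supertile range
    * spans the full tiling extent.
    */
   fb->min_x_supertile = 0;
   fb->min_y_supertile = 0;
   fb->max_x_supertile = tiling->frame_width_in_supertiles - 1;  /* assumed field */
   fb->max_y_supertile = tiling->frame_height_in_supertiles - 1; /* assumed field */
}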
diff --git a/src/broadcom/vulkan/v3dv_private.h b/src/broadcom/vulkan/v3dv_private.h
index 9eda1d71e9b..1a9e249a7fa 100644
--- a/src/broadcom/vulkan/v3dv_private.h
+++ b/src/broadcom/vulkan/v3dv_private.h
@@ -695,11 +695,6 @@ struct v3dv_cmd_buffer_attachment_state {
union v3dv_clear_value clear_value;
};
-void v3dv_get_hw_clear_color(const VkClearColorValue *color,
- uint32_t internal_type,
- uint32_t internal_size,
- uint32_t *hw_color);
-
struct v3dv_viewport_state {
uint32_t count;
VkViewport viewports[MAX_VIEWPORTS];
@@ -988,7 +983,6 @@ void v3dv_job_destroy(struct v3dv_job *job);
void v3dv_job_add_bo(struct v3dv_job *job, struct v3dv_bo *bo);
void v3dv_job_add_bo_unchecked(struct v3dv_job *job, struct v3dv_bo *bo);
-void v3dv_job_emit_binning_flush(struct v3dv_job *job);
void v3dv_job_start_frame(struct v3dv_job *job,
uint32_t width,
uint32_t height,
@@ -996,11 +990,35 @@ void v3dv_job_start_frame(struct v3dv_job *job,
uint32_t render_target_count,
uint8_t max_internal_bpp,
bool msaa);
+
+struct v3dv_job *
+v3dv_job_clone_in_cmd_buffer(struct v3dv_job *job,
+ struct v3dv_cmd_buffer *cmd_buffer);
+
struct v3dv_job *v3dv_cmd_buffer_create_cpu_job(struct v3dv_device *device,
enum v3dv_job_type type,
struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx);
+void
+v3dv_cmd_buffer_ensure_array_state(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t slot_size,
+ uint32_t used_count,
+ uint32_t *alloc_count,
+ void **ptr);
+
+void v3dv_cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer);
+
+/* FIXME: only used on v3dv_cmd_buffer and v3dvx_cmd_buffer, perhaps move to a
+ * cmd_buffer specific header?
+ */
+struct v3dv_draw_info {
+ uint32_t vertex_count;
+ uint32_t instance_count;
+ uint32_t first_vertex;
+ uint32_t first_instance;
+};
+
struct v3dv_vertex_binding {
struct v3dv_buffer *buffer;
VkDeviceSize offset;
@@ -1295,12 +1313,6 @@ void v3dv_cmd_buffer_meta_state_pop(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t dirty_dynamic_state,
bool needs_subpass_resume);
-void v3dv_render_pass_setup_render_target(struct v3dv_cmd_buffer *cmd_buffer,
- int rt,
- uint32_t *rt_bpp,
- uint32_t *rt_type,
- uint32_t *rt_clamp);
-
void v3dv_cmd_buffer_reset_queries(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_query_pool *pool,
uint32_t first,
@@ -1828,52 +1840,6 @@ v3dv_cmd_buffer_get_descriptor_state(struct v3dv_cmd_buffer *cmd_buffer,
const nir_shader_compiler_options *v3dv_pipeline_get_nir_options(void);
-static inline uint32_t
-v3dv_zs_buffer_from_aspect_bits(VkImageAspectFlags aspects)
-{
- const VkImageAspectFlags zs_aspects =
- VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
- const VkImageAspectFlags filtered_aspects = aspects & zs_aspects;
-
- if (filtered_aspects == zs_aspects)
- return ZSTENCIL;
- else if (filtered_aspects == VK_IMAGE_ASPECT_DEPTH_BIT)
- return Z;
- else if (filtered_aspects == VK_IMAGE_ASPECT_STENCIL_BIT)
- return STENCIL;
- else
- return NONE;
-}
-
-static inline uint32_t
-v3dv_zs_buffer(bool depth, bool stencil)
-{
- if (depth && stencil)
- return ZSTENCIL;
- else if (depth)
- return Z;
- else if (stencil)
- return STENCIL;
- return NONE;
-}
-
-static inline uint8_t
-v3dv_get_internal_depth_type(VkFormat format)
-{
- switch (format) {
- case VK_FORMAT_D16_UNORM:
- return V3D_INTERNAL_TYPE_DEPTH_16;
- case VK_FORMAT_D32_SFLOAT:
- return V3D_INTERNAL_TYPE_DEPTH_32F;
- case VK_FORMAT_X8_D24_UNORM_PACK32:
- case VK_FORMAT_D24_UNORM_S8_UINT:
- return V3D_INTERNAL_TYPE_DEPTH_24;
- default:
- unreachable("Invalid depth format");
- break;
- }
-}
-
uint32_t v3dv_physical_device_vendor_id(struct v3dv_physical_device *dev);
uint32_t v3dv_physical_device_device_id(struct v3dv_physical_device *dev);
diff --git a/src/broadcom/vulkan/v3dvx_cmd_buffer.c b/src/broadcom/vulkan/v3dvx_cmd_buffer.c
new file mode 100644
index 00000000000..3f71ad72cbf
--- /dev/null
+++ b/src/broadcom/vulkan/v3dvx_cmd_buffer.c
@@ -0,0 +1,2081 @@
+/*
+ * Copyright © 2021 Raspberry Pi
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "v3dv_private.h"
+#include "broadcom/common/v3d_macros.h"
+#include "broadcom/cle/v3dx_pack.h"
+#include "broadcom/compiler/v3d_compiler.h"
+
+#include "util/half_float.h"
+#include "vulkan/util/vk_format.h"
+#include "util/u_pack_color.h"
+
+#include "vk_format_info.h"
+
+void
+v3dX(job_emit_binning_flush)(struct v3dv_job *job)
+{
+ assert(job);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(FLUSH));
+ v3dv_return_if_oom(NULL, job);
+
+ cl_emit(&job->bcl, FLUSH, flush);
+}
+
+void
+v3dX(job_emit_binning_prolog)(struct v3dv_job *job,
+ const struct v3dv_frame_tiling *tiling,
+ uint32_t layers)
+{
+ /* This must go before the binning mode configuration. It is
+ * required for layered framebuffers to work.
+ */
+ cl_emit(&job->bcl, NUMBER_OF_LAYERS, config) {
+ config.number_of_layers = layers;
+ }
+
+ cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
+ config.width_in_pixels = tiling->width;
+ config.height_in_pixels = tiling->height;
+ config.number_of_render_targets = MAX2(tiling->render_target_count, 1);
+ config.multisample_mode_4x = tiling->msaa;
+ config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
+ }
+
+ /* There's definitely nothing in the VCD cache we want. */
+ cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);
+
+ /* "Binning mode lists must have a Start Tile Binning item (6) after
+ * any prefix state data before the binning list proper starts."
+ */
+ cl_emit(&job->bcl, START_TILE_BINNING, bin);
+}
+
+void
+v3dX(cmd_buffer_end_render_pass_secondary)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ assert(cmd_buffer->state.job);
+ v3dv_cl_ensure_space_with_branch(&cmd_buffer->state.job->bcl,
+ cl_packet_length(RETURN_FROM_SUB_LIST));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+ cl_emit(&cmd_buffer->state.job->bcl, RETURN_FROM_SUB_LIST, ret);
+}
+
+void
+v3dX(job_emit_clip_window)(struct v3dv_job *job, const VkRect2D *rect)
+{
+ assert(job);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CLIP_WINDOW));
+ v3dv_return_if_oom(NULL, job);
+
+ cl_emit(&job->bcl, CLIP_WINDOW, clip) {
+ clip.clip_window_left_pixel_coordinate = rect->offset.x;
+ clip.clip_window_bottom_pixel_coordinate = rect->offset.y;
+ clip.clip_window_width_in_pixels = rect->extent.width;
+ clip.clip_window_height_in_pixels = rect->extent.height;
+ }
+}
+
+static void
+cmd_buffer_render_pass_emit_load(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_cl *cl,
+ struct v3dv_image_view *iview,
+ uint32_t layer,
+ uint32_t buffer)
+{
+ const struct v3dv_image *image = iview->image;
+ const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
+ uint32_t layer_offset = v3dv_layer_offset(image,
+ iview->base_level,
+ iview->first_layer + layer);
+
+ cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
+ load.buffer_to_load = buffer;
+ load.address = v3dv_cl_address(image->mem->bo, layer_offset);
+
+ load.input_image_format = iview->format->rt_type;
+ load.r_b_swap = iview->swap_rb;
+ load.memory_format = slice->tiling;
+
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ load.height_in_ub_or_stride =
+ slice->padded_height_of_output_image_in_uif_blocks;
+ } else if (slice->tiling == V3D_TILING_RASTER) {
+ load.height_in_ub_or_stride = slice->stride;
+ }
+
+ if (image->samples > VK_SAMPLE_COUNT_1_BIT)
+ load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
+ else
+ load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+static bool
+check_needs_load(const struct v3dv_cmd_buffer_state *state,
+ VkImageAspectFlags aspect,
+ uint32_t att_first_subpass_idx,
+ VkAttachmentLoadOp load_op)
+{
+ /* We call this with image->aspects & aspect, so 0 means the aspect we are
+ * testing does not exist in the image.
+ */
+ if (!aspect)
+ return false;
+
+ /* Attachment load operations apply on the first subpass that uses the
+ * attachment; otherwise we always need to load.
+ */
+ if (state->job->first_subpass > att_first_subpass_idx)
+ return true;
+
+ /* If the job is continuing a subpass started in another job, we always
+ * need to load.
+ */
+ if (state->job->is_subpass_continue)
+ return true;
+
+ /* If the area is not aligned to tile boundaries, we always need to load */
+ if (!state->tile_aligned_render_area)
+ return true;
+
+ /* The attachment load operation must be LOAD */
+ return load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
+}
+
+static inline uint32_t
+v3dv_zs_buffer(bool depth, bool stencil)
+{
+ if (depth && stencil)
+ return ZSTENCIL;
+ else if (depth)
+ return Z;
+ else if (stencil)
+ return STENCIL;
+ return NONE;
+}
+
+static void
+cmd_buffer_render_pass_emit_loads(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_cl *cl,
+ uint32_t layer)
+{
+ const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ const struct v3dv_framebuffer *framebuffer = state->framebuffer;
+ const struct v3dv_render_pass *pass = state->pass;
+ const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
+
+ for (uint32_t i = 0; i < subpass->color_count; i++) {
+ uint32_t attachment_idx = subpass->color_attachments[i].attachment;
+
+ if (attachment_idx == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ const struct v3dv_render_pass_attachment *attachment =
+ &state->pass->attachments[attachment_idx];
+
+ /* According to the Vulkan spec:
+ *
+ * "The load operation for each sample in an attachment happens before
+ * any recorded command which accesses the sample in the first subpass
+ * where the attachment is used."
+ *
+ * If the load operation is CLEAR, we must only clear once on the first
+ * subpass that uses the attachment (and in that case we don't LOAD).
+ * After that, we always want to load so we don't lose any rendering done
+ * by a previous subpass to the same attachment. We also want to load
+ * if the current job is continuing subpass work started by a previous
+ * job, for the same reason.
+ *
+ * If the render area is not aligned to tile boundaries then we have
+ * tiles which are partially covered by it. In this case, we need to
+ * load the tiles so we can preserve the pixels that are outside the
+ * render area for any such tiles.
+ */
+ bool needs_load = check_needs_load(state,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ attachment->first_subpass,
+ attachment->desc.loadOp);
+ if (needs_load) {
+ struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
+ cmd_buffer_render_pass_emit_load(cmd_buffer, cl, iview,
+ layer, RENDER_TARGET_0 + i);
+ }
+ }
+
+ uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
+ if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
+ const struct v3dv_render_pass_attachment *ds_attachment =
+ &state->pass->attachments[ds_attachment_idx];
+
+ const VkImageAspectFlags ds_aspects =
+ vk_format_aspects(ds_attachment->desc.format);
+
+ const bool needs_depth_load =
+ check_needs_load(state,
+ ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.loadOp);
+
+ const bool needs_stencil_load =
+ check_needs_load(state,
+ ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.stencilLoadOp);
+
+ if (needs_depth_load || needs_stencil_load) {
+ struct v3dv_image_view *iview =
+ framebuffer->attachments[ds_attachment_idx];
+ /* From the Vulkan spec:
+ *
+ * "When an image view of a depth/stencil image is used as a
+ * depth/stencil framebuffer attachment, the aspectMask is ignored
+ * and both depth and stencil image subresources are used."
+ *
+ * So we ignore the aspects from the subresource range of the image
+ * view for the depth/stencil attachment, but we still need to restrict
+ * the load to aspects compatible with the render pass and the image.
+ */
+ const uint32_t zs_buffer =
+ v3dv_zs_buffer(needs_depth_load, needs_stencil_load);
+ cmd_buffer_render_pass_emit_load(cmd_buffer, cl,
+ iview, layer, zs_buffer);
+ }
+ }
+
+ cl_emit(cl, END_OF_LOADS, end);
+}
+
+static void
+cmd_buffer_render_pass_emit_store(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_cl *cl,
+ uint32_t attachment_idx,
+ uint32_t layer,
+ uint32_t buffer,
+ bool clear,
+ bool is_multisample_resolve)
+{
+ const struct v3dv_image_view *iview =
+ cmd_buffer->state.framebuffer->attachments[attachment_idx];
+ const struct v3dv_image *image = iview->image;
+ const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
+ uint32_t layer_offset = v3dv_layer_offset(image,
+ iview->base_level,
+ iview->first_layer + layer);
+
+ cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = buffer;
+ store.address = v3dv_cl_address(image->mem->bo, layer_offset);
+ store.clear_buffer_being_stored = clear;
+
+ store.output_image_format = iview->format->rt_type;
+ store.r_b_swap = iview->swap_rb;
+ store.memory_format = slice->tiling;
+
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ store.height_in_ub_or_stride =
+ slice->padded_height_of_output_image_in_uif_blocks;
+ } else if (slice->tiling == V3D_TILING_RASTER) {
+ store.height_in_ub_or_stride = slice->stride;
+ }
+
+ if (image->samples > VK_SAMPLE_COUNT_1_BIT)
+ store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
+ else if (is_multisample_resolve)
+ store.decimate_mode = V3D_DECIMATE_MODE_4X;
+ else
+ store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+static bool
+check_needs_clear(const struct v3dv_cmd_buffer_state *state,
+ VkImageAspectFlags aspect,
+ uint32_t att_first_subpass_idx,
+ VkAttachmentLoadOp load_op,
+ bool do_clear_with_draw)
+{
+ /* We call this with image->aspects & aspect, so 0 means the aspect we are
+ * testing does not exist in the image.
+ */
+ if (!aspect)
+ return false;
+
+ /* If the aspect needs to be cleared with a draw call, then we won't emit
+ * the clear here.
+ */
+ if (do_clear_with_draw)
+ return false;
+
+ /* If this is resuming a subpass started with another job, then attachment
+ * load operations don't apply.
+ */
+ if (state->job->is_subpass_continue)
+ return false;
+
+ /* If the render area is not aligned to tile boundaries we can't use the
+ * TLB for a clear.
+ */
+ if (!state->tile_aligned_render_area)
+ return false;
+
+ /* If this job is running in a subpass other than the first subpass in
+ * which this attachment is used then attachment load operations don't apply.
+ */
+ if (state->job->first_subpass != att_first_subpass_idx)
+ return false;
+
+ /* The attachment load operation must be CLEAR */
+ return load_op == VK_ATTACHMENT_LOAD_OP_CLEAR;
+}
+
+static bool
+check_needs_store(const struct v3dv_cmd_buffer_state *state,
+ VkImageAspectFlags aspect,
+ uint32_t att_last_subpass_idx,
+ VkAttachmentStoreOp store_op)
+{
+ /* We call this with image->aspects & aspect, so 0 means the aspect we are
+ * testing does not exist in the image.
+ */
+ if (!aspect)
+ return false;
+
+ /* Attachment store operations only apply on the last subpass where the
+ * attachment is used; in other subpasses we always need to store.
+ */
+ if (state->subpass_idx < att_last_subpass_idx)
+ return true;
+
+ /* Attachment store operations only apply on the last job we emit on the
+ * last subpass where the attachment is used; otherwise we always need to
+ * store.
+ */
+ if (!state->job->is_subpass_finish)
+ return true;
+
+ /* The attachment store operation must be STORE */
+ return store_op == VK_ATTACHMENT_STORE_OP_STORE;
+}
+
+static void
+cmd_buffer_render_pass_emit_stores(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_cl *cl,
+ uint32_t layer)
+{
+ struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ const struct v3dv_subpass *subpass =
+ &state->pass->subpasses[state->subpass_idx];
+
+ bool has_stores = false;
+ bool use_global_zs_clear = false;
+ bool use_global_rt_clear = false;
+
+ /* FIXME: separate stencil */
+ uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
+ if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
+ const struct v3dv_render_pass_attachment *ds_attachment =
+ &state->pass->attachments[ds_attachment_idx];
+
+ assert(state->job->first_subpass >= ds_attachment->first_subpass);
+ assert(state->subpass_idx >= ds_attachment->first_subpass);
+ assert(state->subpass_idx <= ds_attachment->last_subpass);
+
+ /* From the Vulkan spec, VkImageSubresourceRange:
+ *
+ * "When an image view of a depth/stencil image is used as a
+ * depth/stencil framebuffer attachment, the aspectMask is ignored
+ * and both depth and stencil image subresources are used."
+ *
+ * So we ignore the aspects from the subresource range of the image
+ * view for the depth/stencil attachment, but we still need to restrict
+    * them to aspects compatible with the render pass and the image.
+ */
+ const VkImageAspectFlags aspects =
+ vk_format_aspects(ds_attachment->desc.format);
+
+ /* Only clear once on the first subpass that uses the attachment */
+ bool needs_depth_clear =
+ check_needs_clear(state,
+ aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.loadOp,
+ subpass->do_depth_clear_with_draw);
+
+ bool needs_stencil_clear =
+ check_needs_clear(state,
+ aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.stencilLoadOp,
+ subpass->do_stencil_clear_with_draw);
+
+ /* Skip the last store if it is not required */
+ bool needs_depth_store =
+ check_needs_store(state,
+ aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
+ ds_attachment->last_subpass,
+ ds_attachment->desc.storeOp);
+
+ bool needs_stencil_store =
+ check_needs_store(state,
+ aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
+ ds_attachment->last_subpass,
+ ds_attachment->desc.stencilStoreOp);
+
+ /* GFXH-1689: The per-buffer store command's clear buffer bit is broken
+ * for depth/stencil.
+ *
+ * There used to be some confusion regarding the Clear Tile Buffers
+ * Z/S bit also being broken, but we confirmed with Broadcom that this
+ * is not the case, it was just that some other hardware bugs (that we
+ * need to work around, such as GFXH-1461) could cause this bit to behave
+ * incorrectly.
+ *
+ * There used to be another issue where the RTs bit in the Clear Tile
+ * Buffers packet also cleared Z/S, but Broadcom confirmed this is
+ * fixed since V3D 4.1.
+ *
+ * So if we have to emit a clear of depth or stencil we don't use
+ * the per-buffer store clear bit, even if we need to store the buffers,
+ * instead we always have to use the Clear Tile Buffers Z/S bit.
+ * If we have configured the job to do early Z/S clearing, then we
+ * don't want to emit any Clear Tile Buffers command at all here.
+ *
+ * Note that GFXH-1689 is not reproduced in the simulator, where
+ * using the clear buffer bit in depth/stencil stores works fine.
+ */
+ use_global_zs_clear = !state->job->early_zs_clear &&
+ (needs_depth_clear || needs_stencil_clear);
+ if (needs_depth_store || needs_stencil_store) {
+ const uint32_t zs_buffer =
+ v3dv_zs_buffer(needs_depth_store, needs_stencil_store);
+ cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
+ ds_attachment_idx, layer,
+ zs_buffer, false, false);
+ has_stores = true;
+ }
+ }
+
+ for (uint32_t i = 0; i < subpass->color_count; i++) {
+ uint32_t attachment_idx = subpass->color_attachments[i].attachment;
+
+ if (attachment_idx == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ const struct v3dv_render_pass_attachment *attachment =
+ &state->pass->attachments[attachment_idx];
+
+ assert(state->job->first_subpass >= attachment->first_subpass);
+ assert(state->subpass_idx >= attachment->first_subpass);
+ assert(state->subpass_idx <= attachment->last_subpass);
+
+ /* Only clear once on the first subpass that uses the attachment */
+ bool needs_clear =
+ check_needs_clear(state,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ attachment->first_subpass,
+ attachment->desc.loadOp,
+ false);
+
+ /* Skip the last store if it is not required */
+ bool needs_store =
+ check_needs_store(state,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ attachment->last_subpass,
+ attachment->desc.storeOp);
+
+ /* If we need to resolve this attachment emit that store first. Notice
+ * that we must not request a tile buffer clear here in that case, since
+ * that would clear the tile buffer before we get to emit the actual
+       * color attachment store below, because the clear happens after the
+       * store is completed.
+ *
+ * If the attachment doesn't support TLB resolves then we will have to
+ * fallback to doing the resolve in a shader separately after this
+    * job, so we will need to store the multisampled attachment even if that
+    * wasn't requested by the client.
+ */
+ const bool needs_resolve =
+ subpass->resolve_attachments &&
+ subpass->resolve_attachments[i].attachment != VK_ATTACHMENT_UNUSED;
+ if (needs_resolve && attachment->use_tlb_resolve) {
+ const uint32_t resolve_attachment_idx =
+ subpass->resolve_attachments[i].attachment;
+ cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
+ resolve_attachment_idx, layer,
+ RENDER_TARGET_0 + i,
+ false, true);
+ has_stores = true;
+ } else if (needs_resolve) {
+ needs_store = true;
+ }
+
+ /* Emit the color attachment store if needed */
+ if (needs_store) {
+ cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
+ attachment_idx, layer,
+ RENDER_TARGET_0 + i,
+ needs_clear && !use_global_rt_clear,
+ false);
+ has_stores = true;
+ } else if (needs_clear) {
+ use_global_rt_clear = true;
+ }
+ }
+
+ /* We always need to emit at least one dummy store */
+ if (!has_stores) {
+ cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = NONE;
+ }
+ }
+
+ /* If we have any depth/stencil clears we can't use the per-buffer clear
+ * bit and instead we have to emit a single clear of all tile buffers.
+ */
+ if (use_global_zs_clear || use_global_rt_clear) {
+ cl_emit(cl, CLEAR_TILE_BUFFERS, clear) {
+ clear.clear_z_stencil_buffer = use_global_zs_clear;
+ clear.clear_all_render_targets = use_global_rt_clear;
+ }
+ }
+}
+
+static void
+cmd_buffer_render_pass_emit_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t layer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ /* Emit the generic list in our indirect state -- the rcl will just
+ * have pointers into it.
+ */
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ cmd_buffer_render_pass_emit_loads(cmd_buffer, cl, layer);
+
+ /* The binner starts out writing tiles assuming that the initial mode
+ * is triangles, so make sure that's the case.
+ */
+ cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
+ fmt.primitive_type = LIST_TRIANGLES;
+ }
+
+   /* The PTB assumes this value is 0, but the HW will not set it. */
+ cl_emit(cl, SET_INSTANCEID, set) {
+ set.instance_id = 0;
+ }
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ cmd_buffer_render_pass_emit_stores(cmd_buffer, cl, layer);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+cmd_buffer_emit_render_pass_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t layer)
+{
+ const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ struct v3dv_cl *rcl = &job->rcl;
+
+ /* If doing multicore binning, we would need to initialize each
+ * core's tile list here.
+ */
+ const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
+ const uint32_t tile_alloc_offset =
+ 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
+ cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
+ list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
+ }
+
+ cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
+ config.number_of_bin_tile_lists = 1;
+ config.total_frame_width_in_tiles = tiling->draw_tiles_x;
+ config.total_frame_height_in_tiles = tiling->draw_tiles_y;
+
+ config.supertile_width_in_tiles = tiling->supertile_width;
+ config.supertile_height_in_tiles = tiling->supertile_height;
+
+ config.total_frame_width_in_supertiles =
+ tiling->frame_width_in_supertiles;
+ config.total_frame_height_in_supertiles =
+ tiling->frame_height_in_supertiles;
+ }
+
+ /* Start by clearing the tile buffer. */
+ cl_emit(rcl, TILE_COORDINATES, coords) {
+ coords.tile_column_number = 0;
+ coords.tile_row_number = 0;
+ }
+
+ /* Emit an initial clear of the tile buffers. This is necessary
+ * for any buffers that should be cleared (since clearing
+ * normally happens at the *end* of the generic tile list), but
+ * it's also nice to clear everything so the first tile doesn't
+ * inherit any contents from some previous frame.
+ *
+ * Also, implement the GFXH-1742 workaround. There's a race in
+ * the HW between the RCL updating the TLB's internal type/size
+ * and the spawning of the QPU instances using the TLB's current
+ * internal type/size. To make sure the QPUs get the right
+ * state, we need 1 dummy store in between internal type/size
+ * changes on V3D 3.x, and 2 dummy stores on 4.x.
+ */
+ for (int i = 0; i < 2; i++) {
+ if (i > 0)
+ cl_emit(rcl, TILE_COORDINATES, coords);
+ cl_emit(rcl, END_OF_LOADS, end);
+ cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = NONE;
+ }
+ if (i == 0 && cmd_buffer->state.tile_aligned_render_area) {
+ cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
+ clear.clear_z_stencil_buffer = !job->early_zs_clear;
+ clear.clear_all_render_targets = true;
+ }
+ }
+ cl_emit(rcl, END_OF_TILE_MARKER, end);
+ }
+
+ cl_emit(rcl, FLUSH_VCD_CACHE, flush);
+
+ cmd_buffer_render_pass_emit_per_tile_rcl(cmd_buffer, layer);
+
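+   /* Only emit supertile coordinates for the supertiles covered by the
+    * render area.
+    */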
+ uint32_t supertile_w_in_pixels =
+ tiling->tile_width * tiling->supertile_width;
+ uint32_t supertile_h_in_pixels =
+ tiling->tile_height * tiling->supertile_height;
+ const uint32_t min_x_supertile =
+ state->render_area.offset.x / supertile_w_in_pixels;
+ const uint32_t min_y_supertile =
+ state->render_area.offset.y / supertile_h_in_pixels;
+
+ uint32_t max_render_x = state->render_area.offset.x;
+ if (state->render_area.extent.width > 0)
+ max_render_x += state->render_area.extent.width - 1;
+ uint32_t max_render_y = state->render_area.offset.y;
+ if (state->render_area.extent.height > 0)
+ max_render_y += state->render_area.extent.height - 1;
+ const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
+ const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;
+
+ for (int y = min_y_supertile; y <= max_y_supertile; y++) {
+ for (int x = min_x_supertile; x <= max_x_supertile; x++) {
+ cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
+ coords.column_number_in_supertiles = x;
+ coords.row_number_in_supertiles = y;
+ }
+ }
+ }
+}
+
+static void
+set_rcl_early_z_config(struct v3dv_job *job,
+ bool *early_z_disable,
+ uint32_t *early_z_test_and_update_direction)
+{
+ /* If this is true then we have not emitted any draw calls in this job
+    * and we don't get any benefit from early Z.
+ */
+ if (!job->decided_global_ez_enable) {
+ assert(job->draw_count == 0);
+ *early_z_disable = true;
+ return;
+ }
+
+ switch (job->first_ez_state) {
+ case V3D_EZ_UNDECIDED:
+ case V3D_EZ_LT_LE:
+ *early_z_disable = false;
+ *early_z_test_and_update_direction = EARLY_Z_DIRECTION_LT_LE;
+ break;
+ case V3D_EZ_GT_GE:
+ *early_z_disable = false;
+ *early_z_test_and_update_direction = EARLY_Z_DIRECTION_GT_GE;
+ break;
+ case V3D_EZ_DISABLED:
+ *early_z_disable = true;
+ break;
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ const struct v3dv_framebuffer *framebuffer = state->framebuffer;
+
+ /* We can't emit the RCL until we have a framebuffer, which we may not have
+ * if we are recording a secondary command buffer. In that case, we will
+ * have to wait until vkCmdExecuteCommands is called from a primary command
+ * buffer.
+ */
+ if (!framebuffer) {
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ return;
+ }
+
+ const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
+
+ const uint32_t fb_layers = framebuffer->layers;
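+   /* Reserve space for the fixed-size frame config plus up to 256 supertile
+    * coordinate packets per layer.
+    */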
+ v3dv_cl_ensure_space_with_branch(&job->rcl, 200 +
+ MAX2(fb_layers, 1) * 256 *
+ cl_packet_length(SUPERTILE_COORDINATES));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ assert(state->subpass_idx < state->pass->subpass_count);
+ const struct v3dv_render_pass *pass = state->pass;
+ const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
+ struct v3dv_cl *rcl = &job->rcl;
+
+   /* Common config must be the first TILE_RENDERING_MODE_CFG and
+ * Z_STENCIL_CLEAR_VALUES must be last. The ones in between are optional
+ * updates to the previous HW state.
+ */
+ bool do_early_zs_clear = false;
+ const uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
+ config.image_width_pixels = framebuffer->width;
+ config.image_height_pixels = framebuffer->height;
+ config.number_of_render_targets = MAX2(subpass->color_count, 1);
+ config.multisample_mode_4x = tiling->msaa;
+ config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
+
+ if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
+ const struct v3dv_image_view *iview =
+ framebuffer->attachments[ds_attachment_idx];
+ config.internal_depth_type = iview->internal_type;
+
+ set_rcl_early_z_config(job,
+ &config.early_z_disable,
+ &config.early_z_test_and_update_direction);
+
+ /* Early-Z/S clear can be enabled if the job is clearing and not
+ * storing (or loading) depth. If a stencil aspect is also present
+       * we have the same requirements for it; however, in this case we
+       * can accept stencil loadOp DONT_CARE as well, so instead of
+       * checking that stencil is cleared we check that it is not loaded.
+ *
+ * Early-Z/S clearing is independent of Early Z/S testing, so it is
+ * possible to enable one but not the other so long as their
+ * respective requirements are met.
+ */
+ struct v3dv_render_pass_attachment *ds_attachment =
+ &pass->attachments[ds_attachment_idx];
+
+ const VkImageAspectFlags ds_aspects =
+ vk_format_aspects(ds_attachment->desc.format);
+
+ bool needs_depth_clear =
+ check_needs_clear(state,
+ ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.loadOp,
+ subpass->do_depth_clear_with_draw);
+
+ bool needs_depth_store =
+ check_needs_store(state,
+ ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
+ ds_attachment->last_subpass,
+ ds_attachment->desc.storeOp);
+
+ do_early_zs_clear = needs_depth_clear && !needs_depth_store;
+ if (do_early_zs_clear &&
+ vk_format_has_stencil(ds_attachment->desc.format)) {
+ bool needs_stencil_load =
+ check_needs_load(state,
+ ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.stencilLoadOp);
+
+ bool needs_stencil_store =
+ check_needs_store(state,
+ ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
+ ds_attachment->last_subpass,
+ ds_attachment->desc.stencilStoreOp);
+
+ do_early_zs_clear = !needs_stencil_load && !needs_stencil_store;
+ }
+
+ config.early_depth_stencil_clear = do_early_zs_clear;
+ } else {
+ config.early_z_disable = true;
+ }
+ }
+
+ /* If we enabled early Z/S clear, then we can't emit any "Clear Tile Buffers"
+ * commands with the Z/S bit set, so keep track of whether we enabled this
+ * in the job so we can skip these later.
+ */
+ job->early_zs_clear = do_early_zs_clear;
+
+ for (uint32_t i = 0; i < subpass->color_count; i++) {
+ uint32_t attachment_idx = subpass->color_attachments[i].attachment;
+ if (attachment_idx == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ struct v3dv_image_view *iview =
+ state->framebuffer->attachments[attachment_idx];
+
+ const struct v3dv_image *image = iview->image;
+ const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
+
+ const uint32_t *clear_color =
+ &state->attachments[attachment_idx].clear_value.color[0];
+
+ uint32_t clear_pad = 0;
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ int uif_block_height = v3d_utile_height(image->cpp) * 2;
+
+ uint32_t implicit_padded_height =
+ align(framebuffer->height, uif_block_height) / uif_block_height;
+
+ if (slice->padded_height_of_output_image_in_uif_blocks -
+ implicit_padded_height >= 15) {
+ clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
+ }
+ }
+
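+      /* The internal clear color is up to 128 bits and is split across up to
+       * three packets: PART1 carries the low 56 bits, PART2 the next 56 bits
+       * for 64bpp and wider targets, and PART3 the top 16 bits plus the UIF
+       * padding, when needed.
+       */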
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
+ clear.clear_color_low_32_bits = clear_color[0];
+ clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
+ clear.render_target_number = i;
+ };
+
+ if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
+ clear.clear_color_mid_low_32_bits =
+ ((clear_color[1] >> 24) | (clear_color[2] << 8));
+ clear.clear_color_mid_high_24_bits =
+ ((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
+ clear.render_target_number = i;
+ };
+ }
+
+ if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
+ clear.uif_padded_height_in_uif_blocks = clear_pad;
+ clear.clear_color_high_16_bits = clear_color[3] >> 16;
+ clear.render_target_number = i;
+ };
+ }
+ }
+
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 0, &rt.render_target_0_internal_bpp,
+ &rt.render_target_0_internal_type, &rt.render_target_0_clamp);
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 1, &rt.render_target_1_internal_bpp,
+ &rt.render_target_1_internal_type, &rt.render_target_1_clamp);
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 2, &rt.render_target_2_internal_bpp,
+ &rt.render_target_2_internal_type, &rt.render_target_2_clamp);
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 3, &rt.render_target_3_internal_bpp,
+ &rt.render_target_3_internal_type, &rt.render_target_3_clamp);
+ }
+
+ /* Ends rendering mode config. */
+ if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
+ clear.z_clear_value =
+ state->attachments[ds_attachment_idx].clear_value.z;
+ clear.stencil_clear_value =
+ state->attachments[ds_attachment_idx].clear_value.s;
+ };
+ } else {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
+ clear.z_clear_value = 1.0f;
+ clear.stencil_clear_value = 0;
+ };
+ }
+
+ /* Always set initial block size before the first branch, which needs
+ * to match the value from binning mode config.
+ */
+ cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
+ init.use_auto_chained_tile_lists = true;
+ init.size_of_first_block_in_chained_tile_lists =
+ TILE_ALLOCATION_BLOCK_SIZE_64B;
+ }
+
+ for (int layer = 0; layer < MAX2(1, fb_layers); layer++)
+ cmd_buffer_emit_render_pass_layer_rcl(cmd_buffer, layer);
+
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+void
+v3dX(cmd_buffer_emit_viewport)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
+   /* FIXME: right now we only support one viewport, so viewports[0] works
+    * for now; this would need to change if we allow multiple viewports.
+ */
+ float *vptranslate = dynamic->viewport.translate[0];
+ float *vpscale = dynamic->viewport.scale[0];
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ const uint32_t required_cl_size =
+ cl_packet_length(CLIPPER_XY_SCALING) +
+ cl_packet_length(CLIPPER_Z_SCALE_AND_OFFSET) +
+ cl_packet_length(CLIPPER_Z_MIN_MAX_CLIPPING_PLANES) +
+ cl_packet_length(VIEWPORT_OFFSET);
+ v3dv_cl_ensure_space_with_branch(&job->bcl, required_cl_size);
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
+ clip.viewport_half_width_in_1_256th_of_pixel = vpscale[0] * 256.0f;
+ clip.viewport_half_height_in_1_256th_of_pixel = vpscale[1] * 256.0f;
+ }
+
+ cl_emit(&job->bcl, CLIPPER_Z_SCALE_AND_OFFSET, clip) {
+ clip.viewport_z_offset_zc_to_zs = vptranslate[2];
+ clip.viewport_z_scale_zc_to_zs = vpscale[2];
+ }
+ cl_emit(&job->bcl, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES, clip) {
+      /* Vulkan's Z NDC is [0..1], unlike OpenGL's [-1, 1] */
+ float z1 = vptranslate[2];
+ float z2 = vptranslate[2] + vpscale[2];
+ clip.minimum_zw = MIN2(z1, z2);
+ clip.maximum_zw = MAX2(z1, z2);
+ }
+
+ cl_emit(&job->bcl, VIEWPORT_OFFSET, vp) {
+ vp.viewport_centre_x_coordinate = vptranslate[0];
+ vp.viewport_centre_y_coordinate = vptranslate[1];
+ }
+
+ cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEWPORT;
+}
+
+void
+v3dX(cmd_buffer_emit_stencil)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ struct v3dv_dynamic_state *dynamic_state = &cmd_buffer->state.dynamic;
+
+ const uint32_t dynamic_stencil_states = V3DV_DYNAMIC_STENCIL_COMPARE_MASK |
+ V3DV_DYNAMIC_STENCIL_WRITE_MASK |
+ V3DV_DYNAMIC_STENCIL_REFERENCE;
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ 2 * cl_packet_length(STENCIL_CFG));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ bool emitted_stencil = false;
+ for (uint32_t i = 0; i < 2; i++) {
+ if (pipeline->emit_stencil_cfg[i]) {
+ if (dynamic_state->mask & dynamic_stencil_states) {
+ cl_emit_with_prepacked(&job->bcl, STENCIL_CFG,
+ pipeline->stencil_cfg[i], config) {
+ if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK) {
+ config.stencil_test_mask =
+ i == 0 ? dynamic_state->stencil_compare_mask.front :
+ dynamic_state->stencil_compare_mask.back;
+ }
+ if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK) {
+ config.stencil_write_mask =
+ i == 0 ? dynamic_state->stencil_write_mask.front :
+ dynamic_state->stencil_write_mask.back;
+ }
+ if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_REFERENCE) {
+ config.stencil_ref_value =
+ i == 0 ? dynamic_state->stencil_reference.front :
+ dynamic_state->stencil_reference.back;
+ }
+ }
+ } else {
+ cl_emit_prepacked(&job->bcl, &pipeline->stencil_cfg[i]);
+ }
+
+ emitted_stencil = true;
+ }
+ }
+
+ if (emitted_stencil) {
+ const uint32_t dynamic_stencil_dirty_flags =
+ V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
+ V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
+ V3DV_CMD_DIRTY_STENCIL_REFERENCE;
+ cmd_buffer->state.dirty &= ~dynamic_stencil_dirty_flags;
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_depth_bias)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ assert(pipeline);
+
+ if (!pipeline->depth_bias.enabled)
+ return;
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(DEPTH_OFFSET));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
+ cl_emit(&job->bcl, DEPTH_OFFSET, bias) {
+ bias.depth_offset_factor = dynamic->depth_bias.slope_factor;
+ bias.depth_offset_units = dynamic->depth_bias.constant_factor;
+ if (pipeline->depth_bias.is_z16)
+ bias.depth_offset_units *= 256.0f;
+ bias.limit = dynamic->depth_bias.depth_bias_clamp;
+ }
+
+ cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_DEPTH_BIAS;
+}
+
+void
+v3dX(cmd_buffer_emit_line_width)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(LINE_WIDTH));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, LINE_WIDTH, line) {
+ line.line_width = cmd_buffer->state.dynamic.line_width;
+ }
+
+ cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_LINE_WIDTH;
+}
+
+void
+v3dX(cmd_buffer_emit_sample_state)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ assert(pipeline);
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(SAMPLE_STATE));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, SAMPLE_STATE, state) {
+ state.coverage = 1.0f;
+ state.mask = pipeline->sample_mask;
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_blend)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ assert(pipeline);
+
+ const uint32_t blend_packets_size =
+ cl_packet_length(BLEND_ENABLES) +
+ cl_packet_length(BLEND_CONSTANT_COLOR) +
+ cl_packet_length(BLEND_CFG) * V3D_MAX_DRAW_BUFFERS +
+ cl_packet_length(COLOR_WRITE_MASKS);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, blend_packets_size);
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
+ if (pipeline->blend.enables) {
+ cl_emit(&job->bcl, BLEND_ENABLES, enables) {
+ enables.mask = pipeline->blend.enables;
+ }
+ }
+
+ for (uint32_t i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
+ if (pipeline->blend.enables & (1 << i))
+ cl_emit_prepacked(&job->bcl, &pipeline->blend.cfg[i]);
+ }
+
+ cl_emit(&job->bcl, COLOR_WRITE_MASKS, mask) {
+ mask.mask = pipeline->blend.color_write_masks;
+ }
+ }
+
+ if (pipeline->blend.needs_color_constants &&
+ cmd_buffer->state.dirty & V3DV_CMD_DIRTY_BLEND_CONSTANTS) {
+ struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
+ cl_emit(&job->bcl, BLEND_CONSTANT_COLOR, color) {
+ color.red_f16 = _mesa_float_to_half(dynamic->blend_constants[0]);
+ color.green_f16 = _mesa_float_to_half(dynamic->blend_constants[1]);
+ color.blue_f16 = _mesa_float_to_half(dynamic->blend_constants[2]);
+ color.alpha_f16 = _mesa_float_to_half(dynamic->blend_constants[3]);
+ }
+ cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_BLEND_CONSTANTS;
+ }
+}
+
+static void
+emit_flat_shade_flags(struct v3dv_job *job,
+ int varying_offset,
+ uint32_t varyings,
+ enum V3DX(Varying_Flags_Action) lower,
+ enum V3DX(Varying_Flags_Action) higher)
+{
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ cl_packet_length(FLAT_SHADE_FLAGS));
+ v3dv_return_if_oom(NULL, job);
+
+ cl_emit(&job->bcl, FLAT_SHADE_FLAGS, flags) {
+ flags.varying_offset_v0 = varying_offset;
+ flags.flat_shade_flags_for_varyings_v024 = varyings;
+ flags.action_for_flat_shade_flags_of_lower_numbered_varyings = lower;
+ flags.action_for_flat_shade_flags_of_higher_numbered_varyings = higher;
+ }
+}
+
+static void
+emit_noperspective_flags(struct v3dv_job *job,
+ int varying_offset,
+ uint32_t varyings,
+ enum V3DX(Varying_Flags_Action) lower,
+ enum V3DX(Varying_Flags_Action) higher)
+{
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ cl_packet_length(NON_PERSPECTIVE_FLAGS));
+ v3dv_return_if_oom(NULL, job);
+
+ cl_emit(&job->bcl, NON_PERSPECTIVE_FLAGS, flags) {
+ flags.varying_offset_v0 = varying_offset;
+ flags.non_perspective_flags_for_varyings_v024 = varyings;
+ flags.action_for_non_perspective_flags_of_lower_numbered_varyings = lower;
+ flags.action_for_non_perspective_flags_of_higher_numbered_varyings = higher;
+ }
+}
+
+static void
+emit_centroid_flags(struct v3dv_job *job,
+ int varying_offset,
+ uint32_t varyings,
+ enum V3DX(Varying_Flags_Action) lower,
+ enum V3DX(Varying_Flags_Action) higher)
+{
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ cl_packet_length(CENTROID_FLAGS));
+ v3dv_return_if_oom(NULL, job);
+
+ cl_emit(&job->bcl, CENTROID_FLAGS, flags) {
+ flags.varying_offset_v0 = varying_offset;
+ flags.centroid_flags_for_varyings_v024 = varyings;
+ flags.action_for_centroid_flags_of_lower_numbered_varyings = lower;
+ flags.action_for_centroid_flags_of_higher_numbered_varyings = higher;
+ }
+}
+
+static bool
+emit_varying_flags(struct v3dv_job *job,
+ uint32_t num_flags,
+ const uint32_t *flags,
+ void (*flag_emit_callback)(struct v3dv_job *job,
+ int varying_offset,
+ uint32_t flags,
+ enum V3DX(Varying_Flags_Action) lower,
+ enum V3DX(Varying_Flags_Action) higher))
+{
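+   /* The first packet we emit zeroes the flags of all varyings it does not
+    * cover so no stale state is left behind; subsequent packets leave the
+    * flags of other varyings unchanged.
+    */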
+ bool emitted_any = false;
+ for (int i = 0; i < num_flags; i++) {
+ if (!flags[i])
+ continue;
+
+ if (emitted_any) {
+ flag_emit_callback(job, i, flags[i],
+ V3D_VARYING_FLAGS_ACTION_UNCHANGED,
+ V3D_VARYING_FLAGS_ACTION_UNCHANGED);
+ } else if (i == 0) {
+ flag_emit_callback(job, i, flags[i],
+ V3D_VARYING_FLAGS_ACTION_UNCHANGED,
+ V3D_VARYING_FLAGS_ACTION_ZEROED);
+ } else {
+ flag_emit_callback(job, i, flags[i],
+ V3D_VARYING_FLAGS_ACTION_ZEROED,
+ V3D_VARYING_FLAGS_ACTION_ZEROED);
+ }
+
+ emitted_any = true;
+ }
+
+ return emitted_any;
+}
+
+void
+v3dX(cmd_buffer_emit_varyings_state)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+
+ struct v3d_fs_prog_data *prog_data_fs =
+ pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
+
+ const uint32_t num_flags =
+ ARRAY_SIZE(prog_data_fs->flat_shade_flags);
+ const uint32_t *flat_shade_flags = prog_data_fs->flat_shade_flags;
+ const uint32_t *noperspective_flags = prog_data_fs->noperspective_flags;
+ const uint32_t *centroid_flags = prog_data_fs->centroid_flags;
+
+ if (!emit_varying_flags(job, num_flags, flat_shade_flags,
+ emit_flat_shade_flags)) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(ZERO_ALL_FLAT_SHADE_FLAGS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, ZERO_ALL_FLAT_SHADE_FLAGS, flags);
+ }
+
+ if (!emit_varying_flags(job, num_flags, noperspective_flags,
+ emit_noperspective_flags)) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(ZERO_ALL_NON_PERSPECTIVE_FLAGS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, ZERO_ALL_NON_PERSPECTIVE_FLAGS, flags);
+ }
+
+ if (!emit_varying_flags(job, num_flags, centroid_flags,
+ emit_centroid_flags)) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(ZERO_ALL_CENTROID_FLAGS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, ZERO_ALL_CENTROID_FLAGS, flags);
+ }
+}
+
+static void
+job_update_ez_state(struct v3dv_job *job,
+ struct v3dv_pipeline *pipeline,
+ struct v3dv_cmd_buffer *cmd_buffer)
+{
+ /* If first_ez_state is V3D_EZ_DISABLED it means that we have already
+ * determined that we should disable EZ completely for all draw calls in
+ * this job. This will cause us to disable EZ for the entire job in the
+ * Tile Rendering Mode RCL packet and when we do that we need to make sure
+ * we never emit a draw call in the job with EZ enabled in the CFG_BITS
+    * packet, so ez_state must also be V3D_EZ_DISABLED.
+ */
+ if (job->first_ez_state == V3D_EZ_DISABLED) {
+ assert(job->ez_state == V3D_EZ_DISABLED);
+ return;
+ }
+
+   /* This is part of the pre-draw call handling, so we should be inside a
+ * render pass.
+ */
+ assert(cmd_buffer->state.pass);
+
+ /* If this is the first time we update EZ state for this job we first check
+ * if there is anything that requires disabling it completely for the entire
+ * job (based on state that is not related to the current draw call and
+ * pipeline state).
+ */
+ if (!job->decided_global_ez_enable) {
+ job->decided_global_ez_enable = true;
+
+ struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ assert(state->subpass_idx < state->pass->subpass_count);
+ struct v3dv_subpass *subpass = &state->pass->subpasses[state->subpass_idx];
+ if (subpass->ds_attachment.attachment == VK_ATTACHMENT_UNUSED) {
+ job->first_ez_state = V3D_EZ_DISABLED;
+ job->ez_state = V3D_EZ_DISABLED;
+ return;
+ }
+
+ /* GFXH-1918: the early-z buffer may load incorrect depth values
+ * if the frame has odd width or height.
+ *
+ * So we need to disable EZ in this case.
+ */
+ const struct v3dv_render_pass_attachment *ds_attachment =
+ &state->pass->attachments[subpass->ds_attachment.attachment];
+
+ const VkImageAspectFlags ds_aspects =
+ vk_format_aspects(ds_attachment->desc.format);
+
+ bool needs_depth_load =
+ check_needs_load(state,
+ ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
+ ds_attachment->first_subpass,
+ ds_attachment->desc.loadOp);
+
+ if (needs_depth_load) {
+ struct v3dv_framebuffer *fb = state->framebuffer;
+
+ if (!fb) {
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ perf_debug("Loading depth aspect in a secondary command buffer "
+ "without framebuffer info disables early-z tests.\n");
+ job->first_ez_state = V3D_EZ_DISABLED;
+ job->ez_state = V3D_EZ_DISABLED;
+ return;
+ }
+
+ if (((fb->width % 2) != 0 || (fb->height % 2) != 0)) {
+ perf_debug("Loading depth aspect for framebuffer with odd width "
+ "or height disables early-Z tests.\n");
+ job->first_ez_state = V3D_EZ_DISABLED;
+ job->ez_state = V3D_EZ_DISABLED;
+ return;
+ }
+ }
+ }
+
+ /* Otherwise, we can decide to selectively enable or disable EZ for draw
+ * calls using the CFG_BITS packet based on the bound pipeline state.
+ */
+
+ /* If the FS writes Z, then it may update against the chosen EZ direction */
+ struct v3dv_shader_variant *fs_variant =
+ pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
+ if (fs_variant->prog_data.fs->writes_z) {
+ job->ez_state = V3D_EZ_DISABLED;
+ return;
+ }
+
+ switch (pipeline->ez_state) {
+ case V3D_EZ_UNDECIDED:
+ /* If the pipeline didn't pick a direction but didn't disable, then go
+ * along with the current EZ state. This allows EZ optimization for Z
+ * func == EQUAL or NEVER.
+ */
+ break;
+
+ case V3D_EZ_LT_LE:
+ case V3D_EZ_GT_GE:
+ /* If the pipeline picked a direction, then it needs to match the current
+ * direction if we've decided on one.
+ */
+ if (job->ez_state == V3D_EZ_UNDECIDED)
+ job->ez_state = pipeline->ez_state;
+ else if (job->ez_state != pipeline->ez_state)
+ job->ez_state = V3D_EZ_DISABLED;
+ break;
+
+ case V3D_EZ_DISABLED:
+ /* If the pipeline disables EZ because of a bad Z func or stencil
+ * operation, then we can't do any more EZ in this frame.
+ */
+ job->ez_state = V3D_EZ_DISABLED;
+ break;
+ }
+
+ if (job->first_ez_state == V3D_EZ_UNDECIDED &&
+ job->ez_state != V3D_EZ_DISABLED) {
+ job->first_ez_state = job->ez_state;
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_configuration_bits)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ assert(pipeline);
+
+ job_update_ez_state(job, pipeline, cmd_buffer);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CFG_BITS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit_with_prepacked(&job->bcl, CFG_BITS, pipeline->cfg_bits, config) {
+ config.early_z_enable = job->ez_state != V3D_EZ_DISABLED;
+ config.early_z_updates_enable = config.early_z_enable &&
+ pipeline->z_updates_enable;
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_occlusion_query)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ cl_packet_length(OCCLUSION_QUERY_COUNTER));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
+ if (cmd_buffer->state.query.active_query.bo) {
+ counter.address =
+ v3dv_cl_address(cmd_buffer->state.query.active_query.bo,
+ cmd_buffer->state.query.active_query.offset);
+ }
+ }
+
+ cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
+}
+
+static struct v3dv_job *
+cmd_buffer_subpass_split_for_barrier(struct v3dv_cmd_buffer *cmd_buffer,
+ bool is_bcl_barrier)
+{
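+   /* Finish the current job and resume the same subpass in a new job that
+    * serializes against previously submitted jobs so the barrier is honored.
+    */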
+ assert(cmd_buffer->state.subpass_idx != -1);
+ v3dv_cmd_buffer_finish_job(cmd_buffer);
+ struct v3dv_job *job =
+ v3dv_cmd_buffer_subpass_resume(cmd_buffer,
+ cmd_buffer->state.subpass_idx);
+ if (!job)
+ return NULL;
+
+ job->serialize = true;
+ job->needs_bcl_sync = is_bcl_barrier;
+ return job;
+}
+
+static void
+cmd_buffer_copy_secondary_end_query_state(struct v3dv_cmd_buffer *primary,
+ struct v3dv_cmd_buffer *secondary)
+{
+ struct v3dv_cmd_buffer_state *p_state = &primary->state;
+ struct v3dv_cmd_buffer_state *s_state = &secondary->state;
+
+ const uint32_t total_state_count =
+ p_state->query.end.used_count + s_state->query.end.used_count;
+ v3dv_cmd_buffer_ensure_array_state(primary,
+ sizeof(struct v3dv_end_query_cpu_job_info),
+ total_state_count,
+ &p_state->query.end.alloc_count,
+ (void **) &p_state->query.end.states);
+ v3dv_return_if_oom(primary, NULL);
+
+ for (uint32_t i = 0; i < s_state->query.end.used_count; i++) {
+ const struct v3dv_end_query_cpu_job_info *s_qstate =
+ &secondary->state.query.end.states[i];
+
+ struct v3dv_end_query_cpu_job_info *p_qstate =
+ &p_state->query.end.states[p_state->query.end.used_count++];
+
+ p_qstate->pool = s_qstate->pool;
+ p_qstate->query = s_qstate->query;
+ }
+}
+
+void
+v3dX(cmd_buffer_execute_inside_pass)(struct v3dv_cmd_buffer *primary,
+ uint32_t cmd_buffer_count,
+ const VkCommandBuffer *cmd_buffers)
+{
+ assert(primary->state.job);
+
+ /* Emit occlusion query state if needed so the draw calls inside our
+ * secondaries update the counters.
+ */
+ bool has_occlusion_query =
+ primary->state.dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY;
+ if (has_occlusion_query)
+ v3dX(cmd_buffer_emit_occlusion_query)(primary);
+
+   /* FIXME: if our primary job tiling doesn't enable MSAA but any of the
+ * pipelines used by the secondaries do, we need to re-start the primary
+ * job to enable MSAA. See cmd_buffer_restart_job_for_msaa_if_needed.
+ */
+ bool pending_barrier = false;
+ bool pending_bcl_barrier = false;
+ for (uint32_t i = 0; i < cmd_buffer_count; i++) {
+ V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
+
+ assert(secondary->usage_flags &
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
+
+ list_for_each_entry(struct v3dv_job, secondary_job,
+ &secondary->jobs, list_link) {
+ if (secondary_job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
+ /* If the job is a CL, then we branch to it from the primary BCL.
+ * In this case the secondary's BCL is finished with a
+ * RETURN_FROM_SUB_LIST command to return back to the primary BCL
+ * once we are done executing it.
+ */
+ assert(v3dv_cl_offset(&secondary_job->rcl) == 0);
+ assert(secondary_job->bcl.bo);
+
+ /* Sanity check that secondary BCL ends with RETURN_FROM_SUB_LIST */
+ STATIC_ASSERT(cl_packet_length(RETURN_FROM_SUB_LIST) == 1);
+ assert(v3dv_cl_offset(&secondary_job->bcl) >= 1);
+ assert(*(((uint8_t *)secondary_job->bcl.next) - 1) ==
+ V3DX(RETURN_FROM_SUB_LIST_opcode));
+
+ /* If this secondary has any barriers (or we had any pending barrier
+ * to apply), then we can't just branch to it from the primary, we
+ * need to split the primary to create a new job that can consume
+ * the barriers first.
+ *
+ * FIXME: in this case, maybe just copy the secondary BCL without
+ * the RETURN_FROM_SUB_LIST into the primary job to skip the
+ * branch?
+ */
+ struct v3dv_job *primary_job = primary->state.job;
+ if (!primary_job || secondary_job->serialize || pending_barrier) {
+ const bool needs_bcl_barrier =
+ secondary_job->needs_bcl_sync || pending_bcl_barrier;
+ primary_job =
+ cmd_buffer_subpass_split_for_barrier(primary,
+ needs_bcl_barrier);
+ v3dv_return_if_oom(primary, NULL);
+
+ /* Since we have created a new primary we need to re-emit
+ * occlusion query state.
+ */
+ if (has_occlusion_query)
+ v3dX(cmd_buffer_emit_occlusion_query)(primary);
+ }
+
+ /* Make sure our primary job has all required BO references */
+ set_foreach(secondary_job->bos, entry) {
+ struct v3dv_bo *bo = (struct v3dv_bo *)entry->key;
+ v3dv_job_add_bo(primary_job, bo);
+ }
+
+ /* Emit required branch instructions. We expect each of these
+ * to end with a corresponding 'return from sub list' item.
+ */
+ list_for_each_entry(struct v3dv_bo, bcl_bo,
+ &secondary_job->bcl.bo_list, list_link) {
+ v3dv_cl_ensure_space_with_branch(&primary_job->bcl,
+ cl_packet_length(BRANCH_TO_SUB_LIST));
+ v3dv_return_if_oom(primary, NULL);
+ cl_emit(&primary_job->bcl, BRANCH_TO_SUB_LIST, branch) {
+ branch.address = v3dv_cl_address(bcl_bo, 0);
+ }
+ }
+
+ primary_job->tmu_dirty_rcl |= secondary_job->tmu_dirty_rcl;
+ } else if (secondary_job->type == V3DV_JOB_TYPE_CPU_CLEAR_ATTACHMENTS) {
+ if (pending_barrier) {
+ cmd_buffer_subpass_split_for_barrier(primary, pending_bcl_barrier);
+ v3dv_return_if_oom(primary, NULL);
+ }
+
+ const struct v3dv_clear_attachments_cpu_job_info *info =
+ &secondary_job->cpu.clear_attachments;
+ v3dv_CmdClearAttachments(v3dv_cmd_buffer_to_handle(primary),
+ info->attachment_count,
+ info->attachments,
+ info->rect_count,
+ info->rects);
+ } else {
+ /* This is a regular job (CPU or GPU), so just finish the current
+ * primary job (if any) and then add the secondary job to the
+ * primary's job list right after it.
+ */
+ v3dv_cmd_buffer_finish_job(primary);
+ v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
+ if (pending_barrier) {
+ secondary_job->serialize = true;
+ if (pending_bcl_barrier)
+ secondary_job->needs_bcl_sync = true;
+ }
+ }
+
+ pending_barrier = false;
+ pending_bcl_barrier = false;
+ }
+
+ /* If the secondary has recorded any vkCmdEndQuery commands, we need to
+ * copy this state to the primary so it is processed properly when the
+ * current primary job is finished.
+ */
+ cmd_buffer_copy_secondary_end_query_state(primary, secondary);
+
+ /* If this secondary had any pending barrier state we will need that
+ * barrier state consumed with whatever comes next in the primary.
+ */
+ assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
+ pending_barrier = secondary->state.has_barrier;
+ pending_bcl_barrier = secondary->state.has_bcl_barrier;
+ }
+
+ if (pending_barrier) {
+ primary->state.has_barrier = true;
+ primary->state.has_bcl_barrier |= pending_bcl_barrier;
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_gl_shader_state)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ struct v3dv_pipeline *pipeline = state->gfx.pipeline;
+ assert(pipeline);
+
+ struct v3d_vs_prog_data *prog_data_vs =
+ pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX]->prog_data.vs;
+ struct v3d_vs_prog_data *prog_data_vs_bin =
+ pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN]->prog_data.vs;
+ struct v3d_fs_prog_data *prog_data_fs =
+ pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
+
+ /* Update the cache dirty flag based on the shader progs data */
+ job->tmu_dirty_rcl |= prog_data_vs_bin->base.tmu_dirty_rcl;
+ job->tmu_dirty_rcl |= prog_data_vs->base.tmu_dirty_rcl;
+ job->tmu_dirty_rcl |= prog_data_fs->base.tmu_dirty_rcl;
+
+ /* See GFXH-930 workaround below */
+ uint32_t num_elements_to_emit = MAX2(pipeline->va_count, 1);
+
+ uint32_t shader_rec_offset =
+ v3dv_cl_ensure_space(&job->indirect,
+ cl_packet_length(GL_SHADER_STATE_RECORD) +
+ num_elements_to_emit *
+ cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
+ 32);
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ struct v3dv_shader_variant *vs_variant =
+ pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
+ struct v3dv_shader_variant *vs_bin_variant =
+ pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
+ struct v3dv_shader_variant *fs_variant =
+ pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
+ struct v3dv_bo *assembly_bo = pipeline->shared_data->assembly_bo;
+
+ struct v3dv_bo *default_attribute_values =
+ pipeline->default_attribute_values != NULL ?
+ pipeline->default_attribute_values :
+ pipeline->device->default_attribute_float;
+
+ cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_RECORD,
+ pipeline->shader_state_record, shader) {
+
+      /* FIXME: we are setting these values here and during the
+       * prepacking. This is because both cl_emit_with_prepacked and v3dvx_pack
+       * assert on minimum values of these. It would be good to get
+       * v3dvx_pack to assert on the final value if possible.
+ */
+ shader.min_coord_shader_input_segments_required_in_play =
+ pipeline->vpm_cfg_bin.As;
+ shader.min_vertex_shader_input_segments_required_in_play =
+ pipeline->vpm_cfg.As;
+
+ shader.coordinate_shader_code_address =
+ v3dv_cl_address(assembly_bo, vs_bin_variant->assembly_offset);
+ shader.vertex_shader_code_address =
+ v3dv_cl_address(assembly_bo, vs_variant->assembly_offset);
+ shader.fragment_shader_code_address =
+ v3dv_cl_address(assembly_bo, fs_variant->assembly_offset);
+
+ shader.coordinate_shader_uniforms_address = cmd_buffer->state.uniforms.vs_bin;
+ shader.vertex_shader_uniforms_address = cmd_buffer->state.uniforms.vs;
+ shader.fragment_shader_uniforms_address = cmd_buffer->state.uniforms.fs;
+
+ shader.address_of_default_attribute_values =
+ v3dv_cl_address(default_attribute_values, 0);
+ }
+
+ /* Upload vertex element attributes (SHADER_STATE_ATTRIBUTE_RECORD) */
+ bool cs_loaded_any = false;
+ const bool cs_uses_builtins = prog_data_vs_bin->uses_iid ||
+ prog_data_vs_bin->uses_biid ||
+ prog_data_vs_bin->uses_vid;
+ const uint32_t packet_length =
+ cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
+
+ uint32_t emitted_va_count = 0;
+ for (uint32_t i = 0; emitted_va_count < pipeline->va_count; i++) {
+ assert(i < MAX_VERTEX_ATTRIBS);
+
+ if (pipeline->va[i].vk_format == VK_FORMAT_UNDEFINED)
+ continue;
+
+ const uint32_t binding = pipeline->va[i].binding;
+
+ /* We store each vertex attribute in the array using its driver location
+ * as index.
+ */
+ const uint32_t location = i;
+
+ struct v3dv_vertex_binding *c_vb = &cmd_buffer->state.vertex_bindings[binding];
+
+ cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD,
+ &pipeline->vertex_attrs[i * packet_length], attr) {
+
+ assert(c_vb->buffer->mem->bo);
+ attr.address = v3dv_cl_address(c_vb->buffer->mem->bo,
+ c_vb->buffer->mem_offset +
+ pipeline->va[i].offset +
+ c_vb->offset);
+
+ attr.number_of_values_read_by_coordinate_shader =
+ prog_data_vs_bin->vattr_sizes[location];
+ attr.number_of_values_read_by_vertex_shader =
+ prog_data_vs->vattr_sizes[location];
+
+ /* GFXH-930: At least one attribute must be enabled and read by CS
+ * and VS. If we have attributes being consumed by the VS but not
+ * the CS, then set up a dummy load of the last attribute into the
+ * CS's VPM inputs. (Since CS is just dead-code-elimination compared
+ * to VS, we can't have CS loading but not VS).
+ *
+ * GFXH-1602: first attribute must be active if using builtins.
+ */
+ if (prog_data_vs_bin->vattr_sizes[location])
+ cs_loaded_any = true;
+
+ if (i == 0 && cs_uses_builtins && !cs_loaded_any) {
+ attr.number_of_values_read_by_coordinate_shader = 1;
+ cs_loaded_any = true;
+ } else if (i == pipeline->va_count - 1 && !cs_loaded_any) {
+ attr.number_of_values_read_by_coordinate_shader = 1;
+ cs_loaded_any = true;
+ }
+
+ attr.maximum_index = 0xffffff;
+ }
+
+ emitted_va_count++;
+ }
+
+ if (pipeline->va_count == 0) {
+ /* GFXH-930: At least one attribute must be enabled and read
+ * by CS and VS. If we have no attributes being consumed by
+ * the shader, set up a dummy to be loaded into the VPM.
+ */
+ cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
+ /* Valid address of data whose value will be unused. */
+ attr.address = v3dv_cl_address(job->indirect.bo, 0);
+
+ attr.type = ATTRIBUTE_FLOAT;
+ attr.stride = 0;
+ attr.vec_size = 1;
+
+ attr.number_of_values_read_by_coordinate_shader = 1;
+ attr.number_of_values_read_by_vertex_shader = 1;
+ }
+ }
+
+ if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ sizeof(pipeline->vcm_cache_size));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit_prepacked(&job->bcl, &pipeline->vcm_cache_size);
+ }
+
+ v3dv_cl_ensure_space_with_branch(&job->bcl,
+ cl_packet_length(GL_SHADER_STATE));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, GL_SHADER_STATE, state) {
+ state.address = v3dv_cl_address(job->indirect.bo,
+ shader_rec_offset);
+ state.number_of_attribute_arrays = num_elements_to_emit;
+ }
+
+ cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_VERTEX_BUFFER |
+ V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
+ V3DV_CMD_DIRTY_PUSH_CONSTANTS);
+ cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
+ cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
+}
+
+/* FIXME: C&P from v3dx_draw. Refactor to common place? */
+static uint32_t
+v3d_hw_prim_type(enum pipe_prim_type prim_type)
+{
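+   /* The HW primitive encoding matches pipe_prim_type for the basic
+    * point/line/triangle types; adjacency variants are encoded starting
+    * at 8.
+    */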
+ switch (prim_type) {
+ case PIPE_PRIM_POINTS:
+ case PIPE_PRIM_LINES:
+ case PIPE_PRIM_LINE_LOOP:
+ case PIPE_PRIM_LINE_STRIP:
+ case PIPE_PRIM_TRIANGLES:
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ case PIPE_PRIM_TRIANGLE_FAN:
+ return prim_type;
+
+ case PIPE_PRIM_LINES_ADJACENCY:
+ case PIPE_PRIM_LINE_STRIP_ADJACENCY:
+ case PIPE_PRIM_TRIANGLES_ADJACENCY:
+ case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
+ return 8 + (prim_type - PIPE_PRIM_LINES_ADJACENCY);
+
+ default:
+ unreachable("Unsupported primitive type");
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_draw)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_draw_info *info)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ struct v3dv_pipeline *pipeline = state->gfx.pipeline;
+
+ assert(pipeline);
+
+ uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
+
+ if (info->first_instance > 0) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
+ base.base_instance = info->first_instance;
+ base.base_vertex = 0;
+ }
+ }
+
+ if (info->instance_count > 1) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(VERTEX_ARRAY_INSTANCED_PRIMS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
+ prim.mode = hw_prim_type;
+ prim.index_of_first_vertex = info->first_vertex;
+ prim.number_of_instances = info->instance_count;
+ prim.instance_length = info->vertex_count;
+ }
+ } else {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(VERTEX_ARRAY_PRIMS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+ cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
+ prim.mode = hw_prim_type;
+ prim.length = info->vertex_count;
+ prim.index_of_first_vertex = info->first_vertex;
+ }
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_index_buffer)(struct v3dv_cmd_buffer *cmd_buffer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ /* We flag all state as dirty when we create a new job so make sure we
+ * have a valid index buffer before attempting to emit state for it.
+ */
+ struct v3dv_buffer *ibuffer =
+ v3dv_buffer_from_handle(cmd_buffer->state.index_buffer.buffer);
+ if (ibuffer) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(INDEX_BUFFER_SETUP));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ const uint32_t offset = cmd_buffer->state.index_buffer.offset;
+ cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
+ ib.address = v3dv_cl_address(ibuffer->mem->bo,
+ ibuffer->mem_offset + offset);
+ ib.size = ibuffer->mem->bo->size;
+ }
+ }
+
+ cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_INDEX_BUFFER;
+}
+
+void
+v3dX(cmd_buffer_emit_draw_indexed)(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance)
+{
+ v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
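+   /* index_size is in bytes (2 or 4, or 1 for 8-bit indices), so ffs() - 1
+    * yields the HW index type encoding (0 = 8-bit, 1 = 16-bit, 2 = 32-bit).
+    */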
+ uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
+ uint32_t index_offset = firstIndex * cmd_buffer->state.index_buffer.index_size;
+
+ if (vertexOffset != 0 || firstInstance != 0) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
+ base.base_instance = firstInstance;
+ base.base_vertex = vertexOffset;
+ }
+ }
+
+ if (instanceCount == 1) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(INDEXED_PRIM_LIST));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
+ prim.index_type = index_type;
+ prim.length = indexCount;
+ prim.index_offset = index_offset;
+ prim.mode = hw_prim_type;
+ prim.enable_primitive_restarts = pipeline->primitive_restart;
+ }
+ } else if (instanceCount > 1) {
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(INDEXED_INSTANCED_PRIM_LIST));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
+ prim.index_type = index_type;
+ prim.index_offset = index_offset;
+ prim.mode = hw_prim_type;
+ prim.enable_primitive_restarts = pipeline->primitive_restart;
+ prim.number_of_instances = instanceCount;
+ prim.instance_length = indexCount;
+ }
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_draw_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_buffer *buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
+
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
+ prim.mode = hw_prim_type;
+ prim.number_of_draw_indirect_array_records = drawCount;
+ prim.stride_in_multiples_of_4_bytes = stride >> 2;
+ prim.address = v3dv_cl_address(buffer->mem->bo,
+ buffer->mem_offset + offset);
+ }
+}
+
+void
+v3dX(cmd_buffer_emit_indexed_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_buffer *buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+ uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
+ uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
+
+ v3dv_cl_ensure_space_with_branch(
+ &job->bcl, cl_packet_length(INDIRECT_INDEXED_INSTANCED_PRIM_LIST));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
+ prim.index_type = index_type;
+ prim.mode = hw_prim_type;
+ prim.enable_primitive_restarts = pipeline->primitive_restart;
+ prim.number_of_draw_indirect_indexed_records = drawCount;
+ prim.stride_in_multiples_of_4_bytes = stride >> 2;
+ prim.address = v3dv_cl_address(buffer->mem->bo,
+ buffer->mem_offset + offset);
+ }
+}
+
+void
+v3dX(cmd_buffer_render_pass_setup_render_target)(struct v3dv_cmd_buffer *cmd_buffer,
+ int rt,
+ uint32_t *rt_bpp,
+ uint32_t *rt_type,
+ uint32_t *rt_clamp)
+{
+ const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+
+ assert(state->subpass_idx < state->pass->subpass_count);
+ const struct v3dv_subpass *subpass =
+ &state->pass->subpasses[state->subpass_idx];
+
+ if (rt >= subpass->color_count)
+ return;
+
+ struct v3dv_subpass_attachment *attachment = &subpass->color_attachments[rt];
+ const uint32_t attachment_idx = attachment->attachment;
+ if (attachment_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ const struct v3dv_framebuffer *framebuffer = state->framebuffer;
+ assert(attachment_idx < framebuffer->attachment_count);
+ struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
+ assert(iview->aspects & VK_IMAGE_ASPECT_COLOR_BIT);
+
+ *rt_bpp = iview->internal_bpp;
+ *rt_type = iview->internal_type;
+ if (vk_format_is_int(iview->vk_format))
+ *rt_clamp = V3D_RENDER_TARGET_CLAMP_INT;
+ else if (vk_format_is_srgb(iview->vk_format))
+ *rt_clamp = V3D_RENDER_TARGET_CLAMP_NORM;
+ else
+ *rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
+}
diff --git a/src/broadcom/vulkan/v3dvx_device.c b/src/broadcom/vulkan/v3dvx_device.c
index 3cd4870a597..c1518f53c35 100644
--- a/src/broadcom/vulkan/v3dvx_device.c
+++ b/src/broadcom/vulkan/v3dvx_device.c
@@ -26,6 +26,8 @@
#include "broadcom/common/v3d_macros.h"
#include "broadcom/cle/v3dx_pack.h"
#include "broadcom/compiler/v3d_compiler.h"
+#include "vk_format_info.h"
+#include "util/u_pack_color.h"
static const enum V3DX(Wrap_Mode) vk_to_v3d_wrap_mode[] = {
[VK_SAMPLER_ADDRESS_MODE_REPEAT] = V3D_WRAP_MODE_REPEAT,
@@ -180,3 +182,56 @@ v3dX(framebuffer_compute_internal_bpp_msaa)(
return;
}
+
+uint32_t
+v3dX(zs_buffer_from_aspect_bits)(VkImageAspectFlags aspects)
+{
+ const VkImageAspectFlags zs_aspects =
+ VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+ const VkImageAspectFlags filtered_aspects = aspects & zs_aspects;
+
+ if (filtered_aspects == zs_aspects)
+ return ZSTENCIL;
+ else if (filtered_aspects == VK_IMAGE_ASPECT_DEPTH_BIT)
+ return Z;
+ else if (filtered_aspects == VK_IMAGE_ASPECT_STENCIL_BIT)
+ return STENCIL;
+ else
+ return NONE;
+}
+
+void
+v3dX(get_hw_clear_color)(const VkClearColorValue *color,
+ uint32_t internal_type,
+ uint32_t internal_size,
+ uint32_t *hw_color)
+{
+ union util_color uc;
+ switch (internal_type) {
+ case V3D_INTERNAL_TYPE_8:
+ util_pack_color(color->float32, PIPE_FORMAT_R8G8B8A8_UNORM, &uc);
+ memcpy(hw_color, uc.ui, internal_size);
+ break;
+ case V3D_INTERNAL_TYPE_8I:
+ case V3D_INTERNAL_TYPE_8UI:
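+      /* Pack the four 8-bit integer channels into a single 32-bit word */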
+ hw_color[0] = ((color->uint32[0] & 0xff) |
+ (color->uint32[1] & 0xff) << 8 |
+ (color->uint32[2] & 0xff) << 16 |
+ (color->uint32[3] & 0xff) << 24);
+ break;
+ case V3D_INTERNAL_TYPE_16F:
+ util_pack_color(color->float32, PIPE_FORMAT_R16G16B16A16_FLOAT, &uc);
+ memcpy(hw_color, uc.ui, internal_size);
+ break;
+ case V3D_INTERNAL_TYPE_16I:
+ case V3D_INTERNAL_TYPE_16UI:
+ hw_color[0] = ((color->uint32[0] & 0xffff) | color->uint32[1] << 16);
+ hw_color[1] = ((color->uint32[2] & 0xffff) | color->uint32[3] << 16);
+ break;
+ case V3D_INTERNAL_TYPE_32F:
+ case V3D_INTERNAL_TYPE_32I:
+ case V3D_INTERNAL_TYPE_32UI:
+ memcpy(hw_color, color->uint32, internal_size);
+ break;
+ }
+}
diff --git a/src/broadcom/vulkan/v3dvx_formats.c b/src/broadcom/vulkan/v3dvx_formats.c
index c5570a2d919..22ba7e91f19 100644
--- a/src/broadcom/vulkan/v3dvx_formats.c
+++ b/src/broadcom/vulkan/v3dvx_formats.c
@@ -402,3 +402,64 @@ v3dX(tfu_supports_tex_format)(uint32_t tex_format)
return false;
}
}
+
+uint8_t
+v3dX(get_internal_depth_type)(VkFormat format)
+{
+ switch (format) {
+ case VK_FORMAT_D16_UNORM:
+ return V3D_INTERNAL_TYPE_DEPTH_16;
+ case VK_FORMAT_D32_SFLOAT:
+ return V3D_INTERNAL_TYPE_DEPTH_32F;
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ return V3D_INTERNAL_TYPE_DEPTH_24;
+ default:
+ unreachable("Invalid depth format");
+ break;
+ }
+}
+
+void
+v3dX(get_internal_type_bpp_for_image_aspects)(VkFormat vk_format,
+ VkImageAspectFlags aspect_mask,
+ uint32_t *internal_type,
+ uint32_t *internal_bpp)
+{
+ const VkImageAspectFlags ds_aspects = VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT;
+
+   /* We can't store depth/stencil pixel formats to a raster format, so
+    * instead we load our depth/stencil aspects to a compatible color
+    * format.
+    */
+ /* FIXME: pre-compute this at image creation time? */
+ if (aspect_mask & ds_aspects) {
+ switch (vk_format) {
+ case VK_FORMAT_D16_UNORM:
+ *internal_type = V3D_INTERNAL_TYPE_16UI;
+ *internal_bpp = V3D_INTERNAL_BPP_64;
+ break;
+ case VK_FORMAT_D32_SFLOAT:
+ *internal_type = V3D_INTERNAL_TYPE_32F;
+ *internal_bpp = V3D_INTERNAL_BPP_128;
+ break;
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ /* Use RGBA8 format so we can relocate the X/S bits in the appropriate
+ * place to match Vulkan expectations. See the comment on the tile
+ * load command for more details.
+ */
+ *internal_type = V3D_INTERNAL_TYPE_8UI;
+ *internal_bpp = V3D_INTERNAL_BPP_32;
+ break;
+ default:
+ assert(!"unsupported format");
+ break;
+ }
+ } else {
+ const struct v3dv_format *format = v3dX(get_format)(vk_format);
+ v3dX(get_internal_type_bpp_for_output_format)(format->rt_type,
+ internal_type, internal_bpp);
+ }
+}
diff --git a/src/broadcom/vulkan/v3dvx_meta_clear.c b/src/broadcom/vulkan/v3dvx_meta_clear.c
new file mode 100644
index 00000000000..01940896f59
--- /dev/null
+++ b/src/broadcom/vulkan/v3dvx_meta_clear.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright © 2021 Raspberry Pi
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "v3dv_private.h"
+#include "broadcom/common/v3d_macros.h"
+#include "broadcom/cle/v3dx_pack.h"
+#include "broadcom/compiler/v3d_compiler.h"
+
+static void
+emit_tlb_clear_store(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_cl *cl,
+ uint32_t attachment_idx,
+ uint32_t layer,
+ uint32_t buffer)
+{
+ const struct v3dv_image_view *iview =
+ cmd_buffer->state.framebuffer->attachments[attachment_idx];
+ const struct v3dv_image *image = iview->image;
+ const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
+ uint32_t layer_offset = v3dv_layer_offset(image,
+ iview->base_level,
+ iview->first_layer + layer);
+
+ cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = buffer;
+ store.address = v3dv_cl_address(image->mem->bo, layer_offset);
+ store.clear_buffer_being_stored = false;
+
+ store.output_image_format = iview->format->rt_type;
+ store.r_b_swap = iview->swap_rb;
+ store.memory_format = slice->tiling;
+
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ store.height_in_ub_or_stride =
+ slice->padded_height_of_output_image_in_uif_blocks;
+ } else if (slice->tiling == V3D_TILING_RASTER) {
+ store.height_in_ub_or_stride = slice->stride;
+ }
+
+ if (image->samples > VK_SAMPLE_COUNT_1_BIT)
+ store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
+ else
+ store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+static void
+emit_tlb_clear_stores(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_cl *cl,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t layer)
+{
+ struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ const struct v3dv_subpass *subpass =
+ &state->pass->subpasses[state->subpass_idx];
+
+ bool has_stores = false;
+ for (uint32_t i = 0; i < attachment_count; i++) {
+ uint32_t attachment_idx;
+ uint32_t buffer;
+ if (attachments[i].aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ attachment_idx = subpass->ds_attachment.attachment;
+ buffer = v3dX(zs_buffer_from_aspect_bits)(attachments[i].aspectMask);
+ } else {
+ uint32_t rt_idx = attachments[i].colorAttachment;
+ attachment_idx = subpass->color_attachments[rt_idx].attachment;
+ buffer = RENDER_TARGET_0 + rt_idx;
+ }
+
+ if (attachment_idx == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ has_stores = true;
+ emit_tlb_clear_store(cmd_buffer, cl, attachment_idx, layer, buffer);
+ }
+
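+   /* The per-tile list still needs a store packet, so if every attachment
+    * turned out to be unused emit a dummy store to nothing.
+    */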
+ if (!has_stores) {
+ cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = NONE;
+ }
+ }
+}
+
+static void
+emit_tlb_clear_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t layer)
+{
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ cl_emit(cl, END_OF_LOADS, end); /* Nothing to load */
+
+ cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
+ fmt.primitive_type = LIST_TRIANGLES;
+ }
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ emit_tlb_clear_stores(cmd_buffer, cl, attachment_count, attachments, layer);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_tlb_clear_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t layer)
+{
+ const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ const struct v3dv_framebuffer *framebuffer = state->framebuffer;
+
+ struct v3dv_job *job = cmd_buffer->state.job;
+ struct v3dv_cl *rcl = &job->rcl;
+
+ const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
+
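+   /* Each tile gets a 64-byte initial tile list block, so skip one layer's
+    * worth of tiles to find this layer's base address.
+    */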
+ const uint32_t tile_alloc_offset =
+ 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
+ cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
+ list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
+ }
+
+ cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
+ config.number_of_bin_tile_lists = 1;
+ config.total_frame_width_in_tiles = tiling->draw_tiles_x;
+ config.total_frame_height_in_tiles = tiling->draw_tiles_y;
+
+ config.supertile_width_in_tiles = tiling->supertile_width;
+ config.supertile_height_in_tiles = tiling->supertile_height;
+
+ config.total_frame_width_in_supertiles =
+ tiling->frame_width_in_supertiles;
+ config.total_frame_height_in_supertiles =
+ tiling->frame_height_in_supertiles;
+ }
+
+ /* Emit the clear and also the workaround for GFXH-1742 */
+ for (int i = 0; i < 2; i++) {
+ cl_emit(rcl, TILE_COORDINATES, coords);
+ cl_emit(rcl, END_OF_LOADS, end);
+ cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = NONE;
+ }
+ if (i == 0) {
+ cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
+ clear.clear_z_stencil_buffer = true;
+ clear.clear_all_render_targets = true;
+ }
+ }
+ cl_emit(rcl, END_OF_TILE_MARKER, end);
+ }
+
+ cl_emit(rcl, FLUSH_VCD_CACHE, flush);
+
+ emit_tlb_clear_per_tile_rcl(cmd_buffer, attachment_count, attachments, layer);
+
+ uint32_t supertile_w_in_pixels =
+ tiling->tile_width * tiling->supertile_width;
+ uint32_t supertile_h_in_pixels =
+ tiling->tile_height * tiling->supertile_height;
+
+ const uint32_t max_render_x = framebuffer->width - 1;
+ const uint32_t max_render_y = framebuffer->height - 1;
+ const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
+ const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;
+
+ for (int y = 0; y <= max_y_supertile; y++) {
+ for (int x = 0; x <= max_x_supertile; x++) {
+ cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
+ coords.column_number_in_supertiles = x;
+ coords.row_number_in_supertiles = y;
+ }
+ }
+ }
+}
+
+static void
+emit_tlb_clear_job(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t base_layer,
+ uint32_t layer_count)
+{
+ const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
+ const struct v3dv_framebuffer *framebuffer = state->framebuffer;
+ const struct v3dv_subpass *subpass =
+ &state->pass->subpasses[state->subpass_idx];
+ struct v3dv_job *job = cmd_buffer->state.job;
+ assert(job);
+
+ /* Check how many color attachments we have and also if we have a
+ * depth/stencil attachment.
+ */
+ uint32_t color_attachment_count = 0;
+ VkClearAttachment color_attachments[4];
+ const VkClearDepthStencilValue *ds_clear_value = NULL;
+ uint8_t internal_depth_type = V3D_INTERNAL_TYPE_DEPTH_32F;
+ for (uint32_t i = 0; i < attachment_count; i++) {
+ if (attachments[i].aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ assert(subpass->ds_attachment.attachment != VK_ATTACHMENT_UNUSED);
+ ds_clear_value = &attachments[i].clearValue.depthStencil;
+ struct v3dv_render_pass_attachment *att =
+ &state->pass->attachments[subpass->ds_attachment.attachment];
+ internal_depth_type = v3dX(get_internal_depth_type)(att->desc.format);
+ } else if (attachments[i].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
+ color_attachments[color_attachment_count++] = attachments[i];
+ }
+ }
+
+ uint8_t internal_bpp;
+ bool msaa;
+ v3dX(framebuffer_compute_internal_bpp_msaa)(framebuffer, subpass,
+ &internal_bpp, &msaa);
+
+ v3dv_job_start_frame(job,
+ framebuffer->width,
+ framebuffer->height,
+ framebuffer->layers,
+ color_attachment_count,
+ internal_bpp, msaa);
+
+ struct v3dv_cl *rcl = &job->rcl;
+ v3dv_cl_ensure_space_with_branch(rcl, 200 +
+ layer_count * 256 *
+ cl_packet_length(SUPERTILE_COORDINATES));
+ v3dv_return_if_oom(cmd_buffer, NULL);
+
+ const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
+ config.early_z_disable = true;
+ config.image_width_pixels = framebuffer->width;
+ config.image_height_pixels = framebuffer->height;
+ config.number_of_render_targets = MAX2(color_attachment_count, 1);
+ config.multisample_mode_4x = false; /* FIXME */
+ config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
+ config.internal_depth_type = internal_depth_type;
+ }
+
+ for (uint32_t i = 0; i < color_attachment_count; i++) {
+ uint32_t rt_idx = color_attachments[i].colorAttachment;
+ uint32_t attachment_idx = subpass->color_attachments[rt_idx].attachment;
+ if (attachment_idx == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ const struct v3dv_render_pass_attachment *attachment =
+ &state->pass->attachments[attachment_idx];
+
+ uint32_t internal_type, internal_bpp, internal_size;
+ const struct v3dv_format *format =
+ v3dX(get_format)(attachment->desc.format);
+ v3dX(get_internal_type_bpp_for_output_format)(format->rt_type, &internal_type,
+ &internal_bpp);
+ internal_size = 4 << internal_bpp;
+
+ uint32_t clear_color[4] = { 0 };
+ v3dX(get_hw_clear_color)(&color_attachments[i].clearValue.color,
+ internal_type, internal_size, clear_color);
+
+ struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
+ const struct v3dv_image *image = iview->image;
+ const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
+
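+      /* For UIF-tiled images, if the actual padded height exceeds the
+       * implicit padding by 15 or more UIF blocks we need to provide the
+       * real padded height in the PART3 clear color packet below.
+       */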
+ uint32_t clear_pad = 0;
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ int uif_block_height = v3d_utile_height(image->cpp) * 2;
+
+ uint32_t implicit_padded_height =
+ align(framebuffer->height, uif_block_height) / uif_block_height;
+
+ if (slice->padded_height_of_output_image_in_uif_blocks -
+ implicit_padded_height >= 15) {
+ clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
+ }
+ }
+
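+      /* The clear color is split across up to three packets: PART1 holds
+       * the low 56 bits, PART2 the next 56 bits for 64/128 bpp render
+       * targets, and PART3 the top 16 bits along with any explicit UIF
+       * padding.
+       */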
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
+ clear.clear_color_low_32_bits = clear_color[0];
+ clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
+ clear.render_target_number = i;
+ };
+
+ if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
+ clear.clear_color_mid_low_32_bits =
+ ((clear_color[1] >> 24) | (clear_color[2] << 8));
+ clear.clear_color_mid_high_24_bits =
+ ((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
+ clear.render_target_number = i;
+ };
+ }
+
+ if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
+ clear.uif_padded_height_in_uif_blocks = clear_pad;
+ clear.clear_color_high_16_bits = clear_color[3] >> 16;
+ clear.render_target_number = i;
+ };
+ }
+ }
+
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 0, &rt.render_target_0_internal_bpp,
+ &rt.render_target_0_internal_type, &rt.render_target_0_clamp);
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 1, &rt.render_target_1_internal_bpp,
+ &rt.render_target_1_internal_type, &rt.render_target_1_clamp);
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 2, &rt.render_target_2_internal_bpp,
+ &rt.render_target_2_internal_type, &rt.render_target_2_clamp);
+ v3dX(cmd_buffer_render_pass_setup_render_target)
+ (cmd_buffer, 3, &rt.render_target_3_internal_bpp,
+ &rt.render_target_3_internal_type, &rt.render_target_3_clamp);
+ }
+
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
+ clear.z_clear_value = ds_clear_value ? ds_clear_value->depth : 1.0f;
+ clear.stencil_clear_value = ds_clear_value ? ds_clear_value->stencil : 0;
+ };
+
+ cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
+ init.use_auto_chained_tile_lists = true;
+ init.size_of_first_block_in_chained_tile_lists =
+ TILE_ALLOCATION_BLOCK_SIZE_64B;
+ }
+
+ for (int layer = base_layer; layer < base_layer + layer_count; layer++) {
+ emit_tlb_clear_layer_rcl(cmd_buffer,
+ attachment_count,
+ attachments,
+ layer);
+ }
+
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+void
+v3dX(cmd_buffer_emit_tlb_clear)(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t base_layer,
+ uint32_t layer_count)
+{
+ struct v3dv_job *job =
+ v3dv_cmd_buffer_start_job(cmd_buffer, cmd_buffer->state.subpass_idx,
+ V3DV_JOB_TYPE_GPU_CL);
+
+ if (!job)
+ return;
+
+ /* vkCmdClearAttachments runs inside a render pass */
+ job->is_subpass_continue = true;
+
+ emit_tlb_clear_job(cmd_buffer,
+ attachment_count,
+ attachments,
+ base_layer, layer_count);
+
+ v3dv_cmd_buffer_subpass_resume(cmd_buffer, cmd_buffer->state.subpass_idx);
+}
diff --git a/src/broadcom/vulkan/v3dvx_meta_copy.c b/src/broadcom/vulkan/v3dvx_meta_copy.c
new file mode 100644
index 00000000000..d1f629ff140
--- /dev/null
+++ b/src/broadcom/vulkan/v3dvx_meta_copy.c
@@ -0,0 +1,1353 @@
+/*
+ * Copyright © 2021 Raspberry Pi
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "v3dv_private.h"
+#include "v3dv_meta_copy.h"
+#include "broadcom/common/v3d_macros.h"
+#include "broadcom/cle/v3dx_pack.h"
+#include "broadcom/compiler/v3d_compiler.h"
+
+#include "vk_format_info.h"
+
+struct rcl_clear_info {
+ const union v3dv_clear_value *clear_value;
+ struct v3dv_image *image;
+ VkImageAspectFlags aspects;
+ uint32_t layer;
+ uint32_t level;
+};
+
+static struct v3dv_cl *
+emit_rcl_prologue(struct v3dv_job *job,
+ struct framebuffer_data *fb,
+ const struct rcl_clear_info *clear_info)
+{
+ const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
+
+ struct v3dv_cl *rcl = &job->rcl;
+ v3dv_cl_ensure_space_with_branch(rcl, 200 +
+ tiling->layers * 256 *
+ cl_packet_length(SUPERTILE_COORDINATES));
+ if (job->cmd_buffer->state.oom)
+ return NULL;
+
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
+ config.early_z_disable = true;
+ config.image_width_pixels = tiling->width;
+ config.image_height_pixels = tiling->height;
+ config.number_of_render_targets = 1;
+ config.multisample_mode_4x = tiling->msaa;
+ config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
+ config.internal_depth_type = fb->internal_depth_type;
+ }
+
+ if (clear_info && (clear_info->aspects & VK_IMAGE_ASPECT_COLOR_BIT)) {
+ uint32_t clear_pad = 0;
+ if (clear_info->image) {
+ const struct v3dv_image *image = clear_info->image;
+ const struct v3d_resource_slice *slice =
+ &image->slices[clear_info->level];
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ int uif_block_height = v3d_utile_height(image->cpp) * 2;
+
+ uint32_t implicit_padded_height =
+ align(tiling->height, uif_block_height) / uif_block_height;
+
+ if (slice->padded_height_of_output_image_in_uif_blocks -
+ implicit_padded_height >= 15) {
+ clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
+ }
+ }
+ }
+
+ const uint32_t *color = &clear_info->clear_value->color[0];
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
+ clear.clear_color_low_32_bits = color[0];
+ clear.clear_color_next_24_bits = color[1] & 0x00ffffff;
+ clear.render_target_number = 0;
+ };
+
+ if (tiling->internal_bpp >= V3D_INTERNAL_BPP_64) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
+ clear.clear_color_mid_low_32_bits =
+ ((color[1] >> 24) | (color[2] << 8));
+ clear.clear_color_mid_high_24_bits =
+ ((color[2] >> 24) | ((color[3] & 0xffff) << 8));
+ clear.render_target_number = 0;
+ };
+ }
+
+ if (tiling->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
+ clear.uif_padded_height_in_uif_blocks = clear_pad;
+ clear.clear_color_high_16_bits = color[3] >> 16;
+ clear.render_target_number = 0;
+ };
+ }
+ }
+
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
+ rt.render_target_0_internal_bpp = tiling->internal_bpp;
+ rt.render_target_0_internal_type = fb->internal_type;
+ rt.render_target_0_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
+ }
+
+ cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
+ clear.z_clear_value = clear_info ? clear_info->clear_value->z : 1.0f;
+ clear.stencil_clear_value = clear_info ? clear_info->clear_value->s : 0;
+ };
+
+ cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
+ init.use_auto_chained_tile_lists = true;
+ init.size_of_first_block_in_chained_tile_lists =
+ TILE_ALLOCATION_BLOCK_SIZE_64B;
+ }
+
+ return rcl;
+}
+
+static void
+emit_frame_setup(struct v3dv_job *job,
+ uint32_t layer,
+ const union v3dv_clear_value *clear_value)
+{
+ v3dv_return_if_oom(NULL, job);
+
+ const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
+
+ struct v3dv_cl *rcl = &job->rcl;
+
+ const uint32_t tile_alloc_offset =
+ 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
+ cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
+ list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
+ }
+
+ cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
+ config.number_of_bin_tile_lists = 1;
+ config.total_frame_width_in_tiles = tiling->draw_tiles_x;
+ config.total_frame_height_in_tiles = tiling->draw_tiles_y;
+
+ config.supertile_width_in_tiles = tiling->supertile_width;
+ config.supertile_height_in_tiles = tiling->supertile_height;
+
+ config.total_frame_width_in_supertiles =
+ tiling->frame_width_in_supertiles;
+ config.total_frame_height_in_supertiles =
+ tiling->frame_height_in_supertiles;
+ }
+
+ /* Implement GFXH-1742 workaround. Also, if we are clearing we have to do
+ * it here.
+ */
+ for (int i = 0; i < 2; i++) {
+ cl_emit(rcl, TILE_COORDINATES, coords);
+ cl_emit(rcl, END_OF_LOADS, end);
+ cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = NONE;
+ }
+ if (clear_value && i == 0) {
+ cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
+ clear.clear_z_stencil_buffer = true;
+ clear.clear_all_render_targets = true;
+ }
+ }
+ cl_emit(rcl, END_OF_TILE_MARKER, end);
+ }
+
+ cl_emit(rcl, FLUSH_VCD_CACHE, flush);
+}
+
+static void
+emit_supertile_coordinates(struct v3dv_job *job,
+ struct framebuffer_data *framebuffer)
+{
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl *rcl = &job->rcl;
+
+ const uint32_t min_y = framebuffer->min_y_supertile;
+ const uint32_t max_y = framebuffer->max_y_supertile;
+ const uint32_t min_x = framebuffer->min_x_supertile;
+ const uint32_t max_x = framebuffer->max_x_supertile;
+
+ for (int y = min_y; y <= max_y; y++) {
+ for (int x = min_x; x <= max_x; x++) {
+ cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
+ coords.column_number_in_supertiles = x;
+ coords.row_number_in_supertiles = y;
+ }
+ }
+ }
+}
+
+static void
+emit_linear_load(struct v3dv_cl *cl,
+ uint32_t buffer,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ uint32_t stride,
+ uint32_t format)
+{
+ cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
+ load.buffer_to_load = buffer;
+ load.address = v3dv_cl_address(bo, offset);
+ load.input_image_format = format;
+ load.memory_format = V3D_TILING_RASTER;
+ load.height_in_ub_or_stride = stride;
+ load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+static void
+emit_linear_store(struct v3dv_cl *cl,
+ uint32_t buffer,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ uint32_t stride,
+ bool msaa,
+ uint32_t format)
+{
+ cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = RENDER_TARGET_0;
+ store.address = v3dv_cl_address(bo, offset);
+ store.clear_buffer_being_stored = false;
+ store.output_image_format = format;
+ store.memory_format = V3D_TILING_RASTER;
+ store.height_in_ub_or_stride = stride;
+ store.decimate_mode = msaa ? V3D_DECIMATE_MODE_ALL_SAMPLES :
+ V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+/* This chooses a tile buffer format that is appropriate for the copy operation.
+ * Typically this is the image's render target type; however, if we are copying
+ * depth/stencil to/from a buffer the hardware can't do raster loads/stores, so
+ * we need to load and store to/from a tile color buffer using a compatible
+ * color format.
+ */
+static uint32_t
+choose_tlb_format(struct framebuffer_data *framebuffer,
+ VkImageAspectFlags aspect,
+ bool for_store,
+ bool is_copy_to_buffer,
+ bool is_copy_from_buffer)
+{
+ if (is_copy_to_buffer || is_copy_from_buffer) {
+ switch (framebuffer->vk_format) {
+ case VK_FORMAT_D16_UNORM:
+ return V3D_OUTPUT_IMAGE_FORMAT_R16UI;
+ case VK_FORMAT_D32_SFLOAT:
+ return V3D_OUTPUT_IMAGE_FORMAT_R32F;
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ return V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ /* When storing the stencil aspect of a combined depth/stencil image
+ * to a buffer, the Vulkan spec states that the output buffer must
+ * have packed stencil values, so we choose an R8UI format for our
+ * store outputs. For the load input we still want RGBA8UI since the
+ * source image contains 4 channels (including the 3 channels
+ * containing the 24-bit depth value).
+ *
+ * When loading the stencil aspect of a combined depth/stencil image
+ * from a buffer, we read packed 8-bit stencil values from the buffer
+ * that we need to put into the LSB of the 32-bit format (the R
+ * channel), so we use R8UI. For the store, if we used R8UI then we
+ * would write 8-bit stencil values consecutively over depth channels,
+ * so we need to use RGBA8UI. This will write each stencil value in
+       * its correct position, but will overwrite depth values (channels G,
+       * B, A) with undefined values. To fix this, we will have to restore
+       * the depth aspect from the Z tile buffer, which we should pre-load
+       * from the image before the store.
+ */
+ if (aspect & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ return V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
+ } else {
+ assert(aspect & VK_IMAGE_ASPECT_STENCIL_BIT);
+ if (is_copy_to_buffer) {
+ return for_store ? V3D_OUTPUT_IMAGE_FORMAT_R8UI :
+ V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
+ } else {
+ assert(is_copy_from_buffer);
+ return for_store ? V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI :
+ V3D_OUTPUT_IMAGE_FORMAT_R8UI;
+ }
+ }
+ default: /* Color formats */
+ return framebuffer->format->rt_type;
+ break;
+ }
+ } else {
+ return framebuffer->format->rt_type;
+ }
+}
+
+static inline bool
+format_needs_rb_swap(struct v3dv_device *device,
+ VkFormat format)
+{
+ const uint8_t *swizzle = v3dv_get_format_swizzle(device, format);
+ return swizzle[0] == PIPE_SWIZZLE_Z;
+}
+
+static void
+emit_image_load(struct v3dv_device *device,
+ struct v3dv_cl *cl,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_image *image,
+ VkImageAspectFlags aspect,
+ uint32_t layer,
+ uint32_t mip_level,
+ bool is_copy_to_buffer,
+ bool is_copy_from_buffer)
+{
+ uint32_t layer_offset = v3dv_layer_offset(image, mip_level, layer);
+
+ /* For image to/from buffer copies we always load to and store from RT0,
+ * even for depth/stencil aspects, because the hardware can't do raster
+ * stores or loads from/to the depth/stencil tile buffers.
+ */
+ bool load_to_color_tlb = is_copy_to_buffer || is_copy_from_buffer ||
+ aspect == VK_IMAGE_ASPECT_COLOR_BIT;
+
+ const struct v3d_resource_slice *slice = &image->slices[mip_level];
+ cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
+ load.buffer_to_load = load_to_color_tlb ?
+ RENDER_TARGET_0 : v3dX(zs_buffer_from_aspect_bits)(aspect);
+
+ load.address = v3dv_cl_address(image->mem->bo, layer_offset);
+
+ load.input_image_format = choose_tlb_format(framebuffer, aspect, false,
+ is_copy_to_buffer,
+ is_copy_from_buffer);
+ load.memory_format = slice->tiling;
+
+ /* When copying depth/stencil images to a buffer, for D24 formats Vulkan
+ * expects the depth value in the LSB bits of each 32-bit pixel.
+ * Unfortunately, the hardware seems to put the S8/X8 bits there and the
+ * depth bits on the MSB. To work around that we can reverse the channel
+ * order and then swap the R/B channels to get what we want.
+ *
+ * NOTE: reversing and swapping only gets us the behavior we want if the
+ * operations happen in that exact order, which seems to be the case when
+ * done on the tile buffer load operations. On the store, it seems the
+ * order is not the same. The order on the store is probably reversed so
+ * that reversing and swapping on both the load and the store preserves
+ * the original order of the channels in memory.
+ *
+ * Notice that we only need to do this when copying to a buffer, where
+ * depth and stencil aspects are copied as separate regions and
+ * the spec expects them to be tightly packed.
+ */
+ bool needs_rb_swap = false;
+ bool needs_chan_reverse = false;
+ if (is_copy_to_buffer &&
+ (framebuffer->vk_format == VK_FORMAT_X8_D24_UNORM_PACK32 ||
+ (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT &&
+ (aspect & VK_IMAGE_ASPECT_DEPTH_BIT)))) {
+ needs_rb_swap = true;
+ needs_chan_reverse = true;
+ } else if (!is_copy_from_buffer && !is_copy_to_buffer &&
+ (aspect & VK_IMAGE_ASPECT_COLOR_BIT)) {
+ /* This is not a raw data copy (i.e. we are clearing the image),
+ * so we need to make sure we respect the format swizzle.
+ */
+ needs_rb_swap = format_needs_rb_swap(device, framebuffer->vk_format);
+ }
+
+ load.r_b_swap = needs_rb_swap;
+ load.channel_reverse = needs_chan_reverse;
+
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ load.height_in_ub_or_stride =
+ slice->padded_height_of_output_image_in_uif_blocks;
+ } else if (slice->tiling == V3D_TILING_RASTER) {
+ load.height_in_ub_or_stride = slice->stride;
+ }
+
+ if (image->samples > VK_SAMPLE_COUNT_1_BIT)
+ load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
+ else
+ load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+static void
+emit_image_store(struct v3dv_device *device,
+ struct v3dv_cl *cl,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_image *image,
+ VkImageAspectFlags aspect,
+ uint32_t layer,
+ uint32_t mip_level,
+ bool is_copy_to_buffer,
+ bool is_copy_from_buffer,
+ bool is_multisample_resolve)
+{
+ uint32_t layer_offset = v3dv_layer_offset(image, mip_level, layer);
+
+ bool store_from_color_tlb = is_copy_to_buffer || is_copy_from_buffer ||
+ aspect == VK_IMAGE_ASPECT_COLOR_BIT;
+
+ const struct v3d_resource_slice *slice = &image->slices[mip_level];
+ cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
+ store.buffer_to_store = store_from_color_tlb ?
+ RENDER_TARGET_0 : v3dX(zs_buffer_from_aspect_bits)(aspect);
+
+ store.address = v3dv_cl_address(image->mem->bo, layer_offset);
+ store.clear_buffer_being_stored = false;
+
+ /* See rationale in emit_image_load() */
+ bool needs_rb_swap = false;
+ bool needs_chan_reverse = false;
+ if (is_copy_from_buffer &&
+ (framebuffer->vk_format == VK_FORMAT_X8_D24_UNORM_PACK32 ||
+ (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT &&
+ (aspect & VK_IMAGE_ASPECT_DEPTH_BIT)))) {
+ needs_rb_swap = true;
+ needs_chan_reverse = true;
+ } else if (!is_copy_from_buffer && !is_copy_to_buffer &&
+ (aspect & VK_IMAGE_ASPECT_COLOR_BIT)) {
+ needs_rb_swap = format_needs_rb_swap(device, framebuffer->vk_format);
+ }
+
+ store.r_b_swap = needs_rb_swap;
+ store.channel_reverse = needs_chan_reverse;
+
+ store.output_image_format = choose_tlb_format(framebuffer, aspect, true,
+ is_copy_to_buffer,
+ is_copy_from_buffer);
+ store.memory_format = slice->tiling;
+ if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ slice->tiling == V3D_TILING_UIF_XOR) {
+ store.height_in_ub_or_stride =
+ slice->padded_height_of_output_image_in_uif_blocks;
+ } else if (slice->tiling == V3D_TILING_RASTER) {
+ store.height_in_ub_or_stride = slice->stride;
+ }
+
+ if (image->samples > VK_SAMPLE_COUNT_1_BIT)
+ store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
+ else if (is_multisample_resolve)
+ store.decimate_mode = V3D_DECIMATE_MODE_4X;
+ else
+ store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
+ }
+}
+
+static void
+emit_copy_layer_to_buffer_per_tile_list(struct v3dv_job *job,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_buffer *buffer,
+ struct v3dv_image *image,
+ uint32_t layer_offset,
+ const VkBufferImageCopy2KHR *region)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ /* Load image to TLB */
+ assert((image->type != VK_IMAGE_TYPE_3D &&
+ layer_offset < region->imageSubresource.layerCount) ||
+ layer_offset < image->extent.depth);
+
+ const uint32_t image_layer = image->type != VK_IMAGE_TYPE_3D ?
+ region->imageSubresource.baseArrayLayer + layer_offset :
+ region->imageOffset.z + layer_offset;
+
+ emit_image_load(job->device, cl, framebuffer, image,
+ region->imageSubresource.aspectMask,
+ image_layer,
+ region->imageSubresource.mipLevel,
+ true, false);
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ /* Store TLB to buffer */
+ uint32_t width, height;
+ if (region->bufferRowLength == 0)
+ width = region->imageExtent.width;
+ else
+ width = region->bufferRowLength;
+
+ if (region->bufferImageHeight == 0)
+ height = region->imageExtent.height;
+ else
+ height = region->bufferImageHeight;
+
+ /* Handle copy from compressed format */
+ width = DIV_ROUND_UP(width, vk_format_get_blockwidth(image->vk_format));
+ height = DIV_ROUND_UP(height, vk_format_get_blockheight(image->vk_format));
+
+   /* If we are storing stencil from a combined depth/stencil format, the
+    * Vulkan spec states that the output buffer must have packed stencil
+ * values, where each stencil value is 1 byte.
+ */
+ uint32_t cpp =
+ region->imageSubresource.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT ?
+ 1 : image->cpp;
+ uint32_t buffer_stride = width * cpp;
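+   /* Each layer occupies height * stride bytes in the destination buffer */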
+ uint32_t buffer_offset = buffer->mem_offset + region->bufferOffset +
+ height * buffer_stride * layer_offset;
+
+ uint32_t format = choose_tlb_format(framebuffer,
+ region->imageSubresource.aspectMask,
+ true, true, false);
+ bool msaa = image->samples > VK_SAMPLE_COUNT_1_BIT;
+
+ emit_linear_store(cl, RENDER_TARGET_0, buffer->mem->bo,
+ buffer_offset, buffer_stride, msaa, format);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_copy_layer_to_buffer(struct v3dv_job *job,
+ struct v3dv_buffer *buffer,
+ struct v3dv_image *image,
+ struct framebuffer_data *framebuffer,
+ uint32_t layer,
+ const VkBufferImageCopy2KHR *region)
+{
+ emit_frame_setup(job, layer, NULL);
+ emit_copy_layer_to_buffer_per_tile_list(job, framebuffer, buffer,
+ image, layer, region);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_copy_image_to_buffer_rcl)(struct v3dv_job *job,
+ struct v3dv_buffer *buffer,
+ struct v3dv_image *image,
+ struct framebuffer_data *framebuffer,
+ const VkBufferImageCopy2KHR *region)
+{
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
+ v3dv_return_if_oom(NULL, job);
+
+ for (int layer = 0; layer < job->frame_tiling.layers; layer++)
+ emit_copy_layer_to_buffer(job, buffer, image, framebuffer, layer, region);
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+static void
+emit_resolve_image_layer_per_tile_list(struct v3dv_job *job,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ uint32_t layer_offset,
+ const VkImageResolve2KHR *region)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ assert((src->type != VK_IMAGE_TYPE_3D &&
+ layer_offset < region->srcSubresource.layerCount) ||
+ layer_offset < src->extent.depth);
+
+ const uint32_t src_layer = src->type != VK_IMAGE_TYPE_3D ?
+ region->srcSubresource.baseArrayLayer + layer_offset :
+ region->srcOffset.z + layer_offset;
+
+ emit_image_load(job->device, cl, framebuffer, src,
+ region->srcSubresource.aspectMask,
+ src_layer,
+ region->srcSubresource.mipLevel,
+ false, false);
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ assert((dst->type != VK_IMAGE_TYPE_3D &&
+ layer_offset < region->dstSubresource.layerCount) ||
+ layer_offset < dst->extent.depth);
+
+ const uint32_t dst_layer = dst->type != VK_IMAGE_TYPE_3D ?
+ region->dstSubresource.baseArrayLayer + layer_offset :
+ region->dstOffset.z + layer_offset;
+
+ emit_image_store(job->device, cl, framebuffer, dst,
+ region->dstSubresource.aspectMask,
+ dst_layer,
+ region->dstSubresource.mipLevel,
+ false, false, true);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_resolve_image_layer(struct v3dv_job *job,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ struct framebuffer_data *framebuffer,
+ uint32_t layer,
+ const VkImageResolve2KHR *region)
+{
+ emit_frame_setup(job, layer, NULL);
+ emit_resolve_image_layer_per_tile_list(job, framebuffer,
+ dst, src, layer, region);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_resolve_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ struct framebuffer_data *framebuffer,
+ const VkImageResolve2KHR *region)
+{
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
+ v3dv_return_if_oom(NULL, job);
+
+ for (int layer = 0; layer < job->frame_tiling.layers; layer++)
+ emit_resolve_image_layer(job, dst, src, framebuffer, layer, region);
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+static void
+emit_copy_buffer_per_tile_list(struct v3dv_job *job,
+ struct v3dv_bo *dst,
+ struct v3dv_bo *src,
+ uint32_t dst_offset,
+ uint32_t src_offset,
+ uint32_t stride,
+ uint32_t format)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ emit_linear_load(cl, RENDER_TARGET_0, src, src_offset, stride, format);
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ emit_linear_store(cl, RENDER_TARGET_0,
+ dst, dst_offset, stride, false, format);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+void
+v3dX(job_emit_copy_buffer)(struct v3dv_job *job,
+ struct v3dv_bo *dst,
+ struct v3dv_bo *src,
+ uint32_t dst_offset,
+ uint32_t src_offset,
+ struct framebuffer_data *framebuffer,
+ uint32_t format,
+ uint32_t item_size)
+{
+ const uint32_t stride = job->frame_tiling.width * item_size;
+ emit_copy_buffer_per_tile_list(job, dst, src,
+ dst_offset, src_offset,
+ stride, format);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_copy_buffer_rcl)(struct v3dv_job *job,
+ struct v3dv_bo *dst,
+ struct v3dv_bo *src,
+ uint32_t dst_offset,
+ uint32_t src_offset,
+ struct framebuffer_data *framebuffer,
+ uint32_t format,
+ uint32_t item_size)
+{
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
+ v3dv_return_if_oom(NULL, job);
+
+ emit_frame_setup(job, 0, NULL);
+
+ v3dX(job_emit_copy_buffer)(job, dst, src, dst_offset, src_offset,
+ framebuffer, format, item_size);
+
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+static void
+emit_copy_image_layer_per_tile_list(struct v3dv_job *job,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ uint32_t layer_offset,
+ const VkImageCopy2KHR *region)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ assert((src->type != VK_IMAGE_TYPE_3D &&
+ layer_offset < region->srcSubresource.layerCount) ||
+ layer_offset < src->extent.depth);
+
+ const uint32_t src_layer = src->type != VK_IMAGE_TYPE_3D ?
+ region->srcSubresource.baseArrayLayer + layer_offset :
+ region->srcOffset.z + layer_offset;
+
+ emit_image_load(job->device, cl, framebuffer, src,
+ region->srcSubresource.aspectMask,
+ src_layer,
+ region->srcSubresource.mipLevel,
+ false, false);
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ assert((dst->type != VK_IMAGE_TYPE_3D &&
+ layer_offset < region->dstSubresource.layerCount) ||
+ layer_offset < dst->extent.depth);
+
+ const uint32_t dst_layer = dst->type != VK_IMAGE_TYPE_3D ?
+ region->dstSubresource.baseArrayLayer + layer_offset :
+ region->dstOffset.z + layer_offset;
+
+ emit_image_store(job->device, cl, framebuffer, dst,
+ region->dstSubresource.aspectMask,
+ dst_layer,
+ region->dstSubresource.mipLevel,
+ false, false, false);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_copy_image_layer(struct v3dv_job *job,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ struct framebuffer_data *framebuffer,
+ uint32_t layer,
+ const VkImageCopy2KHR *region)
+{
+ emit_frame_setup(job, layer, NULL);
+ emit_copy_image_layer_per_tile_list(job, framebuffer, dst, src, layer, region);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_copy_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ struct framebuffer_data *framebuffer,
+ const VkImageCopy2KHR *region)
+{
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
+ v3dv_return_if_oom(NULL, job);
+
+ for (int layer = 0; layer < job->frame_tiling.layers; layer++)
+ emit_copy_image_layer(job, dst, src, framebuffer, layer, region);
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
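+/* Copies a single layer/level between images using the TFU (Texture
+ * Formatting Unit), which can convert between tilings while copying.
+ */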
+void
+v3dX(cmd_buffer_emit_tfu_job)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_image *dst,
+ uint32_t dst_mip_level,
+ uint32_t dst_layer,
+ struct v3dv_image *src,
+ uint32_t src_mip_level,
+ uint32_t src_layer,
+ uint32_t width,
+ uint32_t height,
+ const struct v3dv_format *format)
+{
+ const struct v3d_resource_slice *src_slice = &src->slices[src_mip_level];
+ const struct v3d_resource_slice *dst_slice = &dst->slices[dst_mip_level];
+
+ assert(dst->mem && dst->mem->bo);
+ const struct v3dv_bo *dst_bo = dst->mem->bo;
+
+ assert(src->mem && src->mem->bo);
+ const struct v3dv_bo *src_bo = src->mem->bo;
+
+ struct drm_v3d_submit_tfu tfu = {
+ .ios = (height << 16) | width,
+ .bo_handles = {
+ dst_bo->handle,
+ src_bo->handle != dst_bo->handle ? src_bo->handle : 0
+ },
+ };
+
+ const uint32_t src_offset =
+ src_bo->offset + v3dv_layer_offset(src, src_mip_level, src_layer);
+ tfu.iia |= src_offset;
+
+ uint32_t icfg;
+ if (src_slice->tiling == V3D_TILING_RASTER) {
+ icfg = V3D_TFU_ICFG_FORMAT_RASTER;
+ } else {
+ icfg = V3D_TFU_ICFG_FORMAT_LINEARTILE +
+ (src_slice->tiling - V3D_TILING_LINEARTILE);
+ }
+ tfu.icfg |= icfg << V3D_TFU_ICFG_FORMAT_SHIFT;
+
+ const uint32_t dst_offset =
+ dst_bo->offset + v3dv_layer_offset(dst, dst_mip_level, dst_layer);
+ tfu.ioa |= dst_offset;
+
+ tfu.ioa |= (V3D_TFU_IOA_FORMAT_LINEARTILE +
+ (dst_slice->tiling - V3D_TILING_LINEARTILE)) <<
+ V3D_TFU_IOA_FORMAT_SHIFT;
+ tfu.icfg |= format->tex_type << V3D_TFU_ICFG_TTYPE_SHIFT;
+
+ switch (src_slice->tiling) {
+ case V3D_TILING_UIF_NO_XOR:
+ case V3D_TILING_UIF_XOR:
+ tfu.iis |= src_slice->padded_height / (2 * v3d_utile_height(src->cpp));
+ break;
+ case V3D_TILING_RASTER:
+ tfu.iis |= src_slice->stride / src->cpp;
+ break;
+ default:
+ break;
+ }
+
+ /* If we're writing level 0 (!IOA_DIMTW), then we need to supply the
+ * OPAD field for the destination (how many extra UIF blocks beyond
+ * those necessary to cover the height).
+ */
+ if (dst_slice->tiling == V3D_TILING_UIF_NO_XOR ||
+ dst_slice->tiling == V3D_TILING_UIF_XOR) {
+ uint32_t uif_block_h = 2 * v3d_utile_height(dst->cpp);
+ uint32_t implicit_padded_height = align(height, uif_block_h);
+ uint32_t icfg =
+ (dst_slice->padded_height - implicit_padded_height) / uif_block_h;
+ tfu.icfg |= icfg << V3D_TFU_ICFG_OPAD_SHIFT;
+ }
+
+ v3dv_cmd_buffer_add_tfu_job(cmd_buffer, &tfu);
+}
+
+static void
+emit_clear_image_per_tile_list(struct v3dv_job *job,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_image *image,
+ VkImageAspectFlags aspects,
+ uint32_t layer,
+ uint32_t level)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ emit_image_store(job->device, cl, framebuffer, image, aspects,
+ layer, level, false, false, false);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_clear_image(struct v3dv_job *job,
+ struct v3dv_image *image,
+ struct framebuffer_data *framebuffer,
+ VkImageAspectFlags aspects,
+ uint32_t layer,
+ uint32_t level)
+{
+ emit_clear_image_per_tile_list(job, framebuffer, image, aspects, layer, level);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_clear_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *image,
+ struct framebuffer_data *framebuffer,
+ const union v3dv_clear_value *clear_value,
+ VkImageAspectFlags aspects,
+ uint32_t layer,
+ uint32_t level)
+{
+ const struct rcl_clear_info clear_info = {
+ .clear_value = clear_value,
+ .image = image,
+ .aspects = aspects,
+ .layer = layer,
+ .level = level,
+ };
+
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, &clear_info);
+ v3dv_return_if_oom(NULL, job);
+
+ emit_frame_setup(job, 0, clear_value);
+ emit_clear_image(job, image, framebuffer, aspects, layer, level);
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+static void
+emit_fill_buffer_per_tile_list(struct v3dv_job *job,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ uint32_t stride)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ emit_linear_store(cl, RENDER_TARGET_0, bo, offset, stride, false,
+ V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI);
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_fill_buffer(struct v3dv_job *job,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ struct framebuffer_data *framebuffer)
+{
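+   /* The fill is emitted as a clear that is stored out to a raster RGBA8UI
+    * image, so each pixel written covers 4 bytes of the destination buffer.
+    */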
+ const uint32_t stride = job->frame_tiling.width * 4;
+ emit_fill_buffer_per_tile_list(job, bo, offset, stride);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_fill_buffer_rcl)(struct v3dv_job *job,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ struct framebuffer_data *framebuffer,
+ uint32_t data)
+{
+ const union v3dv_clear_value clear_value = {
+ .color = { data, 0, 0, 0 },
+ };
+
+ const struct rcl_clear_info clear_info = {
+ .clear_value = &clear_value,
+ .image = NULL,
+ .aspects = VK_IMAGE_ASPECT_COLOR_BIT,
+ .layer = 0,
+ .level = 0,
+ };
+
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, &clear_info);
+ v3dv_return_if_oom(NULL, job);
+
+ emit_frame_setup(job, 0, &clear_value);
+ emit_fill_buffer(job, bo, offset, framebuffer);
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+static void
+emit_copy_buffer_to_layer_per_tile_list(struct v3dv_job *job,
+ struct framebuffer_data *framebuffer,
+ struct v3dv_image *image,
+ struct v3dv_buffer *buffer,
+ uint32_t layer,
+ const VkBufferImageCopy2KHR *region)
+{
+ struct v3dv_cl *cl = &job->indirect;
+ v3dv_cl_ensure_space(cl, 200, 1);
+ v3dv_return_if_oom(NULL, job);
+
+ struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
+
+ cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
+
+ const VkImageSubresourceLayers *imgrsc = &region->imageSubresource;
+ assert((image->type != VK_IMAGE_TYPE_3D && layer < imgrsc->layerCount) ||
+ layer < image->extent.depth);
+
+ /* Load TLB from buffer */
+ uint32_t width, height;
+ if (region->bufferRowLength == 0)
+ width = region->imageExtent.width;
+ else
+ width = region->bufferRowLength;
+
+ if (region->bufferImageHeight == 0)
+ height = region->imageExtent.height;
+ else
+ height = region->bufferImageHeight;
+
+ /* Handle copy to compressed format using a compatible format */
+ width = DIV_ROUND_UP(width, vk_format_get_blockwidth(image->vk_format));
+ height = DIV_ROUND_UP(height, vk_format_get_blockheight(image->vk_format));
+
+ uint32_t cpp = imgrsc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT ?
+ 1 : image->cpp;
+ uint32_t buffer_stride = width * cpp;
+ uint32_t buffer_offset =
+ buffer->mem_offset + region->bufferOffset + height * buffer_stride * layer;
+
+ uint32_t format = choose_tlb_format(framebuffer, imgrsc->aspectMask,
+ false, false, true);
+
+ emit_linear_load(cl, RENDER_TARGET_0, buffer->mem->bo,
+ buffer_offset, buffer_stride, format);
+
+ /* Because we can't do raster loads/stores of Z/S formats we need to
+ * use a color tile buffer with a compatible RGBA color format instead.
+ * However, when we are uploading a single aspect to a combined
+ * depth/stencil image we have the problem that our tile buffer stores don't
+ * allow us to mask out the other aspect, so we always write all four RGBA
+ * channels to the image and we end up overwriting that other aspect with
+ * undefined values. To work around that, we first load the aspect we are
+ * not copying from the image memory into a proper Z/S tile buffer. Then we
+ * do our store from the color buffer for the aspect we are copying, and
+ * after that, we do another store from the Z/S tile buffer to restore the
+ * other aspect to its original value.
+ */
+ if (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
+ if (imgrsc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ emit_image_load(job->device, cl, framebuffer, image,
+ VK_IMAGE_ASPECT_STENCIL_BIT,
+ imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
+ false, false);
+ } else {
+ assert(imgrsc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
+ emit_image_load(job->device, cl, framebuffer, image,
+ VK_IMAGE_ASPECT_DEPTH_BIT,
+ imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
+ false, false);
+ }
+ }
+
+ cl_emit(cl, END_OF_LOADS, end);
+
+ cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
+
+ /* Store TLB to image */
+ emit_image_store(job->device, cl, framebuffer, image, imgrsc->aspectMask,
+ imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
+ false, true, false);
+
+ if (framebuffer->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
+ if (imgrsc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ emit_image_store(job->device, cl, framebuffer, image,
+ VK_IMAGE_ASPECT_STENCIL_BIT,
+ imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
+ false, false, false);
+ } else {
+ assert(imgrsc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
+ emit_image_store(job->device, cl, framebuffer, image,
+ VK_IMAGE_ASPECT_DEPTH_BIT,
+ imgrsc->baseArrayLayer + layer, imgrsc->mipLevel,
+ false, false, false);
+ }
+ }
+
+ cl_emit(cl, END_OF_TILE_MARKER, end);
+
+ cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
+
+ cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
+ branch.start = tile_list_start;
+ branch.end = v3dv_cl_get_address(cl);
+ }
+}
+
+static void
+emit_copy_buffer_to_layer(struct v3dv_job *job,
+ struct v3dv_image *image,
+ struct v3dv_buffer *buffer,
+ struct framebuffer_data *framebuffer,
+ uint32_t layer,
+ const VkBufferImageCopy2KHR *region)
+{
+ emit_frame_setup(job, layer, NULL);
+ emit_copy_buffer_to_layer_per_tile_list(job, framebuffer, image, buffer,
+ layer, region);
+ emit_supertile_coordinates(job, framebuffer);
+}
+
+void
+v3dX(job_emit_copy_buffer_to_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *image,
+ struct v3dv_buffer *buffer,
+ struct framebuffer_data *framebuffer,
+ const VkBufferImageCopy2KHR *region)
+{
+ struct v3dv_cl *rcl = emit_rcl_prologue(job, framebuffer, NULL);
+ v3dv_return_if_oom(NULL, job);
+
+ for (int layer = 0; layer < job->frame_tiling.layers; layer++)
+ emit_copy_buffer_to_layer(job, image, buffer, framebuffer, layer, region);
+ cl_emit(rcl, END_OF_RENDERING, end);
+}
+
+/* Figure out a TLB size configuration for a number of pixels to process.
+ * Beware that we can't "render" more than 4096x4096 pixels in a single job;
+ * if the pixel count is larger than this, the caller might need to split
+ * the job and call this function multiple times.
+ */
+static void
+framebuffer_size_for_pixel_count(uint32_t num_pixels,
+ uint32_t *width,
+ uint32_t *height)
+{
+ assert(num_pixels > 0);
+
+ const uint32_t max_dim_pixels = 4096;
+ const uint32_t max_pixels = max_dim_pixels * max_dim_pixels;
+
+ uint32_t w, h;
+ if (num_pixels > max_pixels) {
+ w = max_dim_pixels;
+ h = max_dim_pixels;
+ } else {
+ w = num_pixels;
+ h = 1;
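+      /* Halve the width (doubling the height) until it fits the 4096-pixel
+       * limit and is not disproportionately wider than the height.
+       */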
+ while (w > max_dim_pixels || ((w % 2) == 0 && w > 2 * h)) {
+ w >>= 1;
+ h <<= 1;
+ }
+ }
+ assert(w <= max_dim_pixels && h <= max_dim_pixels);
+ assert(w * h <= num_pixels);
+ assert(w > 0 && h > 0);
+
+ *width = w;
+ *height = h;
+}
+
+struct v3dv_job *
+v3dX(cmd_buffer_copy_buffer)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_bo *dst,
+ uint32_t dst_offset,
+ struct v3dv_bo *src,
+ uint32_t src_offset,
+ const VkBufferCopy2KHR *region)
+{
+ const uint32_t internal_bpp = V3D_INTERNAL_BPP_32;
+ const uint32_t internal_type = V3D_INTERNAL_TYPE_8UI;
+
+ /* Select appropriate pixel format for the copy operation based on the
+ * size to copy and the alignment of the source and destination offsets.
+ */
+ src_offset += region->srcOffset;
+ dst_offset += region->dstOffset;
+ uint32_t item_size = 4;
+ while (item_size > 1 &&
+ (src_offset % item_size != 0 || dst_offset % item_size != 0)) {
+ item_size /= 2;
+ }
+
+ while (item_size > 1 && region->size % item_size != 0)
+ item_size /= 2;
+
+ assert(region->size % item_size == 0);
+ uint32_t num_items = region->size / item_size;
+ assert(num_items > 0);
+
+ uint32_t format;
+ VkFormat vk_format;
+ switch (item_size) {
+ case 4:
+ format = V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI;
+ vk_format = VK_FORMAT_R8G8B8A8_UINT;
+ break;
+ case 2:
+ format = V3D_OUTPUT_IMAGE_FORMAT_RG8UI;
+ vk_format = VK_FORMAT_R8G8_UINT;
+ break;
+ default:
+ format = V3D_OUTPUT_IMAGE_FORMAT_R8UI;
+ vk_format = VK_FORMAT_R8_UINT;
+ break;
+ }
+
+ struct v3dv_job *job = NULL;
+ while (num_items > 0) {
+ job = v3dv_cmd_buffer_start_job(cmd_buffer, -1, V3DV_JOB_TYPE_GPU_CL);
+ if (!job)
+ return NULL;
+
+ uint32_t width, height;
+ framebuffer_size_for_pixel_count(num_items, &width, &height);
+
+ v3dv_job_start_frame(job, width, height, 1, 1, internal_bpp, false);
+
+ struct framebuffer_data framebuffer;
+ v3dX(setup_framebuffer_data)(&framebuffer, vk_format, internal_type,
+ &job->frame_tiling);
+
+ v3dX(job_emit_binning_flush)(job);
+
+ v3dX(job_emit_copy_buffer_rcl)(job, dst, src, dst_offset, src_offset,
+ &framebuffer, format, item_size);
+
+ v3dv_cmd_buffer_finish_job(cmd_buffer);
+
+ const uint32_t items_copied = width * height;
+ const uint32_t bytes_copied = items_copied * item_size;
+ num_items -= items_copied;
+ src_offset += bytes_copied;
+ dst_offset += bytes_copied;
+ }
+
+ return job;
+}
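For reference, the item-size selection above degrades from 32-bit to 16-bit to
8-bit items purely from offset alignment and copy size. A small check of the
same arithmetic (illustrative only, not part of the driver; names are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Reproduces the item-size selection in v3dX(cmd_buffer_copy_buffer). */
    static uint32_t
    example_item_size(uint32_t src_offset, uint32_t dst_offset, uint32_t size)
    {
       uint32_t item_size = 4;
       while (item_size > 1 &&
              (src_offset % item_size != 0 || dst_offset % item_size != 0))
          item_size /= 2;
       while (item_size > 1 && size % item_size != 0)
          item_size /= 2;
       return item_size;
    }

    int main(void)
    {
       /* src_offset = 6 is not 4-byte aligned, but both offsets and the size
        * are 2-byte aligned, so this copy runs as 15 RG8UI items of 2 bytes.
        */
       assert(example_item_size(6, 12, 30) == 2);
       return 0;
    }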
+
+void
+v3dX(cmd_buffer_fill_buffer)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data)
+{
+ assert(size > 0 && size % 4 == 0);
+ assert(offset + size <= bo->size);
+
+ const uint32_t internal_bpp = V3D_INTERNAL_BPP_32;
+ const uint32_t internal_type = V3D_INTERNAL_TYPE_8UI;
+ uint32_t num_items = size / 4;
+
+ while (num_items > 0) {
+ struct v3dv_job *job =
+ v3dv_cmd_buffer_start_job(cmd_buffer, -1, V3DV_JOB_TYPE_GPU_CL);
+ if (!job)
+ return;
+
+ uint32_t width, height;
+ framebuffer_size_for_pixel_count(num_items, &width, &height);
+
+ v3dv_job_start_frame(job, width, height, 1, 1, internal_bpp, false);
+
+ struct framebuffer_data framebuffer;
+ v3dX(setup_framebuffer_data)(&framebuffer, VK_FORMAT_R8G8B8A8_UINT,
+ internal_type, &job->frame_tiling);
+
+ v3dX(job_emit_binning_flush)(job);
+
+ v3dX(job_emit_fill_buffer_rcl)(job, bo, offset, &framebuffer, data);
+
+ v3dv_cmd_buffer_finish_job(cmd_buffer);
+
+ const uint32_t items_copied = width * height;
+ const uint32_t bytes_copied = items_copied * 4;
+ num_items -= items_copied;
+ offset += bytes_copied;
+ }
+}
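Since the fill path always works on 32-bit items and a single job covers at most
4096x4096 pixels, each iteration of the loop above can write at most
4096 * 4096 * 4 bytes = 64 MiB; larger fills simply issue more jobs. A
back-of-the-envelope check for an exact multiple of the per-job limit, where the
job count is a simple division (illustrative only, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
       const uint64_t max_items_per_job = 4096ull * 4096ull;   /* TLB limit     */
       const uint64_t fill_bytes = 256ull * 1024 * 1024;       /* 256 MiB fill  */
       const uint64_t num_items = fill_bytes / 4;               /* 32-bit items  */

       /* 256 MiB / 64 MiB per job -> 4 jobs */
       printf("%llu jobs\n",
              (unsigned long long)((num_items + max_items_per_job - 1) /
                                   max_items_per_job));
       return 0;
    }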
+
+void
+v3dX(setup_framebuffer_data)(struct framebuffer_data *fb,
+ VkFormat vk_format,
+ uint32_t internal_type,
+ const struct v3dv_frame_tiling *tiling)
+{
+ fb->internal_type = internal_type;
+
+ /* Supertile coverage always starts at 0,0 */
+ uint32_t supertile_w_in_pixels =
+ tiling->tile_width * tiling->supertile_width;
+ uint32_t supertile_h_in_pixels =
+ tiling->tile_height * tiling->supertile_height;
+
+ fb->min_x_supertile = 0;
+ fb->min_y_supertile = 0;
+ fb->max_x_supertile = (tiling->width - 1) / supertile_w_in_pixels;
+ fb->max_y_supertile = (tiling->height - 1) / supertile_h_in_pixels;
+
+ fb->vk_format = vk_format;
+ fb->format = v3dX(get_format)(vk_format);
+
+ fb->internal_depth_type = V3D_INTERNAL_TYPE_DEPTH_32F;
+ if (vk_format_is_depth_or_stencil(vk_format))
+ fb->internal_depth_type = v3dX(get_internal_depth_type)(vk_format);
+}
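To see how the supertile extents above translate into concrete numbers, assume
(purely for illustration; the real values come from job->frame_tiling) 64x64-pixel
tiles grouped into 2x2-tile supertiles, i.e. 128x128 pixels per supertile:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
       /* Assumed values for illustration only. */
       const uint32_t tile_w = 64, tile_h = 64;           /* pixels per tile     */
       const uint32_t supertile_w = 2, supertile_h = 2;   /* tiles per supertile */
       const uint32_t width = 800, height = 600;          /* framebuffer size    */

       const uint32_t st_w_px = tile_w * supertile_w;     /* 128 */
       const uint32_t st_h_px = tile_h * supertile_h;     /* 128 */

       /* Same computation as v3dX(setup_framebuffer_data): coverage starts at
        * supertile (0,0) and ends at the supertile containing the last pixel.
        */
       assert((width - 1) / st_w_px == 6);    /* max_x_supertile */
       assert((height - 1) / st_h_px == 4);   /* max_y_supertile */
       return 0;
    }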
diff --git a/src/broadcom/vulkan/v3dvx_private.h b/src/broadcom/vulkan/v3dvx_private.h
index 2de8996903c..e689b4d5eb8 100644
--- a/src/broadcom/vulkan/v3dvx_private.h
+++ b/src/broadcom/vulkan/v3dvx_private.h
@@ -29,6 +29,103 @@
#error This file is included by means other than v3dv_private.h
#endif
+/* Used at v3dv_cmd_buffer */
+void
+v3dX(job_emit_binning_flush)(struct v3dv_job *job);
+
+void
+v3dX(cmd_buffer_end_render_pass_secondary)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(job_emit_clip_window)(struct v3dv_job *job, const VkRect2D *rect);
+
+void
+v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_viewport)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_stencil)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_depth_bias)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_line_width)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_sample_state)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_blend)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_varyings_state)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_configuration_bits)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(job_emit_binning_prolog)(struct v3dv_job *job,
+ const struct v3dv_frame_tiling *tiling,
+ uint32_t layers);
+
+void
+v3dX(cmd_buffer_execute_inside_pass)(struct v3dv_cmd_buffer *primary,
+ uint32_t cmd_buffer_count,
+ const VkCommandBuffer *cmd_buffers);
+
+void
+v3dX(cmd_buffer_emit_occlusion_query)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_gl_shader_state)(struct v3dv_cmd_buffer *cmd_buffer);
+
+
+void
+v3dX(cmd_buffer_emit_draw)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_draw_info *info);
+
+
+void
+v3dX(cmd_buffer_emit_index_buffer)(struct v3dv_cmd_buffer *cmd_buffer);
+
+void
+v3dX(cmd_buffer_emit_draw_indexed)(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance);
+
+void
+v3dX(cmd_buffer_emit_draw_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_buffer *buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+void
+v3dX(cmd_buffer_emit_indexed_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_buffer *buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+void
+v3dX(get_hw_clear_color)(const VkClearColorValue *color,
+ uint32_t internal_type,
+ uint32_t internal_size,
+ uint32_t *hw_color);
+
+void
+v3dX(cmd_buffer_render_pass_setup_render_target)(struct v3dv_cmd_buffer *cmd_buffer,
+ int rt,
+ uint32_t *rt_bpp,
+ uint32_t *rt_type,
+ uint32_t *rt_clamp);
+
/* Used at v3dv_device */
void
@@ -68,6 +165,128 @@ void
v3dX(pack_texture_shader_state_from_buffer_view)(struct v3dv_device *device,
struct v3dv_buffer_view *buffer_view);
+/* Used at v3dv_meta_clear */
+void
+v3dX(cmd_buffer_emit_tlb_clear)(struct v3dv_cmd_buffer *cmd_buffer,
+ uint32_t attachment_count,
+ const VkClearAttachment *attachments,
+ uint32_t base_layer,
+ uint32_t layer_count);
+
+uint32_t
+v3dX(zs_buffer_from_aspect_bits)(VkImageAspectFlags aspects);
+
+uint8_t
+v3dX(get_internal_depth_type)(VkFormat format);
+
+
+/* Used at v3dv_meta_copy */
+struct framebuffer_data;
+
+void
+v3dX(job_emit_copy_image_to_buffer_rcl)(struct v3dv_job *job,
+ struct v3dv_buffer *buffer,
+ struct v3dv_image *image,
+ struct framebuffer_data *framebuffer,
+ const VkBufferImageCopy2KHR *region);
+
+void
+v3dX(job_emit_resolve_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ struct framebuffer_data *framebuffer,
+ const VkImageResolve2KHR *region);
+
+
+void
+v3dX(job_emit_copy_buffer)(struct v3dv_job *job,
+ struct v3dv_bo *dst,
+ struct v3dv_bo *src,
+ uint32_t dst_offset,
+ uint32_t src_offset,
+ struct framebuffer_data *framebuffer,
+ uint32_t format,
+ uint32_t item_size);
+
+void
+v3dX(job_emit_copy_buffer_rcl)(struct v3dv_job *job,
+ struct v3dv_bo *dst,
+ struct v3dv_bo *src,
+ uint32_t dst_offset,
+ uint32_t src_offset,
+ struct framebuffer_data *framebuffer,
+ uint32_t format,
+ uint32_t item_size);
+
+void
+v3dX(job_emit_copy_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *dst,
+ struct v3dv_image *src,
+ struct framebuffer_data *framebuffer,
+ const VkImageCopy2KHR *region);
+
+void
+v3dX(cmd_buffer_emit_tfu_job)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_image *dst,
+ uint32_t dst_mip_level,
+ uint32_t dst_layer,
+ struct v3dv_image *src,
+ uint32_t src_mip_level,
+ uint32_t src_layer,
+ uint32_t width,
+ uint32_t height,
+ const struct v3dv_format *format);
+
+void
+v3dX(job_emit_clear_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *image,
+ struct framebuffer_data *framebuffer,
+ const union v3dv_clear_value *clear_value,
+ VkImageAspectFlags aspects,
+ uint32_t layer,
+ uint32_t level);
+
+void
+v3dX(job_emit_fill_buffer_rcl)(struct v3dv_job *job,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ struct framebuffer_data *framebuffer,
+ uint32_t data);
+
+void
+v3dX(job_emit_copy_buffer_to_image_rcl)(struct v3dv_job *job,
+ struct v3dv_image *image,
+ struct v3dv_buffer *buffer,
+ struct framebuffer_data *framebuffer,
+ const VkBufferImageCopy2KHR *region);
+
+void
+v3dX(get_internal_type_bpp_for_image_aspects)(VkFormat vk_format,
+ VkImageAspectFlags aspect_mask,
+ uint32_t *internal_type,
+ uint32_t *internal_bpp);
+
+struct v3dv_job *
+v3dX(cmd_buffer_copy_buffer)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_bo *dst,
+ uint32_t dst_offset,
+ struct v3dv_bo *src,
+ uint32_t src_offset,
+ const VkBufferCopy2KHR *region);
+
+void
+v3dX(cmd_buffer_fill_buffer)(struct v3dv_cmd_buffer *cmd_buffer,
+ struct v3dv_bo *bo,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data);
+
+void
+v3dX(setup_framebuffer_data)(struct framebuffer_data *fb,
+ VkFormat vk_format,
+ uint32_t internal_type,
+ const struct v3dv_frame_tiling *tiling);
+
/* Used at v3dv_pipeline */
void
v3dX(pipeline_pack_state)(struct v3dv_pipeline *pipeline,
diff --git a/src/broadcom/vulkan/v3dvx_queue.c b/src/broadcom/vulkan/v3dvx_queue.c
index 3d082291b16..4a70141bfca 100644
--- a/src/broadcom/vulkan/v3dvx_queue.c
+++ b/src/broadcom/vulkan/v3dvx_queue.c
@@ -30,7 +30,7 @@ void
v3dX(job_emit_noop)(struct v3dv_job *job)
{
v3dv_job_start_frame(job, 1, 1, 1, 1, V3D_INTERNAL_BPP_32, false);
- v3dv_job_emit_binning_flush(job);
+ v3dX(job_emit_binning_flush)(job);
struct v3dv_cl *rcl = &job->rcl;
v3dv_cl_ensure_space_with_branch(rcl, 200 + 1 * 256 *