author     Matthew Nicholls <mnicholls@feralinteractive.com>    2018-01-29 16:26:18 +0000
committer  Emil Velikov <emil.l.velikov@gmail.com>              2018-02-09 04:23:17 +0000
commit     69beac3f38566aea5e486a8228911ae4e05bb8d7
tree       6c38475beb244572b32c404bc989dbbc17ddcfde
parent     da327c6ce6645f2e7057ce5a5c6626f6ca83d37f
radv: remove predication on cache flushes
This can lead to a situation where cache flushes could get conditionally
disabled while still clearing the flush_bits, and thus flushes due to
application pipeline barriers may never get executed.

Fixes: a6c2001ace (radv: add support for cmd predication.)
Signed-off-by: Dave Airlie <airlied@redhat.com>
(cherry picked from commit ef272b161e05e8216f2d1f4df5023f3aed0ae4fa)
[Emil Velikov: trivial conflicts]
Signed-off-by: Emil Velikov <emil.velikov@collabora.com>

Conflicts:
	src/amd/vulkan/radv_cmd_buffer.c
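For illustration, the failure mode described above can be reduced to the following minimal standalone C sketch. It is not the radv source: cmd_buffer_t, emit_flush_packets() and FLUSH_INV_L2 are simplified stand-ins for the driver's command buffer state, si_cs_emit_cache_flush() and the RADV_CMD_FLAG_* bits.

/* Sketch of the bug: if the flush packets are emitted with the command
 * buffer's predicate and the GPU-side predicate evaluates to false, the
 * packets are skipped -- yet the CPU side has already cleared flush_bits,
 * so a later pipeline barrier never re-emits the flush. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSH_INV_L2 0x1u

typedef struct {
	uint32_t flush_bits;   /* flushes requested by barriers, not yet emitted */
	bool     predicating;  /* conditional-rendering predicate is active */
} cmd_buffer_t;

static void emit_flush_packets(uint32_t bits, bool predicated)
{
	/* Stand-in for si_cs_emit_cache_flush(): when 'predicated' is set,
	 * the GPU may silently drop these packets. */
	printf("emit flush 0x%" PRIx32 " (predicated=%d)\n", bits, predicated);
}

static void flush_caches(cmd_buffer_t *cmd)
{
	if (!cmd->flush_bits)
		return;

	/* Buggy pattern: the emission inherits the predicate... */
	emit_flush_packets(cmd->flush_bits, cmd->predicating);

	/* ...but the pending bits are cleared unconditionally, so the flush
	 * is lost whenever the predicated packets were skipped.  The commit
	 * below fixes this by always emitting cache flushes unpredicated. */
	cmd->flush_bits = 0;
}

int main(void)
{
	cmd_buffer_t cmd = { .flush_bits = FLUSH_INV_L2, .predicating = true };
	flush_caches(&cmd);
	return 0;
}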
-rw-r--r--  src/amd/vulkan/radv_cmd_buffer.c   2
-rw-r--r--  src/amd/vulkan/radv_device.c       2
-rw-r--r--  src/amd/vulkan/radv_private.h      1
-rw-r--r--  src/amd/vulkan/si_cmd_buffer.c    26
4 files changed, 13 insertions, 18 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 0a9780b64d5..4facd63e65e 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -380,7 +380,7 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer)
flags = RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
- si_cs_emit_cache_flush(cmd_buffer->cs, false,
+ si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->device->physical_device->rad_info.chip_class,
NULL, 0,
radv_cmd_buffer_uses_mec(cmd_buffer),
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 6c1f21be5d8..6140a2fd31e 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -1690,7 +1690,6 @@ radv_get_preamble_cs(struct radv_queue *queue,
if (i == 0) {
si_cs_emit_cache_flush(cs,
- false,
queue->device->physical_device->rad_info.chip_class,
NULL, 0,
queue->queue_family_index == RING_COMPUTE &&
@@ -1702,7 +1701,6 @@ radv_get_preamble_cs(struct radv_queue *queue,
RADV_CMD_FLAG_INV_GLOBAL_L2);
} else if (i == 1) {
si_cs_emit_cache_flush(cs,
- false,
queue->device->physical_device->rad_info.chip_class,
NULL, 0,
queue->queue_family_index == RING_COMPUTE &&
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index ae799e2c829..f07ec28df68 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -918,7 +918,6 @@ void si_emit_wait_fence(struct radeon_winsys_cs *cs,
uint64_t va, uint32_t ref,
uint32_t mask);
void si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
- bool predicated,
enum chip_class chip_class,
uint32_t *fence_ptr, uint64_t va,
bool is_mec,
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index b312d526bb2..0abaeac79d3 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
@@ -919,7 +919,6 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs,
void
si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
- bool predicated,
enum chip_class chip_class,
uint32_t *flush_cnt,
uint64_t flush_va,
@@ -950,7 +949,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
/* Necessary for DCC */
if (chip_class >= VI) {
si_cs_emit_write_event_eop(cs,
- predicated,
+ false,
chip_class,
is_mec,
V_028A90_FLUSH_AND_INV_CB_DATA_TS,
@@ -964,12 +963,12 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
}
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
}
@@ -982,7 +981,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
}
if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
@@ -1036,14 +1035,14 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
assert(flush_cnt);
uint32_t old_fence = (*flush_cnt)++;
- si_cs_emit_write_event_eop(cs, predicated, chip_class, false, cb_db_event, tc_flags, 1,
+ si_cs_emit_write_event_eop(cs, false, chip_class, false, cb_db_event, tc_flags, 1,
flush_va, old_fence, *flush_cnt);
- si_emit_wait_fence(cs, predicated, flush_va, *flush_cnt, 0xffffffff);
+ si_emit_wait_fence(cs, false, flush_va, *flush_cnt, 0xffffffff);
}
/* VGT state sync */
if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
@@ -1056,13 +1055,13 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
RADV_CMD_FLAG_INV_GLOBAL_L2 |
RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
!is_mec) {
- radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cs, 0);
}
if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
(chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
- si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9,
+ si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9,
cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
@@ -1076,7 +1075,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
*
* WB doesn't work without NC.
*/
- si_emit_acquire_mem(cs, is_mec, predicated,
+ si_emit_acquire_mem(cs, is_mec, false,
chip_class >= GFX9,
cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
@@ -1085,7 +1084,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
}
if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
si_emit_acquire_mem(cs, is_mec,
- predicated, chip_class >= GFX9,
+ false, chip_class >= GFX9,
cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
@@ -1096,7 +1095,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
* Therefore, it should be last. Done in PFP.
*/
if (cp_coher_cntl)
- si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, cp_coher_cntl);
+ si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9, cp_coher_cntl);
}
void
@@ -1126,7 +1125,6 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
ptr = &cmd_buffer->gfx9_fence_idx;
}
si_cs_emit_cache_flush(cmd_buffer->cs,
- cmd_buffer->state.predicating,
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),