-rw-r--r--  src/mesa/drivers/dri/i965/brw_blorp_clear.cpp | 119
 1 file changed, 66 insertions(+), 53 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_blorp_clear.cpp b/src/mesa/drivers/dri/i965/brw_blorp_clear.cpp
index d1933ced754..c7f485eb47f 100644
--- a/src/mesa/drivers/dri/i965/brw_blorp_clear.cpp
+++ b/src/mesa/drivers/dri/i965/brw_blorp_clear.cpp
@@ -439,13 +439,75 @@ brw_blorp_const_color_program::compile(struct brw_context *brw,
    return brw_get_program(&func, program_size);
 }
-extern "C" {
+
 bool
-brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
+do_single_blorp_clear(struct brw_context *brw, struct gl_framebuffer *fb,
+                      struct gl_renderbuffer *rb, unsigned buf,
                       bool partial_clear)
 {
    struct gl_context *ctx = &brw->ctx;
+   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+   brw_blorp_clear_params params(brw, fb, rb, ctx->Color.ColorMask[buf],
+                                 partial_clear);
+
+   bool is_fast_clear =
+      (params.fast_clear_op == GEN7_FAST_CLEAR_OP_FAST_CLEAR);
+   if (is_fast_clear) {
+      /* Record the clear color in the miptree so that it will be
+       * programmed in SURFACE_STATE by later rendering and resolve
+       * operations.
+       */
+      uint32_t new_color_value =
+         compute_fast_clear_color_bits(&ctx->Color.ClearColor);
+      if (irb->mt->fast_clear_color_value != new_color_value) {
+         irb->mt->fast_clear_color_value = new_color_value;
+         brw->state.dirty.brw |= BRW_NEW_SURFACES;
+      }
+
+      /* If the buffer is already in INTEL_MCS_STATE_CLEAR, the clear is
+       * redundant and can be skipped.
+       */
+      if (irb->mt->mcs_state == INTEL_MCS_STATE_CLEAR)
+         return true;
+
+      /* If the MCS buffer hasn't been allocated yet, we need to allocate
+       * it now.
+       */
+      if (!irb->mt->mcs_mt) {
+         if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
+            /* MCS allocation failed--probably this will only happen in
+             * out-of-memory conditions. But in any case, try to recover
+             * by falling back to a non-blorp clear technique.
+             */
+            return false;
+         }
+         brw->state.dirty.brw |= BRW_NEW_SURFACES;
+      }
+   }
+
+   DBG("%s to mt %p level %d layer %d\n", __FUNCTION__,
+       irb->mt, irb->mt_level, irb->mt_layer);
+
+   brw_blorp_exec(brw, &params);
+
+   if (is_fast_clear) {
+      /* Now that the fast clear has occurred, put the buffer in
+       * INTEL_MCS_STATE_CLEAR so that we won't waste time doing redundant
+       * clears.
+       */
+      irb->mt->mcs_state = INTEL_MCS_STATE_CLEAR;
+   }
+
+   return true;
+}
+
+extern "C" {
+bool
+brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
+                      bool partial_clear)
+{
    /* The constant color clear code doesn't work for multisampled surfaces, so
     * we need to support falling back to other clear mechanisms.
     * Unfortunately, our clear code is based on a bitmask that doesn't
@@ -463,7 +525,6 @@ brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
    for (unsigned buf = 0; buf < fb->_NumColorDrawBuffers; buf++) {
       struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[buf];
-      struct intel_renderbuffer *irb = intel_renderbuffer(rb);
       /* If this is an ES2 context or GL_ARB_ES2_compatibility is supported,
        * the framebuffer can be complete with some attachments missing. In
@@ -472,56 +533,8 @@ brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
       if (rb == NULL)
          continue;
-      brw_blorp_clear_params params(brw, fb, rb, ctx->Color.ColorMask[buf],
-                                    partial_clear);
-
-      bool is_fast_clear =
-         (params.fast_clear_op == GEN7_FAST_CLEAR_OP_FAST_CLEAR);
-      if (is_fast_clear) {
-         /* Record the clear color in the miptree so that it will be
-          * programmed in SURFACE_STATE by later rendering and resolve
-          * operations.
-          */
-         uint32_t new_color_value =
-            compute_fast_clear_color_bits(&ctx->Color.ClearColor);
-         if (irb->mt->fast_clear_color_value != new_color_value) {
-            irb->mt->fast_clear_color_value = new_color_value;
-            brw->state.dirty.brw |= BRW_NEW_SURFACES;
-         }
-
-         /* If the buffer is already in INTEL_MCS_STATE_CLEAR, the clear is
-          * redundant and can be skipped.
-          */
-         if (irb->mt->mcs_state == INTEL_MCS_STATE_CLEAR)
-            continue;
-
-         /* If the MCS buffer hasn't been allocated yet, we need to allocate
-          * it now.
-          */
-         if (!irb->mt->mcs_mt) {
-            if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
-               /* MCS allocation failed--probably this will only happen in
-                * out-of-memory conditions. But in any case, try to recover
-                * by falling back to a non-blorp clear technique.
-                */
-               return false;
-            }
-            brw->state.dirty.brw |= BRW_NEW_SURFACES;
-         }
-      }
-
-      DBG("%s to mt %p level %d layer %d\n", __FUNCTION__,
-          irb->mt, irb->mt_level, irb->mt_layer);
-
-      brw_blorp_exec(brw, &params);
-
-      if (is_fast_clear) {
-         /* Now that the fast clear has occurred, put the buffer in
-          * INTEL_MCS_STATE_CLEAR so that we won't waste time doing redundant
-          * clears.
-          */
-         irb->mt->mcs_state = INTEL_MCS_STATE_CLEAR;
-      }
+      if (!do_single_blorp_clear(brw, fb, rb, buf, partial_clear))
+         return false;
    }
    return true;
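
Usage note: the bool return value propagated through do_single_blorp_clear() and brw_blorp_clear_color() is what lets a caller fall back to a non-blorp clear when blorp cannot handle the request (for example, when MCS allocation fails). A minimal call-site sketch follows; the wrapper function try_blorp_color_clear() and its mask handling are hypothetical illustrations of the usual brw_clear()-style pattern, not code from this patch.

/* Hypothetical call-site sketch (not part of this patch). Assumes the usual
 * driver headers (brw_context.h etc.) are available. If blorp handles the
 * color clear, the color bit is removed from the mask; otherwise the bit is
 * left set so a generic clear path can deal with it.
 */
static void
try_blorp_color_clear(struct brw_context *brw, struct gl_framebuffer *fb,
                      GLbitfield *mask, bool partial_clear)
{
   if (*mask & GL_COLOR_BUFFER_BIT) {
      if (brw_blorp_clear_color(brw, fb, partial_clear))
         *mask &= ~GL_COLOR_BUFFER_BIT;   /* blorp handled the color clear */
   }
}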