diff options
Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_context.c')
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_context.c | 86 |
1 files changed, 84 insertions, 2 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c index 937a81eec7e..b5de65236a2 100644 --- a/src/mesa/drivers/dri/i965/brw_context.c +++ b/src/mesa/drivers/dri/i965/brw_context.c @@ -240,13 +240,42 @@ intel_flush_front(struct gl_context *ctx) } static void +brw_display_shared_buffer(struct brw_context *brw) +{ + __DRIcontext *dri_context = brw->driContext; + __DRIdrawable *dri_drawable = dri_context->driDrawablePriv; + __DRIscreen *dri_screen = brw->screen->driScrnPriv; + int fence_fd = -1; + + if (!brw->is_shared_buffer_bound) + return; + + if (!brw->is_shared_buffer_dirty) + return; + + if (brw->screen->has_exec_fence) { + /* This function is always called during a flush operation, so there is + * no need to flush again here. But we want to provide a fence_fd to the + * loader, and a redundant flush is the easiest way to acquire one. + */ + if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd)) + return; + } + + dri_screen->mutableRenderBuffer.loader + ->displaySharedBuffer(dri_drawable, fence_fd, + dri_drawable->loaderPrivate); + brw->is_shared_buffer_dirty = false; +} + +static void intel_glFlush(struct gl_context *ctx) { struct brw_context *brw = brw_context(ctx); intel_batchbuffer_flush(brw); intel_flush_front(ctx); - + brw_display_shared_buffer(brw); brw->need_flush_throttle = true; } @@ -1454,6 +1483,11 @@ intel_prepare_render(struct brw_context *brw) */ if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) brw->front_buffer_dirty = true; + + if (brw->is_shared_buffer_bound) { + /* Subsequent rendering will probably dirty the shared buffer. 
*/ + brw->is_shared_buffer_dirty = true; + } } /** @@ -1687,8 +1721,12 @@ intel_update_image_buffer(struct brw_context *intel, else last_mt = rb->singlesample_mt; - if (last_mt && last_mt->bo == buffer->bo) + if (last_mt && last_mt->bo == buffer->bo) { + if (buffer_type == __DRI_IMAGE_BUFFER_SHARED) { + intel_miptree_make_shareable(intel, last_mt); + } return; + } /* Only allow internal compression if samples == 0. For multisampled * window system buffers, the only thing the single-sampled buffer is used @@ -1717,6 +1755,35 @@ intel_update_image_buffer(struct brw_context *intel, rb->Base.Base.NumSamples > 1) { intel_renderbuffer_upsample(intel, rb); } + + if (buffer_type == __DRI_IMAGE_BUFFER_SHARED) { + /* The compositor and the application may access this image + * concurrently. The display hardware may even scanout the image while + * the GPU is rendering to it. Aux surfaces cause difficulty with + * concurrent access, so permanently disable aux for this miptree. + * + * Perhaps we could improve overall application performance by + * re-enabling the aux surface when EGL_RENDER_BUFFER transitions to + * EGL_BACK_BUFFER, then disabling it again when EGL_RENDER_BUFFER + * returns to EGL_SINGLE_BUFFER. I expect the wins and losses with this + * approach to be highly dependent on the application's GL usage. + * + * I [chadv] expect clever disabling/reenabling to be counterproductive + * in the use cases I care about: applications that render nearly + * realtime handwriting to the surface while possibly undergoing + * simultaneous scanout as a display plane. The app requires low + * render latency. Even though the app spends most of its time in + * shared-buffer mode, it also frequently transitions between + * shared-buffer (EGL_SINGLE_BUFFER) and double-buffer (EGL_BACK_BUFFER) + * mode. Visual stutter during the transitions should be avoided. 
+ * + * In this case, I [chadv] believe reducing the GPU workload at + * shared-buffer/double-buffer transitions would offer a smoother app + * experience than any savings due to aux compression. But I've + * collected no data to prove my theory. + */ + intel_miptree_make_shareable(intel, mt); + } } static void @@ -1777,4 +1844,19 @@ intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable) images.back, __DRI_IMAGE_BUFFER_BACK); } + + if (images.image_mask & __DRI_IMAGE_BUFFER_SHARED) { + assert(images.image_mask == __DRI_IMAGE_BUFFER_SHARED); + drawable->w = images.back->width; + drawable->h = images.back->height; + intel_update_image_buffer(brw, + drawable, + back_rb, + images.back, + __DRI_IMAGE_BUFFER_SHARED); + brw->is_shared_buffer_bound = true; + } else { + brw->is_shared_buffer_bound = false; + brw->is_shared_buffer_dirty = false; + } } |