author     Kenneth Graunke <kenneth@whitecape.org>    2021-02-22 10:35:03 -0800
committer  Marge Bot <eric+marge@anholt.net>          2021-02-25 21:03:48 +0000
commit     462c9e173c18bff25e687a534a3863d12f545c9c (patch)
tree       615d5afdfdb71e6b0b7e7be58b4b9c24f13b7179 /src/mesa/drivers/dri/i965/brw_context.c
parent     a56f4f2b4aee0d77ab9c1875138f0e033836a318 (diff)
i965: Rename intel_batchbuffer_* to brw_batch_*.
Shorter, matching the convention in iris, and drops use of "intel_" on
i965-specific code that isn't shared.

Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9207>
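The rename is purely mechanical: every call keeps its arguments, and only the
intel_batchbuffer_ prefix becomes brw_batch_. A sketch of the mapping, drawn
from the call sites touched in this file (signatures unchanged):

    intel_batchbuffer_init(brw);                        ->  brw_batch_init(brw);
    intel_batchbuffer_flush(brw);                       ->  brw_batch_flush(brw);
    intel_batchbuffer_flush_fence(brw, -1, &fence_fd);  ->  brw_batch_flush_fence(brw, -1, &fence_fd);
    intel_batchbuffer_maybe_noop(brw);                  ->  brw_batch_maybe_noop(brw);
    intel_batchbuffer_free(&brw->batch);                ->  brw_batch_free(&brw->batch);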
Diffstat (limited to 'src/mesa/drivers/dri/i965/brw_context.c')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index be26d4f5004..93f51a7fe20 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -237,7 +237,7 @@ intel_flush_front(struct gl_context *ctx)
* performance.
*/
intel_resolve_for_dri2_flush(brw, driDrawable);
- intel_batchbuffer_flush(brw);
+ brw_batch_flush(brw);
flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
@@ -268,7 +268,7 @@ brw_display_shared_buffer(struct brw_context *brw)
* no need to flush again here. But we want to provide a fence_fd to the
* loader, and a redundant flush is the easiest way to acquire one.
*/
- if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd))
+ if (brw_batch_flush_fence(brw, -1, &fence_fd))
return;
}
@@ -283,7 +283,7 @@ intel_glFlush(struct gl_context *ctx)
{
struct brw_context *brw = brw_context(ctx);
- intel_batchbuffer_flush(brw);
+ brw_batch_flush(brw);
intel_flush_front(ctx);
brw_display_shared_buffer(brw);
brw->need_flush_throttle = true;
@@ -297,8 +297,8 @@ intel_glEnable(struct gl_context *ctx, GLenum cap, GLboolean state)
switch (cap) {
case GL_BLACKHOLE_RENDER_INTEL:
brw->frontend_noop = state;
- intel_batchbuffer_flush(brw);
- intel_batchbuffer_maybe_noop(brw);
+ brw_batch_flush(brw);
+ brw_batch_maybe_noop(brw);
/* Because we started previous batches with a potential
* MI_BATCH_BUFFER_END if NOOP was enabled, that means that anything
* that was ever emitted after that never made it to the HW. So when the
@@ -1057,7 +1057,7 @@ brwCreateContext(gl_api api,
intel_fbo_init(brw);
- intel_batchbuffer_init(brw);
+ brw_batch_init(brw);
/* Create a new hardware context. Using a hardware context means that
* our GPU state will be saved/restored on context switch, allowing us
@@ -1259,7 +1259,7 @@ intelDestroyContext(__DRIcontext * driContextPriv)
_swrast_DestroyContext(&brw->ctx);
brw_fini_pipe_control(brw);
- intel_batchbuffer_free(&brw->batch);
+ brw_batch_free(&brw->batch);
brw_bo_unreference(brw->throttle_batch[1]);
brw_bo_unreference(brw->throttle_batch[0]);
@@ -1628,7 +1628,7 @@ intel_query_dri2_buffers(struct brw_context *brw,
* query, we need to make sure all the pending drawing has landed in the
* real front buffer.
*/
- intel_batchbuffer_flush(brw);
+ brw_batch_flush(brw);
intel_flush_front(&brw->ctx);
attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
@@ -1640,7 +1640,7 @@ intel_query_dri2_buffers(struct brw_context *brw,
* So before doing the query, make sure all the pending drawing has
* landed in the real front buffer.
*/
- intel_batchbuffer_flush(brw);
+ brw_batch_flush(brw);
intel_flush_front(&brw->ctx);
}