-rw-r--r--  docs/gallium/context.rst | 32
-rw-r--r--  docs/gallium/screen.rst | 2
-rw-r--r--  src/gallium/auxiliary/driver_rbug/rbug_core.c | 2
-rw-r--r--  src/gallium/auxiliary/driver_trace/tr_context.c | 2
-rw-r--r--  src/gallium/auxiliary/hud/font.c | 2
-rw-r--r--  src/gallium/auxiliary/indices/u_primconvert.c | 2
-rw-r--r--  src/gallium/auxiliary/postprocess/pp_mlaa.c | 2
-rw-r--r--  src/gallium/auxiliary/util/u_debug_flush.c | 4
-rw-r--r--  src/gallium/auxiliary/util/u_debug_image.c | 4
-rw-r--r--  src/gallium/auxiliary/util/u_draw.c | 4
-rw-r--r--  src/gallium/auxiliary/util/u_dump_defines.c | 20
-rw-r--r--  src/gallium/auxiliary/util/u_index_modify.c | 6
-rw-r--r--  src/gallium/auxiliary/util/u_inlines.h | 16
-rw-r--r--  src/gallium/auxiliary/util/u_prim_restart.c | 8
-rw-r--r--  src/gallium/auxiliary/util/u_pstipple.c | 2
-rw-r--r--  src/gallium/auxiliary/util/u_suballoc.c | 2
-rw-r--r--  src/gallium/auxiliary/util/u_surface.c | 14
-rw-r--r--  src/gallium/auxiliary/util/u_tests.c | 2
-rw-r--r--  src/gallium/auxiliary/util/u_threaded_context.c | 86
-rw-r--r--  src/gallium/auxiliary/util/u_threaded_context.h | 4
-rw-r--r--  src/gallium/auxiliary/util/u_transfer.c | 18
-rw-r--r--  src/gallium/auxiliary/util/u_transfer_helper.c | 14
-rw-r--r--  src/gallium/auxiliary/util/u_upload_mgr.c | 28
-rw-r--r--  src/gallium/auxiliary/util/u_vbuf.c | 8
-rw-r--r--  src/gallium/auxiliary/vl/vl_compositor.c | 2
-rw-r--r--  src/gallium/auxiliary/vl/vl_compositor_cs.c | 2
-rw-r--r--  src/gallium/auxiliary/vl/vl_idct.c | 4
-rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_decoder.c | 4
-rw-r--r--  src/gallium/auxiliary/vl/vl_vertex_buffers.c | 8
-rw-r--r--  src/gallium/auxiliary/vl/vl_zscan.c | 6
-rw-r--r--  src/gallium/drivers/etnaviv/etnaviv_transfer.c | 50
-rw-r--r--  src/gallium/drivers/freedreno/freedreno_resource.c | 38
-rw-r--r--  src/gallium/drivers/i915/i915_resource_texture.c | 8
-rw-r--r--  src/gallium/drivers/iris/iris_bufmgr.h | 10
-rw-r--r--  src/gallium/drivers/iris/iris_resource.c | 52
-rw-r--r--  src/gallium/drivers/lima/lima_resource.c | 26
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_setup.c | 2
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_state_cs.c | 4
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_state_sampler.c | 4
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_surface.c | 10
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_texture.c | 18
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_buffer.c | 52
-rw-r--r--  src/gallium/drivers/nouveau/nouveau_winsys.h | 8
-rw-r--r--  src/gallium/drivers/nouveau/nv30/nv30_draw.c | 12
-rw-r--r--  src/gallium/drivers/nouveau/nv30/nv30_fragprog.c | 2
-rw-r--r--  src/gallium/drivers/nouveau/nv30/nv30_miptree.c | 8
-rw-r--r--  src/gallium/drivers/nouveau/nv30/nv30_transfer.c | 2
-rw-r--r--  src/gallium/drivers/nouveau/nv50/nv50_transfer.c | 10
-rw-r--r--  src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c | 24
-rw-r--r--  src/gallium/drivers/panfrost/pan_resource.c | 30
-rw-r--r--  src/gallium/drivers/r300/r300_query.c | 4
-rw-r--r--  src/gallium/drivers/r300/r300_render.c | 8
-rw-r--r--  src/gallium/drivers/r300/r300_render_translate.c | 6
-rw-r--r--  src/gallium/drivers/r300/r300_screen_buffer.c | 10
-rw-r--r--  src/gallium/drivers/r300/r300_transfer.c | 8
-rw-r--r--  src/gallium/drivers/r600/compute_memory_pool.c | 6
-rw-r--r--  src/gallium/drivers/r600/eg_debug.c | 4
-rw-r--r--  src/gallium/drivers/r600/evergreen_compute.c | 8
-rw-r--r--  src/gallium/drivers/r600/r600_asm.c | 2
-rw-r--r--  src/gallium/drivers/r600/r600_blit.c | 2
-rw-r--r--  src/gallium/drivers/r600/r600_buffer_common.c | 62
-rw-r--r--  src/gallium/drivers/r600/r600_pipe.c | 2
-rw-r--r--  src/gallium/drivers/r600/r600_query.c | 12
-rw-r--r--  src/gallium/drivers/r600/r600_shader.c | 2
-rw-r--r--  src/gallium/drivers/r600/r600_state.c | 2
-rw-r--r--  src/gallium/drivers/r600/r600_state_common.c | 2
-rw-r--r--  src/gallium/drivers/r600/r600_test_dma.c | 4
-rw-r--r--  src/gallium/drivers/r600/r600_texture.c | 14
-rw-r--r--  src/gallium/drivers/r600/radeon_uvd.c | 6
-rw-r--r--  src/gallium/drivers/r600/radeon_vce.c | 4
-rw-r--r--  src/gallium/drivers/r600/radeon_video.c | 4
-rw-r--r--  src/gallium/drivers/radeon/radeon_uvd.c | 6
-rw-r--r--  src/gallium/drivers/radeon/radeon_uvd_enc.c | 2
-rw-r--r--  src/gallium/drivers/radeon/radeon_vce.c | 4
-rw-r--r--  src/gallium/drivers/radeon/radeon_vcn_dec.c | 10
-rw-r--r--  src/gallium/drivers/radeon/radeon_vcn_enc.c | 2
-rw-r--r--  src/gallium/drivers/radeon/radeon_video.c | 4
-rw-r--r--  src/gallium/drivers/radeon/radeon_winsys.h | 4
-rw-r--r--  src/gallium/drivers/radeonsi/gfx10_query.c | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_buffer.c | 74
-rw-r--r--  src/gallium/drivers/radeonsi/si_debug.c | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_fence.c | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_perfcounter.c | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.c | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_query.c | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader.c | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_state.c | 2
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_draw.c | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_test_dma.c | 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_texture.c | 16
-rw-r--r--  src/gallium/drivers/softpipe/sp_compute.c | 2
-rw-r--r--  src/gallium/drivers/softpipe/sp_state_sampler.c | 2
-rw-r--r--  src/gallium/drivers/softpipe/sp_tex_tile_cache.c | 2
-rw-r--r--  src/gallium/drivers/softpipe/sp_texture.c | 10
-rw-r--r--  src/gallium/drivers/softpipe/sp_tile_cache.c | 4
-rw-r--r--  src/gallium/drivers/svga/svga_draw_arrays.c | 2
-rw-r--r--  src/gallium/drivers/svga/svga_draw_elements.c | 6
-rw-r--r--  src/gallium/drivers/svga/svga_pipe_query.c | 2
-rw-r--r--  src/gallium/drivers/svga/svga_pipe_streamout.c | 2
-rw-r--r--  src/gallium/drivers/svga/svga_resource_buffer.c | 22
-rw-r--r--  src/gallium/drivers/svga/svga_resource_buffer.h | 4
-rw-r--r--  src/gallium/drivers/svga/svga_resource_buffer_upload.c | 8
-rw-r--r--  src/gallium/drivers/svga/svga_resource_texture.c | 42
-rw-r--r--  src/gallium/drivers/svga/svga_state_constants.c | 4
-rw-r--r--  src/gallium/drivers/svga/svga_swtnl_backend.c | 8
-rw-r--r--  src/gallium/drivers/svga/svga_swtnl_draw.c | 12
-rw-r--r--  src/gallium/drivers/svga/svga_winsys.h | 6
-rw-r--r--  src/gallium/drivers/swr/swr_context.cpp | 12
-rw-r--r--  src/gallium/drivers/swr/swr_screen.cpp | 2
-rw-r--r--  src/gallium/drivers/v3d/v3d_resource.c | 28
-rw-r--r--  src/gallium/drivers/v3d/v3dx_draw.c | 2
-rw-r--r--  src/gallium/drivers/vc4/vc4_resource.c | 28
-rw-r--r--  src/gallium/drivers/virgl/virgl_buffer.c | 4
-rw-r--r--  src/gallium/drivers/virgl/virgl_query.c | 2
-rw-r--r--  src/gallium/drivers/virgl/virgl_resource.c | 40
-rw-r--r--  src/gallium/drivers/virgl/virgl_texture.c | 14
-rw-r--r--  src/gallium/drivers/zink/zink_resource.c | 20
-rw-r--r--  src/gallium/frontends/clover/core/resource.cpp | 12
-rw-r--r--  src/gallium/frontends/dri/dri2.c | 4
-rw-r--r--  src/gallium/frontends/dri/drisw.c | 2
-rw-r--r--  src/gallium/frontends/glx/xlib/xm_api.c | 2
-rw-r--r--  src/gallium/frontends/nine/buffer9.c | 12
-rw-r--r--  src/gallium/frontends/nine/device9.c | 10
-rw-r--r--  src/gallium/frontends/nine/nine_buffer_upload.c | 12
-rw-r--r--  src/gallium/frontends/nine/nine_state.c | 4
-rw-r--r--  src/gallium/frontends/nine/surface9.c | 8
-rw-r--r--  src/gallium/frontends/nine/volume9.c | 6
-rw-r--r--  src/gallium/frontends/omx/bellagio/vid_enc.c | 2
-rw-r--r--  src/gallium/frontends/omx/tizonia/h264einport.c | 2
-rw-r--r--  src/gallium/frontends/omx/vid_dec_common.c | 2
-rw-r--r--  src/gallium/frontends/omx/vid_enc_common.c | 8
-rw-r--r--  src/gallium/frontends/osmesa/osmesa.c | 4
-rw-r--r--  src/gallium/frontends/va/buffer.c | 2
-rw-r--r--  src/gallium/frontends/va/image.c | 8
-rw-r--r--  src/gallium/frontends/va/surface.c | 2
-rw-r--r--  src/gallium/frontends/vallium/val_execute.c | 14
-rw-r--r--  src/gallium/frontends/vdpau/bitmap.c | 2
-rw-r--r--  src/gallium/frontends/vdpau/output.c | 10
-rw-r--r--  src/gallium/frontends/vdpau/surface.c | 8
-rw-r--r--  src/gallium/frontends/xa/xa_context.c | 18
-rw-r--r--  src/gallium/frontends/xvmc/subpicture.c | 6
-rw-r--r--  src/gallium/include/frontend/sw_winsys.h | 2
-rw-r--r--  src/gallium/include/pipe/p_context.h | 6
-rw-r--r--  src/gallium/include/pipe/p_defines.h | 40
-rw-r--r--  src/gallium/tests/graw/fs-test.c | 4
-rw-r--r--  src/gallium/tests/graw/graw_util.h | 4
-rw-r--r--  src/gallium/tests/graw/gs-test.c | 8
-rw-r--r--  src/gallium/tests/graw/quad-sample.c | 4
-rw-r--r--  src/gallium/tests/graw/vs-test.c | 6
-rw-r--r--  src/gallium/tests/trivial/compute.c | 4
-rw-r--r--  src/gallium/tests/trivial/quad-tex.c | 2
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 8
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_cs.c | 4
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_bo.c | 8
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_buffer.c | 18
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_query.c | 4
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_screen_svga.c | 2
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_shader.c | 2
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_surface.c | 34
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_surface.h | 2
-rw-r--r--  src/gallium/winsys/sw/dri/dri_sw_winsys.c | 4
-rw-r--r--  src/gallium/winsys/sw/kms-dri/kms_dri_sw_winsys.c | 4
-rw-r--r--  src/gallium/winsys/sw/wrapper/wrapper_sw_winsys.c | 4
-rw-r--r--  src/mesa/state_tracker/st_atom_pixeltransfer.c | 2
-rw-r--r--  src/mesa/state_tracker/st_cb_bitmap.c | 4
-rw-r--r--  src/mesa/state_tracker/st_cb_bufferobjects.c | 32
-rw-r--r--  src/mesa/state_tracker/st_cb_copyimage.c | 4
-rw-r--r--  src/mesa/state_tracker/st_cb_drawpixels.c | 24
-rw-r--r--  src/mesa/state_tracker/st_cb_readpixels.c | 2
-rw-r--r--  src/mesa/state_tracker/st_cb_texture.c | 12
-rw-r--r--  src/mesa/state_tracker/st_draw_feedback.c | 16
-rw-r--r--  src/mesa/state_tracker/st_texture.c | 4
-rw-r--r--  src/panfrost/shared/pan_minmax_cache.c | 2
173 files changed, 865 insertions, 865 deletions
diff --git a/docs/gallium/context.rst b/docs/gallium/context.rst
index 7f8111b2316..cc95ee4dc17 100644
--- a/docs/gallium/context.rst
+++ b/docs/gallium/context.rst
@@ -754,49 +754,49 @@ the last (partial) page requires a box that ends at the end of the buffer
.. _pipe_transfer:
-PIPE_TRANSFER
+PIPE_MAP
^^^^^^^^^^^^^
These flags control the behavior of a transfer object.
-``PIPE_TRANSFER_READ``
+``PIPE_MAP_READ``
Resource contents read back (or accessed directly) at transfer create time.
-``PIPE_TRANSFER_WRITE``
+``PIPE_MAP_WRITE``
Resource contents will be written back at transfer_unmap time (or modified
as a result of being accessed directly).
-``PIPE_TRANSFER_MAP_DIRECTLY``
+``PIPE_MAP_DIRECTLY``
a transfer should directly map the resource. May return NULL if not supported.
-``PIPE_TRANSFER_DISCARD_RANGE``
+``PIPE_MAP_DISCARD_RANGE``
The memory within the mapped region is discarded. Cannot be used with
- ``PIPE_TRANSFER_READ``.
+ ``PIPE_MAP_READ``.
-``PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE``
+``PIPE_MAP_DISCARD_WHOLE_RESOURCE``
Discards all memory backing the resource. It should not be used with
- ``PIPE_TRANSFER_READ``.
+ ``PIPE_MAP_READ``.
-``PIPE_TRANSFER_DONTBLOCK``
+``PIPE_MAP_DONTBLOCK``
Fail if the resource cannot be mapped immediately.
-``PIPE_TRANSFER_UNSYNCHRONIZED``
+``PIPE_MAP_UNSYNCHRONIZED``
Do not synchronize pending operations on the resource when mapping. The
interaction of any writes to the map and any operations pending on the
- resource are undefined. Cannot be used with ``PIPE_TRANSFER_READ``.
+ resource are undefined. Cannot be used with ``PIPE_MAP_READ``.
-``PIPE_TRANSFER_FLUSH_EXPLICIT``
+``PIPE_MAP_FLUSH_EXPLICIT``
Written ranges will be notified later with :ref:`transfer_flush_region`.
- Cannot be used with ``PIPE_TRANSFER_READ``.
+ Cannot be used with ``PIPE_MAP_READ``.
-``PIPE_TRANSFER_PERSISTENT``
+``PIPE_MAP_PERSISTENT``
Allows the resource to be used for rendering while mapped.
PIPE_RESOURCE_FLAG_MAP_PERSISTENT must be set when creating
the resource.
If COHERENT is not set, memory_barrier(PIPE_BARRIER_MAPPED_BUFFER)
must be called to ensure the device can see what the CPU has written.
-``PIPE_TRANSFER_COHERENT``
+``PIPE_MAP_COHERENT``
If PERSISTENT is set, this ensures any writes done by the device are
immediately visible to the CPU and vice versa.
PIPE_RESOURCE_FLAG_MAP_COHERENT must be set when creating
@@ -909,4 +909,4 @@ uploaded data, unless:
mapping, memory_barrier(PIPE_BARRIER_MAPPED_BUFFER) should be called on the
context that has mapped the resource. No flush is required.
-* Mapping the resource with PIPE_TRANSFER_MAP_DIRECTLY.
+* Mapping the resource with PIPE_MAP_DIRECTLY.
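As a minimal sketch of how the renamed flags are used in practice (assuming a valid pipe_context and buffer resource; the wrapper name write_buffer_range and its parameters are hypothetical, not part of this commit), a partial buffer update with the u_inlines.h helpers touched later in this diff looks roughly like this:

#include <string.h>
#include "util/u_inlines.h"

/* Hypothetical helper: overwrite size bytes at offset in buf with data. */
static void
write_buffer_range(struct pipe_context *pipe, struct pipe_resource *buf,
                   unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   /* WRITE: contents are written back when the transfer is unmapped.
    * DISCARD_RANGE: the previous contents of the mapped range may be
    * thrown away, so the driver does not need to read back or stall. */
   void *map = pipe_buffer_map_range(pipe, buf, offset, size,
                                     PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
                                     &transfer);
   if (!map)
      return;
   memcpy(map, data, size);
   pipe_buffer_unmap(pipe, transfer);
}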
diff --git a/docs/gallium/screen.rst b/docs/gallium/screen.rst
index 01d5fb3b22a..ff8d36086ba 100644
--- a/docs/gallium/screen.rst
+++ b/docs/gallium/screen.rst
@@ -210,7 +210,7 @@ The integer capabilities:
hardware implements the SM5 features, component selection,
shadow comparison, and run-time offsets.
* ``PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT``: Whether
- PIPE_TRANSFER_PERSISTENT and PIPE_TRANSFER_COHERENT are supported
+ PIPE_MAP_PERSISTENT and PIPE_MAP_COHERENT are supported
for buffers.
* ``PIPE_CAP_TEXTURE_QUERY_LOD``: Whether the ``LODQ`` instruction is
supported.
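A rough sketch of how a caller might honour that capability when picking map flags, mirroring what u_upload_create() does further down in this diff (here `screen` is an assumed struct pipe_screen pointer):

unsigned map_flags = PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED;

if (screen->get_param(screen, PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT)) {
   /* The buffer may stay mapped while it is used for rendering. */
   map_flags |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
} else {
   /* Otherwise fall back to explicitly flushing written ranges. */
   map_flags |= PIPE_MAP_FLUSH_EXPLICIT;
}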
diff --git a/src/gallium/auxiliary/driver_rbug/rbug_core.c b/src/gallium/auxiliary/driver_rbug/rbug_core.c
index 53cc941c2ca..9bf9d212e68 100644
--- a/src/gallium/auxiliary/driver_rbug/rbug_core.c
+++ b/src/gallium/auxiliary/driver_rbug/rbug_core.c
@@ -272,7 +272,7 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
tex = tr_tex->resource;
map = pipe_transfer_map(context, tex,
gptr->level, gptr->face + gptr->zslice,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
gptr->x, gptr->y, gptr->w, gptr->h, &t);
rbug_send_texture_read_reply(tr_rbug->con, serial,
diff --git a/src/gallium/auxiliary/driver_trace/tr_context.c b/src/gallium/auxiliary/driver_trace/tr_context.c
index 4b19b6c31fa..b0bd53ee6a9 100644
--- a/src/gallium/auxiliary/driver_trace/tr_context.c
+++ b/src/gallium/auxiliary/driver_trace/tr_context.c
@@ -1440,7 +1440,7 @@ trace_context_transfer_map(struct pipe_context *_context,
*transfer = trace_transfer_create(tr_context, resource, result);
if (map) {
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
trace_transfer(*transfer)->map = map;
}
}
diff --git a/src/gallium/auxiliary/hud/font.c b/src/gallium/auxiliary/hud/font.c
index 88b0349fda2..c7f8aef0d1c 100644
--- a/src/gallium/auxiliary/hud/font.c
+++ b/src/gallium/auxiliary/hud/font.c
@@ -417,7 +417,7 @@ util_font_create_fixed_8x13(struct pipe_context *pipe,
return FALSE;
}
- map = pipe_transfer_map(pipe, tex, 0, 0, PIPE_TRANSFER_WRITE, 0, 0,
+ map = pipe_transfer_map(pipe, tex, 0, 0, PIPE_MAP_WRITE, 0, 0,
tex->width0, tex->height0, &transfer);
if (!map) {
pipe_resource_reference(&tex, NULL);
diff --git a/src/gallium/auxiliary/indices/u_primconvert.c b/src/gallium/auxiliary/indices/u_primconvert.c
index d0a1a78e40e..337ee5b9460 100644
--- a/src/gallium/auxiliary/indices/u_primconvert.c
+++ b/src/gallium/auxiliary/indices/u_primconvert.c
@@ -130,7 +130,7 @@ util_primconvert_draw_vbo(struct primconvert_context *pc,
src = info->has_user_indices ? info->index.user : NULL;
if (!src) {
src = pipe_buffer_map(pc->pipe, info->index.resource,
- PIPE_TRANSFER_READ, &src_transfer);
+ PIPE_MAP_READ, &src_transfer);
}
src = (const uint8_t *)src;
}
diff --git a/src/gallium/auxiliary/postprocess/pp_mlaa.c b/src/gallium/auxiliary/postprocess/pp_mlaa.c
index 51e3e0260fa..ac56560a2b9 100644
--- a/src/gallium/auxiliary/postprocess/pp_mlaa.c
+++ b/src/gallium/auxiliary/postprocess/pp_mlaa.c
@@ -256,7 +256,7 @@ pp_jimenezmlaa_init_run(struct pp_queue_t *ppq, unsigned int n,
u_box_2d(0, 0, 165, 165, &box);
ppq->p->pipe->texture_subdata(ppq->p->pipe, ppq->areamaptex, 0,
- PIPE_TRANSFER_WRITE, &box,
+ PIPE_MAP_WRITE, &box,
areamap, 165 * 2, sizeof(areamap));
ppq->shaders[n][1] = pp_tgsi_to_state(ppq->p->pipe, offsetvs, true,
diff --git a/src/gallium/auxiliary/util/u_debug_flush.c b/src/gallium/auxiliary/util/u_debug_flush.c
index b8f6870f0a1..4f3c98aec2f 100644
--- a/src/gallium/auxiliary/util/u_debug_flush.c
+++ b/src/gallium/auxiliary/util/u_debug_flush.c
@@ -216,9 +216,9 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
return;
mtx_lock(&fbuf->mutex);
- map_sync = !(flags & PIPE_TRANSFER_UNSYNCHRONIZED);
+ map_sync = !(flags & PIPE_MAP_UNSYNCHRONIZED);
persistent = !map_sync || fbuf->supports_persistent ||
- !!(flags & PIPE_TRANSFER_PERSISTENT);
+ !!(flags & PIPE_MAP_PERSISTENT);
/* Recursive maps are allowed if previous maps are persistent,
* or if the current map is unsync. In other cases we might flush
diff --git a/src/gallium/auxiliary/util/u_debug_image.c b/src/gallium/auxiliary/util/u_debug_image.c
index 550fc86ce81..91bfa10af88 100644
--- a/src/gallium/auxiliary/util/u_debug_image.c
+++ b/src/gallium/auxiliary/util/u_debug_image.c
@@ -115,7 +115,7 @@ debug_dump_surface(struct pipe_context *pipe,
data = pipe_transfer_map(pipe, texture, surface->u.tex.level,
surface->u.tex.first_layer,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
0, 0, surface->width, surface->height, &transfer);
if (!data)
return;
@@ -193,7 +193,7 @@ debug_dump_surface_bmp(struct pipe_context *pipe,
void *ptr;
ptr = pipe_transfer_map(pipe, texture, surface->u.tex.level,
- surface->u.tex.first_layer, PIPE_TRANSFER_READ,
+ surface->u.tex.first_layer, PIPE_MAP_READ,
0, 0, surface->width, surface->height, &transfer);
debug_dump_transfer_bmp(pipe, filename, transfer, ptr);
diff --git a/src/gallium/auxiliary/util/u_draw.c b/src/gallium/auxiliary/util/u_draw.c
index f4ac2b107a9..90d01297c49 100644
--- a/src/gallium/auxiliary/util/u_draw.c
+++ b/src/gallium/auxiliary/util/u_draw.c
@@ -150,7 +150,7 @@ util_draw_indirect(struct pipe_context *pipe,
uint32_t *dc_param = pipe_buffer_map_range(pipe,
info_in->indirect->indirect_draw_count,
info_in->indirect->indirect_draw_count_offset,
- 4, PIPE_TRANSFER_READ, &dc_transfer);
+ 4, PIPE_MAP_READ, &dc_transfer);
if (!dc_transfer) {
debug_printf("%s: failed to map indirect draw count buffer\n", __FUNCTION__);
return;
@@ -167,7 +167,7 @@ util_draw_indirect(struct pipe_context *pipe,
info_in->indirect->buffer,
info_in->indirect->offset,
(num_params * info_in->indirect->draw_count) * sizeof(uint32_t),
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&transfer);
if (!transfer) {
debug_printf("%s: failed to map indirect buffer\n", __FUNCTION__);
diff --git a/src/gallium/auxiliary/util/u_dump_defines.c b/src/gallium/auxiliary/util/u_dump_defines.c
index 41108c7248a..c228ed5a938 100644
--- a/src/gallium/auxiliary/util/u_dump_defines.c
+++ b/src/gallium/auxiliary/util/u_dump_defines.c
@@ -512,16 +512,16 @@ util_dump_query_value_type(FILE *stream, unsigned value)
static const char * const
util_transfer_usage_names[] = {
- "PIPE_TRANSFER_READ",
- "PIPE_TRANSFER_WRITE",
- "PIPE_TRANSFER_MAP_DIRECTLY",
- "PIPE_TRANSFER_DISCARD_RANGE",
- "PIPE_TRANSFER_DONTBLOCK",
- "PIPE_TRANSFER_UNSYNCHRONIZED",
- "PIPE_TRANSFER_FLUSH_EXPLICIT",
- "PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE",
- "PIPE_TRANSFER_PERSISTENT",
- "PIPE_TRANSFER_COHERENT",
+ "PIPE_MAP_READ",
+ "PIPE_MAP_WRITE",
+ "PIPE_MAP_DIRECTLY",
+ "PIPE_MAP_DISCARD_RANGE",
+ "PIPE_MAP_DONTBLOCK",
+ "PIPE_MAP_UNSYNCHRONIZED",
+ "PIPE_MAP_FLUSH_EXPLICIT",
+ "PIPE_MAP_DISCARD_WHOLE_RESOURCE",
+ "PIPE_MAP_PERSISTENT",
+ "PIPE_MAP_COHERENT",
};
DEFINE_UTIL_DUMP_FLAGS_CONTINUOUS(transfer_usage)
diff --git a/src/gallium/auxiliary/util/u_index_modify.c b/src/gallium/auxiliary/util/u_index_modify.c
index 4e9349a7db6..017d4c3ab74 100644
--- a/src/gallium/auxiliary/util/u_index_modify.c
+++ b/src/gallium/auxiliary/util/u_index_modify.c
@@ -43,7 +43,7 @@ void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
in_map = info->index.user;
} else {
in_map = pipe_buffer_map(context, info->index.resource,
- PIPE_TRANSFER_READ |
+ PIPE_MAP_READ |
add_transfer_flags,
&src_transfer);
}
@@ -77,7 +77,7 @@ void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
in_map = info->index.user;
} else {
in_map = pipe_buffer_map(context, info->index.resource,
- PIPE_TRANSFER_READ |
+ PIPE_MAP_READ |
add_transfer_flags,
&in_transfer);
}
@@ -111,7 +111,7 @@ void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
in_map = info->index.user;
} else {
in_map = pipe_buffer_map(context, info->index.resource,
- PIPE_TRANSFER_READ |
+ PIPE_MAP_READ |
add_transfer_flags,
&in_transfer);
}
diff --git a/src/gallium/auxiliary/util/u_inlines.h b/src/gallium/auxiliary/util/u_inlines.h
index 9a1315d01ec..c20e90e45ba 100644
--- a/src/gallium/auxiliary/util/u_inlines.h
+++ b/src/gallium/auxiliary/util/u_inlines.h
@@ -321,7 +321,7 @@ pipe_buffer_create_const0(struct pipe_screen *screen,
* Map a range of a resource.
* \param offset start of region, in bytes
* \param length size of region, in bytes
- * \param access bitmask of PIPE_TRANSFER_x flags
+ * \param access bitmask of PIPE_MAP_x flags
* \param transfer returns a transfer object
*/
static inline void *
@@ -352,7 +352,7 @@ pipe_buffer_map_range(struct pipe_context *pipe,
/**
* Map whole resource.
- * \param access bitmask of PIPE_TRANSFER_x flags
+ * \param access bitmask of PIPE_MAP_x flags
* \param transfer returns a transfer object
*/
static inline void *
@@ -405,7 +405,7 @@ pipe_buffer_write(struct pipe_context *pipe,
const void *data)
{
/* Don't set any other usage bits. Drivers should derive them. */
- pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
+ pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
}
/**
@@ -421,8 +421,8 @@ pipe_buffer_write_nooverlap(struct pipe_context *pipe,
const void *data)
{
pipe->buffer_subdata(pipe, buf,
- (PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED),
+ (PIPE_MAP_WRITE |
+ PIPE_MAP_UNSYNCHRONIZED),
offset, size, data);
}
@@ -458,7 +458,7 @@ pipe_buffer_read(struct pipe_context *pipe,
map = (ubyte *) pipe_buffer_map_range(pipe,
buf,
offset, size,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&src_transfer);
if (!map)
return;
@@ -470,7 +470,7 @@ pipe_buffer_read(struct pipe_context *pipe,
/**
* Map a resource for reading/writing.
- * \param access bitmask of PIPE_TRANSFER_x flags
+ * \param access bitmask of PIPE_MAP_x flags
*/
static inline void *
pipe_transfer_map(struct pipe_context *context,
@@ -493,7 +493,7 @@ pipe_transfer_map(struct pipe_context *context,
/**
* Map a 3D (texture) resource for reading/writing.
- * \param access bitmask of PIPE_TRANSFER_x flags
+ * \param access bitmask of PIPE_MAP_x flags
*/
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
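For reference, a call-site sketch of the two convenience paths documented above (`pipe`, `buf` and the zero offsets are assumptions for illustration, not values from this commit):

uint32_t value = 42;

/* Write path: forwards to pipe->buffer_subdata() with PIPE_MAP_WRITE. */
pipe_buffer_write(pipe, buf, 0, sizeof(value), &value);

/* Read path: maps the range with PIPE_MAP_READ, copies out, and unmaps. */
uint32_t readback = 0;
pipe_buffer_read(pipe, buf, 0, sizeof(readback), &readback);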
diff --git a/src/gallium/auxiliary/util/u_prim_restart.c b/src/gallium/auxiliary/util/u_prim_restart.c
index 844c5228775..eef2b4c622c 100644
--- a/src/gallium/auxiliary/util/u_prim_restart.c
+++ b/src/gallium/auxiliary/util/u_prim_restart.c
@@ -49,7 +49,7 @@ read_indirect_elements(struct pipe_context *context, struct pipe_draw_indirect_i
map = pipe_buffer_map_range(context, indirect->buffer,
indirect->offset,
read_size,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&transfer);
assert(map);
memcpy(&ret, map, read_size);
@@ -129,7 +129,7 @@ util_translate_prim_restart_ib(struct pipe_context *context,
/* Map new / dest index buffer */
dst_map = pipe_buffer_map(context, *dst_buffer,
- PIPE_TRANSFER_WRITE, &dst_transfer);
+ PIPE_MAP_WRITE, &dst_transfer);
if (!dst_map)
goto error;
@@ -140,7 +140,7 @@ util_translate_prim_restart_ib(struct pipe_context *context,
src_map = pipe_buffer_map_range(context, info->index.resource,
start * src_index_size,
count * src_index_size,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&src_transfer);
if (!src_map)
goto error;
@@ -248,7 +248,7 @@ util_draw_vbo_without_prim_restart(struct pipe_context *context,
src_map = pipe_buffer_map_range(context, info->index.resource,
info_start * info->index_size,
info_count * info->index_size,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&src_transfer);
if (!src_map) {
return PIPE_ERROR_OUT_OF_MEMORY;
diff --git a/src/gallium/auxiliary/util/u_pstipple.c b/src/gallium/auxiliary/util/u_pstipple.c
index c3c2ca81780..894ba275417 100644
--- a/src/gallium/auxiliary/util/u_pstipple.c
+++ b/src/gallium/auxiliary/util/u_pstipple.c
@@ -70,7 +70,7 @@ util_pstipple_update_stipple_texture(struct pipe_context *pipe,
/* map texture memory */
data = pipe_transfer_map(pipe, tex, 0, 0,
- PIPE_TRANSFER_WRITE, 0, 0, 32, 32, &transfer);
+ PIPE_MAP_WRITE, 0, 0, 32, 32, &transfer);
/*
* Load alpha texture.
diff --git a/src/gallium/auxiliary/util/u_suballoc.c b/src/gallium/auxiliary/util/u_suballoc.c
index d54026edf9e..51754087ef1 100644
--- a/src/gallium/auxiliary/util/u_suballoc.c
+++ b/src/gallium/auxiliary/util/u_suballoc.c
@@ -131,7 +131,7 @@ u_suballocator_alloc(struct u_suballocator *allocator, unsigned size,
} else {
struct pipe_transfer *transfer = NULL;
void *ptr = pipe_buffer_map(pipe, allocator->buffer,
- PIPE_TRANSFER_WRITE, &transfer);
+ PIPE_MAP_WRITE, &transfer);
memset(ptr, 0, allocator->size);
pipe_buffer_unmap(pipe, transfer);
}
diff --git a/src/gallium/auxiliary/util/u_surface.c b/src/gallium/auxiliary/util/u_surface.c
index fffa1493ae0..27d19492795 100644
--- a/src/gallium/auxiliary/util/u_surface.c
+++ b/src/gallium/auxiliary/util/u_surface.c
@@ -285,7 +285,7 @@ util_resource_copy_region(struct pipe_context *pipe,
src_map = pipe->transfer_map(pipe,
src,
src_level,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&src_box, &src_trans);
assert(src_map);
if (!src_map) {
@@ -295,8 +295,8 @@ util_resource_copy_region(struct pipe_context *pipe,
dst_map = pipe->transfer_map(pipe,
dst,
dst_level,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE, &dst_box,
&dst_trans);
assert(dst_map);
if (!dst_map) {
@@ -358,7 +358,7 @@ util_clear_color_texture(struct pipe_context *pipe,
dst_map = pipe_transfer_map_3d(pipe,
texture,
level,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
dstx, dsty, dstz,
width, height, depth,
&dst_trans);
@@ -410,7 +410,7 @@ util_clear_render_target(struct pipe_context *pipe,
dst_map = pipe_transfer_map(pipe,
dst->texture,
0, 0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
dx, 0, w, 1,
&dst_trans);
if (dst_map) {
@@ -561,8 +561,8 @@ util_clear_depth_stencil_texture(struct pipe_context *pipe,
dst_map = pipe_transfer_map_3d(pipe,
texture,
level,
- (need_rmw ? PIPE_TRANSFER_READ_WRITE :
- PIPE_TRANSFER_WRITE),
+ (need_rmw ? PIPE_MAP_READ_WRITE :
+ PIPE_MAP_WRITE),
dstx, dsty, dstz,
width, height, depth, &dst_trans);
assert(dst_map);
diff --git a/src/gallium/auxiliary/util/u_tests.c b/src/gallium/auxiliary/util/u_tests.c
index a0e9561eeb6..c78f01d47a2 100644
--- a/src/gallium/auxiliary/util/u_tests.c
+++ b/src/gallium/auxiliary/util/u_tests.c
@@ -227,7 +227,7 @@ util_probe_rect_rgba_multi(struct pipe_context *ctx, struct pipe_resource *tex,
unsigned x,y,e,c;
bool pass = true;
- map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
+ map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_MAP_READ,
offx, offy, w, h, &transfer);
pipe_get_tile_rgba(transfer, map, 0, 0, w, h, tex->format, pixels);
pipe_transfer_unmap(ctx, transfer);
diff --git a/src/gallium/auxiliary/util/u_threaded_context.c b/src/gallium/auxiliary/util/u_threaded_context.c
index 72111f34c01..1689f7b9d98 100644
--- a/src/gallium/auxiliary/util/u_threaded_context.c
+++ b/src/gallium/auxiliary/util/u_threaded_context.c
@@ -1359,17 +1359,17 @@ tc_improve_map_buffer_flags(struct threaded_context *tc,
return usage;
/* Use the staging upload if it's preferred. */
- if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
- !(usage & PIPE_TRANSFER_PERSISTENT) &&
+ if (usage & (PIPE_MAP_DISCARD_RANGE |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
+ !(usage & PIPE_MAP_PERSISTENT) &&
/* Try not to decrement the counter if it's not positive. Still racy,
* but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
tres->max_forced_staging_uploads > 0 &&
p_atomic_dec_return(&tres->max_forced_staging_uploads) >= 0) {
- usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
- PIPE_TRANSFER_UNSYNCHRONIZED);
+ usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE |
+ PIPE_MAP_UNSYNCHRONIZED);
- return usage | tc_flags | PIPE_TRANSFER_DISCARD_RANGE;
+ return usage | tc_flags | PIPE_MAP_DISCARD_RANGE;
}
/* Sparse buffers can't be mapped directly and can't be reallocated
@@ -1380,8 +1380,8 @@ tc_improve_map_buffer_flags(struct threaded_context *tc,
/* We can use DISCARD_RANGE instead of full discard. This is the only
* fast path for sparse buffers that doesn't need thread synchronization.
*/
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
+ usage |= PIPE_MAP_DISCARD_RANGE;
/* Allow DISCARD_WHOLE_RESOURCE and infering UNSYNCHRONIZED in drivers.
* The threaded context doesn't do unsychronized mappings and invalida-
@@ -1394,50 +1394,50 @@ tc_improve_map_buffer_flags(struct threaded_context *tc,
usage |= tc_flags;
/* Handle CPU reads trivially. */
- if (usage & PIPE_TRANSFER_READ) {
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ if (usage & PIPE_MAP_READ) {
+ if (usage & PIPE_MAP_UNSYNCHRONIZED)
usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* don't sync */
/* Drivers aren't allowed to do buffer invalidations. */
- return usage & ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ return usage & ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
!tres->is_shared &&
!util_ranges_intersect(&tres->valid_buffer_range, offset, offset + size))
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* If discarding the entire range, discard the whole resource instead. */
- if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
+ if (usage & PIPE_MAP_DISCARD_RANGE &&
offset == 0 && size == tres->b.width0)
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
/* Discard the whole resource if needed. */
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
if (tc_invalidate_buffer(tc, tres))
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
else
- usage |= PIPE_TRANSFER_DISCARD_RANGE; /* fallback */
+ usage |= PIPE_MAP_DISCARD_RANGE; /* fallback */
}
}
/* We won't need this flag anymore. */
/* TODO: We might not need TC_TRANSFER_MAP_NO_INVALIDATE with this. */
- usage &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
/* GL_AMD_pinned_memory and persistent mappings can't use staging
* buffers. */
- if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT) ||
+ if (usage & (PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_PERSISTENT) ||
tres->is_user_ptr)
- usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
+ usage &= ~PIPE_MAP_DISCARD_RANGE;
/* Unsychronized buffer mappings don't have to synchronize the thread. */
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
- usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
+ if (usage & PIPE_MAP_UNSYNCHRONIZED) {
+ usage &= ~PIPE_MAP_DISCARD_RANGE;
usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* notify the driver */
}
@@ -1460,7 +1460,7 @@ tc_transfer_map(struct pipe_context *_pipe,
/* Do a staging transfer within the threaded context. The driver should
* only get resource_copy_region.
*/
- if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
+ if (usage & PIPE_MAP_DISCARD_RANGE) {
struct threaded_transfer *ttrans = slab_alloc(&tc->pool_transfers);
uint8_t *map;
@@ -1488,8 +1488,8 @@ tc_transfer_map(struct pipe_context *_pipe,
/* Unsychronized buffer mappings don't have to synchronize the thread. */
if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
- usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
- usage & PIPE_TRANSFER_READ ? " read" : " ??");
+ usage & PIPE_MAP_DISCARD_RANGE ? " discard_range" :
+ usage & PIPE_MAP_READ ? " read" : " ??");
tc->bytes_mapped_estimate += box->width;
@@ -1559,8 +1559,8 @@ tc_transfer_flush_region(struct pipe_context *_pipe,
struct threaded_context *tc = threaded_context(_pipe);
struct threaded_transfer *ttrans = threaded_transfer(transfer);
struct threaded_resource *tres = threaded_resource(transfer->resource);
- unsigned required_usage = PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_FLUSH_EXPLICIT;
+ unsigned required_usage = PIPE_MAP_WRITE |
+ PIPE_MAP_FLUSH_EXPLICIT;
if (tres->b.target == PIPE_BUFFER) {
if ((transfer->usage & required_usage) == required_usage) {
@@ -1599,13 +1599,13 @@ tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
struct threaded_transfer *ttrans = threaded_transfer(transfer);
struct threaded_resource *tres = threaded_resource(transfer->resource);
- /* PIPE_TRANSFER_THREAD_SAFE is only valid with UNSYNCHRONIZED. It can be
+ /* PIPE_MAP_THREAD_SAFE is only valid with UNSYNCHRONIZED. It can be
* called from any thread and bypasses all multithreaded queues.
*/
- if (transfer->usage & PIPE_TRANSFER_THREAD_SAFE) {
- assert(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED);
- assert(!(transfer->usage & (PIPE_TRANSFER_FLUSH_EXPLICIT |
- PIPE_TRANSFER_DISCARD_RANGE)));
+ if (transfer->usage & PIPE_MAP_THREAD_SAFE) {
+ assert(transfer->usage & PIPE_MAP_UNSYNCHRONIZED);
+ assert(!(transfer->usage & (PIPE_MAP_FLUSH_EXPLICIT |
+ PIPE_MAP_DISCARD_RANGE)));
struct pipe_context *pipe = tc->pipe;
pipe->transfer_unmap(pipe, transfer);
@@ -1615,8 +1615,8 @@ tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
}
if (tres->b.target == PIPE_BUFFER) {
- if (transfer->usage & PIPE_TRANSFER_WRITE &&
- !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
+ if (transfer->usage & PIPE_MAP_WRITE &&
+ !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
tc_buffer_do_flush_region(tc, ttrans, &transfer->box);
/* Staging transfers don't send the call to the driver. */
@@ -1669,19 +1669,19 @@ tc_buffer_subdata(struct pipe_context *_pipe,
if (!size)
return;
- usage |= PIPE_TRANSFER_WRITE;
+ usage |= PIPE_MAP_WRITE;
- /* PIPE_TRANSFER_MAP_DIRECTLY supresses implicit DISCARD_RANGE. */
- if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ /* PIPE_MAP_DIRECTLY supresses implicit DISCARD_RANGE. */
+ if (!(usage & PIPE_MAP_DIRECTLY))
+ usage |= PIPE_MAP_DISCARD_RANGE;
usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size);
/* Unsychronized and big transfers should use transfer_map. Also handle
* full invalidations, because drivers aren't allowed to do them.
*/
- if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) ||
+ if (usage & (PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE) ||
size > TC_MAX_SUBDATA_BYTES) {
struct pipe_transfer *transfer;
struct pipe_box box;
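The valid-range trick used by tc_improve_map_buffer_flags() above (promote a write to an unsynchronized map when the target range has never held data) also appears in individual drivers later in this diff, such as etnaviv and freedreno. Stripped down, and with hypothetical variable names, the idea is roughly:

/* A write to a range that never held valid data cannot conflict with any
 * pending GPU access, so the map can skip synchronization entirely. */
if ((usage & PIPE_MAP_WRITE) &&
    !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
    !util_ranges_intersect(&res->valid_buffer_range,
                           box->x, box->x + box->width))
   usage |= PIPE_MAP_UNSYNCHRONIZED;

/* Once the write lands, remember that the range now contains valid data. */
if (usage & PIPE_MAP_WRITE)
   util_range_add(&res->base, &res->valid_buffer_range,
                  box->x, box->x + box->width);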
diff --git a/src/gallium/auxiliary/util/u_threaded_context.h b/src/gallium/auxiliary/util/u_threaded_context.h
index c54dec0b1d7..e0e76ec527a 100644
--- a/src/gallium/auxiliary/util/u_threaded_context.h
+++ b/src/gallium/auxiliary/util/u_threaded_context.h
@@ -85,9 +85,9 @@
* Transfer_map rules for buffer mappings
* --------------------------------------
*
- * 1) If transfer_map has PIPE_TRANSFER_UNSYNCHRONIZED, the call is made
+ * 1) If transfer_map has PIPE_MAP_UNSYNCHRONIZED, the call is made
* in the non-driver thread without flushing the queue. The driver will
- * receive TC_TRANSFER_MAP_THREADED_UNSYNC in addition to PIPE_TRANSFER_-
+ * receive TC_TRANSFER_MAP_THREADED_UNSYNC in addition to PIPE_MAP_-
* UNSYNCHRONIZED to indicate this.
* Note that transfer_unmap is always enqueued and called from the driver
* thread.
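Rule 1 implies that a driver's transfer_map callback may run on the application thread rather than the driver thread. A hedged fragment of what that driver-side check could look like (the surrounding function is omitted; only TC_TRANSFER_MAP_THREADED_UNSYNC and the PIPE_MAP_* flags come from this header, the rest is illustrative):

if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC) {
   /* Called directly from the application thread: the map must stay
    * unsynchronized and must not touch state owned by the driver thread. */
   assert(usage & PIPE_MAP_UNSYNCHRONIZED);
}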
diff --git a/src/gallium/auxiliary/util/u_transfer.c b/src/gallium/auxiliary/util/u_transfer.c
index 5bc47b09f38..84b80d40032 100644
--- a/src/gallium/auxiliary/util/u_transfer.c
+++ b/src/gallium/auxiliary/util/u_transfer.c
@@ -13,19 +13,19 @@ void u_default_buffer_subdata(struct pipe_context *pipe,
struct pipe_box box;
uint8_t *map = NULL;
- assert(!(usage & PIPE_TRANSFER_READ));
+ assert(!(usage & PIPE_MAP_READ));
/* the write flag is implicit by the nature of buffer_subdata */
- usage |= PIPE_TRANSFER_WRITE;
+ usage |= PIPE_MAP_WRITE;
/* buffer_subdata implicitly discards the rewritten buffer range.
- * PIPE_TRANSFER_MAP_DIRECTLY supresses that.
+ * PIPE_MAP_DIRECTLY supresses that.
*/
- if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
+ if (!(usage & PIPE_MAP_DIRECTLY)) {
if (offset == 0 && size == resource->width0) {
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
} else {
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage |= PIPE_MAP_DISCARD_RANGE;
}
}
@@ -52,13 +52,13 @@ void u_default_texture_subdata(struct pipe_context *pipe,
const uint8_t *src_data = data;
uint8_t *map = NULL;
- assert(!(usage & PIPE_TRANSFER_READ));
+ assert(!(usage & PIPE_MAP_READ));
/* the write flag is implicit by the nature of texture_subdata */
- usage |= PIPE_TRANSFER_WRITE;
+ usage |= PIPE_MAP_WRITE;
/* texture_subdata implicitly discards the rewritten buffer range */
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage |= PIPE_MAP_DISCARD_RANGE;
map = pipe->transfer_map(pipe,
resource,
diff --git a/src/gallium/auxiliary/util/u_transfer_helper.c b/src/gallium/auxiliary/util/u_transfer_helper.c
index 0dc9ecf5afc..2d9822f11ee 100644
--- a/src/gallium/auxiliary/util/u_transfer_helper.c
+++ b/src/gallium/auxiliary/util/u_transfer_helper.c
@@ -148,8 +148,8 @@ u_transfer_helper_resource_destroy(struct pipe_screen *pscreen,
static bool needs_pack(unsigned usage)
{
- return (usage & PIPE_TRANSFER_READ) &&
- !(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_DISCARD_RANGE));
+ return (usage & PIPE_MAP_READ) &&
+ !(usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE));
}
/* In the case of transfer_map of a multi-sample resource, call back into
@@ -358,7 +358,7 @@ flush_region(struct pipe_context *pctx, struct pipe_transfer *ptrans,
unsigned height = box->height;
void *src, *dst;
- if (!(ptrans->usage & PIPE_TRANSFER_WRITE))
+ if (!(ptrans->usage & PIPE_MAP_WRITE))
return;
if (trans->ss) {
@@ -495,7 +495,7 @@ u_transfer_helper_transfer_unmap(struct pipe_context *pctx,
if (handle_transfer(ptrans->resource)) {
struct u_transfer *trans = u_transfer(ptrans);
- if (!(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ if (!(ptrans->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
struct pipe_box box;
u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
flush_region(pctx, ptrans, &box);
@@ -589,13 +589,13 @@ u_transfer_helper_deinterleave_transfer_map(struct pipe_context *pctx,
if (!trans->staging)
goto fail;
- trans->ptr = helper->vtbl->transfer_map(pctx, prsc, level, usage | PIPE_TRANSFER_DEPTH_ONLY, box,
+ trans->ptr = helper->vtbl->transfer_map(pctx, prsc, level, usage | PIPE_MAP_DEPTH_ONLY, box,
&trans->trans);
if (!trans->ptr)
goto fail;
trans->ptr2 = helper->vtbl->transfer_map(pctx, prsc, level,
- usage | PIPE_TRANSFER_STENCIL_ONLY, box, &trans->trans2);
+ usage | PIPE_MAP_STENCIL_ONLY, box, &trans->trans2);
if (needs_pack(usage)) {
switch (prsc->format) {
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
@@ -649,7 +649,7 @@ u_transfer_helper_deinterleave_transfer_unmap(struct pipe_context *pctx,
(format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT && helper->separate_z32s8)) {
struct u_transfer *trans = (struct u_transfer *)ptrans;
- if (!(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ if (!(ptrans->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
struct pipe_box box;
u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
flush_region(pctx, ptrans, &box);
diff --git a/src/gallium/auxiliary/util/u_upload_mgr.c b/src/gallium/auxiliary/util/u_upload_mgr.c
index 375fad005b4..6eab49fdab0 100644
--- a/src/gallium/auxiliary/util/u_upload_mgr.c
+++ b/src/gallium/auxiliary/util/u_upload_mgr.c
@@ -45,7 +45,7 @@ struct u_upload_mgr {
unsigned bind; /* Bitmask of PIPE_BIND_* flags. */
enum pipe_resource_usage usage;
unsigned flags;
- unsigned map_flags; /* Bitmask of PIPE_TRANSFER_* flags. */
+ unsigned map_flags; /* Bitmask of PIPE_MAP_* flags. */
boolean map_persistent; /* If persistent mappings are supported. */
struct pipe_resource *buffer; /* Upload buffer. */
@@ -77,15 +77,15 @@ u_upload_create(struct pipe_context *pipe, unsigned default_size,
PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT);
if (upload->map_persistent) {
- upload->map_flags = PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT |
- PIPE_TRANSFER_COHERENT;
+ upload->map_flags = PIPE_MAP_WRITE |
+ PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_PERSISTENT |
+ PIPE_MAP_COHERENT;
}
else {
- upload->map_flags = PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_FLUSH_EXPLICIT;
+ upload->map_flags = PIPE_MAP_WRITE |
+ PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_FLUSH_EXPLICIT;
}
return upload;
@@ -110,7 +110,7 @@ u_upload_clone(struct pipe_context *pipe, struct u_upload_mgr *upload)
if (!upload->map_persistent && result->map_persistent)
u_upload_disable_persistent(result);
else if (upload->map_persistent &&
- upload->map_flags & PIPE_TRANSFER_FLUSH_EXPLICIT)
+ upload->map_flags & PIPE_MAP_FLUSH_EXPLICIT)
u_upload_enable_flush_explicit(result);
return result;
@@ -120,16 +120,16 @@ void
u_upload_enable_flush_explicit(struct u_upload_mgr *upload)
{
assert(upload->map_persistent);
- upload->map_flags &= ~PIPE_TRANSFER_COHERENT;
- upload->map_flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
+ upload->map_flags &= ~PIPE_MAP_COHERENT;
+ upload->map_flags |= PIPE_MAP_FLUSH_EXPLICIT;
}
void
u_upload_disable_persistent(struct u_upload_mgr *upload)
{
upload->map_persistent = FALSE;
- upload->map_flags &= ~(PIPE_TRANSFER_COHERENT | PIPE_TRANSFER_PERSISTENT);
- upload->map_flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
+ upload->map_flags &= ~(PIPE_MAP_COHERENT | PIPE_MAP_PERSISTENT);
+ upload->map_flags |= PIPE_MAP_FLUSH_EXPLICIT;
}
static void
@@ -138,7 +138,7 @@ upload_unmap_internal(struct u_upload_mgr *upload, boolean destroying)
if (!upload->transfer)
return;
- if (upload->map_flags & PIPE_TRANSFER_FLUSH_EXPLICIT) {
+ if (upload->map_flags & PIPE_MAP_FLUSH_EXPLICIT) {
struct pipe_box *box = &upload->transfer->box;
unsigned flush_offset = box->x + upload->flushed_size;
diff --git a/src/gallium/auxiliary/util/u_vbuf.c b/src/gallium/auxiliary/util/u_vbuf.c
index 7e2631c2e86..511a97dd806 100644
--- a/src/gallium/auxiliary/util/u_vbuf.c
+++ b/src/gallium/auxiliary/util/u_vbuf.c
@@ -461,7 +461,7 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
}
map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
- PIPE_TRANSFER_READ, &vb_transfer[i]);
+ PIPE_MAP_READ, &vb_transfer[i]);
}
/* Subtract min_index so that indexing with the index buffer works. */
@@ -491,7 +491,7 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
} else {
map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
info->count * info->index_size,
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
}
switch (info->index_size) {
@@ -1228,7 +1228,7 @@ void u_vbuf_get_minmax_index(struct pipe_context *pipe,
indices = pipe_buffer_map_range(pipe, info->index.resource,
info->start * info->index_size,
info->count * info->index_size,
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
}
u_vbuf_get_minmax_index_mapped(info, indices, out_min_index, out_max_index);
@@ -1386,7 +1386,7 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
indices = (uint8_t*)info->index.user;
} else {
indices = (uint8_t*)pipe_buffer_map(pipe, info->index.resource,
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
}
for (unsigned i = 0; i < draw_count; i++) {
diff --git a/src/gallium/auxiliary/vl/vl_compositor.c b/src/gallium/auxiliary/vl/vl_compositor.c
index f82c418009e..37f64885ad5 100644
--- a/src/gallium/auxiliary/vl/vl_compositor.c
+++ b/src/gallium/auxiliary/vl/vl_compositor.c
@@ -475,7 +475,7 @@ vl_compositor_set_csc_matrix(struct vl_compositor_state *s,
assert(s);
float *ptr = pipe_buffer_map(s->pipe, s->shader_params,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buf_transfer);
if (!ptr)
diff --git a/src/gallium/auxiliary/vl/vl_compositor_cs.c b/src/gallium/auxiliary/vl/vl_compositor_cs.c
index 029449f9dd1..b6039cb64e3 100644
--- a/src/gallium/auxiliary/vl/vl_compositor_cs.c
+++ b/src/gallium/auxiliary/vl/vl_compositor_cs.c
@@ -654,7 +654,7 @@ set_viewport(struct vl_compositor_state *s,
assert(s && drawn);
void *ptr = pipe_buffer_map(s->pipe, s->shader_params,
- PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE,
+ PIPE_MAP_READ | PIPE_MAP_WRITE,
&buf_transfer);
if (!ptr)
diff --git a/src/gallium/auxiliary/vl/vl_idct.c b/src/gallium/auxiliary/vl/vl_idct.c
index 91ff02b3911..1c35ef9e30f 100644
--- a/src/gallium/auxiliary/vl/vl_idct.c
+++ b/src/gallium/auxiliary/vl/vl_idct.c
@@ -711,8 +711,8 @@ vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
goto error_matrix;
f = pipe->transfer_map(pipe, matrix, 0,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE,
&rect, &buf_transfer);
if (!f)
goto error_map;
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
index 8a04158145a..58ddef9f418 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
@@ -629,8 +629,8 @@ vl_mpeg12_begin_frame(struct pipe_video_codec *decoder,
buf->texels =
dec->context->transfer_map(dec->context, tex, 0,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE,
&rect, &buf->tex_transfer);
buf->block_num = 0;
diff --git a/src/gallium/auxiliary/vl/vl_vertex_buffers.c b/src/gallium/auxiliary/vl/vl_vertex_buffers.c
index 7e6fdfaaf56..0cf8582f810 100644
--- a/src/gallium/auxiliary/vl/vl_vertex_buffers.c
+++ b/src/gallium/auxiliary/vl/vl_vertex_buffers.c
@@ -66,7 +66,7 @@ vl_vb_upload_quads(struct pipe_context *pipe)
(
pipe,
quad.buffer.resource,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buf_transfer
);
@@ -111,7 +111,7 @@ vl_vb_upload_pos(struct pipe_context *pipe, unsigned width, unsigned height)
(
pipe,
pos.buffer.resource,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buf_transfer
);
@@ -301,7 +301,7 @@ vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
(
pipe,
buffer->ycbcr[i].resource,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buffer->ycbcr[i].transfer
);
}
@@ -311,7 +311,7 @@ vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
(
pipe,
buffer->mv[i].resource,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&buffer->mv[i].transfer
);
}
diff --git a/src/gallium/auxiliary/vl/vl_zscan.c b/src/gallium/auxiliary/vl/vl_zscan.c
index 769fc71ff42..82f7a5eab23 100644
--- a/src/gallium/auxiliary/vl/vl_zscan.c
+++ b/src/gallium/auxiliary/vl/vl_zscan.c
@@ -410,7 +410,7 @@ vl_zscan_layout(struct pipe_context *pipe, const int layout[64], unsigned blocks
goto error_resource;
f = pipe->transfer_map(pipe, res,
- 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ 0, PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&rect, &buf_transfer);
if (!f)
goto error_map;
@@ -576,8 +576,8 @@ vl_zscan_upload_quant(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer,
rect.width *= zscan->blocks_per_line;
data = pipe->transfer_map(pipe, buffer->quant->texture,
- 0, PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE,
+ 0, PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE,
&rect, &buf_transfer);
if (!data)
return;
diff --git a/src/gallium/drivers/etnaviv/etnaviv_transfer.c b/src/gallium/drivers/etnaviv/etnaviv_transfer.c
index 27f3ebe585b..e448f4aed07 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_transfer.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_transfer.c
@@ -123,7 +123,7 @@ etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
if (trans->rsc)
etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);
- if (ptrans->usage & PIPE_TRANSFER_WRITE) {
+ if (ptrans->usage & PIPE_MAP_WRITE) {
if (trans->rsc) {
/* We have a temporary resource due to either tile status or
* tiling format. Write back the updated buffer contents.
@@ -171,11 +171,11 @@ etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
* are not mapped unsynchronized. If they are, must push them back into GPU
* domain after CPU access is finished.
*/
- if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
+ if (!trans->rsc && !(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED))
etna_bo_cpu_fini(rsc->bo);
if ((ptrans->resource->target == PIPE_BUFFER) &&
- (ptrans->usage & PIPE_TRANSFER_WRITE)) {
+ (ptrans->usage & PIPE_MAP_WRITE)) {
util_range_add(&rsc->base,
&rsc->valid_buffer_range,
ptrans->box.x,
@@ -211,26 +211,26 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
/*
* Upgrade to UNSYNCHRONIZED if target is PIPE_BUFFER and range is uninitialized.
*/
- if ((usage & PIPE_TRANSFER_WRITE) &&
+ if ((usage & PIPE_MAP_WRITE) &&
(prsc->target == PIPE_BUFFER) &&
!util_ranges_intersect(&rsc->valid_buffer_range,
box->x,
box->x + box->width)) {
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
/* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
* being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
* check needs to be extended to coherent mappings and shared resources.
*/
- if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ if ((usage & PIPE_MAP_DISCARD_RANGE) &&
+ !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
prsc->last_level == 0 &&
prsc->width0 == box->width &&
prsc->height0 == box->height &&
prsc->depth0 == box->depth &&
prsc->array_size == 1) {
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
ptrans = &trans->base;
@@ -268,7 +268,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* depth buffer, filling in the "holes" where the tile status
* indicates that it's clear. We also do this for tiled
* resources, but only if the RS can blit them. */
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
+ if (usage & PIPE_MAP_DIRECTLY) {
slab_free(&ctx->transfer_pool, trans);
BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
return NULL;
@@ -313,7 +313,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
ptrans->box.height = align(ptrans->box.height, ETNA_RS_HEIGHT_MASK + 1);
}
- if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
+ if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE))
etna_copy_resource_box(pctx, trans->rsc, &rsc->base, level, &ptrans->box);
/* Switch to using the temporary resource instead */
@@ -322,7 +322,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
struct etna_resource_level *res_level = &rsc->levels[level];
- /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be ignored
+ /* XXX we don't handle PIPE_MAP_FLUSH_EXPLICIT; this flag can be ignored
* when mapping in-place,
* but when not in place we need to fire off the copy operation in
* transfer_flush_region (currently
@@ -345,7 +345,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
pipeline?
Is it necessary at all? Only in case we want to provide a fast path and
map the resource directly
- (and for PIPE_TRANSFER_MAP_DIRECTLY) and we don't want to force a sync.
+ (and for PIPE_MAP_DIRECTLY) and we don't want to force a sync.
We also need to know whether the resource is in use to determine if a sync
is needed (or just do it
always, but that comes at the expense of performance).
@@ -356,8 +356,8 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
resources that have
been bound but are no longer in use for a while still carry a performance
penalty. On the other hand,
- the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
- PIPE_TRANSFER_UNSYNCHRONIZED to
+ the program could be using PIPE_MAP_DISCARD_WHOLE_RESOURCE or
+ PIPE_MAP_UNSYNCHRONIZED to
avoid this in the first place...
A) We use an in-pipe copy engine, and queue the copy operation after unmap
@@ -365,18 +365,18 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
will be performed when all current commands have been executed.
Using the RS is possible, not sure if always efficient. This can also
do any kind of tiling for us.
- Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
+ Only possible when PIPE_MAP_DISCARD_RANGE is set.
B) We discard the entire resource (or at least, the mipmap level) and
allocate new memory for it.
Only possible when mapping the entire resource or
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE is set.
*/
/*
* Pull resources into the CPU domain. Only skipped for unsynchronized
* transfers without a temporary resource.
*/
- if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (trans->rsc || !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
uint32_t prep_flags = 0;
/*
@@ -389,8 +389,8 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
(!trans->rsc &&
- (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
- ((usage & PIPE_TRANSFER_WRITE) && rsc->status)))) {
+ (((usage & PIPE_MAP_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
+ ((usage & PIPE_MAP_WRITE) && rsc->status)))) {
mtx_lock(&rsc->lock);
set_foreach(rsc->pending_ctx, entry) {
struct etna_context *pend_ctx = (struct etna_context *)entry->key;
@@ -403,9 +403,9 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
mtx_unlock(&ctx->lock);
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
prep_flags |= DRM_ETNA_PREP_READ;
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
prep_flags |= DRM_ETNA_PREP_WRITE;
/*
@@ -413,7 +413,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* get written even on read-only transfers. This blocks the GPU to sample
* from this resource.
*/
- if ((usage & PIPE_TRANSFER_READ) && etna_etc2_needs_patching(prsc))
+ if ((usage & PIPE_MAP_READ) && etna_etc2_needs_patching(prsc))
prep_flags |= DRM_ETNA_PREP_WRITE;
if (etna_bo_cpu_prep(rsc->bo, prep_flags))
@@ -436,7 +436,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
res_level->layer_stride);
/* We need to have the unpatched data ready for the gfx stack. */
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
etna_unpatch_data(trans->mapped, ptrans);
return trans->mapped;
@@ -447,7 +447,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
/* No direct mappings of tiled, since we need to manually
* tile/untile.
*/
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (usage & PIPE_MAP_DIRECTLY)
goto fail;
trans->mapped += res_level->offset;
@@ -459,7 +459,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
if (!trans->staging)
goto fail;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
if (rsc->layout == ETNA_LAYOUT_TILED) {
for (unsigned z = 0; z < ptrans->box.depth; z++) {
etna_texture_untile(trans->staging + z * ptrans->layer_stride,
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index 803054c4f2f..e25b58cc4ff 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -524,7 +524,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
fd_batch_reference_locked(&write_batch, rsc->write_batch);
fd_screen_unlock(ctx->screen);
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
struct fd_batch *batch, *batches[32] = {};
uint32_t batch_mask;
@@ -558,7 +558,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
+ flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_MAP_READ);
}
static void
@@ -570,12 +570,12 @@ fd_resource_transfer_unmap(struct pipe_context *pctx,
struct fd_transfer *trans = fd_transfer(ptrans);
if (trans->staging_prsc) {
- if (ptrans->usage & PIPE_TRANSFER_WRITE)
+ if (ptrans->usage & PIPE_MAP_WRITE)
fd_blit_from_staging(ctx, trans);
pipe_resource_reference(&trans->staging_prsc, NULL);
}
- if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED)) {
fd_bo_cpu_fini(rsc->bo);
}
@@ -607,7 +607,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
box->width, box->height, box->x, box->y);
- if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsc->layout.tile_mode) {
+ if ((usage & PIPE_MAP_DIRECTLY) && rsc->layout.tile_mode) {
DBG("CANNOT MAP DIRECTLY!\n");
return NULL;
}
@@ -638,7 +638,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
if (staging_rsc) {
- // TODO for PIPE_TRANSFER_READ, need to do untiling blit..
+ // TODO for PIPE_MAP_READ, need to do untiling blit..
trans->staging_prsc = &staging_rsc->base;
trans->base.stride = fd_resource_pitch(staging_rsc, 0);
trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
@@ -647,7 +647,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
trans->staging_box.y = 0;
trans->staging_box.z = 0;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
fd_blit_to_staging(ctx, trans);
fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
@@ -665,30 +665,30 @@ fd_resource_transfer_map(struct pipe_context *pctx,
}
}
- if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ if (ctx->in_shadow && !(usage & PIPE_MAP_READ))
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
op |= DRM_FREEDRENO_PREP_READ;
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
op |= DRM_FREEDRENO_PREP_WRITE;
- bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
+ bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
if (needs_flush || fd_resource_busy(rsc, op)) {
rebind_resource(rsc);
realloc_bo(rsc, fd_bo_size(rsc->bo));
}
- } else if ((usage & PIPE_TRANSFER_WRITE) &&
+ } else if ((usage & PIPE_MAP_WRITE) &&
prsc->target == PIPE_BUFFER &&
!util_ranges_intersect(&rsc->valid_buffer_range,
box->x, box->x + box->width)) {
/* We are trying to write to a previously uninitialized range. No need
* to wait.
*/
- } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
struct fd_batch *write_batch = NULL;
/* hold a reference, so it doesn't disappear under us: */
@@ -696,7 +696,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
fd_batch_reference_locked(&write_batch, rsc->write_batch);
fd_context_unlock(ctx);
- if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
+ if ((usage & PIPE_MAP_WRITE) && write_batch &&
write_batch->back_blit) {
/* if only thing pending is a back-blit, we can discard it: */
fd_batch_reset(write_batch);
@@ -714,8 +714,8 @@ fd_resource_transfer_map(struct pipe_context *pctx,
* ie. we only *don't* want to go down this path if the blit
* will trigger a flush!
*/
- if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
- (usage & PIPE_TRANSFER_DISCARD_RANGE)) {
+ if (ctx->screen->reorder && busy && !(usage & PIPE_MAP_READ) &&
+ (usage & PIPE_MAP_DISCARD_RANGE)) {
/* try shadowing only if it avoids a flush, otherwise staging would
* be better:
*/
@@ -784,7 +784,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
fd_resource_offset(rsc, level, box->z);
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
rsc->valid = true;
*pptrans = ptrans;
diff --git a/src/gallium/drivers/i915/i915_resource_texture.c b/src/gallium/drivers/i915/i915_resource_texture.c
index e16d2ac2f6a..afc42371618 100644
--- a/src/gallium/drivers/i915/i915_resource_texture.c
+++ b/src/gallium/drivers/i915/i915_resource_texture.c
@@ -744,8 +744,8 @@ i915_texture_transfer_map(struct pipe_context *pipe,
* because we need that for u_blitter */
if (i915->blitter &&
util_blitter_is_copy_supported(i915->blitter, resource, resource) &&
- (usage & PIPE_TRANSFER_WRITE) &&
- !(usage & (PIPE_TRANSFER_READ | PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_UNSYNCHRONIZED)))
+ (usage & PIPE_MAP_WRITE) &&
+ !(usage & (PIPE_MAP_READ | PIPE_MAP_DONTBLOCK | PIPE_MAP_UNSYNCHRONIZED)))
use_staging_texture = TRUE;
use_staging_texture = FALSE;
@@ -773,7 +773,7 @@ i915_texture_transfer_map(struct pipe_context *pipe,
offset = i915_texture_offset(tex, transfer->b.level, box->z);
map = iws->buffer_map(iws, tex->buffer,
- (transfer->b.usage & PIPE_TRANSFER_WRITE) ? TRUE : FALSE);
+ (transfer->b.usage & PIPE_MAP_WRITE) ? TRUE : FALSE);
if (!map) {
pipe_resource_reference(&transfer->staging_texture, NULL);
FREE(transfer);
@@ -802,7 +802,7 @@ i915_texture_transfer_unmap(struct pipe_context *pipe,
iws->buffer_unmap(iws, tex->buffer);
if ((itransfer->staging_texture) &&
- (transfer->usage & PIPE_TRANSFER_WRITE)) {
+ (transfer->usage & PIPE_MAP_WRITE)) {
struct pipe_box sbox;
u_box_origin_2d(itransfer->b.box.width, itransfer->b.box.height, &sbox);
diff --git a/src/gallium/drivers/iris/iris_bufmgr.h b/src/gallium/drivers/iris/iris_bufmgr.h
index b78794c9be2..7755919b9c0 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.h
+++ b/src/gallium/drivers/iris/iris_bufmgr.h
@@ -292,11 +292,11 @@ iris_bo_reference(struct iris_bo *bo)
*/
void iris_bo_unreference(struct iris_bo *bo);
-#define MAP_READ PIPE_TRANSFER_READ
-#define MAP_WRITE PIPE_TRANSFER_WRITE
-#define MAP_ASYNC PIPE_TRANSFER_UNSYNCHRONIZED
-#define MAP_PERSISTENT PIPE_TRANSFER_PERSISTENT
-#define MAP_COHERENT PIPE_TRANSFER_COHERENT
+#define MAP_READ PIPE_MAP_READ
+#define MAP_WRITE PIPE_MAP_WRITE
+#define MAP_ASYNC PIPE_MAP_UNSYNCHRONIZED
+#define MAP_PERSISTENT PIPE_MAP_PERSISTENT
+#define MAP_COHERENT PIPE_MAP_COHERENT
/* internal */
#define MAP_INTERNAL_MASK (0xffu << 24)
#define MAP_RAW (0x01 << 24)
diff --git a/src/gallium/drivers/iris/iris_resource.c b/src/gallium/drivers/iris/iris_resource.c
index c12a287307f..63e35506087 100644
--- a/src/gallium/drivers/iris/iris_resource.c
+++ b/src/gallium/drivers/iris/iris_resource.c
@@ -1351,7 +1351,7 @@ static void
iris_flush_staging_region(struct pipe_transfer *xfer,
const struct pipe_box *flush_box)
{
- if (!(xfer->usage & PIPE_TRANSFER_WRITE))
+ if (!(xfer->usage & PIPE_MAP_WRITE))
return;
struct iris_transfer *map = (void *) xfer;
@@ -1422,7 +1422,7 @@ iris_map_copy_region(struct iris_transfer *map)
xfer->layer_stride = isl_surf_get_array_pitch(surf);
}
- if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
+ if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
iris_copy_region(map->blorp, map->batch, map->staging, 0, extra, 0, 0,
xfer->resource, xfer->level, box);
/* Ensure writes to the staging BO land before we map it below. */
@@ -1610,7 +1610,7 @@ iris_unmap_s8(struct iris_transfer *map)
struct iris_resource *res = (struct iris_resource *) xfer->resource;
struct isl_surf *surf = &res->surf;
- if (xfer->usage & PIPE_TRANSFER_WRITE) {
+ if (xfer->usage & PIPE_MAP_WRITE) {
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@@ -1657,7 +1657,7 @@ iris_map_s8(struct iris_transfer *map)
* invalidate is set, since we'll be writing the whole rectangle from our
* temporary buffer back out.
*/
- if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
+ if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@@ -1716,7 +1716,7 @@ iris_unmap_tiled_memcpy(struct iris_transfer *map)
const bool has_swizzling = false;
- if (xfer->usage & PIPE_TRANSFER_WRITE) {
+ if (xfer->usage & PIPE_MAP_WRITE) {
char *dst =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@@ -1760,7 +1760,7 @@ iris_map_tiled_memcpy(struct iris_transfer *map)
const bool has_swizzling = false;
- if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
+ if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
char *src =
iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
@@ -1819,7 +1819,7 @@ can_promote_to_async(const struct iris_resource *res,
* initialized with useful data, then we can safely promote this write
* to be unsynchronized. This helps the common pattern of appending data.
*/
- return res->base.target == PIPE_BUFFER && (usage & PIPE_TRANSFER_WRITE) &&
+ return res->base.target == PIPE_BUFFER && (usage & PIPE_MAP_WRITE) &&
!(usage & TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED) &&
!util_ranges_intersect(&res->valid_buffer_range, box->x,
box->x + box->width);
@@ -1840,35 +1840,35 @@ iris_transfer_map(struct pipe_context *ctx,
if (iris_resource_unfinished_aux_import(res))
iris_resource_finish_aux_import(ctx->screen, res);
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
/* Replace the backing storage with a fresh buffer for non-async maps */
- if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
+ if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INVALIDATE)))
iris_invalidate_resource(ctx, resource);
/* If we can discard the whole resource, we can discard the range. */
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage |= PIPE_MAP_DISCARD_RANGE;
}
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
can_promote_to_async(res, box, usage)) {
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
bool map_would_stall = false;
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
map_would_stall =
resource_is_busy(ice, res) ||
iris_has_invalid_primary(res, level, 1, box->z, box->depth);
- if (map_would_stall && (usage & PIPE_TRANSFER_DONTBLOCK) &&
- (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ if (map_would_stall && (usage & PIPE_MAP_DONTBLOCK) &&
+ (usage & PIPE_MAP_DIRECTLY))
return NULL;
}
if (surf->tiling != ISL_TILING_LINEAR &&
- (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ (usage & PIPE_MAP_DIRECTLY))
return NULL;
struct iris_transfer *map = slab_alloc(&ice->transfer_pool);
@@ -1890,7 +1890,7 @@ iris_transfer_map(struct pipe_context *ctx,
util_ranges_intersect(&res->valid_buffer_range, box->x,
box->x + box->width);
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
util_range_add(&res->base, &res->valid_buffer_range, box->x, box->x + box->width);
/* Avoid using GPU copies for persistent/coherent buffers, as the idea
@@ -1899,9 +1899,9 @@ iris_transfer_map(struct pipe_context *ctx,
* contain state we're constructing for a GPU draw call, which would
* kill us with infinite stack recursion.
*/
- bool no_gpu = usage & (PIPE_TRANSFER_PERSISTENT |
- PIPE_TRANSFER_COHERENT |
- PIPE_TRANSFER_MAP_DIRECTLY);
+ bool no_gpu = usage & (PIPE_MAP_PERSISTENT |
+ PIPE_MAP_COHERENT |
+ PIPE_MAP_DIRECTLY);
/* GPU copies are not useful for buffer reads. Instead of stalling to
* read from the original buffer, we'd simply copy it to a temporary...
@@ -1912,7 +1912,7 @@ iris_transfer_map(struct pipe_context *ctx,
* temporary and map that, to avoid the resolve. (It might be better to
* a tiled temporary and use the tiled_memcpy paths...)
*/
- if (!(usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+ if (!(usage & PIPE_MAP_DISCARD_RANGE) &&
!iris_has_invalid_primary(res, level, 1, box->z, box->depth)) {
no_gpu = true;
}
@@ -1939,10 +1939,10 @@ iris_transfer_map(struct pipe_context *ctx,
if (resource->target != PIPE_BUFFER) {
iris_resource_access_raw(ice, res, level, box->z, box->depth,
- usage & PIPE_TRANSFER_WRITE);
+ usage & PIPE_MAP_WRITE);
}
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
if (iris_batch_references(&ice->batches[i], res->bo))
iris_batch_flush(&ice->batches[i]);
@@ -2010,8 +2010,8 @@ iris_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *xfer)
struct iris_context *ice = (struct iris_context *)ctx;
struct iris_transfer *map = (void *) xfer;
- if (!(xfer->usage & (PIPE_TRANSFER_FLUSH_EXPLICIT |
- PIPE_TRANSFER_COHERENT))) {
+ if (!(xfer->usage & (PIPE_MAP_FLUSH_EXPLICIT |
+ PIPE_MAP_COHERENT))) {
struct pipe_box flush_box = {
.x = 0, .y = 0, .z = 0,
.width = xfer->box.width,
@@ -2068,7 +2068,7 @@ iris_texture_subdata(struct pipe_context *ctx,
data, stride, layer_stride);
}
- /* No state trackers pass any flags other than PIPE_TRANSFER_WRITE */
+ /* No state trackers pass any flags other than PIPE_MAP_WRITE */
iris_resource_access_raw(ice, res, level, box->z, box->depth, true);
diff --git a/src/gallium/drivers/lima/lima_resource.c b/src/gallium/drivers/lima/lima_resource.c
index 5ee9c78ad0b..3e743892ead 100644
--- a/src/gallium/drivers/lima/lima_resource.c
+++ b/src/gallium/drivers/lima/lima_resource.c
@@ -559,12 +559,12 @@ lima_transfer_map(struct pipe_context *pctx,
/* No direct mappings of tiled, since we need to manually
* tile/untile.
*/
- if (res->tiled && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ if (res->tiled && (usage & PIPE_MAP_DIRECTLY))
return NULL;
/* bo might be in use in a previous stream draw. Allocate a new
* one for the resource to avoid overwriting data in use. */
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
struct lima_bo *new_bo;
assert(res->bo && res->bo->size);
@@ -580,13 +580,13 @@ lima_transfer_map(struct pipe_context *pctx,
bo = res->bo;
}
- else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
- (usage & PIPE_TRANSFER_READ_WRITE)) {
+ else if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
+ (usage & PIPE_MAP_READ_WRITE)) {
/* use once buffers are made sure to not read/write overlapped
* range, so no need to sync */
- lima_flush_job_accessing_bo(ctx, bo, usage & PIPE_TRANSFER_WRITE);
+ lima_flush_job_accessing_bo(ctx, bo, usage & PIPE_MAP_WRITE);
- unsigned op = usage & PIPE_TRANSFER_WRITE ?
+ unsigned op = usage & PIPE_MAP_WRITE ?
LIMA_GEM_WAIT_WRITE : LIMA_GEM_WAIT_READ;
lima_bo_wait(bo, op, PIPE_TIMEOUT_INFINITE);
}
@@ -614,7 +614,7 @@ lima_transfer_map(struct pipe_context *pctx,
trans->staging = malloc(ptrans->stride * ptrans->box.height * ptrans->box.depth);
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
unsigned i;
for (i = 0; i < ptrans->box.depth; i++)
panfrost_load_tiled_image(
@@ -629,15 +629,15 @@ lima_transfer_map(struct pipe_context *pctx,
return trans->staging;
} else {
- unsigned dpw = PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_PERSISTENT;
+ unsigned dpw = PIPE_MAP_DIRECTLY | PIPE_MAP_WRITE |
+ PIPE_MAP_PERSISTENT;
if ((usage & dpw) == dpw && res->index_cache)
return NULL;
ptrans->stride = res->levels[level].stride;
ptrans->layer_stride = res->levels[level].layer_stride;
- if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ if ((usage & PIPE_MAP_WRITE) && (usage & PIPE_MAP_DIRECTLY))
panfrost_minmax_cache_invalidate(res->index_cache, ptrans);
return bo->map + res->levels[level].offset +
@@ -668,7 +668,7 @@ lima_transfer_unmap_inner(struct lima_context *ctx,
if (trans->staging) {
pres = &res->base;
- if (trans->base.usage & PIPE_TRANSFER_WRITE) {
+ if (trans->base.usage & PIPE_MAP_WRITE) {
unsigned i;
for (i = 0; i < trans->base.box.depth; i++)
panfrost_store_tiled_image(
@@ -779,12 +779,12 @@ lima_texture_subdata(struct pipe_context *pctx,
return;
}
- assert(!(usage & PIPE_TRANSFER_READ));
+ assert(!(usage & PIPE_MAP_READ));
struct lima_transfer t = {
.base = {
.resource = prsc,
- .usage = PIPE_TRANSFER_WRITE,
+ .usage = PIPE_MAP_WRITE,
.level = level,
.box = *box,
.stride = stride,
diff --git a/src/gallium/drivers/llvmpipe/lp_setup.c b/src/gallium/drivers/llvmpipe/lp_setup.c
index 57d0acb41b1..23ccdff6e7d 100644
--- a/src/gallium/drivers/llvmpipe/lp_setup.c
+++ b/src/gallium/drivers/llvmpipe/lp_setup.c
@@ -1006,7 +1006,7 @@ lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
struct sw_winsys *winsys = screen->winsys;
jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
jit_tex->row_stride[0] = lp_tex->row_stride[0];
jit_tex->img_stride[0] = lp_tex->img_stride[0];
jit_tex->mip_offsets[0] = 0;
diff --git a/src/gallium/drivers/llvmpipe/lp_state_cs.c b/src/gallium/drivers/llvmpipe/lp_state_cs.c
index 3a437185ad6..d41f5199fce 100644
--- a/src/gallium/drivers/llvmpipe/lp_state_cs.c
+++ b/src/gallium/drivers/llvmpipe/lp_state_cs.c
@@ -993,7 +993,7 @@ lp_csctx_set_sampler_views(struct lp_cs_context *csctx,
struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
struct sw_winsys *winsys = screen->winsys;
jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
jit_tex->row_stride[0] = lp_tex->row_stride[0];
jit_tex->img_stride[0] = lp_tex->img_stride[0];
jit_tex->mip_offsets[0] = 0;
@@ -1300,7 +1300,7 @@ fill_grid_size(struct pipe_context *pipe,
params = pipe_buffer_map_range(pipe, info->indirect,
info->indirect_offset,
3 * sizeof(uint32_t),
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&transfer);
if (!transfer)
diff --git a/src/gallium/drivers/llvmpipe/lp_state_sampler.c b/src/gallium/drivers/llvmpipe/lp_state_sampler.c
index f802af45f5a..46805e257fe 100644
--- a/src/gallium/drivers/llvmpipe/lp_state_sampler.c
+++ b/src/gallium/drivers/llvmpipe/lp_state_sampler.c
@@ -333,7 +333,7 @@ prepare_shader_sampling(
struct llvmpipe_screen *screen = llvmpipe_screen(tex->screen);
struct sw_winsys *winsys = screen->winsys;
addr = winsys->displaytarget_map(winsys, lp_tex->dt,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
row_stride[0] = lp_tex->row_stride[0];
img_stride[0] = lp_tex->img_stride[0];
mip_offsets[0] = 0;
@@ -474,7 +474,7 @@ prepare_shader_images(
struct llvmpipe_screen *screen = llvmpipe_screen(img->screen);
struct sw_winsys *winsys = screen->winsys;
addr = winsys->displaytarget_map(winsys, lp_img->dt,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
row_stride = lp_img->row_stride[0];
img_stride = lp_img->img_stride[0];
sample_stride = 0;
diff --git a/src/gallium/drivers/llvmpipe/lp_surface.c b/src/gallium/drivers/llvmpipe/lp_surface.c
index af8ada4d39b..02b72ae3f3a 100644
--- a/src/gallium/drivers/llvmpipe/lp_surface.c
+++ b/src/gallium/drivers/llvmpipe/lp_surface.c
@@ -53,14 +53,14 @@ lp_resource_copy_ms(struct pipe_context *pipe,
for (unsigned i = 0; i < src->nr_samples; i++) {
struct pipe_transfer *src_trans, *dst_trans;
const uint8_t *src_map = llvmpipe_transfer_map_ms(pipe,
- src, 0, PIPE_TRANSFER_READ, i,
+ src, 0, PIPE_MAP_READ, i,
src_box,
&src_trans);
if (!src_map)
return;
uint8_t *dst_map = llvmpipe_transfer_map_ms(pipe,
- dst, 0, PIPE_TRANSFER_WRITE, i,
+ dst, 0, PIPE_MAP_WRITE, i,
&dst_box,
&dst_trans);
if (!dst_map) {
@@ -285,7 +285,7 @@ lp_clear_color_texture_msaa(struct pipe_context *pipe,
struct pipe_transfer *dst_trans;
ubyte *dst_map;
- dst_map = llvmpipe_transfer_map_ms(pipe, texture, 0, PIPE_TRANSFER_WRITE,
+ dst_map = llvmpipe_transfer_map_ms(pipe, texture, 0, PIPE_MAP_WRITE,
sample, box, &dst_trans);
if (!dst_map)
return;
@@ -347,8 +347,8 @@ lp_clear_depth_stencil_texture_msaa(struct pipe_context *pipe,
dst_map = llvmpipe_transfer_map_ms(pipe,
texture,
0,
- (need_rmw ? PIPE_TRANSFER_READ_WRITE :
- PIPE_TRANSFER_WRITE),
+ (need_rmw ? PIPE_MAP_READ_WRITE :
+ PIPE_MAP_WRITE),
sample, box, &dst_trans);
assert(dst_map);
if (!dst_map)
diff --git a/src/gallium/drivers/llvmpipe/lp_texture.c b/src/gallium/drivers/llvmpipe/lp_texture.c
index e93b8a666ac..bb7e3ed34f3 100644
--- a/src/gallium/drivers/llvmpipe/lp_texture.c
+++ b/src/gallium/drivers/llvmpipe/lp_texture.c
@@ -230,7 +230,7 @@ llvmpipe_displaytarget_layout(struct llvmpipe_screen *screen,
if (!map_front_private) {
void *map = winsys->displaytarget_map(winsys, lpr->dt,
- PIPE_TRANSFER_WRITE);
+ PIPE_MAP_WRITE);
if (map)
memset(map, 0, height * lpr->row_stride[0]);
@@ -408,10 +408,10 @@ llvmpipe_resource_map(struct pipe_resource *resource,
unsigned dt_usage;
if (tex_usage == LP_TEX_USAGE_READ) {
- dt_usage = PIPE_TRANSFER_READ;
+ dt_usage = PIPE_MAP_READ;
}
else {
- dt_usage = PIPE_TRANSFER_READ_WRITE;
+ dt_usage = PIPE_MAP_READ_WRITE;
}
assert(level == 0);
@@ -566,9 +566,9 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
* Transfers, like other pipe operations, must happen in order, so flush the
* context if necessary.
*/
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- boolean read_only = !(usage & PIPE_TRANSFER_WRITE);
- boolean do_not_block = !!(usage & PIPE_TRANSFER_DONTBLOCK);
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
+ boolean read_only = !(usage & PIPE_MAP_WRITE);
+ boolean do_not_block = !!(usage & PIPE_MAP_DONTBLOCK);
if (!llvmpipe_flush_resource(pipe, resource,
level,
read_only,
@@ -584,7 +584,7 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
}
/* Check if we're mapping a current constant buffer */
- if ((usage & PIPE_TRANSFER_WRITE) &&
+ if ((usage & PIPE_MAP_WRITE) &&
(resource->bind & PIPE_BIND_CONSTANT_BUFFER)) {
unsigned i;
for (i = 0; i < ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_FRAGMENT]); ++i) {
@@ -618,7 +618,7 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
transfer->usage);
*/
- if (usage == PIPE_TRANSFER_READ) {
+ if (usage == PIPE_MAP_READ) {
tex_usage = LP_TEX_USAGE_READ;
mode = "read";
}
@@ -642,7 +642,7 @@ llvmpipe_transfer_map_ms( struct pipe_context *pipe,
/* May want to do different things here depending on read/write nature
* of the map:
*/
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
/* Do something to notify sharing contexts of a texture change.
*/
screen->timestamp++;
diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.c b/src/gallium/drivers/nouveau/nouveau_buffer.c
index acc0e8c8d29..beef87de0d6 100644
--- a/src/gallium/drivers/nouveau/nouveau_buffer.c
+++ b/src/gallium/drivers/nouveau/nouveau_buffer.c
@@ -228,7 +228,7 @@ static inline bool
nouveau_buffer_sync(struct nouveau_context *nv,
struct nv04_resource *buf, unsigned rw)
{
- if (rw == PIPE_TRANSFER_READ) {
+ if (rw == PIPE_MAP_READ) {
if (!buf->fence_wr)
return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
@@ -253,7 +253,7 @@ nouveau_buffer_sync(struct nouveau_context *nv,
static inline bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
- if (rw == PIPE_TRANSFER_READ)
+ if (rw == PIPE_MAP_READ)
return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
else
return (buf->fence && !nouveau_fence_signalled(buf->fence));
@@ -331,7 +331,7 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
#define NOUVEAU_TRANSFER_DISCARD \
- (PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
+ (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE)
/* Checks whether it is possible to completely discard the memory backing this
* resource. This can be useful if we would otherwise have to wait for a read
@@ -340,13 +340,13 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
static inline bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
- if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
+ if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE))
return false;
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
return false;
- if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
+ if (unlikely(usage & PIPE_MAP_PERSISTENT))
return false;
- return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
+ return buf->mm && nouveau_buffer_busy(buf, PIPE_MAP_WRITE);
}
/* Returns a pointer to a memory area representing a window into the
@@ -390,9 +390,9 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
nouveau_buffer_transfer_init(tx, resource, box, usage);
*ptransfer = &tx->base;
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
/* If we are trying to write to an uninitialized range, the user shouldn't
@@ -402,15 +402,15 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
* uninitialized, the GPU can't care what was there, and so we can treat
* the write as being unsynchronized.
*/
- if ((usage & PIPE_TRANSFER_WRITE) &&
+ if ((usage & PIPE_MAP_WRITE) &&
!util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
- usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_DISCARD_RANGE | PIPE_MAP_UNSYNCHRONIZED;
if (buf->domain == NOUVEAU_BO_VRAM) {
if (usage & NOUVEAU_TRANSFER_DISCARD) {
/* Set up a staging area for the user to write to. It will be copied
* back into VRAM on unmap. */
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
nouveau_transfer_staging(nv, tx, true);
} else {
@@ -428,7 +428,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
} else {
/* The buffer is currently idle. Create a staging area for writes,
* and make sure that the cached data is up-to-date. */
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
nouveau_transfer_staging(nv, tx, true);
if (!buf->data)
nouveau_buffer_cache(nv, buf);
@@ -465,31 +465,31 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
map = (uint8_t *)buf->bo->map + buf->offset + box->x;
/* using kernel fences only if !buf->mm */
- if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
+ if ((usage & PIPE_MAP_UNSYNCHRONIZED) || !buf->mm)
return map;
/* If the GPU is currently reading/writing this buffer, we shouldn't
* interfere with its progress. So instead we either wait for the GPU to
* complete its operation, or set up a staging area to perform our work in.
*/
- if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
- if (unlikely(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
- PIPE_TRANSFER_PERSISTENT))) {
+ if (nouveau_buffer_busy(buf, usage & PIPE_MAP_READ_WRITE)) {
+ if (unlikely(usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE |
+ PIPE_MAP_PERSISTENT))) {
/* Discarding was not possible, must sync because
* subsequent transfers might use UNSYNCHRONIZED. */
- nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
+ nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE);
} else
- if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
+ if (usage & PIPE_MAP_DISCARD_RANGE) {
/* The whole range is being discarded, so it doesn't matter what was
* there before. No need to copy anything over. */
nouveau_transfer_staging(nv, tx, true);
map = tx->map;
} else
- if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK)
+ if (nouveau_buffer_busy(buf, PIPE_MAP_READ)) {
+ if (usage & PIPE_MAP_DONTBLOCK)
map = NULL;
else
- nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
+ nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE);
} else {
/* It is expected that the returned buffer be a representation of the
* data in question, so we must copy it over from the buffer. */
@@ -536,8 +536,8 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
struct nouveau_transfer *tx = nouveau_transfer(transfer);
struct nv04_resource *buf = nv04_resource(transfer->resource);
- if (tx->base.usage & PIPE_TRANSFER_WRITE) {
- if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ if (tx->base.usage & PIPE_MAP_WRITE) {
+ if (!(tx->base.usage & PIPE_MAP_FLUSH_EXPLICIT)) {
if (tx->map)
nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
@@ -553,7 +553,7 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
}
}
- if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
+ if (!tx->bo && (tx->base.usage & PIPE_MAP_WRITE))
NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);
nouveau_buffer_transfer_del(nv, tx);
@@ -617,7 +617,7 @@ nouveau_resource_map_offset(struct nouveau_context *nv,
if (res->mm) {
unsigned rw;
- rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
+ rw = (flags & NOUVEAU_BO_WR) ? PIPE_MAP_WRITE : PIPE_MAP_READ;
nouveau_buffer_sync(nv, res, rw);
if (nouveau_bo_map(res->bo, 0, NULL))
return NULL;
@@ -931,7 +931,7 @@ nouveau_buffer_invalidate(struct pipe_context *pipe,
* wipe the valid buffer range. Otherwise we have to create fresh
* storage. (We don't keep track of fences for non-sub-allocated BO's.)
*/
- if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) {
+ if (buf->mm && !nouveau_buffer_busy(buf, PIPE_MAP_WRITE)) {
util_range_set_empty(&buf->valid_buffer_range);
} else {
nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
diff --git a/src/gallium/drivers/nouveau/nouveau_winsys.h b/src/gallium/drivers/nouveau/nouveau_winsys.h
index 792c9ea1547..94116cccfae 100644
--- a/src/gallium/drivers/nouveau/nouveau_winsys.h
+++ b/src/gallium/drivers/nouveau/nouveau_winsys.h
@@ -75,12 +75,12 @@ nouveau_screen_transfer_flags(unsigned pipe)
{
uint32_t flags = 0;
- if (!(pipe & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- if (pipe & PIPE_TRANSFER_READ)
+ if (!(pipe & PIPE_MAP_UNSYNCHRONIZED)) {
+ if (pipe & PIPE_MAP_READ)
flags |= NOUVEAU_BO_RD;
- if (pipe & PIPE_TRANSFER_WRITE)
+ if (pipe & PIPE_MAP_WRITE)
flags |= NOUVEAU_BO_WR;
- if (pipe & PIPE_TRANSFER_DONTBLOCK)
+ if (pipe & PIPE_MAP_DONTBLOCK)
flags |= NOUVEAU_BO_NOBLOCK;
}
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_draw.c b/src/gallium/drivers/nouveau/nv30/nv30_draw.c
index 798ec1423d2..5f3b697a37b 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_draw.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_draw.c
@@ -94,8 +94,8 @@ nv30_render_map_vertices(struct vbuf_render *render)
char *map = pipe_buffer_map_range(
&r->nv30->base.pipe, r->buffer,
r->offset, r->length,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE,
&r->transfer);
assert(map);
return map;
@@ -424,8 +424,8 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
if (!map) {
if (nv30->vtxbuf[i].buffer.resource)
map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer.resource,
- PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_READ, &transfer[i]);
+ PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_READ, &transfer[i]);
}
draw_set_mapped_vertex_buffer(draw, i, map, ~0);
}
@@ -434,8 +434,8 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
const void *map = info->has_user_indices ? info->index.user : NULL;
if (!map)
map = pipe_buffer_map(pipe, info->index.resource,
- PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_READ, &transferi);
+ PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_READ, &transferi);
draw_set_indexes(draw,
(ubyte *) map,
info->index_size, ~0);
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_fragprog.c b/src/gallium/drivers/nouveau/nv30/nv30_fragprog.c
index 065a39921a4..74134bb2f0c 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_fragprog.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_fragprog.c
@@ -50,7 +50,7 @@ nv30_fragprog_upload(struct nv30_context *nv30)
int i;
map = pipe_buffer_map(pipe, fp->buffer,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE,
&transfer);
for (i = 0; i < fp->insn_len; i++)
*map++ = (fp->insn[i] >> 16) | (fp->insn[i] << 16);
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_miptree.c b/src/gallium/drivers/nouveau/nv30/nv30_miptree.c
index e4ed0dbd2a8..2cc069b0c21 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_miptree.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_miptree.c
@@ -324,7 +324,7 @@ nv30_miptree_transfer_map(struct pipe_context *pipe, struct pipe_resource *pt,
tx->tmp.y1 = tx->tmp.h;
tx->tmp.z = 0;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
unsigned offset = tx->img.offset;
unsigned z = tx->img.z;
@@ -349,9 +349,9 @@ nv30_miptree_transfer_map(struct pipe_context *pipe, struct pipe_resource *pt,
return tx->tmp.bo->map;
}
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
access |= NOUVEAU_BO_RD;
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
access |= NOUVEAU_BO_WR;
ret = nouveau_bo_map(tx->tmp.bo, access, nv30->base.client);
@@ -374,7 +374,7 @@ nv30_miptree_transfer_unmap(struct pipe_context *pipe,
struct nv30_miptree *mt = nv30_miptree(tx->base.resource);
unsigned i;
- if (ptx->usage & PIPE_TRANSFER_WRITE) {
+ if (ptx->usage & PIPE_MAP_WRITE) {
bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
for (i = 0; i < tx->base.box.depth; ++i) {
nv30_transfer_rect(nv30, NEAREST, &tx->tmp, &tx->img);
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_transfer.c b/src/gallium/drivers/nouveau/nv30/nv30_transfer.c
index 6f06ee6b1c4..a79fd1f4545 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_transfer.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_transfer.c
@@ -120,7 +120,7 @@ nv30_transfer_rect_fragprog(struct nv30_context *nv30)
if (nv30->blit_fp) {
struct pipe_transfer *transfer;
u32 *map = pipe_buffer_map(pipe, nv30->blit_fp,
- PIPE_TRANSFER_WRITE, &transfer);
+ PIPE_MAP_WRITE, &transfer);
if (map) {
map[0] = 0x17009e00; /* texr r0, i[tex0], texture[0]; end; */
map[1] = 0x1c9dc801;
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_transfer.c b/src/gallium/drivers/nouveau/nv50/nv50_transfer.c
index 28dd02e7e56..4a75ea536b3 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_transfer.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_transfer.c
@@ -258,7 +258,7 @@ nv50_miptree_transfer_map(struct pipe_context *pctx,
int ret;
unsigned flags = 0;
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (usage & PIPE_MAP_DIRECTLY)
return NULL;
tx = CALLOC_STRUCT(nv50_transfer);
@@ -300,7 +300,7 @@ nv50_miptree_transfer_map(struct pipe_context *pctx,
tx->rect[1].pitch = tx->base.stride;
tx->rect[1].domain = NOUVEAU_BO_GART;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
unsigned base = tx->rect[0].base;
unsigned z = tx->rect[0].z;
unsigned i;
@@ -323,9 +323,9 @@ nv50_miptree_transfer_map(struct pipe_context *pctx,
return tx->rect[1].bo->map;
}
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
flags = NOUVEAU_BO_RD;
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
flags |= NOUVEAU_BO_WR;
ret = nouveau_bo_map(tx->rect[1].bo, flags, screen->base.client);
@@ -348,7 +348,7 @@ nv50_miptree_transfer_unmap(struct pipe_context *pctx,
struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
unsigned i;
- if (tx->base.usage & PIPE_TRANSFER_WRITE) {
+ if (tx->base.usage & PIPE_MAP_WRITE) {
for (i = 0; i < tx->base.box.depth; ++i) {
nv50_m2mf_transfer_rect(nv50, &tx->rect[0], &tx->rect[1],
tx->nblocksx, tx->nblocksy);
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c b/src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
index cdfd1d670e7..48a27f6050b 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
@@ -360,11 +360,11 @@ static inline bool
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
{
if (!mt->base.mm) {
- uint32_t access = (usage & PIPE_TRANSFER_WRITE) ?
+ uint32_t access = (usage & PIPE_MAP_WRITE) ?
NOUVEAU_BO_WR : NOUVEAU_BO_RD;
return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
}
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
}
@@ -390,12 +390,12 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
if (!ret)
ret = nouveau_bo_map(mt->base.bo, 0, NULL);
if (ret &&
- (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ (usage & PIPE_MAP_DIRECTLY))
return NULL;
if (!ret)
- usage |= PIPE_TRANSFER_MAP_DIRECTLY;
+ usage |= PIPE_MAP_DIRECTLY;
} else
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (usage & PIPE_MAP_DIRECTLY)
return NULL;
tx = CALLOC_STRUCT(nvc0_transfer);
@@ -417,7 +417,7 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
}
tx->nlayers = box->depth;
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
+ if (usage & PIPE_MAP_DIRECTLY) {
tx->base.stride = mt->level[level].pitch;
tx->base.layer_stride = mt->layer_stride;
uint32_t offset = box->y * tx->base.stride +
@@ -452,7 +452,7 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
tx->rect[1].pitch = tx->base.stride;
tx->rect[1].domain = NOUVEAU_BO_GART;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
unsigned base = tx->rect[0].base;
unsigned z = tx->rect[0].z;
unsigned i;
@@ -475,9 +475,9 @@ nvc0_miptree_transfer_map(struct pipe_context *pctx,
return tx->rect[1].bo->map;
}
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
flags = NOUVEAU_BO_RD;
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
flags |= NOUVEAU_BO_WR;
ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
@@ -501,14 +501,14 @@ nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
unsigned i;
- if (tx->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
+ if (tx->base.usage & PIPE_MAP_DIRECTLY) {
pipe_resource_reference(&transfer->resource, NULL);
FREE(tx);
return;
}
- if (tx->base.usage & PIPE_TRANSFER_WRITE) {
+ if (tx->base.usage & PIPE_MAP_WRITE) {
for (i = 0; i < tx->nlayers; ++i) {
nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
tx->nblocksx, tx->nblocksy);
@@ -526,7 +526,7 @@ nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
} else {
nouveau_bo_ref(NULL, &tx->rect[1].bo);
}
- if (tx->base.usage & PIPE_TRANSFER_READ)
+ if (tx->base.usage & PIPE_MAP_READ)
NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);
pipe_resource_reference(&transfer->resource, NULL);
diff --git a/src/gallium/drivers/panfrost/pan_resource.c b/src/gallium/drivers/panfrost/pan_resource.c
index 54802efe996..b9078c3bde0 100644
--- a/src/gallium/drivers/panfrost/pan_resource.c
+++ b/src/gallium/drivers/panfrost/pan_resource.c
@@ -772,7 +772,7 @@ static void *
panfrost_transfer_map(struct pipe_context *pctx,
struct pipe_resource *resource,
unsigned level,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ unsigned usage, /* a combination of PIPE_MAP_x */
const struct pipe_box *box,
struct pipe_transfer **out_transfer)
{
@@ -783,7 +783,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
struct panfrost_bo *bo = rsrc->bo;
/* Can't map tiled/compressed directly */
- if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsrc->modifier != DRM_FORMAT_MOD_LINEAR)
+ if ((usage & PIPE_MAP_DIRECTLY) && rsrc->modifier != DRM_FORMAT_MOD_LINEAR)
return NULL;
struct panfrost_gtransfer *transfer = rzalloc(pctx, struct panfrost_gtransfer);
@@ -814,7 +814,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
* from a pending batch XXX */
panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, true);
- if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
+ if ((usage & PIPE_MAP_READ) && rsrc->slices[level].initialized) {
pan_blit_to_staging(pctx, transfer);
panfrost_flush_batches_accessing_bo(ctx, staging->bo, true);
panfrost_bo_wait(staging->bo, INT64_MAX, false);
@@ -830,12 +830,12 @@ panfrost_transfer_map(struct pipe_context *pctx,
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
- bool create_new_bo = usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ bool create_new_bo = usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE;
bool copy_resource = false;
if (!create_new_bo &&
- !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
- (usage & PIPE_TRANSFER_WRITE) &&
+ !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
+ (usage & PIPE_MAP_WRITE) &&
!(resource->target == PIPE_BUFFER
&& !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) &&
panfrost_pending_batches_access_bo(ctx, bo)) {
@@ -887,15 +887,15 @@ panfrost_transfer_map(struct pipe_context *pctx,
panfrost_bo_wait(bo, INT64_MAX, true);
}
}
- } else if ((usage & PIPE_TRANSFER_WRITE)
+ } else if ((usage & PIPE_MAP_WRITE)
&& resource->target == PIPE_BUFFER
&& !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
/* No flush for writes to uninitialized */
- } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- if (usage & PIPE_TRANSFER_WRITE) {
+ } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
+ if (usage & PIPE_MAP_WRITE) {
panfrost_flush_batches_accessing_bo(ctx, bo, true);
panfrost_bo_wait(bo, INT64_MAX, true);
- } else if (usage & PIPE_TRANSFER_READ) {
+ } else if (usage & PIPE_MAP_READ) {
panfrost_flush_batches_accessing_bo(ctx, bo, false);
panfrost_bo_wait(bo, INT64_MAX, false);
}
@@ -907,7 +907,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
transfer->map = ralloc_size(transfer, transfer->base.layer_stride * box->depth);
assert(box->depth == 1);
- if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
+ if ((usage & PIPE_MAP_READ) && rsrc->slices[level].initialized) {
panfrost_load_tiled_image(
transfer->map,
bo->cpu + rsrc->slices[level].offset,
@@ -925,7 +925,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
* caching... I don't know if this is actually possible but we
* should still get it right */
- unsigned dpw = PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_WRITE | PIPE_TRANSFER_PERSISTENT;
+ unsigned dpw = PIPE_MAP_DIRECTLY | PIPE_MAP_WRITE | PIPE_MAP_PERSISTENT;
if ((usage & dpw) == dpw && rsrc->index_cache)
return NULL;
@@ -938,7 +938,7 @@ panfrost_transfer_map(struct pipe_context *pctx,
/* By mapping direct-write, we're implicitly already
* initialized (maybe), so be conservative */
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
rsrc->slices[level].initialized = true;
panfrost_minmax_cache_invalidate(rsrc->index_cache, &transfer->base);
}
@@ -994,7 +994,7 @@ panfrost_transfer_unmap(struct pipe_context *pctx,
* malformed AFBC data if uninitialized */
if (trans->staging.rsrc) {
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ if (transfer->usage & PIPE_MAP_WRITE) {
if (panfrost_should_linear_convert(prsrc, transfer)) {
panfrost_bo_unreference(prsrc->bo);
@@ -1018,7 +1018,7 @@ panfrost_transfer_unmap(struct pipe_context *pctx,
if (trans->map) {
struct panfrost_bo *bo = prsrc->bo;
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ if (transfer->usage & PIPE_MAP_WRITE) {
prsrc->slices[transfer->level].initialized = true;
if (prsrc->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {
diff --git a/src/gallium/drivers/r300/r300_query.c b/src/gallium/drivers/r300/r300_query.c
index f9e4aca0e92..842b9219a66 100644
--- a/src/gallium/drivers/r300/r300_query.c
+++ b/src/gallium/drivers/r300/r300_query.c
@@ -160,8 +160,8 @@ static bool r300_get_query_result(struct pipe_context* pipe,
}
map = r300->rws->buffer_map(q->buf, r300->cs,
- PIPE_TRANSFER_READ |
- (!wait ? PIPE_TRANSFER_DONTBLOCK : 0));
+ PIPE_MAP_READ |
+ (!wait ? PIPE_MAP_DONTBLOCK : 0));
if (!map)
return FALSE;
diff --git a/src/gallium/drivers/r300/r300_render.c b/src/gallium/drivers/r300/r300_render.c
index 18e3e3b4579..b2fb8c37d9c 100644
--- a/src/gallium/drivers/r300/r300_render.c
+++ b/src/gallium/drivers/r300/r300_render.c
@@ -374,7 +374,7 @@ static void r300_draw_arrays_immediate(struct r300_context *r300,
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
r300_resource(vbuf->buffer.resource)->buf,
- r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
+ r300->cs, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
mapelem[i] = map[vbi] + (velem->src_offset / 4);
@@ -609,8 +609,8 @@ static void r300_draw_elements(struct r300_context *r300,
/* If we got here, then orgIndexBuffer == indexBuffer. */
uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
r300->cs,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED);
+ PIPE_MAP_READ |
+ PIPE_MAP_UNSYNCHRONIZED);
if (info->mode == PIPE_PRIM_TRIANGLES) {
memcpy(indices3, ptr + start, 6);
@@ -922,7 +922,7 @@ static boolean r300_render_allocate_vertices(struct vbuf_render* render,
}
r300->draw_vbo_offset = 0;
r300render->vbo_ptr = rws->buffer_map(r300->vbo, r300->cs,
- PIPE_TRANSFER_WRITE);
+ PIPE_MAP_WRITE);
}
r300render->vertex_size = vertex_size;
diff --git a/src/gallium/drivers/r300/r300_render_translate.c b/src/gallium/drivers/r300/r300_render_translate.c
index 7dc49d35298..f3749815773 100644
--- a/src/gallium/drivers/r300/r300_render_translate.c
+++ b/src/gallium/drivers/r300/r300_render_translate.c
@@ -41,7 +41,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
- &r300->context, info, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
+ &r300->context, info, PIPE_MAP_UNSYNCHRONIZED, index_offset,
*start, count, ptr);
*index_size = 2;
@@ -55,7 +55,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr);
util_rebuild_ushort_elts_to_userptr(&r300->context, info,
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
@@ -70,7 +70,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr);
util_rebuild_uint_elts_to_userptr(&r300->context, info,
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
diff --git a/src/gallium/drivers/r300/r300_screen_buffer.c b/src/gallium/drivers/r300/r300_screen_buffer.c
index c946cfc8d03..721d8c82b80 100644
--- a/src/gallium/drivers/r300/r300_screen_buffer.c
+++ b/src/gallium/drivers/r300/r300_screen_buffer.c
@@ -90,9 +90,9 @@ r300_buffer_transfer_map( struct pipe_context *context,
return rbuf->malloced_buffer + box->x;
}
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
- !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- assert(usage & PIPE_TRANSFER_WRITE);
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
+ !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
+ assert(usage & PIPE_MAP_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
@@ -123,8 +123,8 @@ r300_buffer_transfer_map( struct pipe_context *context,
/* Buffers are never used for write, therefore mapping for read can be
* unsynchronized. */
- if (!(usage & PIPE_TRANSFER_WRITE)) {
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ if (!(usage & PIPE_MAP_WRITE)) {
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
map = rws->buffer_map(rbuf->buf, r300->cs, usage);
diff --git a/src/gallium/drivers/r300/r300_transfer.c b/src/gallium/drivers/r300/r300_transfer.c
index 95cea726551..e438923e89f 100644
--- a/src/gallium/drivers/r300/r300_transfer.c
+++ b/src/gallium/drivers/r300/r300_transfer.c
@@ -135,7 +135,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
* for this transfer.
* Also make write transfers pipelined. */
if (tex->tex.microtile || tex->tex.macrotile[level] ||
- (referenced_hw && !(usage & PIPE_TRANSFER_READ) &&
+ (referenced_hw && !(usage & PIPE_MAP_READ) &&
r300_is_blit_supported(texture->format))) {
struct pipe_resource base;
@@ -194,7 +194,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
trans->transfer.layer_stride =
trans->linear_texture->tex.layer_size_in_bytes[0];
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
/* We cannot map a tiled texture directly because the data is
* in a different order, therefore we do detiling using a blit. */
r300_copy_from_tiled_texture(ctx, trans);
@@ -209,7 +209,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
trans->offset = r300_texture_get_offset(tex, level, box->z);
if (referenced_cs &&
- !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
r300_flush(ctx, 0, NULL);
}
}
@@ -249,7 +249,7 @@ void r300_texture_transfer_unmap(struct pipe_context *ctx,
struct r300_transfer *trans = r300_transfer(transfer);
if (trans->linear_texture) {
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ if (transfer->usage & PIPE_MAP_WRITE) {
r300_copy_into_tiled_texture(ctx, trans);
}
diff --git a/src/gallium/drivers/r600/compute_memory_pool.c b/src/gallium/drivers/r600/compute_memory_pool.c
index 685c2b6d21b..6b64b3da0b1 100644
--- a/src/gallium/drivers/r600/compute_memory_pool.c
+++ b/src/gallium/drivers/r600/compute_memory_pool.c
@@ -479,7 +479,7 @@ static void compute_memory_move_item(struct compute_memory_pool *pool,
u_box_1d(new_start_in_dw * 4, (offset + item->size_in_dw) * 4, &box);
- map = pipe->transfer_map(pipe, src, 0, PIPE_TRANSFER_READ_WRITE,
+ map = pipe->transfer_map(pipe, src, 0, PIPE_MAP_READ_WRITE,
&box, &trans);
assert(map);
@@ -614,7 +614,7 @@ static void compute_memory_transfer(
offset_in_chunk, size);
if (device_to_host) {
- map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_READ,
+ map = pipe->transfer_map(pipe, gart, 0, PIPE_MAP_READ,
&(struct pipe_box) { .width = aligned_size * 4,
.height = 1, .depth = 1 }, &xfer);
assert(xfer);
@@ -622,7 +622,7 @@ static void compute_memory_transfer(
memcpy(data, map + internal_offset, size);
pipe->transfer_unmap(pipe, xfer);
} else {
- map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, gart, 0, PIPE_MAP_WRITE,
&(struct pipe_box) { .width = aligned_size * 4,
.height = 1, .depth = 1 }, &xfer);
assert(xfer);
diff --git a/src/gallium/drivers/r600/eg_debug.c b/src/gallium/drivers/r600/eg_debug.c
index 996e1ab8eaf..54f5ce70fa5 100644
--- a/src/gallium/drivers/r600/eg_debug.c
+++ b/src/gallium/drivers/r600/eg_debug.c
@@ -334,8 +334,8 @@ static void eg_dump_last_ib(struct r600_context *rctx, FILE *f)
*/
uint32_t *map = rctx->b.ws->buffer_map(rctx->last_trace_buf->buf,
NULL,
- PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_READ);
+ PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_READ);
if (map)
last_trace_id = *map;
}
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 98d4b97d7fc..52f94e6af18 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -458,7 +458,7 @@ static void *evergreen_create_compute_state(struct pipe_context *ctx,
shader->bc.ndw * 4);
p = r600_buffer_map_sync_with_rings(
&rctx->b, shader->code_bo,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
//TODO: use util_memcpy_cpu_to_le32 ?
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
rctx->b.ws->buffer_unmap(shader->code_bo->buf);
@@ -557,7 +557,7 @@ static void evergreen_compute_upload_input(struct pipe_context *ctx,
u_box_1d(0, input_size, &box);
num_work_groups_start = ctx->transfer_map(ctx,
(struct pipe_resource*)shader->kernel_param,
- 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ 0, PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
&box, &transfer);
global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
@@ -758,7 +758,7 @@ static void compute_emit_cs(struct r600_context *rctx,
if (info->indirect) {
struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
- unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_TRANSFER_READ);
+ unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_MAP_READ);
unsigned offset = info->indirect_offset / 4;
indirect_grid[0] = data[offset];
indirect_grid[1] = data[offset + 1];
@@ -1258,7 +1258,7 @@ static void *r600_compute_global_transfer_map(struct pipe_context *ctx,
dst = (struct pipe_resource*)item->real_buffer;
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
buffer->chunk->status |= ITEM_MAPPED_FOR_READING;
COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
diff --git a/src/gallium/drivers/r600/r600_asm.c b/src/gallium/drivers/r600/r600_asm.c
index 8f710052c68..010bd3366ad 100644
--- a/src/gallium/drivers/r600/r600_asm.c
+++ b/src/gallium/drivers/r600/r600_asm.c
@@ -2776,7 +2776,7 @@ void *r600_create_vertex_fetch_shader(struct pipe_context *ctx,
bytecode = r600_buffer_map_sync_with_rings
(&rctx->b, shader->buffer,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
bytecode += shader->offset / 4;
if (R600_BIG_ENDIAN) {
diff --git a/src/gallium/drivers/r600/r600_blit.c b/src/gallium/drivers/r600/r600_blit.c
index 32154b11b7c..b8924f82674 100644
--- a/src/gallium/drivers/r600/r600_blit.c
+++ b/src/gallium/drivers/r600/r600_blit.c
@@ -661,7 +661,7 @@ static void r600_clear_buffer(struct pipe_context *ctx, struct pipe_resource *ds
r600_blitter_end(ctx);
} else {
uint32_t *map = r600_buffer_map_sync_with_rings(&rctx->b, r600_resource(dst),
- PIPE_TRANSFER_WRITE);
+ PIPE_MAP_WRITE);
map += offset / 4;
size /= 4;
for (unsigned i = 0; i < size; i++)
diff --git a/src/gallium/drivers/r600/r600_buffer_common.c b/src/gallium/drivers/r600/r600_buffer_common.c
index d0f44dcb662..dbf7757296f 100644
--- a/src/gallium/drivers/r600/r600_buffer_common.c
+++ b/src/gallium/drivers/r600/r600_buffer_common.c
@@ -53,11 +53,11 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
assert(!(resource->flags & RADEON_FLAG_SPARSE));
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ if (usage & PIPE_MAP_UNSYNCHRONIZED) {
return ctx->ws->buffer_map(resource->buf, NULL, usage);
}
- if (!(usage & PIPE_TRANSFER_WRITE)) {
+ if (!(usage & PIPE_MAP_WRITE)) {
/* have to wait for the last write */
rusage = RADEON_USAGE_WRITE;
}
@@ -65,7 +65,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->buf, rusage)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
@@ -76,7 +76,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->buf, rusage)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
@@ -86,7 +86,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
}
if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
return NULL;
} else {
/* We will be wait for the GPU. Wait for any offloaded
@@ -365,45 +365,45 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
* So don't ever use staging buffers.
*/
if (rbuffer->b.is_user_ptr)
- usage |= PIPE_TRANSFER_PERSISTENT;
+ usage |= PIPE_MAP_PERSISTENT;
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
- if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
+ if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
- usage & PIPE_TRANSFER_WRITE &&
+ usage & PIPE_MAP_WRITE &&
!rbuffer->b.is_shared &&
!util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
/* If discarding the entire range, discard the whole resource instead. */
- if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
+ if (usage & PIPE_MAP_DISCARD_RANGE &&
box->x == 0 && box->width == resource->width0) {
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
- !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
+ !(usage & (PIPE_MAP_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INVALIDATE))) {
- assert(usage & PIPE_TRANSFER_WRITE);
+ assert(usage & PIPE_MAP_WRITE);
if (r600_invalidate_buffer(rctx, rbuffer)) {
/* At this point, the buffer is always idle. */
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
} else {
/* Fall back to a temporary buffer. */
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage |= PIPE_MAP_DISCARD_RANGE;
}
}
- if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+ if ((usage & PIPE_MAP_DISCARD_RANGE) &&
!(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
- ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT)) &&
+ ((!(usage & (PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_PERSISTENT)) &&
r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
(rbuffer->flags & RADEON_FLAG_SPARSE))) {
- assert(usage & PIPE_TRANSFER_WRITE);
+ assert(usage & PIPE_MAP_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU.
*/
@@ -429,12 +429,12 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
}
} else {
/* At this point, the buffer is always idle (we checked it above). */
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
}
/* Use a staging buffer in cached GTT for reads. */
- else if (((usage & PIPE_TRANSFER_READ) &&
- !(usage & PIPE_TRANSFER_PERSISTENT) &&
+ else if (((usage & PIPE_MAP_READ) &&
+ !(usage & PIPE_MAP_PERSISTENT) &&
(rbuffer->domains & RADEON_DOMAIN_VRAM ||
rbuffer->flags & RADEON_FLAG_GTT_WC) &&
r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
@@ -452,7 +452,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
0, 0, resource, 0, box);
data = r600_buffer_map_sync_with_rings(rctx, staging,
- usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
+ usage & ~PIPE_MAP_UNSYNCHRONIZED);
if (!data) {
r600_resource_reference(&staging, NULL);
return NULL;
@@ -506,8 +506,8 @@ static void r600_buffer_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *rel_box)
{
- unsigned required_usage = PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_FLUSH_EXPLICIT;
+ unsigned required_usage = PIPE_MAP_WRITE |
+ PIPE_MAP_FLUSH_EXPLICIT;
if ((transfer->usage & required_usage) == required_usage) {
struct pipe_box box;
@@ -523,8 +523,8 @@ static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- if (transfer->usage & PIPE_TRANSFER_WRITE &&
- !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
+ if (transfer->usage & PIPE_MAP_WRITE &&
+ !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
r600_resource_reference(&rtransfer->staging, NULL);
@@ -545,10 +545,10 @@ void r600_buffer_subdata(struct pipe_context *ctx,
struct pipe_box box;
uint8_t *map = NULL;
- usage |= PIPE_TRANSFER_WRITE;
+ usage |= PIPE_MAP_WRITE;
- if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ if (!(usage & PIPE_MAP_DIRECTLY))
+ usage |= PIPE_MAP_DISCARD_RANGE;
u_box_1d(offset, size, &box);
map = r600_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
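For orientation, the pattern r600_buffer_subdata() implements here is the same one state trackers reach through the generic Gallium helpers: request a write mapping, let PIPE_MAP_DISCARD_RANGE free the driver to stage or rename the storage instead of stalling, copy the data, unmap. A minimal caller-side sketch, assuming the inline wrappers from util/u_inlines.h; upload_bytes() is a made-up name:

#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"

/* Hypothetical helper: write `size` bytes at `offset` through the
 * generic transfer path.  PIPE_MAP_DISCARD_RANGE lets the driver hand
 * back fresh storage for the range instead of waiting for the GPU. */
static void
upload_bytes(struct pipe_context *pipe, struct pipe_resource *buf,
             unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   void *map = pipe_buffer_map_range(pipe, buf, offset, size,
                                     PIPE_MAP_WRITE |
                                     PIPE_MAP_DISCARD_RANGE,
                                     &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   pipe_buffer_unmap(pipe, transfer);
}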
diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c
index eb26074221a..9983a50f404 100644
--- a/src/gallium/drivers/r600/r600_pipe.c
+++ b/src/gallium/drivers/r600/r600_pipe.c
@@ -811,7 +811,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws,
templ.usage = PIPE_USAGE_DEFAULT;
struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
- unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_TRANSFER_WRITE);
+ unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_MAP_WRITE);
memset(map, 0, 256);
diff --git a/src/gallium/drivers/r600/r600_query.c b/src/gallium/drivers/r600/r600_query.c
index 082052a4a7d..4935da1723d 100644
--- a/src/gallium/drivers/r600/r600_query.c
+++ b/src/gallium/drivers/r600/r600_query.c
@@ -527,8 +527,8 @@ static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
{
/* Callers ensure that the buffer is currently unused by the GPU. */
uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED);
+ PIPE_MAP_WRITE |
+ PIPE_MAP_UNSYNCHRONIZED);
if (!results)
return false;
@@ -1337,8 +1337,8 @@ bool r600_query_hw_get_result(struct r600_common_context *rctx,
query->ops->clear_result(query, result);
for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
- unsigned usage = PIPE_TRANSFER_READ |
- (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
+ unsigned usage = PIPE_MAP_READ |
+ (wait ? 0 : PIPE_MAP_DONTBLOCK);
unsigned results_base = 0;
void *map;
@@ -1896,7 +1896,7 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
return;
/* initialize buffer with zeroes */
- results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
+ results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_WRITE);
if (results) {
memset(results, 0, max_rbs * 4 * 4);
@@ -1910,7 +1910,7 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
/* analyze results */
- results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
+ results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_READ);
if (results) {
for(i = 0; i < max_rbs; i++) {
/* at least highest bit will be set if backend is used */
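The query hunks above all build their map flags the same way: PIPE_MAP_READ, plus PIPE_MAP_DONTBLOCK when the caller did not ask to wait, so the map returns NULL instead of stalling and the result can be polled again on a later frame. A minimal sketch of that decision, assuming the r600 common declarations used above; the function name is made up:

#include <stdbool.h>
#include "pipe/p_defines.h"
#include "r600_pipe_common.h"   /* r600_common_context, r600_resource (assumed) */

/* Sketch: map query results for reading.  Returns NULL when `wait` is
 * false and the GPU still owns the buffer, so the caller can retry
 * later instead of blocking. */
static void *
map_query_results(struct r600_common_context *ctx,
                  struct r600_resource *buf, bool wait)
{
   unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);

   return r600_buffer_map_sync_with_rings(ctx, buf, usage);
}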
diff --git a/src/gallium/drivers/r600/r600_shader.c b/src/gallium/drivers/r600/r600_shader.c
index f58f8ccbe8b..f01c5361324 100644
--- a/src/gallium/drivers/r600/r600_shader.c
+++ b/src/gallium/drivers/r600/r600_shader.c
@@ -149,7 +149,7 @@ static int store_shader(struct pipe_context *ctx,
}
ptr = r600_buffer_map_sync_with_rings(
&rctx->b, shader->bo,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (R600_BIG_ENDIAN) {
for (i = 0; i < shader->shader.bc.ndw; ++i) {
ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
diff --git a/src/gallium/drivers/r600/r600_state.c b/src/gallium/drivers/r600/r600_state.c
index b84aad6b745..28e253069f6 100644
--- a/src/gallium/drivers/r600/r600_state.c
+++ b/src/gallium/drivers/r600/r600_state.c
@@ -998,7 +998,7 @@ static void r600_init_color_surface(struct r600_context *rctx,
}
/* Set the contents to 0xCC. */
- ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
+ ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_MAP_WRITE, &transfer);
memset(ptr, 0xCC, cmask.size);
pipe_buffer_unmap(&rctx->b.b, transfer);
}
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index 89429dd504e..b036bf58b58 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -2143,7 +2143,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
/* Have to get start/count from indirect buffer, slow path ahead... */
struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
if (data) {
data += info->indirect->offset / sizeof(unsigned);
start = data[2] * index_size;
diff --git a/src/gallium/drivers/r600/r600_test_dma.c b/src/gallium/drivers/r600/r600_test_dma.c
index 512e7742021..e8e54fb99f6 100644
--- a/src/gallium/drivers/r600/r600_test_dma.c
+++ b/src/gallium/drivers/r600/r600_test_dma.c
@@ -59,7 +59,7 @@ static void set_random_pixels(struct pipe_context *ctx,
uint8_t *map;
unsigned x,y,z;
- map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE,
+ map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_WRITE,
0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
@@ -94,7 +94,7 @@ static bool compare_textures(struct pipe_context *ctx,
int y,z;
bool pass = true;
- map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ,
+ map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_READ,
0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
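set_random_pixels() and compare_textures() above go through the generic texture transfer wrappers; the detail worth calling out is that a mapped texture must be walked with the stride the driver returns, not an assumed pitch. A minimal sketch for level 0 of a 2D texture, assuming the pipe_transfer_map()/pipe_transfer_unmap() helpers from util/u_inlines.h and a 4-byte-per-pixel format; fill_level0() is a made-up name:

#include <stdint.h>
#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"

/* Hypothetical helper: fill the base level of a 2D texture with a
 * constant byte, honoring the driver-chosen row stride. */
static void
fill_level0(struct pipe_context *ctx, struct pipe_resource *tex, uint8_t value)
{
   struct pipe_transfer *t;
   uint8_t *map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_MAP_WRITE,
                                    0, 0, tex->width0, tex->height0, &t);
   if (!map)
      return;

   /* t->stride may be larger than width * bytes-per-pixel. */
   for (unsigned y = 0; y < tex->height0; y++)
      memset(map + y * t->stride, value, tex->width0 * 4 /* RGBA8 assumed */);

   pipe_transfer_unmap(ctx, t);
}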
diff --git a/src/gallium/drivers/r600/r600_texture.c b/src/gallium/drivers/r600/r600_texture.c
index edbe9f97efa..5cc35334feb 100644
--- a/src/gallium/drivers/r600/r600_texture.c
+++ b/src/gallium/drivers/r600/r600_texture.c
@@ -1259,7 +1259,7 @@ static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
/* r600g doesn't react to dirty_tex_descriptor_counter */
return rscreen->chip_class >= GFX6 &&
!rtex->resource.b.is_shared &&
- !(transfer_usage & PIPE_TRANSFER_READ) &&
+ !(transfer_usage & PIPE_MAP_READ) &&
rtex->resource.b.b.last_level == 0 &&
util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
box->x, box->y, box->z,
@@ -1336,7 +1336,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
*/
if (!rtex->surface.is_linear)
use_staging_texture = true;
- else if (usage & PIPE_TRANSFER_READ)
+ else if (usage & PIPE_MAP_READ)
use_staging_texture =
rtex->resource.domains & RADEON_DOMAIN_VRAM ||
rtex->resource.flags & RADEON_FLAG_GTT_WC;
@@ -1386,7 +1386,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
return NULL;
}
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
if (!temp) {
R600_ERR("failed to create a temporary depth texture\n");
@@ -1432,7 +1432,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
r600_init_temp_resource_from_box(&resource, texture, box, level,
R600_RESOURCE_FLAG_TRANSFER);
- resource.usage = (usage & PIPE_TRANSFER_READ) ?
+ resource.usage = (usage & PIPE_MAP_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
@@ -1449,10 +1449,10 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
&trans->b.b.stride,
&trans->b.b.layer_stride);
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
r600_copy_to_staging_texture(ctx, trans);
else
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
buf = trans->staging;
} else {
@@ -1481,7 +1481,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_resource *texture = transfer->resource;
struct r600_texture *rtex = (struct r600_texture*)texture;
- if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
+ if ((transfer->usage & PIPE_MAP_WRITE) && rtransfer->staging) {
if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
diff --git a/src/gallium/drivers/r600/radeon_uvd.c b/src/gallium/drivers/r600/radeon_uvd.c
index e9e959a7cf4..e220c36bca2 100644
--- a/src/gallium/drivers/r600/radeon_uvd.c
+++ b/src/gallium/drivers/r600/radeon_uvd.c
@@ -153,7 +153,7 @@ static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
/* and map it for CPU access */
ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
@@ -842,7 +842,7 @@ static void ruvd_begin_frame(struct pipe_video_codec *decoder,
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(
dec->bs_buffers[dec->cur_buffer].res->buf,
- dec->cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
}
/**
@@ -896,7 +896,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_TRANSFER_WRITE |
+ PIPE_MAP_WRITE |
RADEON_TRANSFER_TEMPORARY);
if (!dec->bs_ptr)
return;
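The winsys-level buffer_map() calls in these video hunks take the command stream as an extra argument so the winsys can flush it if the buffer is still referenced, and they OR in RADEON_TRANSFER_TEMPORARY because the mapping is released immediately rather than cached. A minimal sketch of that pattern, assuming the radeon_winsys interface shown later in this change; write_msg() is a made-up helper:

#include <stdbool.h>
#include <string.h>
#include "pipe/p_defines.h"
#include "radeon_winsys.h"   /* struct radeon_winsys, RADEON_TRANSFER_TEMPORARY */

/* Hypothetical helper: copy a CPU-side message into a winsys buffer.
 * Passing the cs lets the winsys flush it if the buffer is still in
 * flight; RADEON_TRANSFER_TEMPORARY marks the mapping as short-lived
 * so the winsys does not keep it around. */
static bool
write_msg(struct radeon_winsys *ws, struct pb_buffer *bo,
          struct radeon_cmdbuf *cs, const void *msg, size_t size)
{
   void *ptr = ws->buffer_map(bo, cs,
                              PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
   if (!ptr)
      return false;

   memcpy(ptr, msg, size);
   ws->buffer_unmap(bo);
   return true;
}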
diff --git a/src/gallium/drivers/r600/radeon_vce.c b/src/gallium/drivers/r600/radeon_vce.c
index cd7cdb24b89..ee82d7d365f 100644
--- a/src/gallium/drivers/r600/radeon_vce.c
+++ b/src/gallium/drivers/r600/radeon_vce.c
@@ -71,7 +71,7 @@ static void flush(struct rvce_encoder *enc)
#if 0
static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
{
- uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
+ uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE);
unsigned i = 0;
fprintf(stderr, "\n");
fprintf(stderr, "encStatus:\t\t\t%08x\n", ptr[i++]);
@@ -359,7 +359,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder,
if (size) {
uint32_t *ptr = enc->ws->buffer_map(
fb->res->buf, enc->cs,
- PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (ptr[1]) {
*size = ptr[4] - ptr[9];
diff --git a/src/gallium/drivers/r600/radeon_video.c b/src/gallium/drivers/r600/radeon_video.c
index 81c1a5e511b..b795fe5b874 100644
--- a/src/gallium/drivers/r600/radeon_video.c
+++ b/src/gallium/drivers/r600/radeon_video.c
@@ -98,12 +98,12 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
goto error;
src = ws->buffer_map(old_buf.res->buf, cs,
- PIPE_TRANSFER_READ | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
if (!src)
goto error;
dst = ws->buffer_map(new_buf->res->buf, cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dst)
goto error;
diff --git a/src/gallium/drivers/radeon/radeon_uvd.c b/src/gallium/drivers/radeon/radeon_uvd.c
index 41f900076e0..1520fda6f3c 100644
--- a/src/gallium/drivers/radeon/radeon_uvd.c
+++ b/src/gallium/drivers/radeon/radeon_uvd.c
@@ -144,7 +144,7 @@ static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
/* and map it for CPU access */
ptr =
- dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
@@ -1014,7 +1014,7 @@ static void ruvd_begin_frame(struct pipe_video_codec *decoder, struct pipe_video
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
}
/**
@@ -1058,7 +1058,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dec->bs_ptr)
return;
diff --git a/src/gallium/drivers/radeon/radeon_uvd_enc.c b/src/gallium/drivers/radeon/radeon_uvd_enc.c
index 82aebc401bc..4633e1580a9 100644
--- a/src/gallium/drivers/radeon/radeon_uvd_enc.c
+++ b/src/gallium/drivers/radeon/radeon_uvd_enc.c
@@ -247,7 +247,7 @@ static void radeon_uvd_enc_get_feedback(struct pipe_video_codec *encoder, void *
if (NULL != size) {
radeon_uvd_enc_feedback_t *fb_data = (radeon_uvd_enc_feedback_t *)enc->ws->buffer_map(
- fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!fb_data->status)
*size = fb_data->bitstream_size;
diff --git a/src/gallium/drivers/radeon/radeon_vce.c b/src/gallium/drivers/radeon/radeon_vce.c
index 416240b6d1e..8f4312e708c 100644
--- a/src/gallium/drivers/radeon/radeon_vce.c
+++ b/src/gallium/drivers/radeon/radeon_vce.c
@@ -59,7 +59,7 @@ static void flush(struct rvce_encoder *enc)
#if 0
static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
{
- uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
+ uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_MAP_READ_WRITE);
unsigned i = 0;
fprintf(stderr, "\n");
fprintf(stderr, "encStatus:\t\t\t%08x\n", ptr[i++]);
@@ -346,7 +346,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder, void *feedback,
if (size) {
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs,
- PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (ptr[1]) {
*size = ptr[4] - ptr[9];
diff --git a/src/gallium/drivers/radeon/radeon_vcn_dec.c b/src/gallium/drivers/radeon/radeon_vcn_dec.c
index f0ecf1c7683..4c484c5bad8 100644
--- a/src/gallium/drivers/radeon/radeon_vcn_dec.c
+++ b/src/gallium/drivers/radeon/radeon_vcn_dec.c
@@ -961,7 +961,7 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
/* ctx needs probs table */
ptr = dec->ws->buffer_map(dec->ctx.res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
fill_probs_table(ptr);
dec->ws->buffer_unmap(dec->ctx.res->buf);
dec->bs_ptr = NULL;
@@ -1052,7 +1052,7 @@ static void map_msg_fb_it_probs_buf(struct radeon_decoder *dec)
/* and map it for CPU access */
ptr =
- dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
/* calc buffer offsets */
dec->msg = ptr;
@@ -1331,7 +1331,7 @@ static void radeon_dec_begin_frame(struct pipe_video_codec *decoder,
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
}
/**
@@ -1376,7 +1376,7 @@ static void radeon_dec_decode_bitstream(struct pipe_video_codec *decoder,
}
dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dec->bs_ptr)
return;
@@ -1559,7 +1559,7 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context,
buf = &dec->msg_fb_it_probs_buffers[i];
ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
ptr += FB_BUFFER_OFFSET + FB_BUFFER_SIZE;
fill_probs_table(ptr);
dec->ws->buffer_unmap(buf->res->buf);
diff --git a/src/gallium/drivers/radeon/radeon_vcn_enc.c b/src/gallium/drivers/radeon/radeon_vcn_enc.c
index 331724edecb..d832da17efa 100644
--- a/src/gallium/drivers/radeon/radeon_vcn_enc.c
+++ b/src/gallium/drivers/radeon/radeon_vcn_enc.c
@@ -363,7 +363,7 @@ static void radeon_enc_get_feedback(struct pipe_video_codec *encoder, void *feed
if (size) {
uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs,
- PIPE_TRANSFER_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | RADEON_TRANSFER_TEMPORARY);
if (ptr[1])
*size = ptr[6];
else
diff --git a/src/gallium/drivers/radeon/radeon_video.c b/src/gallium/drivers/radeon/radeon_video.c
index 8e2b1a3c87d..32f1ddd9710 100644
--- a/src/gallium/drivers/radeon/radeon_video.c
+++ b/src/gallium/drivers/radeon/radeon_video.c
@@ -86,11 +86,11 @@ bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
if (!si_vid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
- src = ws->buffer_map(old_buf.res->buf, cs, PIPE_TRANSFER_READ | RADEON_TRANSFER_TEMPORARY);
+ src = ws->buffer_map(old_buf.res->buf, cs, PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
if (!src)
goto error;
- dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!dst)
goto error;
diff --git a/src/gallium/drivers/radeon/radeon_winsys.h b/src/gallium/drivers/radeon/radeon_winsys.h
index 3ac425aa094..98333e957be 100644
--- a/src/gallium/drivers/radeon/radeon_winsys.h
+++ b/src/gallium/drivers/radeon/radeon_winsys.h
@@ -101,7 +101,7 @@ enum radeon_transfer_flags
* Not unmapping buffers is an important performance optimization for
* OpenGL (avoids kernel overhead for frequently mapped buffers).
*/
- RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
+ RADEON_TRANSFER_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
@@ -310,7 +310,7 @@ struct radeon_winsys {
*
* \param buf A winsys buffer object to map.
* \param cs A command stream to flush if the buffer is referenced by it.
- * \param usage A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
+ * \param usage A bitmask of the PIPE_MAP_* and RADEON_TRANSFER_* flags.
* \return The pointer at the beginning of the buffer.
*/
void *(*buffer_map)(struct pb_buffer *buf, struct radeon_cmdbuf *cs,
diff --git a/src/gallium/drivers/radeonsi/gfx10_query.c b/src/gallium/drivers/radeonsi/gfx10_query.c
index aedf5090eed..5587460cc09 100644
--- a/src/gallium/drivers/radeonsi/gfx10_query.c
+++ b/src/gallium/drivers/radeonsi/gfx10_query.c
@@ -156,7 +156,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
* compatibility with the SET_PREDICATION packet.
*/
uint64_t *results = sctx->ws->buffer_map(qbuf->buf->buf, NULL,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
+ PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
assert(results);
for (unsigned i = 0, e = qbuf->buf->b.b.width0 / sizeof(struct gfx10_sh_query_buffer_mem); i < e;
@@ -292,7 +292,7 @@ static bool gfx10_sh_query_get_result(struct si_context *sctx, struct si_query *
for (struct gfx10_sh_query_buffer *qbuf = query->last;;
qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.prev, list)) {
- unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
+ unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
void *map;
if (rquery->b.flushed)
diff --git a/src/gallium/drivers/radeonsi/si_buffer.c b/src/gallium/drivers/radeonsi/si_buffer.c
index e64b51d8d0a..3fa6c36b86d 100644
--- a/src/gallium/drivers/radeonsi/si_buffer.c
+++ b/src/gallium/drivers/radeonsi/si_buffer.c
@@ -51,18 +51,18 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource
assert(!(resource->flags & RADEON_FLAG_SPARSE));
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ if (usage & PIPE_MAP_UNSYNCHRONIZED) {
return sctx->ws->buffer_map(resource->buf, NULL, usage);
}
- if (!(usage & PIPE_TRANSFER_WRITE)) {
+ if (!(usage & PIPE_MAP_WRITE)) {
/* have to wait for the last write */
rusage = RADEON_USAGE_WRITE;
}
if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, resource->buf, rusage)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
} else {
@@ -72,7 +72,7 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource
}
if (radeon_emitted(sctx->sdma_cs, 0) &&
sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs, resource->buf, rusage)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
@@ -82,7 +82,7 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource
}
if (busy || !sctx->ws->buffer_wait(resource->buf, 0, rusage)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
return NULL;
} else {
/* We will be waiting for the GPU. Wait for any offloaded
@@ -339,7 +339,7 @@ static void *si_buffer_get_transfer(struct pipe_context *ctx, struct pipe_resour
struct si_context *sctx = (struct si_context *)ctx;
struct si_transfer *transfer;
- if (usage & PIPE_TRANSFER_THREAD_SAFE)
+ if (usage & PIPE_MAP_THREAD_SAFE)
transfer = malloc(sizeof(*transfer));
else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
transfer = slab_alloc(&sctx->pool_transfers_unsync);
@@ -382,60 +382,60 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
* So don't ever use staging buffers.
*/
if (buf->b.is_user_ptr)
- usage |= PIPE_TRANSFER_PERSISTENT;
+ usage |= PIPE_MAP_PERSISTENT;
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
- if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
- usage & PIPE_TRANSFER_WRITE && !buf->b.is_shared &&
+ if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
+ usage & PIPE_MAP_WRITE && !buf->b.is_shared &&
!util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
/* If discarding the entire range, discard the whole resource instead. */
- if (usage & PIPE_TRANSFER_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ if (usage & PIPE_MAP_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
/* If a buffer in VRAM is too large and the range is discarded, don't
* map it directly. This makes sure that the buffer stays in VRAM.
*/
bool force_discard_range = false;
- if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_DISCARD_RANGE) &&
- !(usage & PIPE_TRANSFER_PERSISTENT) &&
+ if (usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE) &&
+ !(usage & PIPE_MAP_PERSISTENT) &&
/* Try not to decrement the counter if it's not positive. Still racy,
* but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
buf->max_forced_staging_uploads > 0 &&
p_atomic_dec_return(&buf->max_forced_staging_uploads) >= 0) {
- usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_UNSYNCHRONIZED);
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_UNSYNCHRONIZED);
+ usage |= PIPE_MAP_DISCARD_RANGE;
force_discard_range = true;
}
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
- !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
- assert(usage & PIPE_TRANSFER_WRITE);
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
+ !(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
+ assert(usage & PIPE_MAP_WRITE);
if (si_invalidate_buffer(sctx, buf)) {
/* At this point, the buffer is always idle. */
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
} else {
/* Fall back to a temporary buffer. */
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage |= PIPE_MAP_DISCARD_RANGE;
}
}
- if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT &&
+ if (usage & PIPE_MAP_FLUSH_EXPLICIT &&
buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
- usage &= ~(PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_PERSISTENT);
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ usage &= ~(PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT);
+ usage |= PIPE_MAP_DISCARD_RANGE;
force_discard_range = true;
}
- if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
- ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_PERSISTENT))) ||
+ if (usage & PIPE_MAP_DISCARD_RANGE &&
+ ((!(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT))) ||
(buf->flags & RADEON_FLAG_SPARSE))) {
- assert(usage & PIPE_TRANSFER_WRITE);
+ assert(usage & PIPE_MAP_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU.
*/
@@ -469,16 +469,16 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
}
} else {
/* At this point, the buffer is always idle (we checked it above). */
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
}
/* Use a staging buffer in cached GTT for reads. */
- else if (((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_PERSISTENT) &&
+ else if (((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) &&
(buf->domains & RADEON_DOMAIN_VRAM || buf->flags & RADEON_FLAG_GTT_WC)) ||
(buf->flags & RADEON_FLAG_SPARSE)) {
struct si_resource *staging;
- assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_TRANSFER_THREAD_SAFE)));
+ assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
staging = si_aligned_buffer_create(ctx->screen, SI_RESOURCE_FLAG_UNCACHED,
PIPE_USAGE_STAGING,
box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT), 256);
@@ -487,7 +487,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
si_sdma_copy_buffer(sctx, &staging->b.b, resource, box->x % SI_MAP_BUFFER_ALIGNMENT,
box->x, box->width);
- data = si_buffer_map_sync_with_rings(sctx, staging, usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
+ data = si_buffer_map_sync_with_rings(sctx, staging, usage & ~PIPE_MAP_UNSYNCHRONIZED);
if (!data) {
si_resource_reference(&staging, NULL);
return NULL;
@@ -570,7 +570,7 @@ static void si_buffer_do_flush_region(struct pipe_context *ctx, struct pipe_tran
static void si_buffer_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
const struct pipe_box *rel_box)
{
- unsigned required_usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT;
+ unsigned required_usage = PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT;
if ((transfer->usage & required_usage) == required_usage) {
struct pipe_box box;
@@ -585,14 +585,14 @@ static void si_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_trans
struct si_context *sctx = (struct si_context *)ctx;
struct si_transfer *stransfer = (struct si_transfer *)transfer;
- if (transfer->usage & PIPE_TRANSFER_WRITE && !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
+ if (transfer->usage & PIPE_MAP_WRITE && !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
si_buffer_do_flush_region(ctx, transfer, &transfer->box);
si_resource_reference(&stransfer->staging, NULL);
assert(stransfer->b.staging == NULL); /* for threaded context only */
pipe_resource_reference(&transfer->resource, NULL);
- if (transfer->usage & PIPE_TRANSFER_THREAD_SAFE) {
+ if (transfer->usage & PIPE_MAP_THREAD_SAFE) {
free(transfer);
} else {
/* Don't use pool_transfers_unsync. We are always in the driver
@@ -609,10 +609,10 @@ static void si_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *bu
struct pipe_box box;
uint8_t *map = NULL;
- usage |= PIPE_TRANSFER_WRITE;
+ usage |= PIPE_MAP_WRITE;
- if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ if (!(usage & PIPE_MAP_DIRECTLY))
+ usage |= PIPE_MAP_DISCARD_RANGE;
u_box_1d(offset, size, &box);
map = si_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
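One heuristic in si_buffer_transfer_map() (and its r600 twin earlier) deserves a note: a write mapping of a byte range that has never been part of the buffer's valid range cannot race with the GPU, so it is silently promoted to PIPE_MAP_UNSYNCHRONIZED. A minimal sketch of that check, assuming the util_range tracking from util/u_range.h that the hunks above already use; the function name is made up:

#include "pipe/p_defines.h"
#include "util/u_range.h"

/* Sketch: promote a write mapping to unsynchronized when the mapped
 * bytes were never uploaded, so nothing on the GPU can read them yet. */
static unsigned
infer_unsynchronized(struct util_range *valid_range,
                     unsigned usage, unsigned x, unsigned width)
{
   if ((usage & PIPE_MAP_WRITE) &&
       !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
       !util_ranges_intersect(valid_range, x, x + width))
      usage |= PIPE_MAP_UNSYNCHRONIZED;

   return usage;
}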
diff --git a/src/gallium/drivers/radeonsi/si_debug.c b/src/gallium/drivers/radeonsi/si_debug.c
index e5ae5e64d3c..ea535805c22 100644
--- a/src/gallium/drivers/radeonsi/si_debug.c
+++ b/src/gallium/drivers/radeonsi/si_debug.c
@@ -109,7 +109,7 @@ static void si_dump_shader(struct si_screen *sscreen, struct si_shader *shader,
const char *mapped = sscreen->ws->buffer_map(
shader->bo->buf, NULL,
- PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_READ | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ | RADEON_TRANSFER_TEMPORARY);
for (unsigned i = 0; i < size; i += 4) {
fprintf(f, " %4x: %08x\n", i, *(uint32_t *)(mapped + i));
@@ -403,7 +403,7 @@ static void si_log_chunk_type_cs_print(void *data, FILE *f)
* If the GPU is hung, there is no point in waiting for it.
*/
uint32_t *map = ctx->ws->buffer_map(scs->trace_buf->buf, NULL,
- PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_READ);
+ PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ);
if (map) {
last_trace_id = map[0];
last_compute_trace_id = map[1];
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index 0ab28c40c8f..ce9230bd647 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -222,7 +222,7 @@ struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
static bool si_fine_fence_signaled(struct radeon_winsys *rws, const struct si_fine_fence *fine)
{
char *map =
- rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
+ rws->buffer_map(fine->buf->buf, NULL, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
if (!map)
return false;
diff --git a/src/gallium/drivers/radeonsi/si_perfcounter.c b/src/gallium/drivers/radeonsi/si_perfcounter.c
index be04536ad2a..ca7a80414c7 100644
--- a/src/gallium/drivers/radeonsi/si_perfcounter.c
+++ b/src/gallium/drivers/radeonsi/si_perfcounter.c
@@ -1053,7 +1053,7 @@ static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *squ
memset(result, 0, sizeof(result->batch[0]) * query->num_counters);
for (struct si_query_buffer *qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
- unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
+ unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
unsigned results_base = 0;
void *map;
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index 083449f340d..e044f295e56 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -525,7 +525,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
goto fail;
sctx->border_color_map =
- ws->buffer_map(sctx->border_color_buffer->buf, NULL, PIPE_TRANSFER_WRITE);
+ ws->buffer_map(sctx->border_color_buffer->buf, NULL, PIPE_MAP_WRITE);
if (!sctx->border_color_map)
goto fail;
diff --git a/src/gallium/drivers/radeonsi/si_query.c b/src/gallium/drivers/radeonsi/si_query.c
index 50186b6e28d..9b6659f86e8 100644
--- a/src/gallium/drivers/radeonsi/si_query.c
+++ b/src/gallium/drivers/radeonsi/si_query.c
@@ -669,7 +669,7 @@ static bool si_query_hw_prepare_buffer(struct si_context *sctx, struct si_query_
/* The caller ensures that the buffer is currently unused by the GPU. */
uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
+ PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
if (!results)
return false;
@@ -1408,7 +1408,7 @@ bool si_query_hw_get_result(struct si_context *sctx, struct si_query *squery, bo
query->ops->clear_result(query, result);
for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
- unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
+ unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
unsigned results_base = 0;
void *map;
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index a83289ff1dd..7a5de0b8575 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -899,7 +899,7 @@ bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader
u.rx_va = shader->bo->gpu_address;
u.rx_ptr = sscreen->ws->buffer_map(
shader->bo->buf, NULL,
- PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
if (!u.rx_ptr)
return false;
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 89f936e9fd7..caedda80c03 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -4727,7 +4727,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, unsigned count,
return NULL;
}
void *map =
- sscreen->ws->buffer_map(v->instance_divisor_factor_buffer->buf, NULL, PIPE_TRANSFER_WRITE);
+ sscreen->ws->buffer_map(v->instance_divisor_factor_buffer->buf, NULL, PIPE_MAP_WRITE);
memcpy(map, divisor_factors, num_divisors * sizeof(divisor_factors[0]));
}
return v;
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index fa203fa6116..20abe1dc6bb 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -1425,7 +1425,7 @@ static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_d
if (indirect->indirect_draw_count) {
data = pipe_buffer_map_range(&sctx->b, indirect->indirect_draw_count,
indirect->indirect_draw_count_offset, sizeof(unsigned),
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
indirect_count = *data;
@@ -1441,7 +1441,7 @@ static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_d
map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
data = pipe_buffer_map_range(&sctx->b, indirect->buffer, indirect->offset, map_size,
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
begin = UINT_MAX;
end = 0;
diff --git a/src/gallium/drivers/radeonsi/si_test_dma.c b/src/gallium/drivers/radeonsi/si_test_dma.c
index 7b4ecedbcba..70e811db853 100644
--- a/src/gallium/drivers/radeonsi/si_test_dma.c
+++ b/src/gallium/drivers/radeonsi/si_test_dma.c
@@ -58,7 +58,7 @@ static void set_random_pixels(struct pipe_context *ctx, struct pipe_resource *te
uint8_t *map;
int x, y, z;
- map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE, 0, 0, 0, tex->width0, tex->height0,
+ map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_WRITE, 0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
@@ -89,7 +89,7 @@ static bool compare_textures(struct pipe_context *ctx, struct pipe_resource *tex
bool pass = true;
unsigned stride = util_format_get_stride(tex->format, tex->width0);
- map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ, 0, 0, 0, tex->width0, tex->height0,
+ map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_READ, 0, 0, 0, tex->width0, tex->height0,
tex->array_size, &t);
assert(map);
diff --git a/src/gallium/drivers/radeonsi/si_texture.c b/src/gallium/drivers/radeonsi/si_texture.c
index b66f8eba6d2..26fc22af4e1 100644
--- a/src/gallium/drivers/radeonsi/si_texture.c
+++ b/src/gallium/drivers/radeonsi/si_texture.c
@@ -1171,7 +1171,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
struct si_resource *buf = si_aligned_buffer_create(screen, 0, PIPE_USAGE_STREAM,
dcc_retile_map_size,
sscreen->info.tcc_cache_line_size);
- void *map = sscreen->ws->buffer_map(buf->buf, NULL, PIPE_TRANSFER_WRITE);
+ void *map = sscreen->ws->buffer_map(buf->buf, NULL, PIPE_MAP_WRITE);
/* Upload the retile map into the staging buffer. */
memcpy(map, tex->surface.u.gfx9.dcc_retile_map, dcc_retile_map_size);
@@ -1593,7 +1593,7 @@ static bool si_can_invalidate_texture(struct si_screen *sscreen, struct si_textu
unsigned transfer_usage, const struct pipe_box *box)
{
return !tex->buffer.b.is_shared && !(tex->surface.flags & RADEON_SURF_IMPORTED) &&
- !(transfer_usage & PIPE_TRANSFER_READ) && tex->buffer.b.b.last_level == 0 &&
+ !(transfer_usage & PIPE_MAP_READ) && tex->buffer.b.b.last_level == 0 &&
util_texrange_covers_whole_level(&tex->buffer.b.b, 0, box->x, box->y, box->z, box->width,
box->height, box->depth);
}
@@ -1658,7 +1658,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
*/
if (!tex->surface.is_linear || (tex->buffer.flags & RADEON_FLAG_ENCRYPTED))
use_staging_texture = true;
- else if (usage & PIPE_TRANSFER_READ)
+ else if (usage & PIPE_MAP_READ)
use_staging_texture =
tex->buffer.domains & RADEON_DOMAIN_VRAM || tex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
@@ -1683,7 +1683,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
if (use_staging_texture) {
struct pipe_resource resource;
struct si_texture *staging;
- unsigned bo_usage = usage & PIPE_TRANSFER_READ ? PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
+ unsigned bo_usage = usage & PIPE_MAP_READ ? PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
unsigned bo_flags = SI_RESOURCE_FLAG_FORCE_LINEAR;
/* The pixel shader has a bad access pattern for linear textures.
@@ -1696,7 +1696,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
!tex->is_depth &&
!util_format_is_compressed(texture->format) &&
/* Texture uploads with DCC use the pixel shader to blit */
- (!(usage & PIPE_TRANSFER_WRITE) || !vi_dcc_enabled(tex, level)))
+ (!(usage & PIPE_MAP_WRITE) || !vi_dcc_enabled(tex, level)))
bo_flags |= SI_RESOURCE_FLAG_UNCACHED;
si_init_temp_resource_from_box(&resource, texture, box, level, bo_usage,
@@ -1721,10 +1721,10 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
si_texture_get_offset(sctx->screen, staging, 0, NULL, &trans->b.b.stride,
&trans->b.b.layer_stride);
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
si_copy_to_staging_texture(ctx, trans);
else
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
buf = trans->staging;
} else {
@@ -1769,7 +1769,7 @@ static void si_texture_transfer_unmap(struct pipe_context *ctx, struct pipe_tran
sctx->ws->buffer_unmap(buf->buf);
}
- if ((transfer->usage & PIPE_TRANSFER_WRITE) && stransfer->staging)
+ if ((transfer->usage & PIPE_MAP_WRITE) && stransfer->staging)
si_copy_from_staging_texture(ctx, stransfer);
if (stransfer->staging) {
diff --git a/src/gallium/drivers/softpipe/sp_compute.c b/src/gallium/drivers/softpipe/sp_compute.c
index 31252255bd4..7e9a9455e90 100644
--- a/src/gallium/drivers/softpipe/sp_compute.c
+++ b/src/gallium/drivers/softpipe/sp_compute.c
@@ -152,7 +152,7 @@ fill_grid_size(struct pipe_context *context,
params = pipe_buffer_map_range(context, info->indirect,
info->indirect_offset,
3 * sizeof(uint32_t),
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&transfer);
if (!transfer)
diff --git a/src/gallium/drivers/softpipe/sp_state_sampler.c b/src/gallium/drivers/softpipe/sp_state_sampler.c
index 2755f26bea2..70d34ed790c 100644
--- a/src/gallium/drivers/softpipe/sp_state_sampler.c
+++ b/src/gallium/drivers/softpipe/sp_state_sampler.c
@@ -254,7 +254,7 @@ prepare_shader_sampling(
struct softpipe_screen *screen = softpipe_screen(tex->screen);
struct sw_winsys *winsys = screen->winsys;
addr = winsys->displaytarget_map(winsys, sp_tex->dt,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
row_stride[0] = sp_tex->stride[0];
img_stride[0] = sp_tex->img_stride[0];
mip_offsets[0] = 0;
diff --git a/src/gallium/drivers/softpipe/sp_tex_tile_cache.c b/src/gallium/drivers/softpipe/sp_tex_tile_cache.c
index 18b0331bcea..7a1009a8c9c 100644
--- a/src/gallium/drivers/softpipe/sp_tex_tile_cache.c
+++ b/src/gallium/drivers/softpipe/sp_tex_tile_cache.c
@@ -249,7 +249,7 @@ sp_find_cached_tile_tex(struct softpipe_tex_tile_cache *tc,
pipe_transfer_map(tc->pipe, tc->texture,
addr.bits.level,
layer,
- PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED,
0, 0, width, height, &tc->tex_trans);
tc->tex_level = addr.bits.level;
diff --git a/src/gallium/drivers/softpipe/sp_texture.c b/src/gallium/drivers/softpipe/sp_texture.c
index c9a22a97891..b4624f50e5d 100644
--- a/src/gallium/drivers/softpipe/sp_texture.c
+++ b/src/gallium/drivers/softpipe/sp_texture.c
@@ -348,7 +348,7 @@ softpipe_surface_destroy(struct pipe_context *pipe,
* \param pipe rendering context
* \param resource the resource to transfer in/out of
* \param level which mipmap level
- * \param usage bitmask of PIPE_TRANSFER_x flags
+ * \param usage bitmask of PIPE_MAP_x flags
* \param box the 1D/2D/3D region of interest
*/
static void *
@@ -394,9 +394,9 @@ softpipe_transfer_map(struct pipe_context *pipe,
* Transfers, like other pipe operations, must happen in order, so flush the
* context if necessary.
*/
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- boolean read_only = !(usage & PIPE_TRANSFER_WRITE);
- boolean do_not_block = !!(usage & PIPE_TRANSFER_DONTBLOCK);
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
+ boolean read_only = !(usage & PIPE_MAP_WRITE);
+ boolean do_not_block = !!(usage & PIPE_MAP_DONTBLOCK);
if (!softpipe_flush_resource(pipe, resource,
level, box->depth > 1 ? -1 : box->z,
0, /* flush_flags */
@@ -468,7 +468,7 @@ softpipe_transfer_unmap(struct pipe_context *pipe,
winsys->displaytarget_unmap(winsys, spr->dt);
}
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ if (transfer->usage & PIPE_MAP_WRITE) {
/* Mark the texture as dirty to expire the tile caches. */
spr->timestamp++;
}
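softpipe_transfer_map() above reduces the renamed flags to two booleans before deciding whether to synchronize: only a missing PIPE_MAP_UNSYNCHRONIZED forces ordering with in-flight rendering, the absence of PIPE_MAP_WRITE marks the access as read-only, and PIPE_MAP_DONTBLOCK turns a would-be stall into a NULL return. A condensed sketch of that decision, with the driver's real flush path (softpipe_flush_resource() above) replaced by a hypothetical callback:

#include <stdbool.h>
#include <stddef.h>
#include "pipe/p_defines.h"

/* Sketch of the synchronization step in a transfer_map implementation.
 * wait_for_resource_idle() stands in for the driver's flush/wait path
 * and returns false when it could not finish without blocking. */
static void *
map_with_sync(unsigned usage, void *cpu_storage,
              bool (*wait_for_resource_idle)(bool read_only, bool may_block))
{
   if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
      bool read_only = !(usage & PIPE_MAP_WRITE);
      bool may_block = !(usage & PIPE_MAP_DONTBLOCK);

      /* Either the resource becomes idle, or a DONTBLOCK map fails. */
      if (!wait_for_resource_idle(read_only, may_block))
         return NULL;
   }

   return cpu_storage;
}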
diff --git a/src/gallium/drivers/softpipe/sp_tile_cache.c b/src/gallium/drivers/softpipe/sp_tile_cache.c
index 7617add03f6..54814ca4fc2 100644
--- a/src/gallium/drivers/softpipe/sp_tile_cache.c
+++ b/src/gallium/drivers/softpipe/sp_tile_cache.c
@@ -202,8 +202,8 @@ sp_tile_cache_set_surface(struct softpipe_tile_cache *tc,
for (i = 0; i < tc->num_maps; i++) {
tc->transfer_map[i] = pipe_transfer_map(pipe, ps->texture,
ps->u.tex.level, ps->u.tex.first_layer + i,
- PIPE_TRANSFER_READ_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_READ_WRITE |
+ PIPE_MAP_UNSYNCHRONIZED,
0, 0, ps->width, ps->height,
&tc->transfer[i]);
}
diff --git a/src/gallium/drivers/svga/svga_draw_arrays.c b/src/gallium/drivers/svga/svga_draw_arrays.c
index af27e038bc8..30212fae080 100644
--- a/src/gallium/drivers/svga/svga_draw_arrays.c
+++ b/src/gallium/drivers/svga/svga_draw_arrays.c
@@ -56,7 +56,7 @@ generate_indices(struct svga_hwtnl *hwtnl,
if (!dst)
goto fail;
- dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &transfer);
+ dst_map = pipe_buffer_map(pipe, dst, PIPE_MAP_WRITE, &transfer);
if (!dst_map)
goto fail;
diff --git a/src/gallium/drivers/svga/svga_draw_elements.c b/src/gallium/drivers/svga/svga_draw_elements.c
index dd977b1bc2d..b17e301793b 100644
--- a/src/gallium/drivers/svga/svga_draw_elements.c
+++ b/src/gallium/drivers/svga/svga_draw_elements.c
@@ -115,14 +115,14 @@ translate_indices(struct svga_hwtnl *hwtnl,
if (!dst)
goto fail;
- dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &dst_transfer);
+ dst_map = pipe_buffer_map(pipe, dst, PIPE_MAP_WRITE, &dst_transfer);
if (!dst_map)
goto fail;
*out_offset = 0;
src_map = pipe_buffer_map(pipe, info->index.resource,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_READ |
+ PIPE_MAP_UNSYNCHRONIZED,
&src_transfer);
if (!src_map)
goto fail;
diff --git a/src/gallium/drivers/svga/svga_pipe_query.c b/src/gallium/drivers/svga/svga_pipe_query.c
index 77be3692ba0..a454ff7a132 100644
--- a/src/gallium/drivers/svga/svga_pipe_query.c
+++ b/src/gallium/drivers/svga/svga_pipe_query.c
@@ -101,7 +101,7 @@ define_query_vgpu9(struct svga_context *svga,
return PIPE_ERROR_OUT_OF_MEMORY;
sq->queryResult = (SVGA3dQueryResult *)
- sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
+ sws->buffer_map(sws, sq->hwbuf, PIPE_MAP_WRITE);
if (!sq->queryResult) {
sws->buffer_destroy(sws, sq->hwbuf);
return PIPE_ERROR_OUT_OF_MEMORY;
diff --git a/src/gallium/drivers/svga/svga_pipe_streamout.c b/src/gallium/drivers/svga/svga_pipe_streamout.c
index dcf54f1ce49..9e487ad52f9 100644
--- a/src/gallium/drivers/svga/svga_pipe_streamout.c
+++ b/src/gallium/drivers/svga/svga_pipe_streamout.c
@@ -92,7 +92,7 @@ svga_define_stream_output(struct svga_context *svga,
bufSize);
if (!declBuf)
return PIPE_ERROR;
- map = sws->buffer_map(sws, declBuf, PIPE_TRANSFER_WRITE);
+ map = sws->buffer_map(sws, declBuf, PIPE_MAP_WRITE);
if (!map) {
sws->buffer_destroy(sws, declBuf);
return PIPE_ERROR;
diff --git a/src/gallium/drivers/svga/svga_resource_buffer.c b/src/gallium/drivers/svga/svga_resource_buffer.c
index 80f91a9ef65..f36f0c8fb9f 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer.c
@@ -122,14 +122,14 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
transfer->stride = 0;
transfer->layer_stride = 0;
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
/* If we write to the buffer for any reason, free any saved translated
* vertices.
*/
pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);
}
- if ((usage & PIPE_TRANSFER_READ) && sbuf->dirty &&
+ if ((usage & PIPE_MAP_READ) && sbuf->dirty &&
!sbuf->key.coherent && !svga->swc->force_coherent) {
/* Host-side buffers can only be dirtied with vgpu10 features
@@ -157,8 +157,8 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
sbuf->dirty = FALSE;
}
- if (usage & PIPE_TRANSFER_WRITE) {
- if ((usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
+ if (usage & PIPE_MAP_WRITE) {
+ if ((usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
!(resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)) {
/*
* Flush any pending primitives, finish writing any pending DMA
@@ -175,7 +175,7 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
* Instead of flushing the context command buffer, simply discard
* the current hwbuf, and start a new one.
* With GB objects, the map operation takes care of this
- * if passed the PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE flag,
+ * if passed the PIPE_MAP_DISCARD_WHOLE_RESOURCE flag,
* and the old backing store is busy.
*/
@@ -187,7 +187,7 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
sbuf->dma.flags.discard = TRUE;
}
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ if (usage & PIPE_MAP_UNSYNCHRONIZED) {
if (!sbuf->map.num_ranges) {
/*
* No pending ranges to upload so far, so we can tell the host to
@@ -223,7 +223,7 @@ svga_buffer_transfer_map(struct pipe_context *pipe,
* without having to do a DMA download from the host.
*/
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
/*
* Flushing the command buffer here will most likely cause
* the map of the hwbuf below to block, so preemptively
@@ -316,8 +316,8 @@ svga_buffer_transfer_flush_region(struct pipe_context *pipe,
unsigned offset = transfer->box.x + box->x;
unsigned length = box->width;
- assert(transfer->usage & PIPE_TRANSFER_WRITE);
- assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
+ assert(transfer->usage & PIPE_MAP_WRITE);
+ assert(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT);
if (!(svga->swc->force_coherent || sbuf->key.coherent) || sbuf->swbuf) {
mtx_lock(&ss->swc_mutex);
@@ -352,8 +352,8 @@ svga_buffer_transfer_unmap(struct pipe_context *pipe,
svga_buffer_hw_storage_unmap(svga, sbuf);
}
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
- if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ if (transfer->usage & PIPE_MAP_WRITE) {
+ if (!(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
/*
* Mapped range not flushed explicitly, so flush the whole buffer,
* and tell the host to discard the contents when processing the DMA
diff --git a/src/gallium/drivers/svga/svga_resource_buffer.h b/src/gallium/drivers/svga/svga_resource_buffer.h
index 09648d2ec09..131b2ebf940 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer.h
+++ b/src/gallium/drivers/svga/svga_resource_buffer.h
@@ -278,7 +278,7 @@ svga_buffer_has_hw_storage(struct svga_buffer *sbuf)
/**
* Map the hardware storage of a buffer.
- * \param flags bitmask of PIPE_TRANSFER_* flags
+ * \param flags bitmask of PIPE_MAP_* flags
*/
static inline void *
svga_buffer_hw_storage_map(struct svga_context *svga,
@@ -295,7 +295,7 @@ svga_buffer_hw_storage_map(struct svga_context *svga,
void *map;
if (swc->force_coherent) {
- flags |= PIPE_TRANSFER_PERSISTENT | PIPE_TRANSFER_COHERENT;
+ flags |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
}
map = swc->surface_map(swc, sbuf->handle, flags, retry, &rebind);
if (map && rebind) {
diff --git a/src/gallium/drivers/svga/svga_resource_buffer_upload.c b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
index fecc1ae54d0..b092ae270c6 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer_upload.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
@@ -789,7 +789,7 @@ svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
* Note that it is not this function's task to prevent overlapping
* ranges, as the GMR was already given so it is too late to do
* anything. If the ranges overlap here it must surely be because
- * PIPE_TRANSFER_UNSYNCHRONIZED was set.
+ * PIPE_MAP_UNSYNCHRONIZED was set.
*/
sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
@@ -869,7 +869,7 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
return ret;
mtx_lock(&ss->swc_mutex);
- map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
+ map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_MAP_WRITE, &retry);
assert(map);
assert(!retry);
if (!map) {
@@ -955,8 +955,8 @@ svga_buffer_upload_piecewise(struct svga_screen *ss,
offset, offset + size);
map = sws->buffer_map(sws, hwbuf,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE);
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE);
assert(map);
if (map) {
memcpy(map, (const char *) sbuf->swbuf + offset, size);
diff --git a/src/gallium/drivers/svga/svga_resource_texture.c b/src/gallium/drivers/svga/svga_resource_texture.c
index 137d15bcb4f..5ca6772f462 100644
--- a/src/gallium/drivers/svga/svga_resource_texture.c
+++ b/src/gallium/drivers/svga/svga_resource_texture.c
@@ -151,13 +151,13 @@ svga_transfer_dma(struct svga_context *svga,
sw = (uint8_t *) st->swbuf + offset;
if (transfer == SVGA3D_WRITE_HOST_VRAM) {
- unsigned usage = PIPE_TRANSFER_WRITE;
+ unsigned usage = PIPE_MAP_WRITE;
/* Wait for the previous DMAs to complete */
/* TODO: keep one DMA (at half the size) in the background */
if (y) {
svga_context_flush(svga, NULL);
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
hw = sws->buffer_map(sws, st->hwbuf, usage);
@@ -183,7 +183,7 @@ svga_transfer_dma(struct svga_context *svga,
svga_context_flush(svga, &fence);
sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
- hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
+ hw = sws->buffer_map(sws, st->hwbuf, PIPE_MAP_READ);
assert(hw);
if (hw) {
memcpy(sw, hw, length);
@@ -255,11 +255,11 @@ svga_texture_destroy(struct pipe_screen *screen,
static inline boolean
need_tex_readback(struct svga_transfer *st)
{
- if (st->base.usage & PIPE_TRANSFER_READ)
+ if (st->base.usage & PIPE_MAP_READ)
return TRUE;
- if ((st->base.usage & PIPE_TRANSFER_WRITE) &&
- ((st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
+ if ((st->base.usage & PIPE_MAP_WRITE) &&
+ ((st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) == 0)) {
return svga_was_texture_rendered_to(svga_texture(st->base.resource),
st->slice, st->base.level);
}
@@ -347,7 +347,7 @@ svga_texture_transfer_map_dma(struct svga_context *svga,
}
}
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
SVGA3dSurfaceDMAFlags flags;
memset(&flags, 0, sizeof flags);
svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
@@ -396,14 +396,14 @@ svga_texture_transfer_map_direct(struct svga_context *svga,
svga_context_flush(svga, NULL);
}
/*
- * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
+ * Note: if PIPE_MAP_DISCARD_WHOLE_RESOURCE were specified
* we could potentially clear the flag for all faces/layers/mips.
*/
svga_clear_texture_rendered_to(tex, st->slice, level);
}
else {
- assert(usage & PIPE_TRANSFER_WRITE);
- if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
+ assert(usage & PIPE_MAP_WRITE);
+ if ((usage & PIPE_MAP_UNSYNCHRONIZED) == 0) {
if (svga_is_texture_dirty(tex, st->slice, level)) {
/*
* do a surface flush if the subresource has been modified
@@ -439,7 +439,7 @@ svga_texture_transfer_map_direct(struct svga_context *svga,
struct svga_winsys_context *swc = svga->swc;
if (swc->force_coherent) {
- usage |= PIPE_TRANSFER_PERSISTENT | PIPE_TRANSFER_COHERENT;
+ usage |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
}
map = SVGA_TRY_MAP(svga->swc->surface_map
@@ -529,7 +529,7 @@ svga_texture_transfer_map(struct pipe_context *pipe,
struct svga_transfer *st;
struct svga_winsys_surface *surf = tex->handle;
boolean use_direct_map = svga_have_gb_objects(svga) &&
- (!svga_have_gb_dma(svga) || (usage & PIPE_TRANSFER_WRITE));
+ (!svga_have_gb_dma(svga) || (usage & PIPE_MAP_WRITE));
void *map = NULL;
int64_t begin = svga_get_time(svga);
@@ -539,7 +539,7 @@ svga_texture_transfer_map(struct pipe_context *pipe,
goto done;
/* We can't map texture storage directly unless we have GB objects */
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
+ if (usage & PIPE_MAP_DIRECTLY) {
if (svga_have_gb_objects(svga))
use_direct_map = TRUE;
else
@@ -611,7 +611,7 @@ svga_texture_transfer_map(struct pipe_context *pipe,
}
else {
boolean can_use_upload = tex->can_use_upload &&
- !(st->base.usage & PIPE_TRANSFER_READ);
+ !(st->base.usage & PIPE_MAP_READ);
boolean was_rendered_to =
svga_was_texture_rendered_to(svga_texture(texture),
st->slice, st->base.level);
@@ -630,7 +630,7 @@ svga_texture_transfer_map(struct pipe_context *pipe,
/* First try directly map to the GB surface */
if (can_use_upload)
- st->base.usage |= PIPE_TRANSFER_DONTBLOCK;
+ st->base.usage |= PIPE_MAP_DONTBLOCK;
map = svga_texture_transfer_map_direct(svga, st);
st->base.usage = orig_usage;
@@ -656,7 +656,7 @@ svga_texture_transfer_map(struct pipe_context *pipe,
else {
*ptransfer = &st->base;
svga->hud.num_textures_mapped++;
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
/* record texture upload for HUD */
svga->hud.num_bytes_uploaded +=
st->base.layer_stride * st->box.d;
@@ -734,7 +734,7 @@ svga_texture_transfer_unmap_dma(struct svga_context *svga,
if (!st->swbuf)
sws->buffer_unmap(sws, st->hwbuf);
- if (st->base.usage & PIPE_TRANSFER_WRITE) {
+ if (st->base.usage & PIPE_MAP_WRITE) {
/* Use DMA to transfer texture data */
SVGA3dSurfaceDMAFlags flags;
struct pipe_resource *texture = st->base.resource;
@@ -742,10 +742,10 @@ svga_texture_transfer_unmap_dma(struct svga_context *svga,
memset(&flags, 0, sizeof flags);
- if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
flags.discard = TRUE;
}
- if (st->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ if (st->base.usage & PIPE_MAP_UNSYNCHRONIZED) {
flags.unsynchronized = TRUE;
}
@@ -771,7 +771,7 @@ svga_texture_transfer_unmap_direct(struct svga_context *svga,
svga_texture_surface_unmap(svga, transfer);
/* Now send an update command to update the content in the backend. */
- if (st->base.usage & PIPE_TRANSFER_WRITE) {
+ if (st->base.usage & PIPE_MAP_WRITE) {
struct svga_winsys_surface *surf = tex->handle;
assert(svga_have_gb_objects(svga));
@@ -840,7 +840,7 @@ svga_texture_transfer_unmap(struct pipe_context *pipe,
svga_texture_transfer_unmap_direct(svga, st);
}
- if (st->base.usage & PIPE_TRANSFER_WRITE) {
+ if (st->base.usage & PIPE_MAP_WRITE) {
svga->hud.num_resource_updates++;
/* Mark the texture level as dirty */
diff --git a/src/gallium/drivers/svga/svga_state_constants.c b/src/gallium/drivers/svga/svga_state_constants.c
index 7e245baf22c..26a9cc7efe6 100644
--- a/src/gallium/drivers/svga/svga_state_constants.c
+++ b/src/gallium/drivers/svga/svga_state_constants.c
@@ -515,7 +515,7 @@ emit_consts_vgpu9(struct svga_context *svga, enum pipe_shader_type shader)
/* emit user-provided constants */
data = (const float (*)[4])
pipe_buffer_map(&svga->pipe, svga->curr.constbufs[shader][0].buffer,
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
if (!data) {
return PIPE_ERROR_OUT_OF_MEMORY;
}
@@ -603,7 +603,7 @@ emit_constbuf(struct svga_context *svga,
src_map = pipe_buffer_map_range(&svga->pipe,
(struct pipe_resource *)buffer,
buffer_offset, buffer_size,
- PIPE_TRANSFER_READ, &src_transfer);
+ PIPE_MAP_READ, &src_transfer);
assert(src_map);
if (!src_map) {
return PIPE_ERROR_OUT_OF_MEMORY;
diff --git a/src/gallium/drivers/svga/svga_swtnl_backend.c b/src/gallium/drivers/svga/svga_swtnl_backend.c
index 5887a9ad7d7..4f0319d9bb0 100644
--- a/src/gallium/drivers/svga/svga_swtnl_backend.c
+++ b/src/gallium/drivers/svga/svga_swtnl_backend.c
@@ -138,10 +138,10 @@ svga_vbuf_render_map_vertices(struct vbuf_render *render)
if (svga_render->vbuf) {
char *ptr = (char*)pipe_buffer_map(&svga->pipe,
svga_render->vbuf,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_FLUSH_EXPLICIT |
- PIPE_TRANSFER_DISCARD_RANGE |
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_FLUSH_EXPLICIT |
+ PIPE_MAP_DISCARD_RANGE |
+ PIPE_MAP_UNSYNCHRONIZED,
&svga_render->vbuf_transfer);
if (ptr) {
svga_render->vbuf_ptr = ptr;
diff --git a/src/gallium/drivers/svga/svga_swtnl_draw.c b/src/gallium/drivers/svga/svga_swtnl_draw.c
index b719dd400c6..e3f6fdfd47d 100644
--- a/src/gallium/drivers/svga/svga_swtnl_draw.c
+++ b/src/gallium/drivers/svga/svga_swtnl_draw.c
@@ -70,8 +70,8 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
if (svga->curr.vb[i].buffer.resource) {
map = pipe_buffer_map(&svga->pipe,
svga->curr.vb[i].buffer.resource,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_READ |
+ PIPE_MAP_UNSYNCHRONIZED,
&vb_transfer[i]);
draw_set_mapped_vertex_buffer(draw, i, map, ~0);
@@ -86,8 +86,8 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
map = (ubyte *) info->index.user;
} else {
map = pipe_buffer_map(&svga->pipe, info->index.resource,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
+ PIPE_MAP_READ |
+ PIPE_MAP_UNSYNCHRONIZED, &ib_transfer);
}
draw_set_indexes(draw,
(const ubyte *) map,
@@ -102,8 +102,8 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
map = pipe_buffer_map(&svga->pipe,
svga->curr.constbufs[PIPE_SHADER_VERTEX][i].buffer,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED,
+ PIPE_MAP_READ |
+ PIPE_MAP_UNSYNCHRONIZED,
&cb_transfer[i]);
assert(map);
draw_set_mapped_constant_buffer(
diff --git a/src/gallium/drivers/svga/svga_winsys.h b/src/gallium/drivers/svga/svga_winsys.h
index 55534953d0c..e0ed9886d6c 100644
--- a/src/gallium/drivers/svga/svga_winsys.h
+++ b/src/gallium/drivers/svga/svga_winsys.h
@@ -400,13 +400,13 @@ struct svga_winsys_context
* Map a guest-backed surface.
* \param swc The winsys context
* \param surface The surface to map
- * \param flags bitmaks of PIPE_TRANSFER_x flags
+ * \param flags bitmask of PIPE_MAP_x flags
* \param retry Whether to flush and retry the map
* \param rebind Whether to issue an immediate rebind and flush.
*
* The surface_map() member is allowed to fail due to a
* shortage of command buffer space, if the
- * PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE bit is set in flags.
+ * PIPE_MAP_DISCARD_WHOLE_RESOURCE bit is set in flags.
* In that case, the caller must flush the current command
* buffer and reissue the map.
*/
@@ -623,7 +623,7 @@ struct svga_winsys_screen
/**
* Map the entire data store of a buffer object into the client's address.
- * usage is a bitmask of PIPE_TRANSFER_*
+ * usage is a bitmask of PIPE_MAP_*
*/
void *
(*buffer_map)( struct svga_winsys_screen *sws,
diff --git a/src/gallium/drivers/swr/swr_context.cpp b/src/gallium/drivers/swr/swr_context.cpp
index e552bc6bd0e..e1b1712f22e 100644
--- a/src/gallium/drivers/swr/swr_context.cpp
+++ b/src/gallium/drivers/swr/swr_context.cpp
@@ -111,10 +111,10 @@ swr_transfer_map(struct pipe_context *pipe,
* and nothing needs to be done at unmap. */
swr_store_dirty_resource(pipe, resource, SWR_TILE_INVALID);
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* If resource is in use, finish fence before mapping.
* Unless requested not to block, then if not done return NULL map */
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
if (swr_is_fence_pending(screen->flush_fence))
return NULL;
} else {
@@ -143,7 +143,7 @@ swr_transfer_map(struct pipe_context *pipe,
/* if we're mapping the depth/stencil, copy in stencil for the section
* being read in
*/
- if (usage & PIPE_TRANSFER_READ && spr->has_depth && spr->has_stencil) {
+ if (usage & PIPE_MAP_READ && spr->has_depth && spr->has_stencil) {
size_t zbase, sbase;
for (int z = box->z; z < box->z + box->depth; z++) {
zbase = (z * spr->swr.qpitch + box->y) * spr->swr.pitch +
@@ -181,7 +181,7 @@ swr_transfer_flush_region(struct pipe_context *pipe,
const struct pipe_box *flush_box)
{
assert(transfer->resource);
- assert(transfer->usage & PIPE_TRANSFER_WRITE);
+ assert(transfer->usage & PIPE_MAP_WRITE);
struct swr_resource *spr = swr_resource(transfer->resource);
if (!spr->has_depth || !spr->has_stencil)
@@ -222,8 +222,8 @@ swr_transfer_unmap(struct pipe_context *pipe, struct pipe_transfer *transfer)
/* if we're mapping the depth/stencil, copy in stencil for the section
* being written out
*/
- if (transfer->usage & PIPE_TRANSFER_WRITE &&
- !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) &&
+ if (transfer->usage & PIPE_MAP_WRITE &&
+ !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT) &&
spr->has_depth && spr->has_stencil) {
struct pipe_box box;
u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height,
diff --git a/src/gallium/drivers/swr/swr_screen.cpp b/src/gallium/drivers/swr/swr_screen.cpp
index 1d0bedddb3c..4a3a1eea7c9 100644
--- a/src/gallium/drivers/swr/swr_screen.cpp
+++ b/src/gallium/drivers/swr/swr_screen.cpp
@@ -1013,7 +1013,7 @@ swr_flush_frontbuffer(struct pipe_screen *p_screen,
SWR_SURFACE_STATE *resolve = &swr_resource(resolve_target)->swr;
void *map = winsys->displaytarget_map(winsys, spr->display_target,
- PIPE_TRANSFER_WRITE);
+ PIPE_MAP_WRITE);
memcpy(map, (void*)(resolve->xpBaseAddress), resolve->pitch * resolve->height);
winsys->displaytarget_unmap(winsys, spr->display_target);
}
diff --git a/src/gallium/drivers/v3d/v3d_resource.c b/src/gallium/drivers/v3d/v3d_resource.c
index c7b4790f8be..c3990cd05e1 100644
--- a/src/gallium/drivers/v3d/v3d_resource.c
+++ b/src/gallium/drivers/v3d/v3d_resource.c
@@ -122,7 +122,7 @@ v3d_resource_transfer_unmap(struct pipe_context *pctx,
struct v3d_resource *rsc = v3d_resource(ptrans->resource);
struct v3d_resource_slice *slice = &rsc->slices[ptrans->level];
- if (ptrans->usage & PIPE_TRANSFER_WRITE) {
+ if (ptrans->usage & PIPE_MAP_WRITE) {
for (int z = 0; z < ptrans->box.depth; z++) {
void *dst = rsc->bo->map +
v3d_layer_offset(&rsc->base,
@@ -154,7 +154,7 @@ v3d_map_usage_prep(struct pipe_context *pctx,
struct v3d_context *v3d = v3d_context(pctx);
struct v3d_resource *rsc = v3d_resource(prsc);
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
if (v3d_resource_bo_alloc(rsc)) {
/* If it might be bound as one of our vertex buffers
* or UBOs, make sure we re-emit vertex buffer state
@@ -172,12 +172,12 @@ v3d_map_usage_prep(struct pipe_context *pctx,
V3D_FLUSH_DEFAULT,
false);
}
- } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* If we're writing and the buffer is being used by the CL, we
* have to flush the CL first. If we're only reading, we need
* to flush if the CL has written our buffer.
*/
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
v3d_flush_jobs_reading_resource(v3d, prsc,
V3D_FLUSH_ALWAYS,
false);
@@ -188,7 +188,7 @@ v3d_map_usage_prep(struct pipe_context *pctx,
}
}
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
rsc->writes++;
rsc->initialized_buffers = ~0;
}
@@ -214,8 +214,8 @@ v3d_resource_transfer_map(struct pipe_context *pctx,
/* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
* being mapped.
*/
- if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ if ((usage & PIPE_MAP_DISCARD_RANGE) &&
+ !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
!(prsc->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) &&
prsc->last_level == 0 &&
prsc->width0 == box->width &&
@@ -223,7 +223,7 @@ v3d_resource_transfer_map(struct pipe_context *pctx,
prsc->depth0 == box->depth &&
prsc->array_size == 1 &&
rsc->bo->private) {
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
v3d_map_usage_prep(pctx, prsc, usage);
@@ -247,7 +247,7 @@ v3d_resource_transfer_map(struct pipe_context *pctx,
* need to do syncing stuff here yet.
*/
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ if (usage & PIPE_MAP_UNSYNCHRONIZED)
buf = v3d_bo_map_unsynchronized(rsc->bo);
else
buf = v3d_bo_map(rsc->bo);
@@ -271,7 +271,7 @@ v3d_resource_transfer_map(struct pipe_context *pctx,
/* No direct mappings of tiled, since we need to manually
* tile/untile.
*/
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (usage & PIPE_MAP_DIRECTLY)
return NULL;
ptrans->stride = ptrans->box.width * rsc->cpp;
@@ -279,7 +279,7 @@ v3d_resource_transfer_map(struct pipe_context *pctx,
trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
for (int z = 0; z < ptrans->box.depth; z++) {
void *src = rsc->bo->map +
v3d_layer_offset(&rsc->base,
@@ -336,11 +336,11 @@ v3d_texture_subdata(struct pipe_context *pctx,
* texture. Note that gallium's texture_subdata may be called with
* obvious usage flags missing!
*/
- v3d_map_usage_prep(pctx, prsc, usage | (PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE));
+ v3d_map_usage_prep(pctx, prsc, usage | (PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE));
void *buf;
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ if (usage & PIPE_MAP_UNSYNCHRONIZED)
buf = v3d_bo_map_unsynchronized(rsc->bo);
else
buf = v3d_bo_map(rsc->bo);
diff --git a/src/gallium/drivers/v3d/v3dx_draw.c b/src/gallium/drivers/v3d/v3dx_draw.c
index 9a5da07ec80..968918ab70f 100644
--- a/src/gallium/drivers/v3d/v3dx_draw.c
+++ b/src/gallium/drivers/v3d/v3dx_draw.c
@@ -1497,7 +1497,7 @@ v3d_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
uint32_t *map = pipe_buffer_map_range(pctx, info->indirect,
info->indirect_offset,
3 * sizeof(uint32_t),
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&transfer);
memcpy(v3d->compute_num_workgroups, map, 3 * sizeof(uint32_t));
pipe_buffer_unmap(pctx, transfer);
diff --git a/src/gallium/drivers/vc4/vc4_resource.c b/src/gallium/drivers/vc4/vc4_resource.c
index b01feb0653e..eff757b3a20 100644
--- a/src/gallium/drivers/vc4/vc4_resource.c
+++ b/src/gallium/drivers/vc4/vc4_resource.c
@@ -80,7 +80,7 @@ vc4_resource_transfer_unmap(struct pipe_context *pctx,
struct vc4_resource *rsc = vc4_resource(ptrans->resource);
struct vc4_resource_slice *slice = &rsc->slices[ptrans->level];
- if (ptrans->usage & PIPE_TRANSFER_WRITE) {
+ if (ptrans->usage & PIPE_MAP_WRITE) {
vc4_store_tiled_image(rsc->bo->map + slice->offset +
ptrans->box.z * rsc->cube_map_stride,
slice->stride,
@@ -112,8 +112,8 @@ vc4_resource_transfer_map(struct pipe_context *pctx,
/* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
* being mapped.
*/
- if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ if ((usage & PIPE_MAP_DISCARD_RANGE) &&
+ !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
!(prsc->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) &&
prsc->last_level == 0 &&
prsc->width0 == box->width &&
@@ -121,10 +121,10 @@ vc4_resource_transfer_map(struct pipe_context *pctx,
prsc->depth0 == box->depth &&
prsc->array_size == 1 &&
rsc->bo->private) {
- usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
if (vc4_resource_bo_alloc(rsc)) {
/* If it might be bound as one of our vertex buffers,
* make sure we re-emit vertex buffer state.
@@ -137,18 +137,18 @@ vc4_resource_transfer_map(struct pipe_context *pctx,
*/
vc4_flush_jobs_reading_resource(vc4, prsc);
}
- } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* If we're writing and the buffer is being used by the CL, we
* have to flush the CL first. If we're only reading, we need
* to flush if the CL has written our buffer.
*/
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
vc4_flush_jobs_reading_resource(vc4, prsc);
else
vc4_flush_jobs_writing_resource(vc4, prsc);
}
- if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_MAP_WRITE) {
rsc->writes++;
rsc->initialized_buffers = ~0;
}
@@ -168,7 +168,7 @@ vc4_resource_transfer_map(struct pipe_context *pctx,
ptrans->usage = usage;
ptrans->box = *box;
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ if (usage & PIPE_MAP_UNSYNCHRONIZED)
buf = vc4_bo_map_unsynchronized(rsc->bo);
else
buf = vc4_bo_map(rsc->bo);
@@ -184,7 +184,7 @@ vc4_resource_transfer_map(struct pipe_context *pctx,
/* No direct mappings of tiled, since we need to manually
* tile/untile.
*/
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (usage & PIPE_MAP_DIRECTLY)
return NULL;
if (format == PIPE_FORMAT_ETC1_RGB8) {
@@ -206,7 +206,7 @@ vc4_resource_transfer_map(struct pipe_context *pctx,
trans->map = malloc(ptrans->layer_stride * ptrans->box.depth);
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
vc4_load_tiled_image(trans->map, ptrans->stride,
buf + slice->offset +
ptrans->box.z * rsc->cube_map_stride,
@@ -247,7 +247,7 @@ vc4_texture_subdata(struct pipe_context *pctx,
/* For a direct mapping, we can just take the u_transfer path. */
if (!rsc->tiled ||
box->depth != 1 ||
- (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
+ (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)) {
return u_default_texture_subdata(pctx, prsc, level, usage, box,
data, stride, layer_stride);
}
@@ -256,7 +256,7 @@ vc4_texture_subdata(struct pipe_context *pctx,
* texture.
*/
void *buf;
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ if (usage & PIPE_MAP_UNSYNCHRONIZED)
buf = vc4_bo_map_unsynchronized(rsc->bo);
else
buf = vc4_bo_map(rsc->bo);
@@ -1089,7 +1089,7 @@ vc4_get_shadow_index_buffer(struct pipe_context *pctx,
src = pipe_buffer_map_range(pctx, &orig->base,
offset,
count * 4,
- PIPE_TRANSFER_READ, &src_transfer);
+ PIPE_MAP_READ, &src_transfer);
}
for (int i = 0; i < count; i++) {
diff --git a/src/gallium/drivers/virgl/virgl_buffer.c b/src/gallium/drivers/virgl/virgl_buffer.c
index ba32c29ce2c..061836aa5a1 100644
--- a/src/gallium/drivers/virgl/virgl_buffer.c
+++ b/src/gallium/drivers/virgl/virgl_buffer.c
@@ -34,8 +34,8 @@ static void virgl_buffer_transfer_unmap(struct pipe_context *ctx,
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_transfer *trans = virgl_transfer(transfer);
- if (trans->base.usage & PIPE_TRANSFER_WRITE) {
- if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
+ if (trans->base.usage & PIPE_MAP_WRITE) {
+ if (transfer->usage & PIPE_MAP_FLUSH_EXPLICIT) {
if (trans->range.end <= trans->range.start) {
virgl_resource_destroy_transfer(vctx, trans);
return;
diff --git a/src/gallium/drivers/virgl/virgl_query.c b/src/gallium/drivers/virgl/virgl_query.c
index 8ff9aa5b835..38ede54036a 100644
--- a/src/gallium/drivers/virgl/virgl_query.c
+++ b/src/gallium/drivers/virgl/virgl_query.c
@@ -210,7 +210,7 @@ static bool virgl_get_query_result(struct pipe_context *ctx,
}
host_state = pipe_buffer_map(ctx, &query->buf->u.b,
- PIPE_TRANSFER_READ, &transfer);
+ PIPE_MAP_READ, &transfer);
}
if (query->result_size == 8)
diff --git a/src/gallium/drivers/virgl/virgl_resource.c b/src/gallium/drivers/virgl/virgl_resource.c
index e6d29351660..a75236b7012 100644
--- a/src/gallium/drivers/virgl/virgl_resource.c
+++ b/src/gallium/drivers/virgl/virgl_resource.c
@@ -60,7 +60,7 @@ static bool virgl_res_needs_flush(struct virgl_context *vctx,
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
struct virgl_resource *res = virgl_resource(trans->base.resource);
- if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ if (trans->base.usage & PIPE_MAP_UNSYNCHRONIZED)
return false;
if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
@@ -75,16 +75,16 @@ static bool virgl_res_needs_flush(struct virgl_context *vctx,
* - the content can be discarded
* - the host storage is read-only
*
- * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
- * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
- * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
+ * Note that PIPE_MAP_WRITE without discard bits requires readback.
+ * PIPE_MAP_READ becomes irrelevant. PIPE_MAP_UNSYNCHRONIZED and
+ * PIPE_MAP_FLUSH_EXPLICIT are also irrelevant.
*/
static bool virgl_res_needs_readback(struct virgl_context *vctx,
struct virgl_resource *res,
unsigned usage, unsigned level)
{
- if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
+ if (usage & (PIPE_MAP_DISCARD_RANGE |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE))
return false;
if (res->clean_mask & (1 << level))
@@ -106,7 +106,7 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
bool wait;
/* there is no way to map the host storage currently */
- if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (xfer->base.usage & PIPE_MAP_DIRECTLY)
return VIRGL_TRANSFER_MAP_ERROR;
/* We break the logic down into four steps
@@ -123,12 +123,12 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
/* We need to wait for all cmdbufs, current or previous, that access the
* resource to finish unless synchronization is disabled.
*/
- wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);
+ wait = !(xfer->base.usage & PIPE_MAP_UNSYNCHRONIZED);
/* When the transfer range consists of only uninitialized data, we can
* assume the GPU is not accessing the range and readback is unnecessary.
- * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
- * PIPE_TRANSFER_DISCARD_RANGE are set.
+ * We can proceed as if PIPE_MAP_UNSYNCHRONIZED and
+ * PIPE_MAP_DISCARD_RANGE are set.
*/
if (res->u.b.target == PIPE_BUFFER &&
!util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
@@ -143,19 +143,19 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
* replace its HW resource or use a staging buffer to avoid waiting.
*/
if (wait &&
- (xfer->base.usage & (PIPE_TRANSFER_DISCARD_RANGE |
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) &&
+ (xfer->base.usage & (PIPE_MAP_DISCARD_RANGE |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE)) &&
likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
bool can_realloc = false;
bool can_staging = false;
- /* A PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE transfer may be followed by
- * PIPE_TRANSFER_UNSYNCHRONIZED transfers to non-overlapping regions.
- * It cannot be treated as a PIPE_TRANSFER_DISCARD_RANGE transfer,
+ /* A PIPE_MAP_DISCARD_WHOLE_RESOURCE transfer may be followed by
+ * PIPE_MAP_UNSYNCHRONIZED transfers to non-overlapping regions.
+ * It cannot be treated as a PIPE_MAP_DISCARD_RANGE transfer,
* otherwise those following unsynchronized transfers may overwrite
* valid data.
*/
- if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (xfer->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
can_realloc = virgl_can_rebind_resource(vctx, &res->u.b);
} else {
can_staging = vctx->supports_staging;
@@ -190,7 +190,7 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
if (readback) {
/* Readback is yet another command and is transparent to the state
* trackers. It should be waited for in all cases, including when
- * PIPE_TRANSFER_UNSYNCHRONIZED is set.
+ * PIPE_MAP_UNSYNCHRONIZED is set.
*/
wait = true;
@@ -211,7 +211,7 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
* during which another unsynchronized map could write to the resource
* contents, leaving the contents in an undefined state.
*/
- if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
+ if ((xfer->base.usage & PIPE_MAP_DONTBLOCK) &&
(readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
return VIRGL_TRANSFER_MAP_ERROR;
@@ -440,12 +440,12 @@ virgl_resource_transfer_map(struct pipe_context *ctx,
* currently used for whole resource discards.
*/
if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
- (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
+ (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
(vres->clean_mask & 1)) {
util_range_set_empty(&vres->valid_buffer_range);
}
- if (usage & PIPE_TRANSFER_WRITE)
+ if (usage & PIPE_MAP_WRITE)
util_range_add(&vres->u.b, &vres->valid_buffer_range, box->x, box->x + box->width);
}
diff --git a/src/gallium/drivers/virgl/virgl_texture.c b/src/gallium/drivers/virgl/virgl_texture.c
index 23282587288..926cbe63a33 100644
--- a/src/gallium/drivers/virgl/virgl_texture.c
+++ b/src/gallium/drivers/virgl/virgl_texture.c
@@ -144,7 +144,7 @@ static void *texture_transfer_map_resolve(struct pipe_context *ctx,
struct pipe_box dst_box = *box;
dst_box.x = dst_box.y = dst_box.z = 0;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
/* readback should scale to the block size */
dst_box.width = align(dst_box.width,
util_format_get_blockwidth(resource->format));
@@ -158,7 +158,7 @@ static void *texture_transfer_map_resolve(struct pipe_context *ctx,
if (!resolve_tmp)
return NULL;
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
virgl_copy_region_with_blit(ctx, resolve_tmp, 0, &dst_box, resource,
level, box);
ctx->flush(ctx, NULL, 0);
@@ -178,7 +178,7 @@ static void *texture_transfer_map_resolve(struct pipe_context *ctx,
trans->base.layer_stride = trans->resolve_transfer->layer_stride;
return ptr;
} else {
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
void *src = ptr;
ptr = vws->resource_map(vws, vtex->hw_res);
@@ -205,7 +205,7 @@ static void *texture_transfer_map_resolve(struct pipe_context *ctx,
}
}
- if ((usage & PIPE_TRANSFER_WRITE) == 0)
+ if ((usage & PIPE_MAP_WRITE) == 0)
pipe_resource_reference(&trans->resolve_transfer->resource, NULL);
return ptr + trans->offset;
@@ -223,7 +223,7 @@ static bool needs_resolve(struct pipe_screen *screen,
if (resource->nr_samples > 1)
return true;
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_MAP_READ)
return !util_format_is_depth_or_stencil(resource->format) &&
!virgl_has_readback_format(screen, pipe_to_virgl_format(resource->format));
@@ -261,8 +261,8 @@ static void virgl_texture_transfer_unmap(struct pipe_context *ctx,
struct virgl_transfer *trans = virgl_transfer(transfer);
bool queue_unmap = false;
- if (transfer->usage & PIPE_TRANSFER_WRITE &&
- (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) == 0) {
+ if (transfer->usage & PIPE_MAP_WRITE &&
+ (transfer->usage & PIPE_MAP_FLUSH_EXPLICIT) == 0) {
if (trans->resolve_transfer && (trans->base.resource->format ==
trans->resolve_transfer->resource->format)) {
diff --git a/src/gallium/drivers/zink/zink_resource.c b/src/gallium/drivers/zink/zink_resource.c
index f093d7e9255..5c8f81436d3 100644
--- a/src/gallium/drivers/zink/zink_resource.c
+++ b/src/gallium/drivers/zink/zink_resource.c
@@ -391,15 +391,15 @@ zink_transfer_copy_bufimage(struct zink_context *ctx,
zink_batch_reference_resoure(batch, res);
zink_batch_reference_resoure(batch, staging_res);
- /* we're using u_transfer_helper_deinterleave, which means we'll be getting PIPE_TRANSFER_* usage
+ /* we're using u_transfer_helper_deinterleave, which means we'll be getting PIPE_MAP_* usage
* to indicate whether to copy either the depth or stencil aspects
*/
unsigned aspects = 0;
- assert((trans->base.usage & (PIPE_TRANSFER_DEPTH_ONLY | PIPE_TRANSFER_STENCIL_ONLY)) !=
- (PIPE_TRANSFER_DEPTH_ONLY | PIPE_TRANSFER_STENCIL_ONLY));
- if (trans->base.usage & PIPE_TRANSFER_DEPTH_ONLY)
+ assert((trans->base.usage & (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY)) !=
+ (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY));
+ if (trans->base.usage & PIPE_MAP_DEPTH_ONLY)
aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
- else if (trans->base.usage & PIPE_TRANSFER_STENCIL_ONLY)
+ else if (trans->base.usage & PIPE_MAP_STENCIL_ONLY)
aspects = VK_IMAGE_ASPECT_STENCIL_BIT;
else {
aspects = aspect_from_format(res->base.format);
@@ -451,7 +451,7 @@ zink_transfer_map(struct pipe_context *pctx,
void *ptr;
if (pres->target == PIPE_BUFFER) {
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
/* need to wait for rendering to finish
* TODO: optimize/fix this to be much less obtrusive
* mesa/mesa#2966
@@ -476,9 +476,9 @@ zink_transfer_map(struct pipe_context *pctx,
} else {
if (res->optimial_tiling || ((res->base.usage != PIPE_USAGE_STAGING))) {
enum pipe_format format = pres->format;
- if (usage & PIPE_TRANSFER_DEPTH_ONLY)
+ if (usage & PIPE_MAP_DEPTH_ONLY)
format = util_format_get_depth_only(pres->format);
- else if (usage & PIPE_TRANSFER_STENCIL_ONLY)
+ else if (usage & PIPE_MAP_STENCIL_ONLY)
format = PIPE_FORMAT_S8_UINT;
trans->base.stride = util_format_get_stride(format, box->width);
trans->base.layer_stride = util_format_get_2d_size(format,
@@ -502,7 +502,7 @@ zink_transfer_map(struct pipe_context *pctx,
struct zink_resource *staging_res = zink_resource(trans->staging_res);
- if (usage & PIPE_TRANSFER_READ) {
+ if (usage & PIPE_MAP_READ) {
struct zink_context *ctx = zink_context(pctx);
bool ret = zink_transfer_copy_bufimage(ctx, res,
staging_res, trans,
@@ -562,7 +562,7 @@ zink_transfer_unmap(struct pipe_context *pctx,
struct zink_resource *staging_res = zink_resource(trans->staging_res);
vkUnmapMemory(screen->dev, staging_res->mem);
- if (trans->base.usage & PIPE_TRANSFER_WRITE) {
+ if (trans->base.usage & PIPE_MAP_WRITE) {
struct zink_context *ctx = zink_context(pctx);
zink_transfer_copy_bufimage(ctx, res, staging_res, trans, true);
diff --git a/src/gallium/frontends/clover/core/resource.cpp b/src/gallium/frontends/clover/core/resource.cpp
index 89125c7656d..2fbed90e92c 100644
--- a/src/gallium/frontends/clover/core/resource.cpp
+++ b/src/gallium/frontends/clover/core/resource.cpp
@@ -167,10 +167,10 @@ root_resource::root_resource(clover::device &dev, memory_obj &obj,
unsigned cpp = util_format_get_blocksize(info.format);
if (pipe->target == PIPE_BUFFER)
- q.pipe->buffer_subdata(q.pipe, pipe, PIPE_TRANSFER_WRITE,
+ q.pipe->buffer_subdata(q.pipe, pipe, PIPE_MAP_WRITE,
0, info.width0, data_ptr);
else
- q.pipe->texture_subdata(q.pipe, pipe, 0, PIPE_TRANSFER_WRITE,
+ q.pipe->texture_subdata(q.pipe, pipe, 0, PIPE_MAP_WRITE,
rect, data_ptr, cpp * info.width0,
cpp * info.width0 * info.height0);
}
@@ -197,11 +197,11 @@ mapping::mapping(command_queue &q, resource &r,
const resource::vector &origin,
const resource::vector &region) :
pctx(q.pipe), pres(NULL) {
- unsigned usage = ((flags & CL_MAP_WRITE ? PIPE_TRANSFER_WRITE : 0 ) |
- (flags & CL_MAP_READ ? PIPE_TRANSFER_READ : 0 ) |
+ unsigned usage = ((flags & CL_MAP_WRITE ? PIPE_MAP_WRITE : 0 ) |
+ (flags & CL_MAP_READ ? PIPE_MAP_READ : 0 ) |
(flags & CL_MAP_WRITE_INVALIDATE_REGION ?
- PIPE_TRANSFER_DISCARD_RANGE : 0) |
- (!blocking ? PIPE_TRANSFER_UNSYNCHRONIZED : 0));
+ PIPE_MAP_DISCARD_RANGE : 0) |
+ (!blocking ? PIPE_MAP_UNSYNCHRONIZED : 0));
p = pctx->transfer_map(pctx, r.pipe, 0, usage,
box(origin + r.offset, region), &pxfer);
diff --git a/src/gallium/frontends/dri/dri2.c b/src/gallium/frontends/dri/dri2.c
index b02d9f26049..273cfde35e8 100644
--- a/src/gallium/frontends/dri/dri2.c
+++ b/src/gallium/frontends/dri/dri2.c
@@ -1574,9 +1574,9 @@ dri2_map_image(__DRIcontext *context, __DRIimage *image,
resource = resource->next;
if (flags & __DRI_IMAGE_TRANSFER_READ)
- pipe_access |= PIPE_TRANSFER_READ;
+ pipe_access |= PIPE_MAP_READ;
if (flags & __DRI_IMAGE_TRANSFER_WRITE)
- pipe_access |= PIPE_TRANSFER_WRITE;
+ pipe_access |= PIPE_MAP_WRITE;
map = pipe_transfer_map(pipe, resource, 0, 0, pipe_access, x0, y0,
width, height, &trans);
diff --git a/src/gallium/frontends/dri/drisw.c b/src/gallium/frontends/dri/drisw.c
index 8d7148ef6dc..4ac241c6dbc 100644
--- a/src/gallium/frontends/dri/drisw.c
+++ b/src/gallium/frontends/dri/drisw.c
@@ -422,7 +422,7 @@ drisw_update_tex_buffer(struct dri_drawable *drawable,
map = pipe_transfer_map(pipe, res,
0, 0, // level, layer,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
x, y, w, h, &transfer);
/* Copy the Drawable content to the mapped texture buffer */
diff --git a/src/gallium/frontends/glx/xlib/xm_api.c b/src/gallium/frontends/glx/xlib/xm_api.c
index e0e0b2d992c..c3ce20faba4 100644
--- a/src/gallium/frontends/glx/xlib/xm_api.c
+++ b/src/gallium/frontends/glx/xlib/xm_api.c
@@ -1506,7 +1506,7 @@ XMesaBindTexImage(Display *dpy, XMesaBuffer drawable, int buffer,
map = pipe_transfer_map(pipe, res,
0, 0, /* level, layer */
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
x, y,
w, h, &tex_xfer);
if (!map)
diff --git a/src/gallium/frontends/nine/buffer9.c b/src/gallium/frontends/nine/buffer9.c
index 81baebcd8bc..6c2bc8895da 100644
--- a/src/gallium/frontends/nine/buffer9.c
+++ b/src/gallium/frontends/nine/buffer9.c
@@ -279,18 +279,18 @@ NineBuffer9_Lock( struct NineBuffer9 *This,
Flags &= ~(D3DLOCK_DISCARD | D3DLOCK_NOOVERWRITE);
if (Flags & D3DLOCK_DISCARD)
- usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ usage = PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE;
else if (Flags & D3DLOCK_NOOVERWRITE)
- usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage = PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED;
else
/* Do not ask for READ if writeonly and default pool (should be safe enough,
* as the doc says app shouldn't expect reading to work with writeonly).
* Ignore for Systemmem as it has special behaviours. */
usage = ((This->base.usage & D3DUSAGE_WRITEONLY) && This->base.pool == D3DPOOL_DEFAULT) ?
- PIPE_TRANSFER_WRITE :
- PIPE_TRANSFER_READ_WRITE;
+ PIPE_MAP_WRITE :
+ PIPE_MAP_READ_WRITE;
if (Flags & D3DLOCK_DONOTWAIT && !(This->base.usage & D3DUSAGE_DYNAMIC))
- usage |= PIPE_TRANSFER_DONTBLOCK;
+ usage |= PIPE_MAP_DONTBLOCK;
This->discard_nooverwrite_only &= !!(Flags & (D3DLOCK_DISCARD | D3DLOCK_NOOVERWRITE));
@@ -390,7 +390,7 @@ NineBuffer9_Lock( struct NineBuffer9 *This,
/* Use the new resource */
pipe_resource_reference(&This->base.resource, new_res);
pipe_resource_reference(&new_res, NULL);
- usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage = PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED;
NineBuffer9_RebindIfRequired(This, device);
This->maps[This->nmaps].is_pipe_secondary = TRUE;
}
diff --git a/src/gallium/frontends/nine/device9.c b/src/gallium/frontends/nine/device9.c
index b24d9b698b9..36f0c8f1266 100644
--- a/src/gallium/frontends/nine/device9.c
+++ b/src/gallium/frontends/nine/device9.c
@@ -352,8 +352,8 @@ NineDevice9_ctor( struct NineDevice9 *This,
u_box_1d(0, 16, &box);
data = This->context.pipe->transfer_map(This->context.pipe, This->dummy_vbo, 0,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE,
&box, &transfer);
assert(data);
assert(transfer);
@@ -787,8 +787,8 @@ NineDevice9_SetCursorProperties( struct NineDevice9 *This,
u_box_origin_2d(This->cursor.w, This->cursor.h, &box);
ptr = pipe->transfer_map(pipe, This->cursor.image, 0,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE,
&box, &transfer);
if (!ptr)
ret_err("Failed to update cursor image.\n", D3DERR_DRIVERINTERNALERROR);
@@ -3192,7 +3192,7 @@ NineDevice9_ProcessVertices( struct NineDevice9 *This,
pipe_sw->stream_output_target_destroy(pipe_sw, target);
u_box_1d(0, VertexCount * so.stride[0] * 4, &box);
- map = pipe_sw->transfer_map(pipe_sw, resource, 0, PIPE_TRANSFER_READ, &box,
+ map = pipe_sw->transfer_map(pipe_sw, resource, 0, PIPE_MAP_READ, &box,
&transfer);
if (!map) {
hr = D3DERR_DRIVERINTERNALERROR;
diff --git a/src/gallium/frontends/nine/nine_buffer_upload.c b/src/gallium/frontends/nine/nine_buffer_upload.c
index a71f0195a2a..0fbaa754de6 100644
--- a/src/gallium/frontends/nine/nine_buffer_upload.c
+++ b/src/gallium/frontends/nine/nine_buffer_upload.c
@@ -96,9 +96,9 @@ nine_upload_create_buffer_group(struct nine_buffer_upload *upload,
group->map = pipe_buffer_map_range(upload->pipe, group->resource,
0, upload->buffers_size,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_PERSISTENT |
- PIPE_TRANSFER_COHERENT,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_PERSISTENT |
+ PIPE_MAP_COHERENT,
&group->transfer);
if (group->map == NULL) {
group->transfer = NULL;
@@ -225,9 +225,9 @@ nine_upload_create_buffer(struct nine_buffer_upload *upload,
buf->map = pipe_buffer_map_range(upload->pipe, buf->resource,
0, buffer_size,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_PERSISTENT |
- PIPE_TRANSFER_COHERENT,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_PERSISTENT |
+ PIPE_MAP_COHERENT,
&buf->transfer);
if (buf->map == NULL) {
pipe_resource_reference(&buf->resource, NULL);
diff --git a/src/gallium/frontends/nine/nine_state.c b/src/gallium/frontends/nine/nine_state.c
index 90de3dd80f3..9dae199d5f8 100644
--- a/src/gallium/frontends/nine/nine_state.c
+++ b/src/gallium/frontends/nine/nine_state.c
@@ -2531,7 +2531,7 @@ CSMT_ITEM_NO_WAIT_WITH_COUNTER(nine_context_box_upload,
map = pipe->transfer_map(pipe,
res,
level,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
dst_box, &transfer);
if (!map)
return;
@@ -3077,7 +3077,7 @@ update_vertex_buffers_sw(struct NineDevice9 *device, int start_vertice, int num_
u_box_1d(vtxbuf.buffer_offset + offset + start_vertice * vtxbuf.stride,
num_vertices * vtxbuf.stride, &box);
- userbuf = pipe->transfer_map(pipe, buf, 0, PIPE_TRANSFER_READ, &box,
+ userbuf = pipe->transfer_map(pipe, buf, 0, PIPE_MAP_READ, &box,
&(sw_internal->transfers_so[i]));
vtxbuf.is_user_buffer = true;
vtxbuf.buffer.user = userbuf;
diff --git a/src/gallium/frontends/nine/surface9.c b/src/gallium/frontends/nine/surface9.c
index db74de2823a..6e786e28ad0 100644
--- a/src/gallium/frontends/nine/surface9.c
+++ b/src/gallium/frontends/nine/surface9.c
@@ -463,13 +463,13 @@ NineSurface9_LockRect( struct NineSurface9 *This,
}
if (Flags & D3DLOCK_DISCARD) {
- usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE;
+ usage = PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE;
} else {
usage = (Flags & D3DLOCK_READONLY) ?
- PIPE_TRANSFER_READ : PIPE_TRANSFER_READ_WRITE;
+ PIPE_MAP_READ : PIPE_MAP_READ_WRITE;
}
if (Flags & D3DLOCK_DONOTWAIT)
- usage |= PIPE_TRANSFER_DONTBLOCK;
+ usage |= PIPE_MAP_DONTBLOCK;
if (pRect) {
/* Windows XP accepts invalid locking rectangles, Windows 7 rejects
@@ -728,7 +728,7 @@ NineSurface9_CopyDefaultToMem( struct NineSurface9 *This,
pipe = NineDevice9_GetPipe(This->base.base.device);
p_src = pipe->transfer_map(pipe, r_src, From->level,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&src_box, &transfer);
p_dst = This->data;
diff --git a/src/gallium/frontends/nine/volume9.c b/src/gallium/frontends/nine/volume9.c
index 4a3d0c88f10..e1c16ff77f3 100644
--- a/src/gallium/frontends/nine/volume9.c
+++ b/src/gallium/frontends/nine/volume9.c
@@ -290,13 +290,13 @@ NineVolume9_LockBox( struct NineVolume9 *This,
}
if (Flags & D3DLOCK_DISCARD) {
- usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE;
+ usage = PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE;
} else {
usage = (Flags & D3DLOCK_READONLY) ?
- PIPE_TRANSFER_READ : PIPE_TRANSFER_READ_WRITE;
+ PIPE_MAP_READ : PIPE_MAP_READ_WRITE;
}
if (Flags & D3DLOCK_DONOTWAIT)
- usage |= PIPE_TRANSFER_DONTBLOCK;
+ usage |= PIPE_MAP_DONTBLOCK;
if (pBox) {
user_assert(pBox->Right > pBox->Left, D3DERR_INVALIDCALL);
diff --git a/src/gallium/frontends/omx/bellagio/vid_enc.c b/src/gallium/frontends/omx/bellagio/vid_enc.c
index 7f4a673d3cd..7ace68052e3 100644
--- a/src/gallium/frontends/omx/bellagio/vid_enc.c
+++ b/src/gallium/frontends/omx/bellagio/vid_enc.c
@@ -311,7 +311,7 @@ static OMX_ERRORTYPE enc_AllocateBackTexture(omx_base_PortType *port,
box.width = (*resource)->width0;
box.height = (*resource)->height0;
box.depth = (*resource)->depth0;
- ptr = priv->s_pipe->transfer_map(priv->s_pipe, *resource, 0, PIPE_TRANSFER_WRITE, &box, transfer);
+ ptr = priv->s_pipe->transfer_map(priv->s_pipe, *resource, 0, PIPE_MAP_WRITE, &box, transfer);
if (map)
*map = ptr;
diff --git a/src/gallium/frontends/omx/tizonia/h264einport.c b/src/gallium/frontends/omx/tizonia/h264einport.c
index 609a1782b6b..7c3719f38ae 100644
--- a/src/gallium/frontends/omx/tizonia/h264einport.c
+++ b/src/gallium/frontends/omx/tizonia/h264einport.c
@@ -69,7 +69,7 @@ static OMX_ERRORTYPE enc_AllocateBackTexture(OMX_HANDLETYPE ap_hdl,
box.width = (*resource)->width0;
box.height = (*resource)->height0;
box.depth = (*resource)->depth0;
- ptr = priv->s_pipe->transfer_map(priv->s_pipe, *resource, 0, PIPE_TRANSFER_WRITE, &box, transfer);
+ ptr = priv->s_pipe->transfer_map(priv->s_pipe, *resource, 0, PIPE_MAP_WRITE, &box, transfer);
if (map)
*map = ptr;
diff --git a/src/gallium/frontends/omx/vid_dec_common.c b/src/gallium/frontends/omx/vid_dec_common.c
index 5ca544f8386..0a59aad1ea7 100644
--- a/src/gallium/frontends/omx/vid_dec_common.c
+++ b/src/gallium/frontends/omx/vid_dec_common.c
@@ -139,7 +139,7 @@ void vid_dec_FillOutput(vid_dec_PrivateType *priv, struct pipe_video_buffer *buf
struct pipe_transfer *transfer;
uint8_t *map, *dst;
map = priv->pipe->transfer_map(priv->pipe, views[i]->texture, 0,
- PIPE_TRANSFER_READ, &box, &transfer);
+ PIPE_MAP_READ, &box, &transfer);
if (!map)
return;
diff --git a/src/gallium/frontends/omx/vid_enc_common.c b/src/gallium/frontends/omx/vid_enc_common.c
index 1cc0c130e5d..a3b274606cf 100644
--- a/src/gallium/frontends/omx/vid_enc_common.c
+++ b/src/gallium/frontends/omx/vid_enc_common.c
@@ -157,7 +157,7 @@ void vid_enc_BufferEncoded_common(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
box.depth = outp->bitstream->depth0;
output->pBuffer = priv->t_pipe->transfer_map(priv->t_pipe, outp->bitstream, 0,
- PIPE_TRANSFER_READ_WRITE,
+ PIPE_MAP_READ_WRITE,
&box, &outp->transfer);
/* ------------- get size of result ----------------- */
@@ -413,14 +413,14 @@ OMX_ERRORTYPE enc_LoadImage_common(vid_enc_PrivateType * priv, OMX_VIDEO_PORTDEF
box.height = def->nFrameHeight;
box.depth = 1;
pipe->texture_subdata(pipe, views[0]->texture, 0,
- PIPE_TRANSFER_WRITE, &box,
+ PIPE_MAP_WRITE, &box,
ptr, def->nStride, 0);
ptr = ((uint8_t*)buf->pBuffer) + (def->nStride * box.height);
box.width = def->nFrameWidth / 2;
box.height = def->nFrameHeight / 2;
box.depth = 1;
pipe->texture_subdata(pipe, views[1]->texture, 0,
- PIPE_TRANSFER_WRITE, &box,
+ PIPE_MAP_WRITE, &box,
ptr, def->nStride, 0);
} else {
struct vl_video_buffer *dst_buf = (struct vl_video_buffer *)vbuf;
@@ -546,7 +546,7 @@ OMX_ERRORTYPE enc_LoadImage_common(vid_enc_PrivateType * priv, OMX_VIDEO_PORTDEF
box.height = inp->resource->height0;
box.depth = inp->resource->depth0;
buf->pBuffer = pipe->transfer_map(pipe, inp->resource, 0,
- PIPE_TRANSFER_WRITE, &box,
+ PIPE_MAP_WRITE, &box,
&inp->transfer);
}
diff --git a/src/gallium/frontends/osmesa/osmesa.c b/src/gallium/frontends/osmesa/osmesa.c
index 23537c3602e..42cc8256027 100644
--- a/src/gallium/frontends/osmesa/osmesa.c
+++ b/src/gallium/frontends/osmesa/osmesa.c
@@ -349,7 +349,7 @@ osmesa_st_framebuffer_flush_front(struct st_context_iface *stctx,
u_box_2d(0, 0, res->width0, res->height0, &box);
- map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box,
+ map = pipe->transfer_map(pipe, res, 0, PIPE_MAP_READ, &box,
&transfer);
/*
@@ -930,7 +930,7 @@ OSMesaGetDepthBuffer(OSMesaContext c, GLint *width, GLint *height,
u_box_2d(0, 0, res->width0, res->height0, &box);
- *buffer = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box,
+ *buffer = pipe->transfer_map(pipe, res, 0, PIPE_MAP_READ, &box,
&transfer);
if (!*buffer) {
return GL_FALSE;
diff --git a/src/gallium/frontends/va/buffer.c b/src/gallium/frontends/va/buffer.c
index d1c00f4b8e8..ffc61a5e51b 100644
--- a/src/gallium/frontends/va/buffer.c
+++ b/src/gallium/frontends/va/buffer.c
@@ -132,7 +132,7 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
box.width = resource->width0;
box.height = resource->height0;
box.depth = resource->depth0;
- *pbuff = drv->pipe->transfer_map(drv->pipe, resource, 0, PIPE_TRANSFER_WRITE,
+ *pbuff = drv->pipe->transfer_map(drv->pipe, resource, 0, PIPE_MAP_WRITE,
&box, &buf->derived_surface.transfer);
mtx_unlock(&drv->mutex);
diff --git a/src/gallium/frontends/va/image.c b/src/gallium/frontends/va/image.c
index 8c0f03eabfb..667b6293ec5 100644
--- a/src/gallium/frontends/va/image.c
+++ b/src/gallium/frontends/va/image.c
@@ -528,7 +528,7 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
struct pipe_transfer *transfer;
uint8_t *map;
map = drv->pipe->transfer_map(drv->pipe, views[i]->texture, 0,
- PIPE_TRANSFER_READ, &box, &transfer);
+ PIPE_MAP_READ, &box, &transfer);
if (!map) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_OPERATION_FAILED;
@@ -665,8 +665,8 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
map = drv->pipe->transfer_map(drv->pipe,
tex,
0,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD_RANGE,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_RANGE,
&dst_box, &transfer);
if (map == NULL) {
mtx_unlock(&drv->mutex);
@@ -679,7 +679,7 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
pipe_transfer_unmap(drv->pipe, transfer);
} else {
drv->pipe->texture_subdata(drv->pipe, tex, 0,
- PIPE_TRANSFER_WRITE, &dst_box,
+ PIPE_MAP_WRITE, &dst_box,
data[i] + pitches[i] * j,
pitches[i] * views[i]->texture->array_size, 0);
}
diff --git a/src/gallium/frontends/va/surface.c b/src/gallium/frontends/va/surface.c
index 964d316bc46..368b1cbcfc7 100644
--- a/src/gallium/frontends/va/surface.c
+++ b/src/gallium/frontends/va/surface.c
@@ -200,7 +200,7 @@ upload_sampler(struct pipe_context *pipe, struct pipe_sampler_view *dst,
struct pipe_transfer *transfer;
void *map;
- map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_MAP_WRITE,
dst_box, &transfer);
if (!map)
return;
diff --git a/src/gallium/frontends/vallium/val_execute.c b/src/gallium/frontends/vallium/val_execute.c
index 2415c010b74..d7f3cad96b1 100644
--- a/src/gallium/frontends/vallium/val_execute.c
+++ b/src/gallium/frontends/vallium/val_execute.c
@@ -1522,7 +1522,7 @@ static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry *cmd,
src_data = state->pctx->transfer_map(state->pctx,
copycmd->src->bo,
copycmd->regions[i].imageSubresource.mipLevel,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&box,
&src_t);
@@ -1535,7 +1535,7 @@ static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry *cmd,
dst_data = state->pctx->transfer_map(state->pctx,
copycmd->dst->bo,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&dbox,
&dst_t);
@@ -1600,7 +1600,7 @@ static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry *cmd,
src_data = state->pctx->transfer_map(state->pctx,
copycmd->src->bo,
0,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&sbox,
&src_t);
@@ -1615,7 +1615,7 @@ static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry *cmd,
dst_data = state->pctx->transfer_map(state->pctx,
copycmd->dst->bo,
copycmd->regions[i].imageSubresource.mipLevel,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
&dst_t);
@@ -1789,7 +1789,7 @@ static void handle_fill_buffer(struct val_cmd_buffer_entry *cmd,
dst = state->pctx->transfer_map(state->pctx,
fillcmd->buffer->bo,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
&dst_t);
@@ -1810,7 +1810,7 @@ static void handle_update_buffer(struct val_cmd_buffer_entry *cmd,
dst = state->pctx->transfer_map(state->pctx,
updcmd->buffer->bo,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
&dst_t);
@@ -2061,7 +2061,7 @@ static void handle_copy_query_pool_results(struct val_cmd_buffer_entry *cmd,
box.height = 1;
box.depth = 1;
map = state->pctx->transfer_map(state->pctx,
- copycmd->dst->bo, 0, PIPE_TRANSFER_READ, &box,
+ copycmd->dst->bo, 0, PIPE_MAP_READ, &box,
&src_t);
memset(map, 0, box.width);
diff --git a/src/gallium/frontends/vdpau/bitmap.c b/src/gallium/frontends/vdpau/bitmap.c
index 643be75e618..027cfcbd368 100644
--- a/src/gallium/frontends/vdpau/bitmap.c
+++ b/src/gallium/frontends/vdpau/bitmap.c
@@ -200,7 +200,7 @@ vlVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface,
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
- PIPE_TRANSFER_WRITE, &dst_box, *source_data,
+ PIPE_MAP_WRITE, &dst_box, *source_data,
*source_pitches, 0);
mtx_unlock(&vlsurface->device->mutex);
diff --git a/src/gallium/frontends/vdpau/output.c b/src/gallium/frontends/vdpau/output.c
index 75a9f016e90..fa902705109 100644
--- a/src/gallium/frontends/vdpau/output.c
+++ b/src/gallium/frontends/vdpau/output.c
@@ -221,7 +221,7 @@ vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
res = vlsurface->sampler_view->texture;
box = RectToPipeBox(source_rect, res);
- map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box, &transfer);
+ map = pipe->transfer_map(pipe, res, 0, PIPE_MAP_READ, &box, &transfer);
if (!map) {
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
@@ -272,7 +272,7 @@ vlVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
}
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
- PIPE_TRANSFER_WRITE, &dst_box, *source_data,
+ PIPE_MAP_WRITE, &dst_box, *source_data,
*source_pitches, 0);
mtx_unlock(&vlsurface->device->mutex);
@@ -359,7 +359,7 @@ vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
box.height = res->height0;
box.depth = res->depth0;
- context->texture_subdata(context, res, 0, PIPE_TRANSFER_WRITE, &box,
+ context->texture_subdata(context, res, 0, PIPE_MAP_WRITE, &box,
source_data[0], source_pitch[0],
source_pitch[0] * res->height0);
@@ -392,7 +392,7 @@ vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
box.height = res->height0;
box.depth = res->depth0;
- context->texture_subdata(context, res, 0, PIPE_TRANSFER_WRITE, &box, color_table,
+ context->texture_subdata(context, res, 0, PIPE_MAP_WRITE, &box, color_table,
util_format_get_stride(colortbl_format, res->width0), 0);
memset(&sv_tmpl, 0, sizeof(sv_tmpl));
@@ -496,7 +496,7 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
sv->texture->width0, sv->texture->height0, 1
};
- pipe->texture_subdata(pipe, sv->texture, 0, PIPE_TRANSFER_WRITE, &dst_box,
+ pipe->texture_subdata(pipe, sv->texture, 0, PIPE_MAP_WRITE, &dst_box,
source_data[i], source_pitches[i], 0);
}
diff --git a/src/gallium/frontends/vdpau/surface.c b/src/gallium/frontends/vdpau/surface.c
index 799c2cb1f1b..0171001f6e5 100644
--- a/src/gallium/frontends/vdpau/surface.c
+++ b/src/gallium/frontends/vdpau/surface.c
@@ -261,7 +261,7 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
uint8_t *map;
map = pipe->transfer_map(pipe, sv->texture, 0,
- PIPE_TRANSFER_READ, &box, &transfer);
+ PIPE_MAP_READ, &box, &transfer);
if (!map) {
mtx_unlock(&vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
@@ -308,7 +308,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
struct pipe_context *pipe;
struct pipe_sampler_view **sampler_views;
unsigned i, j;
- unsigned usage = PIPE_TRANSFER_WRITE;
+ unsigned usage = PIPE_MAP_WRITE;
vlVdpSurface *p_surf = vlGetDataHTAB(surface);
if (!p_surf)
@@ -414,7 +414,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
pipe_transfer_unmap(pipe, transfer);
} else {
pipe->texture_subdata(pipe, tex, 0,
- PIPE_TRANSFER_WRITE, &dst_box,
+ PIPE_MAP_WRITE, &dst_box,
source_data[i] + source_pitches[i] * j,
source_pitches[i] * tex->array_size,
0);
@@ -423,7 +423,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
* This surface has already been synced
* by the first map.
*/
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ usage |= PIPE_MAP_UNSYNCHRONIZED;
}
}
mtx_unlock(&p_surf->device->mutex);
diff --git a/src/gallium/frontends/xa/xa_context.c b/src/gallium/frontends/xa/xa_context.c
index a4630cf09ca..3e979606560 100644
--- a/src/gallium/frontends/xa/xa_context.c
+++ b/src/gallium/frontends/xa/xa_context.c
@@ -107,8 +107,8 @@ xa_surface_dma(struct xa_context *ctx,
enum pipe_transfer_usage transfer_direction;
struct pipe_context *pipe = ctx->pipe;
- transfer_direction = (to_surface ? PIPE_TRANSFER_WRITE :
- PIPE_TRANSFER_READ);
+ transfer_direction = (to_surface ? PIPE_MAP_WRITE :
+ PIPE_MAP_READ);
for (i = 0; i < num_boxes; ++i, ++boxes) {
w = boxes->x2 - boxes->x1;
@@ -148,19 +148,19 @@ xa_surface_map(struct xa_context *ctx,
return NULL;
if (usage & XA_MAP_READ)
- gallium_usage |= PIPE_TRANSFER_READ;
+ gallium_usage |= PIPE_MAP_READ;
if (usage & XA_MAP_WRITE)
- gallium_usage |= PIPE_TRANSFER_WRITE;
+ gallium_usage |= PIPE_MAP_WRITE;
if (usage & XA_MAP_MAP_DIRECTLY)
- gallium_usage |= PIPE_TRANSFER_MAP_DIRECTLY;
+ gallium_usage |= PIPE_MAP_DIRECTLY;
if (usage & XA_MAP_UNSYNCHRONIZED)
- gallium_usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ gallium_usage |= PIPE_MAP_UNSYNCHRONIZED;
if (usage & XA_MAP_DONTBLOCK)
- gallium_usage |= PIPE_TRANSFER_DONTBLOCK;
+ gallium_usage |= PIPE_MAP_DONTBLOCK;
if (usage & XA_MAP_DISCARD_WHOLE_RESOURCE)
- gallium_usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ gallium_usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
- if (!(gallium_usage & (PIPE_TRANSFER_READ_WRITE)))
+ if (!(gallium_usage & (PIPE_MAP_READ_WRITE)))
return NULL;
map = pipe_transfer_map(pipe, srf->tex, 0, 0,
diff --git a/src/gallium/frontends/xvmc/subpicture.c b/src/gallium/frontends/xvmc/subpicture.c
index 42eefe74e69..aa6253bed5e 100644
--- a/src/gallium/frontends/xvmc/subpicture.c
+++ b/src/gallium/frontends/xvmc/subpicture.c
@@ -210,7 +210,7 @@ upload_sampler(struct pipe_context *pipe, struct pipe_sampler_view *dst,
struct pipe_transfer *transfer;
void *map;
- map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_MAP_WRITE,
dst_box, &transfer);
if (!map)
return;
@@ -231,7 +231,7 @@ upload_sampler_convert(struct pipe_context *pipe, struct pipe_sampler_view *dst,
int i, j;
char *map, *src;
- map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_MAP_WRITE,
dst_box, &transfer);
if (!map)
return;
@@ -393,7 +393,7 @@ Status XvMCClearSubpicture(Display *dpy, XvMCSubpicture *subpicture, short x, sh
dst = subpicture_priv->sampler;
/* TODO: Assert clear rect is within bounds? Or clip? */
- map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, dst->texture, 0, PIPE_MAP_WRITE,
&dst_box, &transfer);
if (!map)
return XvMCBadSubpicture;
diff --git a/src/gallium/include/frontend/sw_winsys.h b/src/gallium/include/frontend/sw_winsys.h
index 86eca9defca..d412cc8d4af 100644
--- a/src/gallium/include/frontend/sw_winsys.h
+++ b/src/gallium/include/frontend/sw_winsys.h
@@ -109,7 +109,7 @@ struct sw_winsys
struct winsys_handle *whandle );
/**
- * \param flags bitmask of PIPE_TRANSFER_x flags
+ * \param flags bitmask of PIPE_MAP_x flags
*/
void *
(*displaytarget_map)( struct sw_winsys *ws,
diff --git a/src/gallium/include/pipe/p_context.h b/src/gallium/include/pipe/p_context.h
index f17bf2d3fc8..9553636ac02 100644
--- a/src/gallium/include/pipe/p_context.h
+++ b/src/gallium/include/pipe/p_context.h
@@ -674,7 +674,7 @@ struct pipe_context {
void *(*transfer_map)(struct pipe_context *,
struct pipe_resource *resource,
unsigned level,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ unsigned usage, /* a combination of PIPE_MAP_x */
const struct pipe_box *,
struct pipe_transfer **out_transfer);
@@ -694,7 +694,7 @@ struct pipe_context {
*/
void (*buffer_subdata)(struct pipe_context *,
struct pipe_resource *,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ unsigned usage, /* a combination of PIPE_MAP_x */
unsigned offset,
unsigned size,
const void *data);
@@ -702,7 +702,7 @@ struct pipe_context {
void (*texture_subdata)(struct pipe_context *,
struct pipe_resource *,
unsigned level,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ unsigned usage, /* a combination of PIPE_MAP_x */
const struct pipe_box *,
const void *data,
unsigned stride,
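For reference, a minimal sketch (editorial illustration, not part of the commit) of how a frontend drives these hooks with the renamed flags; it assumes a valid pipe_context "ctx", an existing buffer resource "constbuf" with a local "constants" array, and a texture resource "tex" with CPU-side data "pixels", "width", "height" and "row_stride"; error handling is omitted:

   /* Upload constants without an explicit map; usage takes PIPE_MAP_* bits. */
   ctx->buffer_subdata(ctx, constbuf, PIPE_MAP_WRITE,
                       0, sizeof(constants), constants);

   /* Upload a 2D texture region the same way. */
   struct pipe_box box;
   u_box_2d(0, 0, width, height, &box);
   ctx->texture_subdata(ctx, tex, 0 /* level */, PIPE_MAP_WRITE,
                        &box, pixels, row_stride, 0 /* layer_stride */);
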
diff --git a/src/gallium/include/pipe/p_defines.h b/src/gallium/include/pipe/p_defines.h
index 83910d2a913..bd82f64ec10 100644
--- a/src/gallium/include/pipe/p_defines.h
+++ b/src/gallium/include/pipe/p_defines.h
@@ -240,18 +240,18 @@ enum pipe_transfer_usage
* Resource contents read back (or accessed directly) at transfer
* create time.
*/
- PIPE_TRANSFER_READ = (1 << 0),
+ PIPE_MAP_READ = (1 << 0),
/**
* Resource contents will be written back at transfer_unmap
* time (or modified as a result of being accessed directly).
*/
- PIPE_TRANSFER_WRITE = (1 << 1),
+ PIPE_MAP_WRITE = (1 << 1),
/**
* Read/modify/write
*/
- PIPE_TRANSFER_READ_WRITE = PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE,
+ PIPE_MAP_READ_WRITE = PIPE_MAP_READ | PIPE_MAP_WRITE,
/**
* The transfer should map the texture storage directly. The driver may
@@ -264,17 +264,17 @@ enum pipe_transfer_usage
*
* This flag supresses implicit "DISCARD" for buffer_subdata.
*/
- PIPE_TRANSFER_MAP_DIRECTLY = (1 << 2),
+ PIPE_MAP_DIRECTLY = (1 << 2),
/**
* Discards the memory within the mapped region.
*
- * It should not be used with PIPE_TRANSFER_READ.
+ * It should not be used with PIPE_MAP_READ.
*
* See also:
* - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_RANGE_BIT flag.
*/
- PIPE_TRANSFER_DISCARD_RANGE = (1 << 8),
+ PIPE_MAP_DISCARD_RANGE = (1 << 8),
/**
* Fail if the resource cannot be mapped immediately.
@@ -284,36 +284,36 @@ enum pipe_transfer_usage
* - Mesa's MESA_MAP_NOWAIT_BIT flag.
* - WDDM's D3DDDICB_LOCKFLAGS.DonotWait flag.
*/
- PIPE_TRANSFER_DONTBLOCK = (1 << 9),
+ PIPE_MAP_DONTBLOCK = (1 << 9),
/**
* Do not attempt to synchronize pending operations on the resource when mapping.
*
- * It should not be used with PIPE_TRANSFER_READ.
+ * It should not be used with PIPE_MAP_READ.
*
* See also:
* - OpenGL's ARB_map_buffer_range extension, MAP_UNSYNCHRONIZED_BIT flag.
* - Direct3D's D3DLOCK_NOOVERWRITE flag.
* - WDDM's D3DDDICB_LOCKFLAGS.IgnoreSync flag.
*/
- PIPE_TRANSFER_UNSYNCHRONIZED = (1 << 10),
+ PIPE_MAP_UNSYNCHRONIZED = (1 << 10),
/**
* Written ranges will be notified later with
* pipe_context::transfer_flush_region.
*
- * It should not be used with PIPE_TRANSFER_READ.
+ * It should not be used with PIPE_MAP_READ.
*
* See also:
* - pipe_context::transfer_flush_region
* - OpenGL's ARB_map_buffer_range extension, MAP_FLUSH_EXPLICIT_BIT flag.
*/
- PIPE_TRANSFER_FLUSH_EXPLICIT = (1 << 11),
+ PIPE_MAP_FLUSH_EXPLICIT = (1 << 11),
/**
* Discards all memory backing the resource.
*
- * It should not be used with PIPE_TRANSFER_READ.
+ * It should not be used with PIPE_MAP_READ.
*
* This is equivalent to:
* - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_BUFFER_BIT
@@ -323,7 +323,7 @@ enum pipe_transfer_usage
* - D3D10 DDI's D3D10_DDI_MAP_WRITE_DISCARD flag
* - D3D10's D3D10_MAP_WRITE_DISCARD flag.
*/
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE = (1 << 12),
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE = (1 << 12),
/**
* Allows the resource to be used for rendering while mapped.
@@ -334,7 +334,7 @@ enum pipe_transfer_usage
* If COHERENT is not set, memory_barrier(PIPE_BARRIER_MAPPED_BUFFER)
* must be called to ensure the device can see what the CPU has written.
*/
- PIPE_TRANSFER_PERSISTENT = (1 << 13),
+ PIPE_MAP_PERSISTENT = (1 << 13),
/**
* If PERSISTENT is set, this ensures any writes done by the device are
@@ -343,30 +343,30 @@ enum pipe_transfer_usage
* PIPE_RESOURCE_FLAG_MAP_COHERENT must be set when creating
* the resource.
*/
- PIPE_TRANSFER_COHERENT = (1 << 14),
+ PIPE_MAP_COHERENT = (1 << 14),
/**
* Map a resource in a thread-safe manner, because the calling thread can
* be any thread. It can only be used if both WRITE and UNSYNCHRONIZED are
* set.
*/
- PIPE_TRANSFER_THREAD_SAFE = 1 << 15,
+ PIPE_MAP_THREAD_SAFE = 1 << 15,
/**
* Map only the depth aspect of a resource
*/
- PIPE_TRANSFER_DEPTH_ONLY = 1 << 16,
+ PIPE_MAP_DEPTH_ONLY = 1 << 16,
/**
* Map only the stencil aspect of a resource
*/
- PIPE_TRANSFER_STENCIL_ONLY = 1 << 17,
+ PIPE_MAP_STENCIL_ONLY = 1 << 17,
/**
* This and higher bits are reserved for private use by drivers. Drivers
- * should use this as (PIPE_TRANSFER_DRV_PRV << i).
+ * should use this as (PIPE_MAP_DRV_PRV << i).
*/
- PIPE_TRANSFER_DRV_PRV = (1 << 24)
+ PIPE_MAP_DRV_PRV = (1 << 24)
};
/**
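A minimal usage sketch of the renamed map flags (editorial illustration only, assuming a valid pipe_context "pipe" and a buffer resource "buf" of "size" bytes; error handling elided):

   struct pipe_transfer *xfer;
   struct pipe_box box;
   u_box_1d(0, size, &box);

   /* Map for CPU reads; PIPE_MAP_READ replaces PIPE_TRANSFER_READ. */
   uint32_t *data = pipe->transfer_map(pipe, buf, 0 /* level */,
                                       PIPE_MAP_READ, &box, &xfer);
   if (data) {
      /* ... inspect data ... */
      pipe->transfer_unmap(pipe, xfer);
   }

   /* For a full overwrite, combine WRITE with a discard bit so the
    * driver can avoid stalling on prior GPU use of the buffer. */
   data = pipe->transfer_map(pipe, buf, 0,
                             PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE,
                             &box, &xfer);
   if (data) {
      /* ... fill data ... */
      pipe->transfer_unmap(pipe, xfer);
   }
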
diff --git a/src/gallium/tests/graw/fs-test.c b/src/gallium/tests/graw/fs-test.c
index 8c40ecd705d..6eca0d54967 100644
--- a/src/gallium/tests/graw/fs-test.c
+++ b/src/gallium/tests/graw/fs-test.c
@@ -314,7 +314,7 @@ static void init_tex( void )
ctx->texture_subdata(ctx,
samptex,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
tex2d,
sizeof tex2d[0],
@@ -328,7 +328,7 @@ static void init_tex( void )
uint32_t *ptr;
ptr = pipe_transfer_map(ctx, samptex,
0, 0, /* level, layer */
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
0, 0, SIZE, SIZE, &t); /* x, y, width, height */
if (memcmp(ptr, tex2d, sizeof tex2d) != 0) {
diff --git a/src/gallium/tests/graw/graw_util.h b/src/gallium/tests/graw/graw_util.h
index bb5d53d24bf..45c11659d0d 100644
--- a/src/gallium/tests/graw/graw_util.h
+++ b/src/gallium/tests/graw/graw_util.h
@@ -244,7 +244,7 @@ graw_util_create_tex2d(const struct graw_info *info,
info->ctx->texture_subdata(info->ctx,
tex,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
data,
row_stride,
@@ -258,7 +258,7 @@ graw_util_create_tex2d(const struct graw_info *info,
uint32_t *ptr;
t = pipe_transfer_map(info->ctx, samptex,
0, 0, /* level, layer */
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
0, 0, SIZE, SIZE); /* x, y, width, height */
ptr = info->ctx->transfer_map(info->ctx, t);
diff --git a/src/gallium/tests/graw/gs-test.c b/src/gallium/tests/graw/gs-test.c
index 34f1decdd66..39d3737524d 100644
--- a/src/gallium/tests/graw/gs-test.c
+++ b/src/gallium/tests/graw/gs-test.c
@@ -169,7 +169,7 @@ static void init_fs_constbuf( void )
{
ctx->buffer_subdata(ctx, constbuf1,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
0, sizeof(constants1), constants1);
pipe_set_constant_buffer(ctx,
@@ -178,7 +178,7 @@ static void init_fs_constbuf( void )
}
{
ctx->buffer_subdata(ctx, constbuf2,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
0, sizeof(constants2), constants2);
pipe_set_constant_buffer(ctx,
@@ -404,7 +404,7 @@ static void init_tex( void )
ctx->texture_subdata(ctx,
samptex,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
tex2d,
sizeof tex2d[0],
@@ -418,7 +418,7 @@ static void init_tex( void )
uint32_t *ptr;
ptr = pipe_transfer_map(ctx, samptex,
0, 0, /* level, layer */
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
0, 0, SIZE, SIZE, &t); /* x, y, width, height */
if (memcmp(ptr, tex2d, sizeof tex2d) != 0) {
diff --git a/src/gallium/tests/graw/quad-sample.c b/src/gallium/tests/graw/quad-sample.c
index 160c8db5301..1164a2b2bbf 100644
--- a/src/gallium/tests/graw/quad-sample.c
+++ b/src/gallium/tests/graw/quad-sample.c
@@ -229,7 +229,7 @@ static void init_tex( void )
ctx->texture_subdata(ctx,
samptex,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
tex2d,
sizeof tex2d[0],
@@ -243,7 +243,7 @@ static void init_tex( void )
uint32_t *ptr;
ptr = pipe_transfer_map(ctx, samptex,
0, 0, /* level, layer */
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
0, 0, SIZE, SIZE, &t); /* x, y, width, height */
if (memcmp(ptr, tex2d, sizeof tex2d) != 0) {
diff --git a/src/gallium/tests/graw/vs-test.c b/src/gallium/tests/graw/vs-test.c
index a48e83f6923..e634154dd1c 100644
--- a/src/gallium/tests/graw/vs-test.c
+++ b/src/gallium/tests/graw/vs-test.c
@@ -101,7 +101,7 @@ static void init_fs_constbuf( void )
u_box_2d(0,0,sizeof(constants),1, &box);
ctx->buffer_subdata(ctx, constbuf,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
0, sizeof(constants), constants);
pipe_set_constant_buffer(ctx,
@@ -302,7 +302,7 @@ static void init_tex( void )
ctx->texture_subdata(ctx,
samptex,
0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
&box,
tex2d,
sizeof tex2d[0],
@@ -316,7 +316,7 @@ static void init_tex( void )
uint32_t *ptr;
ptr = pipe_transfer_map(ctx, samptex,
0, 0, /* level, layer */
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
0, 0, SIZE, SIZE, &t); /* x, y, width, height */
if (memcmp(ptr, tex2d, sizeof tex2d) != 0) {
diff --git a/src/gallium/tests/trivial/compute.c b/src/gallium/tests/trivial/compute.c
index 6f4fe36a536..94599e92288 100644
--- a/src/gallium/tests/trivial/compute.c
+++ b/src/gallium/tests/trivial/compute.c
@@ -204,7 +204,7 @@ static void init_tex(struct context *ctx, int slot,
*tex = ctx->screen->resource_create(ctx->screen, &ttex);
assert(*tex);
- map = pipe->transfer_map(pipe, *tex, 0, PIPE_TRANSFER_WRITE,
+ map = pipe->transfer_map(pipe, *tex, 0, PIPE_MAP_WRITE,
&(struct pipe_box) { .width = w,
.height = h,
.depth = 1 }, &xfer);
@@ -246,7 +246,7 @@ static void check_tex(struct context *ctx, int slot,
if (!check)
check = default_check;
- map = pipe->transfer_map(pipe, tex, 0, PIPE_TRANSFER_READ,
+ map = pipe->transfer_map(pipe, tex, 0, PIPE_MAP_READ,
&(struct pipe_box) { .width = tex->width0,
.height = tex->height0,
.depth = 1 }, &xfer);
diff --git a/src/gallium/tests/trivial/quad-tex.c b/src/gallium/tests/trivial/quad-tex.c
index ba0a7ac8fb3..fe345a94ae7 100644
--- a/src/gallium/tests/trivial/quad-tex.c
+++ b/src/gallium/tests/trivial/quad-tex.c
@@ -177,7 +177,7 @@ static void init_prog(struct program *p)
box.height = 2;
box.depth = 1;
- ptr = p->pipe->transfer_map(p->pipe, p->tex, 0, PIPE_TRANSFER_WRITE, &box, &t);
+ ptr = p->pipe->transfer_map(p->pipe, p->tex, 0, PIPE_MAP_WRITE, &box, &t);
ptr[0] = 0xffff0000;
ptr[1] = 0xff0000ff;
ptr[2] = 0xff00ff00;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index ebc4fed0e70..3984acc593c 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -279,10 +279,10 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
assert(!bo->sparse);
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
- if (!(usage & PIPE_TRANSFER_WRITE)) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
+ if (!(usage & PIPE_MAP_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
@@ -316,7 +316,7 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
} else {
uint64_t time = os_time_get_nano();
- if (!(usage & PIPE_TRANSFER_WRITE)) {
+ if (!(usage & PIPE_MAP_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index abfe4940541..59a24b97304 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -715,7 +715,7 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
if (!pb)
return false;
- mapped = amdgpu_bo_map(pb, NULL, PIPE_TRANSFER_WRITE);
+ mapped = amdgpu_bo_map(pb, NULL, PIPE_MAP_WRITE);
if (!mapped) {
pb_reference(&pb, NULL);
return false;
@@ -1054,7 +1054,7 @@ amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_i
return false;
map = (uint32_t*)amdgpu_bo_map(preamble_bo, NULL,
- PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ PIPE_MAP_WRITE | RADEON_TRANSFER_TEMPORARY);
if (!map) {
pb_reference(&preamble_bo, NULL);
return false;
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index f432b9bfe01..de0e1b61c8f 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -504,10 +504,10 @@ static void *radeon_bo_map(struct pb_buffer *buf,
struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
- if (usage & PIPE_TRANSFER_DONTBLOCK) {
- if (!(usage & PIPE_TRANSFER_WRITE)) {
+ if (usage & PIPE_MAP_DONTBLOCK) {
+ if (!(usage & PIPE_MAP_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
@@ -540,7 +540,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
} else {
uint64_t time = os_time_get_nano();
- if (!(usage & PIPE_TRANSFER_WRITE)) {
+ if (!(usage & PIPE_MAP_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
diff --git a/src/gallium/winsys/svga/drm/vmw_buffer.c b/src/gallium/winsys/svga/drm/vmw_buffer.c
index 03db92a6481..549727a5145 100644
--- a/src/gallium/winsys/svga/drm/vmw_buffer.c
+++ b/src/gallium/winsys/svga/drm/vmw_buffer.c
@@ -359,24 +359,24 @@ vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
void *map;
(void)sws;
- if (flags & PIPE_TRANSFER_UNSYNCHRONIZED)
- flags &= ~PIPE_TRANSFER_DONTBLOCK;
+ if (flags & PIPE_MAP_UNSYNCHRONIZED)
+ flags &= ~PIPE_MAP_DONTBLOCK;
- /* NOTE: we're passing PIPE_TRANSFER_x flags instead of
+ /* NOTE: we're passing PIPE_MAP_x flags instead of
* PB_USAGE_x flags here. We should probably fix that.
*/
STATIC_ASSERT((unsigned) PB_USAGE_CPU_READ ==
- (unsigned) PIPE_TRANSFER_READ);
+ (unsigned) PIPE_MAP_READ);
STATIC_ASSERT((unsigned) PB_USAGE_CPU_WRITE ==
- (unsigned) PIPE_TRANSFER_WRITE);
+ (unsigned) PIPE_MAP_WRITE);
STATIC_ASSERT((unsigned) PB_USAGE_GPU_READ ==
- (unsigned) PIPE_TRANSFER_MAP_DIRECTLY);
+ (unsigned) PIPE_MAP_DIRECTLY);
STATIC_ASSERT((unsigned) PB_USAGE_DONTBLOCK ==
- (unsigned) PIPE_TRANSFER_DONTBLOCK);
+ (unsigned) PIPE_MAP_DONTBLOCK);
STATIC_ASSERT((unsigned) PB_USAGE_UNSYNCHRONIZED ==
- (unsigned) PIPE_TRANSFER_UNSYNCHRONIZED);
+ (unsigned) PIPE_MAP_UNSYNCHRONIZED);
STATIC_ASSERT((unsigned) PB_USAGE_PERSISTENT ==
- (unsigned) PIPE_TRANSFER_PERSISTENT);
+ (unsigned) PIPE_MAP_PERSISTENT);
map = pb_map(vmw_pb_buffer(buf), flags & PB_USAGE_ALL, NULL);
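The STATIC_ASSERT chain above is what makes passing PIPE_MAP_* bits straight into pb_map() legal: the two flag sets must match bit-for-bit. A self-contained sketch of the same guard pattern, using hypothetical enums and C11 static_assert rather than the vmw code itself:

#include <assert.h>

enum map_flags { MAP_READ = 1 << 0, MAP_WRITE = 1 << 1 };
enum pb_usage  { PB_CPU_READ = 1 << 0, PB_CPU_WRITE = 1 << 1 };

/* If either enum is ever renumbered, the build breaks here instead of
 * wrong bits silently reaching the buffer manager. */
static_assert((unsigned)MAP_READ == (unsigned)PB_CPU_READ,
              "read bits diverged");
static_assert((unsigned)MAP_WRITE == (unsigned)PB_CPU_WRITE,
              "write bits diverged");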
diff --git a/src/gallium/winsys/svga/drm/vmw_query.c b/src/gallium/winsys/svga/drm/vmw_query.c
index 7baf2c1d7ae..6f761d7bf7f 100644
--- a/src/gallium/winsys/svga/drm/vmw_query.c
+++ b/src/gallium/winsys/svga/drm/vmw_query.c
@@ -82,7 +82,7 @@ vmw_svga_winsys_query_init(struct svga_winsys_screen *sws,
state = (SVGA3dQueryState *) vmw_svga_winsys_buffer_map(sws,
query->buf,
- PIPE_TRANSFER_WRITE);
+ PIPE_MAP_WRITE);
if (!state) {
debug_printf("Failed to map query result memory for initialization\n");
return -1;
@@ -110,7 +110,7 @@ vmw_svga_winsys_query_get_result(struct svga_winsys_screen *sws,
state = (SVGA3dQueryState *) vmw_svga_winsys_buffer_map(sws,
query->buf,
- PIPE_TRANSFER_READ);
+ PIPE_MAP_READ);
if (!state) {
debug_printf("Failed to lock query result memory\n");
diff --git a/src/gallium/winsys/svga/drm/vmw_screen_svga.c b/src/gallium/winsys/svga/drm/vmw_screen_svga.c
index 334adde7b31..9cf5ad609d5 100644
--- a/src/gallium/winsys/svga/drm/vmw_screen_svga.c
+++ b/src/gallium/winsys/svga/drm/vmw_screen_svga.c
@@ -432,7 +432,7 @@ vmw_svga_winsys_shader_create(struct svga_winsys_screen *sws,
if (!shader->buf)
goto out_no_buf;
- code = vmw_svga_winsys_buffer_map(sws, shader->buf, PIPE_TRANSFER_WRITE);
+ code = vmw_svga_winsys_buffer_map(sws, shader->buf, PIPE_MAP_WRITE);
if (!code)
goto out_no_buf;
diff --git a/src/gallium/winsys/svga/drm/vmw_shader.c b/src/gallium/winsys/svga/drm/vmw_shader.c
index dbf63c59234..c1018daf2e3 100644
--- a/src/gallium/winsys/svga/drm/vmw_shader.c
+++ b/src/gallium/winsys/svga/drm/vmw_shader.c
@@ -97,7 +97,7 @@ vmw_svga_shader_create(struct svga_winsys_screen *sws,
return NULL;
}
- map = sws->buffer_map(sws, shader->buf, PIPE_TRANSFER_WRITE);
+ map = sws->buffer_map(sws, shader->buf, PIPE_MAP_WRITE);
if (!map) {
FREE(shader);
return NULL;
diff --git a/src/gallium/winsys/svga/drm/vmw_surface.c b/src/gallium/winsys/svga/drm/vmw_surface.c
index 25c6d320c74..3aa4eef00bc 100644
--- a/src/gallium/winsys/svga/drm/vmw_surface.c
+++ b/src/gallium/winsys/svga/drm/vmw_surface.c
@@ -44,7 +44,7 @@ vmw_svga_winsys_surface_init(struct svga_winsys_screen *sws,
struct pb_buffer *pb_buf;
uint32_t pb_flags;
struct vmw_winsys_screen *vws = vsrf->screen;
- pb_flags = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ pb_flags = PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE;
struct pb_manager *provider;
struct pb_desc desc;
@@ -113,12 +113,12 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
*retry = FALSE;
*rebind = FALSE;
- assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
+ assert((flags & (PIPE_MAP_READ | PIPE_MAP_WRITE)) != 0);
mtx_lock(&vsrf->mutex);
if (vsrf->mapcount) {
/* Other mappers will get confused if we discard. */
- flags &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ flags &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
vsrf->rebind = FALSE;
@@ -127,31 +127,31 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
* If we intend to read, there's no point discarding the
* data if busy.
*/
- if (flags & PIPE_TRANSFER_READ || vsrf->shared)
- flags &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ if (flags & PIPE_MAP_READ || vsrf->shared)
+ flags &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
/*
* Discard is a hint to a synchronized map.
*/
- if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
- flags &= ~PIPE_TRANSFER_UNSYNCHRONIZED;
+ if (flags & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
+ flags &= ~PIPE_MAP_UNSYNCHRONIZED;
/*
* The surface is allowed to be referenced on the command stream iff
* we're mapping unsynchronized or discard. This is an early check.
* We need to recheck after a failing discard map.
*/
- if (!(flags & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
- PIPE_TRANSFER_UNSYNCHRONIZED)) &&
+ if (!(flags & (PIPE_MAP_DISCARD_WHOLE_RESOURCE |
+ PIPE_MAP_UNSYNCHRONIZED)) &&
p_atomic_read(&vsrf->validated)) {
*retry = TRUE;
goto out_unlock;
}
- pb_flags = flags & (PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT);
+ pb_flags = flags & (PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED |
+ PIPE_MAP_PERSISTENT);
- if (flags & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ if (flags & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
struct pb_manager *provider;
struct pb_desc desc;
@@ -160,7 +160,7 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
*/
if (!p_atomic_read(&vsrf->validated)) {
data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf,
- PIPE_TRANSFER_DONTBLOCK | pb_flags);
+ PIPE_MAP_DONTBLOCK | pb_flags);
if (data)
goto out_mapped;
}
@@ -189,7 +189,7 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
vsrf->buf = vbuf;
/* Rebind persistent maps immediately */
- if (flags & PIPE_TRANSFER_PERSISTENT) {
+ if (flags & PIPE_MAP_PERSISTENT) {
*rebind = TRUE;
vsrf->rebind = FALSE;
}
@@ -203,14 +203,14 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
* But tell pipe driver to flush now if already on validate list,
* Otherwise we'll overwrite previous contents.
*/
- if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ if (!(flags & PIPE_MAP_UNSYNCHRONIZED) &&
p_atomic_read(&vsrf->validated)) {
*retry = TRUE;
goto out_unlock;
}
}
- pb_flags |= (flags & PIPE_TRANSFER_DONTBLOCK);
+ pb_flags |= (flags & PIPE_MAP_DONTBLOCK);
data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
if (data == NULL)
goto out_unlock;
@@ -218,7 +218,7 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
out_mapped:
++vsrf->mapcount;
vsrf->data = data;
- vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
+ vsrf->map_mode = flags & (PIPE_MAP_READ | PIPE_MAP_WRITE);
out_unlock:
mtx_unlock(&vsrf->mutex);
return data;
diff --git a/src/gallium/winsys/svga/drm/vmw_surface.h b/src/gallium/winsys/svga/drm/vmw_surface.h
index 4b1011aa3c7..b6cf63bb476 100644
--- a/src/gallium/winsys/svga/drm/vmw_surface.h
+++ b/src/gallium/winsys/svga/drm/vmw_surface.h
@@ -60,7 +60,7 @@ struct vmw_svga_winsys_surface
mtx_t mutex;
struct svga_winsys_buffer *buf; /* Current backing guest buffer */
uint32_t mapcount; /* Number of mappers */
- uint32_t map_mode; /* PIPE_TRANSFER_[READ|WRITE] */
+ uint32_t map_mode; /* PIPE_MAP_[READ|WRITE] */
void *data; /* Pointer to data if mapcount != 0*/
boolean shared; /* Shared surface. Never discard */
uint32_t size; /* Size of backing buffer */
diff --git a/src/gallium/winsys/sw/dri/dri_sw_winsys.c b/src/gallium/winsys/sw/dri/dri_sw_winsys.c
index f9ff6d25d73..5bdf864750b 100644
--- a/src/gallium/winsys/sw/dri/dri_sw_winsys.c
+++ b/src/gallium/winsys/sw/dri/dri_sw_winsys.c
@@ -190,7 +190,7 @@ dri_sw_displaytarget_map(struct sw_winsys *ws,
struct dri_sw_displaytarget *dri_sw_dt = dri_sw_displaytarget(dt);
dri_sw_dt->mapped = dri_sw_dt->data;
- if (dri_sw_dt->front_private && (flags & PIPE_TRANSFER_READ)) {
+ if (dri_sw_dt->front_private && (flags & PIPE_MAP_READ)) {
struct dri_sw_winsys *dri_sw_ws = dri_sw_winsys(ws);
dri_sw_ws->lf->get_image((void *)dri_sw_dt->front_private, 0, 0, dri_sw_dt->width, dri_sw_dt->height, dri_sw_dt->stride, dri_sw_dt->data);
}
@@ -203,7 +203,7 @@ dri_sw_displaytarget_unmap(struct sw_winsys *ws,
struct sw_displaytarget *dt)
{
struct dri_sw_displaytarget *dri_sw_dt = dri_sw_displaytarget(dt);
- if (dri_sw_dt->front_private && (dri_sw_dt->map_flags & PIPE_TRANSFER_WRITE)) {
+ if (dri_sw_dt->front_private && (dri_sw_dt->map_flags & PIPE_MAP_WRITE)) {
struct dri_sw_winsys *dri_sw_ws = dri_sw_winsys(ws);
dri_sw_ws->lf->put_image2((void *)dri_sw_dt->front_private, dri_sw_dt->data, 0, 0, dri_sw_dt->width, dri_sw_dt->height, dri_sw_dt->stride);
}
diff --git a/src/gallium/winsys/sw/kms-dri/kms_dri_sw_winsys.c b/src/gallium/winsys/sw/kms-dri/kms_dri_sw_winsys.c
index b501ba0403a..4926a8cbd14 100644
--- a/src/gallium/winsys/sw/kms-dri/kms_dri_sw_winsys.c
+++ b/src/gallium/winsys/sw/kms-dri/kms_dri_sw_winsys.c
@@ -263,8 +263,8 @@ kms_sw_displaytarget_map(struct sw_winsys *ws,
if (ret)
return NULL;
- prot = (flags == PIPE_TRANSFER_READ) ? PROT_READ : (PROT_READ | PROT_WRITE);
- void **ptr = (flags == PIPE_TRANSFER_READ) ? &kms_sw_dt->ro_mapped : &kms_sw_dt->mapped;
+ prot = (flags == PIPE_MAP_READ) ? PROT_READ : (PROT_READ | PROT_WRITE);
+ void **ptr = (flags == PIPE_MAP_READ) ? &kms_sw_dt->ro_mapped : &kms_sw_dt->mapped;
if (*ptr == MAP_FAILED) {
void *tmp = mmap(0, kms_sw_dt->size, prot, MAP_SHARED,
kms_sw->fd, map_req.offset);
diff --git a/src/gallium/winsys/sw/wrapper/wrapper_sw_winsys.c b/src/gallium/winsys/sw/wrapper/wrapper_sw_winsys.c
index b042d4a56c4..529d40d153a 100644
--- a/src/gallium/winsys/sw/wrapper/wrapper_sw_winsys.c
+++ b/src/gallium/winsys/sw/wrapper/wrapper_sw_winsys.c
@@ -106,7 +106,7 @@ wsw_dt_get_stride(struct wrapper_sw_displaytarget *wdt, unsigned *stride)
void *map;
map = pipe_transfer_map(pipe, tex, 0, 0,
- PIPE_TRANSFER_READ_WRITE,
+ PIPE_MAP_READ_WRITE,
0, 0, wdt->tex->width0, wdt->tex->height0, &tr);
if (!map)
return false;
@@ -222,7 +222,7 @@ wsw_dt_map(struct sw_winsys *ws,
assert(!wdt->transfer);
ptr = pipe_transfer_map(pipe, tex, 0, 0,
- PIPE_TRANSFER_READ_WRITE,
+ PIPE_MAP_READ_WRITE,
0, 0, wdt->tex->width0, wdt->tex->height0, &tr);
if (!ptr)
goto err;
diff --git a/src/mesa/state_tracker/st_atom_pixeltransfer.c b/src/mesa/state_tracker/st_atom_pixeltransfer.c
index 9b990360719..b198641b305 100644
--- a/src/mesa/state_tracker/st_atom_pixeltransfer.c
+++ b/src/mesa/state_tracker/st_atom_pixeltransfer.c
@@ -55,7 +55,7 @@ load_color_map_texture(struct gl_context *ctx, struct pipe_resource *pt)
uint i, j;
dest = (uint *) pipe_transfer_map(pipe,
- pt, 0, 0, PIPE_TRANSFER_WRITE,
+ pt, 0, 0, PIPE_MAP_WRITE,
0, 0, texSize, texSize, &transfer);
/* Pack four 1D maps into a 2D texture:
diff --git a/src/mesa/state_tracker/st_cb_bitmap.c b/src/mesa/state_tracker/st_cb_bitmap.c
index 5ba3c87b417..fcdf26e4a27 100644
--- a/src/mesa/state_tracker/st_cb_bitmap.c
+++ b/src/mesa/state_tracker/st_cb_bitmap.c
@@ -144,7 +144,7 @@ make_bitmap_texture(struct gl_context *ctx, GLsizei width, GLsizei height,
}
dest = pipe_transfer_map(st->pipe, pt, 0, 0,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
0, 0, width, height, &transfer);
/* Put image into texture transfer */
@@ -393,7 +393,7 @@ create_cache_trans(struct st_context *st)
* Subsequent glBitmap calls will write into the texture image.
*/
cache->buffer = pipe_transfer_map(pipe, cache->texture, 0, 0,
- PIPE_TRANSFER_WRITE, 0, 0,
+ PIPE_MAP_WRITE, 0, 0,
BITMAP_CACHE_WIDTH,
BITMAP_CACHE_HEIGHT, &cache->trans);
diff --git a/src/mesa/state_tracker/st_cb_bufferobjects.c b/src/mesa/state_tracker/st_cb_bufferobjects.c
index 01422bb7908..d1299620e53 100644
--- a/src/mesa/state_tracker/st_cb_bufferobjects.c
+++ b/src/mesa/state_tracker/st_cb_bufferobjects.c
@@ -133,13 +133,13 @@ st_bufferobj_subdata(struct gl_context *ctx,
* buffer directly.
*
* If the buffer is mapped, suppress implicit buffer range invalidation
- * by using PIPE_TRANSFER_MAP_DIRECTLY.
+ * by using PIPE_MAP_DIRECTLY.
*/
struct pipe_context *pipe = st_context(ctx)->pipe;
pipe->buffer_subdata(pipe, st_obj->buffer,
_mesa_bufferobj_mapped(obj, MAP_USER) ?
- PIPE_TRANSFER_MAP_DIRECTLY : 0,
+ PIPE_MAP_DIRECTLY : 0,
offset, size, data);
}
@@ -310,12 +310,12 @@ bufferobj_data(struct gl_context *ctx,
*
* If the buffer is mapped, we can't discard it.
*
- * PIPE_TRANSFER_MAP_DIRECTLY supresses implicit buffer range
+ * PIPE_MAP_DIRECTLY suppresses implicit buffer range

* invalidation.
*/
pipe->buffer_subdata(pipe, st_obj->buffer,
- is_mapped ? PIPE_TRANSFER_MAP_DIRECTLY :
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ is_mapped ? PIPE_MAP_DIRECTLY :
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE,
0, size, data);
return GL_TRUE;
} else if (is_mapped) {
@@ -462,40 +462,40 @@ st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
enum pipe_transfer_usage flags = 0;
if (access & GL_MAP_WRITE_BIT)
- flags |= PIPE_TRANSFER_WRITE;
+ flags |= PIPE_MAP_WRITE;
if (access & GL_MAP_READ_BIT)
- flags |= PIPE_TRANSFER_READ;
+ flags |= PIPE_MAP_READ;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
- flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
+ flags |= PIPE_MAP_FLUSH_EXPLICIT;
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
- flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
}
else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
if (wholeBuffer)
- flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
else
- flags |= PIPE_TRANSFER_DISCARD_RANGE;
+ flags |= PIPE_MAP_DISCARD_RANGE;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
- flags |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ flags |= PIPE_MAP_UNSYNCHRONIZED;
if (access & GL_MAP_PERSISTENT_BIT)
- flags |= PIPE_TRANSFER_PERSISTENT;
+ flags |= PIPE_MAP_PERSISTENT;
if (access & GL_MAP_COHERENT_BIT)
- flags |= PIPE_TRANSFER_COHERENT;
+ flags |= PIPE_MAP_COHERENT;
/* ... other flags ...
*/
if (access & MESA_MAP_NOWAIT_BIT)
- flags |= PIPE_TRANSFER_DONTBLOCK;
+ flags |= PIPE_MAP_DONTBLOCK;
if (access & MESA_MAP_THREAD_SAFE_BIT)
- flags |= PIPE_TRANSFER_THREAD_SAFE;
+ flags |= PIPE_MAP_THREAD_SAFE;
return flags;
}
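Spelled out for one common case (a worked trace following the mapping above; the helper is static to this file, so this illustrates the result rather than a call you could make elsewhere):

/* glMapBufferRange(target, offset, length,
 *                  GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
 *                  GL_MAP_UNSYNCHRONIZED_BIT)
 *
 * st_access_flags_to_transfer_flags(access, wholeBuffer = false)
 *   -> PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE | PIPE_MAP_UNSYNCHRONIZED
 *
 * The same access with wholeBuffer = true promotes DISCARD_RANGE to
 * PIPE_MAP_DISCARD_WHOLE_RESOURCE, i.e. the result GL_MAP_INVALIDATE_BUFFER_BIT
 * would have produced. */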
diff --git a/src/mesa/state_tracker/st_cb_copyimage.c b/src/mesa/state_tracker/st_cb_copyimage.c
index 5d9c77fdede..5d79148db7b 100644
--- a/src/mesa/state_tracker/st_cb_copyimage.c
+++ b/src/mesa/state_tracker/st_cb_copyimage.c
@@ -585,7 +585,7 @@ fallback_copy_image(struct st_context *st,
GL_MAP_WRITE_BIT, &dst, &dst_stride);
} else {
dst = pipe_transfer_map(st->pipe, dst_res, 0, dst_z,
- PIPE_TRANSFER_WRITE,
+ PIPE_MAP_WRITE,
dst_x, dst_y, dst_w, dst_h,
&dst_transfer);
dst_stride = dst_transfer->stride;
@@ -598,7 +598,7 @@ fallback_copy_image(struct st_context *st,
GL_MAP_READ_BIT, &src, &src_stride);
} else {
src = pipe_transfer_map(st->pipe, src_res, 0, src_z,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
src_x, src_y, src_w, src_h,
&src_transfer);
src_stride = src_transfer->stride;
diff --git a/src/mesa/state_tracker/st_cb_drawpixels.c b/src/mesa/state_tracker/st_cb_drawpixels.c
index 90cd9497020..cb7d85d0ef8 100644
--- a/src/mesa/state_tracker/st_cb_drawpixels.c
+++ b/src/mesa/state_tracker/st_cb_drawpixels.c
@@ -772,7 +772,7 @@ make_texture(struct st_context *st,
/* map texture transfer */
dest = pipe_transfer_map(pipe, pt, 0, 0,
- PIPE_TRANSFER_WRITE, 0, 0,
+ PIPE_MAP_WRITE, 0, 0,
width, height, &transfer);
if (!dest) {
pipe_resource_reference(&pt, NULL);
@@ -1078,10 +1078,10 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
if (format == GL_STENCIL_INDEX &&
_mesa_is_format_packed_depth_stencil(strb->Base.Format)) {
/* writing stencil to a combined depth+stencil buffer */
- usage = PIPE_TRANSFER_READ_WRITE;
+ usage = PIPE_MAP_READ_WRITE;
}
else {
- usage = PIPE_TRANSFER_WRITE;
+ usage = PIPE_MAP_WRITE;
}
stmap = pipe_transfer_map(pipe, strb->texture,
@@ -1139,7 +1139,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
case PIPE_FORMAT_S8_UINT:
{
ubyte *dest = stmap + spanY * pt->stride;
- assert(usage == PIPE_TRANSFER_WRITE);
+ assert(usage == PIPE_MAP_WRITE);
memcpy(dest, sValues, width);
}
break;
@@ -1147,7 +1147,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
if (format == GL_DEPTH_STENCIL) {
uint *dest = (uint *) (stmap + spanY * pt->stride);
GLint k;
- assert(usage == PIPE_TRANSFER_WRITE);
+ assert(usage == PIPE_MAP_WRITE);
for (k = 0; k < width; k++) {
dest[k] = zValues[k] | (sValues[k] << 24);
}
@@ -1155,7 +1155,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
else {
uint *dest = (uint *) (stmap + spanY * pt->stride);
GLint k;
- assert(usage == PIPE_TRANSFER_READ_WRITE);
+ assert(usage == PIPE_MAP_READ_WRITE);
for (k = 0; k < width; k++) {
dest[k] = (dest[k] & 0xffffff) | (sValues[k] << 24);
}
@@ -1165,7 +1165,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
if (format == GL_DEPTH_STENCIL) {
uint *dest = (uint *) (stmap + spanY * pt->stride);
GLint k;
- assert(usage == PIPE_TRANSFER_WRITE);
+ assert(usage == PIPE_MAP_WRITE);
for (k = 0; k < width; k++) {
dest[k] = (zValues[k] << 8) | (sValues[k] & 0xff);
}
@@ -1173,7 +1173,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
else {
uint *dest = (uint *) (stmap + spanY * pt->stride);
GLint k;
- assert(usage == PIPE_TRANSFER_READ_WRITE);
+ assert(usage == PIPE_MAP_READ_WRITE);
for (k = 0; k < width; k++) {
dest[k] = (dest[k] & 0xffffff00) | (sValues[k] & 0xff);
}
@@ -1184,7 +1184,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
uint *dest = (uint *) (stmap + spanY * pt->stride);
GLfloat *destf = (GLfloat*)dest;
GLint k;
- assert(usage == PIPE_TRANSFER_WRITE);
+ assert(usage == PIPE_MAP_WRITE);
for (k = 0; k < width; k++) {
destf[k*2] = zValuesFloat[k];
dest[k*2+1] = sValues[k] & 0xff;
@@ -1193,7 +1193,7 @@ draw_stencil_pixels(struct gl_context *ctx, GLint x, GLint y,
else {
uint *dest = (uint *) (stmap + spanY * pt->stride);
GLint k;
- assert(usage == PIPE_TRANSFER_READ_WRITE);
+ assert(usage == PIPE_MAP_READ_WRITE);
for (k = 0; k < width; k++) {
dest[k*2+1] = sValues[k] & 0xff;
}
@@ -1572,9 +1572,9 @@ copy_stencil_pixels(struct gl_context *ctx, GLint srcx, GLint srcy,
}
if (_mesa_is_format_packed_depth_stencil(rbDraw->Base.Format))
- usage = PIPE_TRANSFER_READ_WRITE;
+ usage = PIPE_MAP_READ_WRITE;
else
- usage = PIPE_TRANSFER_WRITE;
+ usage = PIPE_MAP_WRITE;
if (st_fb_orientation(ctx->DrawBuffer) == Y_0_TOP) {
dsty = rbDraw->Base.Height - dsty - height;
diff --git a/src/mesa/state_tracker/st_cb_readpixels.c b/src/mesa/state_tracker/st_cb_readpixels.c
index f5498523775..642ea0df032 100644
--- a/src/mesa/state_tracker/st_cb_readpixels.c
+++ b/src/mesa/state_tracker/st_cb_readpixels.c
@@ -515,7 +515,7 @@ st_ReadPixels(struct gl_context *ctx, GLint x, GLint y,
/* map resources */
pixels = _mesa_map_pbo_dest(ctx, pack, pixels);
- map = pipe_transfer_map_3d(pipe, dst, 0, PIPE_TRANSFER_READ,
+ map = pipe_transfer_map_3d(pipe, dst, 0, PIPE_MAP_READ,
dst_x, dst_y, 0, width, height, 1, &tex_xfer);
if (!map) {
_mesa_unmap_pbo_dest(ctx, pack);
diff --git a/src/mesa/state_tracker/st_cb_texture.c b/src/mesa/state_tracker/st_cb_texture.c
index d92a48a5fcf..3747d840fe6 100644
--- a/src/mesa/state_tracker/st_cb_texture.c
+++ b/src/mesa/state_tracker/st_cb_texture.c
@@ -381,7 +381,7 @@ st_UnmapTextureImage(struct gl_context *ctx,
assert(z == transfer->box.z);
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ if (transfer->usage & PIPE_MAP_WRITE) {
if (texImage->TexFormat == MESA_FORMAT_ETC1_RGB8) {
_mesa_etc1_unpack_rgba8888(itransfer->map, transfer->stride,
itransfer->temp_data,
@@ -1676,7 +1676,7 @@ st_TexSubImage(struct gl_context *ctx, GLuint dims,
height = 1;
}
- map = pipe_transfer_map_3d(pipe, src, 0, PIPE_TRANSFER_WRITE, 0, 0, 0,
+ map = pipe_transfer_map_3d(pipe, src, 0, PIPE_MAP_WRITE, 0, 0, 0,
width, height, depth, &transfer);
if (!map) {
_mesa_unmap_teximage_pbo(ctx, unpack);
@@ -2205,7 +2205,7 @@ st_GetTexSubImage(struct gl_context * ctx,
pixels = _mesa_map_pbo_dest(ctx, &ctx->Pack, pixels);
- map = pipe_transfer_map_3d(pipe, dst, 0, PIPE_TRANSFER_READ,
+ map = pipe_transfer_map_3d(pipe, dst, 0, PIPE_MAP_READ,
0, 0, 0, width, height, depth, &tex_xfer);
if (!map) {
goto end;
@@ -2337,7 +2337,7 @@ fallback_copy_texsubimage(struct gl_context *ctx,
strb->texture,
strb->surface->u.tex.level,
strb->surface->u.tex.first_layer,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
srcX, srcY,
width, height, &src_trans);
if (!map) {
@@ -2348,9 +2348,9 @@ fallback_copy_texsubimage(struct gl_context *ctx,
if ((baseFormat == GL_DEPTH_COMPONENT ||
baseFormat == GL_DEPTH_STENCIL) &&
util_format_is_depth_and_stencil(stImage->pt->format))
- transfer_usage = PIPE_TRANSFER_READ_WRITE;
+ transfer_usage = PIPE_MAP_READ_WRITE;
else
- transfer_usage = PIPE_TRANSFER_WRITE;
+ transfer_usage = PIPE_MAP_WRITE;
texDest = st_texture_image_map(st, stImage, transfer_usage,
destX, destY, slice,
diff --git a/src/mesa/state_tracker/st_draw_feedback.c b/src/mesa/state_tracker/st_draw_feedback.c
index c70599a10c7..a2abc9b4b33 100644
--- a/src/mesa/state_tracker/st_draw_feedback.c
+++ b/src/mesa/state_tracker/st_draw_feedback.c
@@ -175,7 +175,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
draw_set_mapped_vertex_buffer(draw, buf, vbuffer->buffer.user, ~0);
} else {
void *map = pipe_buffer_map(pipe, vbuffer->buffer.resource,
- PIPE_TRANSFER_READ, &vb_transfer[buf]);
+ PIPE_MAP_READ, &vb_transfer[buf]);
draw_set_mapped_vertex_buffer(draw, buf, map,
vbuffer->buffer.resource->width0);
}
@@ -198,7 +198,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
start = pointer_to_offset(ib->ptr) >> ib->index_size_shift;
mapped_indices = pipe_buffer_map(pipe, stobj->buffer,
- PIPE_TRANSFER_READ, &ib_transfer);
+ PIPE_MAP_READ, &ib_transfer);
}
else {
mapped_indices = ib->ptr;
@@ -251,7 +251,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
size = MIN2(size, (unsigned) binding->Size);
void *ptr = pipe_buffer_map_range(pipe, buf, offset, size,
- PIPE_TRANSFER_READ, &ubo_transfer[i]);
+ PIPE_MAP_READ, &ubo_transfer[i]);
draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 1 + i, ptr,
size);
@@ -281,7 +281,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
size = MIN2(size, (unsigned) binding->Size);
void *ptr = pipe_buffer_map_range(pipe, buf, offset, size,
- PIPE_TRANSFER_READ, &ssbo_transfer[i]);
+ PIPE_MAP_READ, &ssbo_transfer[i]);
draw_set_mapped_shader_buffer(draw, PIPE_SHADER_VERTEX,
i, ptr, size);
@@ -331,7 +331,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
sv_transfer[i][j] = NULL;
mip_addr[j] = (uintptr_t)
pipe_transfer_map_3d(pipe, res, j,
- PIPE_TRANSFER_READ, 0, 0,
+ PIPE_MAP_READ, 0, 0,
view->u.tex.first_layer,
u_minify(res->width0, j),
u_minify(res->height0, j),
@@ -364,7 +364,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
base_addr = (uintptr_t)
pipe_buffer_map_range(pipe, res, view->u.buf.offset,
view->u.buf.size,
- PIPE_TRANSFER_READ,
+ PIPE_MAP_READ,
&sv_transfer[i][0]);
}
@@ -397,7 +397,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
num_layers = img->u.tex.last_layer - img->u.tex.first_layer + 1;
addr = pipe_transfer_map_3d(pipe, res, img->u.tex.level,
- PIPE_TRANSFER_READ, 0, 0,
+ PIPE_MAP_READ, 0, 0,
img->u.tex.first_layer,
width, height, num_layers,
&img_transfer[i]);
@@ -412,7 +412,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
height = num_layers = 1;
addr = pipe_buffer_map_range(pipe, res, img->u.buf.offset,
- img->u.buf.size, PIPE_TRANSFER_READ,
+ img->u.buf.size, PIPE_MAP_READ,
&img_transfer[i]);
}
diff --git a/src/mesa/state_tracker/st_texture.c b/src/mesa/state_tracker/st_texture.c
index a2a310daab4..cb7398a6320 100644
--- a/src/mesa/state_tracker/st_texture.c
+++ b/src/mesa/state_tracker/st_texture.c
@@ -241,7 +241,7 @@ st_texture_match_image(struct st_context *st,
* Map a texture image and return the address for a particular 2D face/slice/
* layer. The stImage indicates the cube face and mipmap level. The slice
* of the 3D texture is passed in 'zoffset'.
- * \param usage one of the PIPE_TRANSFER_x values
+ * \param usage one of the PIPE_MAP_x values
* \param x, y, w, h the region of interest of the 2D image.
* \return address of mapping or NULL if any error
*/
@@ -335,7 +335,7 @@ print_center_pixel(struct pipe_context *pipe, struct pipe_resource *src)
region.height = 1;
region.depth = 1;
- map = pipe->transfer_map(pipe, src, 0, PIPE_TRANSFER_READ, &region, &xfer);
+ map = pipe->transfer_map(pipe, src, 0, PIPE_MAP_READ, &region, &xfer);
printf("center pixel: %d %d %d %d\n", map[0], map[1], map[2], map[3]);
diff --git a/src/panfrost/shared/pan_minmax_cache.c b/src/panfrost/shared/pan_minmax_cache.c
index 17018b28072..be6f173893a 100644
--- a/src/panfrost/shared/pan_minmax_cache.c
+++ b/src/panfrost/shared/pan_minmax_cache.c
@@ -98,7 +98,7 @@ panfrost_minmax_cache_invalidate(struct panfrost_minmax_cache *cache, struct pip
if (!cache)
return;
- if (!(transfer->usage & PIPE_TRANSFER_WRITE))
+ if (!(transfer->usage & PIPE_MAP_WRITE))
return;
unsigned valid_count = 0;