Diffstat
 -rw-r--r--  src/gallium/auxiliary/Makefile                              |   3
 -rw-r--r--  src/gallium/auxiliary/vl/vl_compositor.c                    |   9
 -rw-r--r--  src/gallium/auxiliary/vl/vl_context.c                       | 264
 -rw-r--r--  src/gallium/auxiliary/vl/vl_context.h                       |  49
 -rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_context.c                | 723
 -rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_decoder.c                | 561
 -rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_decoder.h (renamed from src/gallium/auxiliary/vl/vl_mpeg12_context.h) | 34
 -rw-r--r--  src/gallium/auxiliary/vl/vl_video_buffer.c                  | 224
 -rw-r--r--  src/gallium/auxiliary/vl/vl_video_buffer.h                  |  54
 -rw-r--r--  src/gallium/drivers/nv40/nv40_video_context.c               |  16
 -rw-r--r--  src/gallium/drivers/nv40/nv40_video_context.h               |   4
 -rw-r--r--  src/gallium/drivers/nvfx/nvfx_video_context.c               |  16
 -rw-r--r--  src/gallium/drivers/nvfx/nvfx_video_context.h               |   4
 -rw-r--r--  src/gallium/drivers/r600/r600_video_context.c               |  16
 -rw-r--r--  src/gallium/drivers/r600/r600_video_context.h               |   4
 -rw-r--r--  src/gallium/drivers/softpipe/sp_screen.c                    |  17
 -rw-r--r--  src/gallium/include/pipe/p_screen.h                         |  18
 -rw-r--r--  src/gallium/include/pipe/p_video_context.h                  | 150
 -rw-r--r--  src/gallium/state_trackers/xorg/xvmc/context.c              |  19
 -rw-r--r--  src/gallium/state_trackers/xorg/xvmc/surface.c              |  29
 -rw-r--r--  src/gallium/state_trackers/xorg/xvmc/xvmc_private.h         |   5
 -rw-r--r--  src/gallium/winsys/g3dvl/dri/dri_winsys.c                   |  10
 -rw-r--r--  src/gallium/winsys/g3dvl/vl_winsys.h                        |   5
 23 files changed, 1209 insertions(+), 1025 deletions(-)
diff --git a/src/gallium/auxiliary/Makefile b/src/gallium/auxiliary/Makefile
index b4ad059ec90..425ae78138b 100644
--- a/src/gallium/auxiliary/Makefile
+++ b/src/gallium/auxiliary/Makefile
@@ -147,9 +147,10 @@ C_SOURCES = \
util/u_resource.c \
util/u_upload_mgr.c \
util/u_vbuf_mgr.c \
+ vl/vl_context.c \
vl/vl_bitstream_parser.c \
vl/vl_mpeg12_mc_renderer.c \
- vl/vl_mpeg12_context.c \
+ vl/vl_mpeg12_decoder.c \
vl/vl_compositor.c \
vl/vl_csc.c \
vl/vl_idct.c \
diff --git a/src/gallium/auxiliary/vl/vl_compositor.c b/src/gallium/auxiliary/vl/vl_compositor.c
index 45e9cea9f66..46579a88ba7 100644
--- a/src/gallium/auxiliary/vl/vl_compositor.c
+++ b/src/gallium/auxiliary/vl/vl_compositor.c
@@ -484,13 +484,20 @@ vl_compositor_set_buffer_layer(struct pipe_video_compositor *compositor,
struct pipe_video_rect *dst_rect)
{
struct vl_compositor *c = (struct vl_compositor *)compositor;
+ struct pipe_sampler_view **sampler_views;
+ unsigned i;
+
assert(compositor && buffer);
assert(layer < VL_COMPOSITOR_MAX_LAYERS);
c->used_layers |= 1 << layer;
c->layers[layer].fs = c->fs_video_buffer;
- buffer->get_sampler_views(buffer, c->layers[layer].sampler_views);
+
+ sampler_views = buffer->get_sampler_views(buffer);
+ for (i = 0; i < 3; ++i)
+ pipe_sampler_view_reference(&c->layers[layer].sampler_views[i], sampler_views[i]);
+
c->layers[layer].src_rect = src_rect ? *src_rect : default_rect(&c->layers[layer]);
c->layers[layer].dst_rect = dst_rect ? *dst_rect : default_rect(&c->layers[layer]);
}
diff --git a/src/gallium/auxiliary/vl/vl_context.c b/src/gallium/auxiliary/vl/vl_context.c
new file mode 100644
index 00000000000..e352475cb8d
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_context.c
@@ -0,0 +1,264 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <pipe/p_video_context.h>
+
+#include <util/u_memory.h>
+#include <util/u_rect.h>
+#include <util/u_video.h>
+
+#include "vl_context.h"
+#include "vl_compositor.h"
+#include "vl_mpeg12_decoder.h"
+
+static void
+vl_context_destroy(struct pipe_video_context *context)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+
+ assert(context);
+
+ ctx->pipe->destroy(ctx->pipe);
+
+ FREE(ctx);
+}
+
+static int
+vl_context_get_param(struct pipe_video_context *context, int param)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+
+ assert(context);
+
+ if (param == PIPE_CAP_NPOT_TEXTURES)
+ return !ctx->pot_buffers;
+
+ debug_printf("vl_context: Unknown PIPE_CAP %d\n", param);
+ return 0;
+}
+
+static boolean
+vl_context_is_format_supported(struct pipe_video_context *context,
+ enum pipe_format format,
+ unsigned usage)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+
+ assert(context);
+
+ return ctx->pipe->screen->is_format_supported(ctx->pipe->screen, format,
+ PIPE_TEXTURE_2D,
+ 0, usage);
+}
+
+static struct pipe_surface *
+vl_context_create_surface(struct pipe_video_context *context,
+ struct pipe_resource *resource,
+ const struct pipe_surface *templ)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+
+ assert(ctx);
+
+ return ctx->pipe->create_surface(ctx->pipe, resource, templ);
+}
+
+static struct pipe_sampler_view *
+vl_context_create_sampler_view(struct pipe_video_context *context,
+ struct pipe_resource *resource,
+ const struct pipe_sampler_view *templ)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+
+ assert(ctx);
+
+ return ctx->pipe->create_sampler_view(ctx->pipe, resource, templ);
+}
+
+static void
+vl_context_upload_sampler(struct pipe_video_context *context,
+ struct pipe_sampler_view *dst,
+ const struct pipe_box *dst_box,
+ const void *src, unsigned src_stride,
+ unsigned src_x, unsigned src_y)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+ struct pipe_transfer *transfer;
+ void *map;
+
+ assert(context);
+ assert(dst);
+ assert(dst_box);
+ assert(src);
+
+ transfer = ctx->pipe->get_transfer(ctx->pipe, dst->texture, 0, PIPE_TRANSFER_WRITE, dst_box);
+ if (!transfer)
+ return;
+
+ map = ctx->pipe->transfer_map(ctx->pipe, transfer);
+   if (!map)
+ goto error_map;
+
+ util_copy_rect(map, dst->texture->format, transfer->stride, 0, 0,
+ dst_box->width, dst_box->height,
+ src, src_stride, src_x, src_y);
+
+ ctx->pipe->transfer_unmap(ctx->pipe, transfer);
+
+error_map:
+ ctx->pipe->transfer_destroy(ctx->pipe, transfer);
+}
+
+static void
+vl_context_clear_sampler(struct pipe_video_context *context,
+ struct pipe_sampler_view *dst,
+ const struct pipe_box *dst_box,
+ const float *rgba)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+ struct pipe_transfer *transfer;
+ union util_color uc;
+ void *map;
+ unsigned i;
+
+ assert(context);
+ assert(dst);
+ assert(dst_box);
+ assert(rgba);
+
+ transfer = ctx->pipe->get_transfer(ctx->pipe, dst->texture, 0, PIPE_TRANSFER_WRITE, dst_box);
+ if (!transfer)
+ return;
+
+ map = ctx->pipe->transfer_map(ctx->pipe, transfer);
+   if (!map)
+ goto error_map;
+
+   for (i = 0; i < 4; ++i)
+ uc.f[i] = rgba[i];
+
+ util_fill_rect(map, dst->texture->format, transfer->stride, 0, 0,
+ dst_box->width, dst_box->height, &uc);
+
+ ctx->pipe->transfer_unmap(ctx->pipe, transfer);
+
+error_map:
+ ctx->pipe->transfer_destroy(ctx->pipe, transfer);
+}
+
+static struct pipe_video_decoder *
+vl_context_create_decoder(struct pipe_video_context *context,
+ enum pipe_video_profile profile,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+ unsigned buffer_width, buffer_height;
+
+ assert(context);
+ assert(width > 0 && height > 0);
+
+ buffer_width = ctx->pot_buffers ? util_next_power_of_two(width) : width;
+ buffer_height = ctx->pot_buffers ? util_next_power_of_two(height) : height;
+
+ switch (u_reduce_video_profile(profile)) {
+ case PIPE_VIDEO_CODEC_MPEG12:
+ return vl_create_mpeg12_decoder(context, ctx->pipe, profile, chroma_format,
+ buffer_width, buffer_height);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct pipe_video_buffer *
+vl_context_create_buffer(struct pipe_video_context *context,
+ enum pipe_format buffer_format,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height)
+{
+ const enum pipe_format resource_formats[3] = {
+ PIPE_FORMAT_R8_SNORM,
+ PIPE_FORMAT_R8_SNORM,
+ PIPE_FORMAT_R8_SNORM
+ };
+
+ struct vl_context *ctx = (struct vl_context*)context;
+ unsigned buffer_width, buffer_height;
+
+ assert(context);
+ assert(width > 0 && height > 0);
+ assert(buffer_format == PIPE_FORMAT_YV12);
+
+ buffer_width = ctx->pot_buffers ? util_next_power_of_two(width) : width;
+ buffer_height = ctx->pot_buffers ? util_next_power_of_two(height) : height;
+
+ return vl_video_buffer_init(context, ctx->pipe,
+ buffer_width, buffer_height, 1,
+ chroma_format, 3,
+ resource_formats,
+ PIPE_USAGE_STATIC);
+}
+
+static struct pipe_video_compositor *
+vl_context_create_compositor(struct pipe_video_context *context)
+{
+ struct vl_context *ctx = (struct vl_context*)context;
+
+ assert(context);
+
+ return vl_compositor_init(context, ctx->pipe);
+}
+
+struct pipe_video_context *
+vl_create_context(struct pipe_context *pipe, bool pot_buffers)
+{
+ struct vl_context *ctx;
+
+ ctx = CALLOC_STRUCT(vl_context);
+
+ if (!ctx)
+ return NULL;
+
+ ctx->base.screen = pipe->screen;
+
+ ctx->base.destroy = vl_context_destroy;
+ ctx->base.get_param = vl_context_get_param;
+ ctx->base.is_format_supported = vl_context_is_format_supported;
+ ctx->base.create_surface = vl_context_create_surface;
+ ctx->base.create_sampler_view = vl_context_create_sampler_view;
+ ctx->base.clear_sampler = vl_context_clear_sampler;
+ ctx->base.upload_sampler = vl_context_upload_sampler;
+ ctx->base.create_decoder = vl_context_create_decoder;
+ ctx->base.create_buffer = vl_context_create_buffer;
+ ctx->base.create_compositor = vl_context_create_compositor;
+
+ ctx->pipe = pipe;
+ ctx->pot_buffers = pot_buffers;
+
+ return &ctx->base;
+}
diff --git a/src/gallium/auxiliary/vl/vl_context.h b/src/gallium/auxiliary/vl/vl_context.h
new file mode 100644
index 00000000000..a4504871e65
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_context.h
@@ -0,0 +1,49 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VL_CONTEXT_H
+#define VL_CONTEXT_H
+
+#include <pipe/p_video_context.h>
+
+struct pipe_screen;
+struct pipe_context;
+
+struct vl_context
+{
+ struct pipe_video_context base;
+ struct pipe_context *pipe;
+ bool pot_buffers;
+};
+
+/* drivers can call this function in their pipe_video_context constructors and pass it
+ an accelerated pipe_context along with suitable buffering modes, etc */
+struct pipe_video_context *
+vl_create_context(struct pipe_context *pipe, bool pot_buffers);
+
+#endif /* VL_CONTEXT_H */
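
Note: the header comment above describes the intended usage, a driver's video-context
constructor creates its accelerated pipe_context and wraps it with vl_create_context.
The following is a minimal sketch of such glue code; the entry point name
(foo_video_create) is hypothetical, only vl_create_context and
pipe_screen::context_create are taken from the tree.

/* Sketch only: hypothetical driver glue, not part of this patch.
 * The driver creates its accelerated pipe_context and hands it to the
 * shared vl code; vl_context then implements pipe_video_context for it. */
#include "vl/vl_context.h"

struct pipe_video_context *
foo_video_create(struct pipe_screen *screen, void *priv)
{
   struct pipe_context *pipe = screen->context_create(screen, priv);
   if (!pipe)
      return NULL;

   /* second argument: true if the hardware only supports POT textures */
   return vl_create_context(pipe, false);
}
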
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_context.c b/src/gallium/auxiliary/vl/vl_mpeg12_context.c
deleted file mode 100644
index a83c240bcd9..00000000000
--- a/src/gallium/auxiliary/vl/vl_mpeg12_context.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 Younes Manton.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-
-#include "vl_mpeg12_context.h"
-#include "vl_defines.h"
-#include <pipe/p_shader_tokens.h>
-#include <util/u_inlines.h>
-#include <util/u_memory.h>
-#include <util/u_keymap.h>
-#include <util/u_rect.h>
-#include <util/u_video.h>
-#include <util/u_surface.h>
-#include <util/u_sampler.h>
-
-static const unsigned const_empty_block_mask_420[3][2][2] = {
- { { 0x20, 0x10 }, { 0x08, 0x04 } },
- { { 0x02, 0x02 }, { 0x02, 0x02 } },
- { { 0x01, 0x01 }, { 0x01, 0x01 } }
-};
-
-static void
-upload_buffer(struct vl_mpeg12_context *ctx,
- struct vl_mpeg12_buffer *buffer,
- struct pipe_mpeg12_macroblock *mb)
-{
- short *blocks;
- unsigned tb, x, y;
-
- assert(ctx);
- assert(buffer);
- assert(mb);
-
- blocks = mb->blocks;
-
- for (y = 0; y < 2; ++y) {
- for (x = 0; x < 2; ++x, ++tb) {
- if (mb->cbp & (*ctx->empty_block_mask)[0][y][x]) {
- vl_idct_add_block(&buffer->idct[0], mb->mbx * 2 + x, mb->mby * 2 + y, blocks);
- blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
- }
- }
- }
-
- /* TODO: Implement 422, 444 */
- assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
-
- for (tb = 1; tb < 3; ++tb) {
- if (mb->cbp & (*ctx->empty_block_mask)[tb][0][0]) {
- vl_idct_add_block(&buffer->idct[tb], mb->mbx, mb->mby, blocks);
- blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
- }
- }
-}
-
-static void
-vl_mpeg12_buffer_destroy(struct pipe_video_buffer *buffer)
-{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)buf->base.context;
- assert(buf && ctx);
-
- vl_video_buffer_cleanup(&buf->idct_source);
- vl_video_buffer_cleanup(&buf->idct_2_mc);
- vl_video_buffer_cleanup(&buf->render_result);
- vl_vb_cleanup(&buf->vertex_stream);
- vl_idct_cleanup_buffer(&ctx->idct_y, &buf->idct[0]);
- vl_idct_cleanup_buffer(&ctx->idct_c, &buf->idct[1]);
- vl_idct_cleanup_buffer(&ctx->idct_c, &buf->idct[2]);
- vl_mpeg12_mc_cleanup_buffer(&buf->mc[0]);
- vl_mpeg12_mc_cleanup_buffer(&buf->mc[1]);
- vl_mpeg12_mc_cleanup_buffer(&buf->mc[2]);
-
- FREE(buf);
-}
-
-static void
-vl_mpeg12_buffer_map(struct pipe_video_buffer *buffer)
-{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
- struct vl_mpeg12_context *ctx;
- assert(buf);
-
- ctx = (struct vl_mpeg12_context *)buf->base.context;
- assert(ctx);
-
- vl_vb_map(&buf->vertex_stream, ctx->pipe);
- vl_idct_map_buffers(&ctx->idct_y, &buf->idct[0]);
- vl_idct_map_buffers(&ctx->idct_c, &buf->idct[1]);
- vl_idct_map_buffers(&ctx->idct_c, &buf->idct[2]);
-}
-
-static void
-vl_mpeg12_buffer_add_macroblocks(struct pipe_video_buffer *buffer,
- unsigned num_macroblocks,
- struct pipe_macroblock *macroblocks)
-{
- struct pipe_mpeg12_macroblock *mpeg12_macroblocks = (struct pipe_mpeg12_macroblock*)macroblocks;
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
- struct vl_mpeg12_context *ctx;
- unsigned i;
-
- assert(buf);
-
- ctx = (struct vl_mpeg12_context*)buf->base.context;
- assert(ctx);
-
- assert(num_macroblocks);
- assert(macroblocks);
- assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
-
- for ( i = 0; i < num_macroblocks; ++i ) {
- vl_vb_add_block(&buf->vertex_stream, &mpeg12_macroblocks[i], ctx->empty_block_mask);
- upload_buffer(ctx, buf, &mpeg12_macroblocks[i]);
- }
-}
-
-static void
-vl_mpeg12_buffer_unmap(struct pipe_video_buffer *buffer)
-{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
- struct vl_mpeg12_context *ctx;
- assert(buf);
-
- ctx = (struct vl_mpeg12_context *)buf->base.context;
- assert(ctx);
-
- vl_vb_unmap(&buf->vertex_stream, ctx->pipe);
- vl_idct_unmap_buffers(&ctx->idct_y, &buf->idct[0]);
- vl_idct_unmap_buffers(&ctx->idct_c, &buf->idct[1]);
- vl_idct_unmap_buffers(&ctx->idct_c, &buf->idct[2]);
-}
-
-static void
-vl_mpeg12_buffer_flush(struct pipe_video_buffer *buffer,
- struct pipe_video_buffer *refs[2],
- struct pipe_fence_handle **fence)
-{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer *)buffer;
- struct vl_mpeg12_buffer *past = (struct vl_mpeg12_buffer *)refs[0];
- struct vl_mpeg12_buffer *future = (struct vl_mpeg12_buffer *)refs[1];
-
- vl_surfaces *surfaces;
- vl_sampler_views *sv_past;
- vl_sampler_views *sv_future;
-
- struct pipe_sampler_view *sv_refs[2];
- unsigned ne_start, ne_num, e_start, e_num;
- struct vl_mpeg12_context *ctx;
- unsigned i;
-
- assert(buf);
-
- ctx = (struct vl_mpeg12_context *)buf->base.context;
- assert(ctx);
-
- surfaces = vl_video_buffer_surfaces(&buf->render_result);
-
- sv_past = past ? vl_video_buffer_sampler_views(&past->render_result) : NULL;
- sv_future = future ? vl_video_buffer_sampler_views(&future->render_result) : NULL;
-
- vl_vb_restart(&buf->vertex_stream, &ne_start, &ne_num, &e_start, &e_num);
-
- ctx->pipe->set_vertex_buffers(ctx->pipe, 2, buf->vertex_bufs.all);
- ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend);
-
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->ves[i]);
- vl_idct_flush(i == 0 ? &ctx->idct_y : &ctx->idct_c, &buf->idct[i], ne_num);
-
- sv_refs[0] = sv_past ? (*sv_past)[i] : NULL;
- sv_refs[1] = sv_future ? (*sv_future)[i] : NULL;
-
- vl_mpeg12_mc_renderer_flush(&ctx->mc, &buf->mc[i], (*surfaces)[i],
- sv_refs, ne_start, ne_num, e_start, e_num, fence);
- }
-}
-
-static void
-vl_mpeg12_buffer_get_sampler_views(struct pipe_video_buffer *buffer,
- struct pipe_sampler_view *sampler_views[3])
-{
- struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
- vl_sampler_views *samplers;
- unsigned i;
-
- assert(buf);
-
- samplers = vl_video_buffer_sampler_views(&buf->render_result);
-
- assert(samplers);
-
- for (i = 0; i < VL_MAX_PLANES; ++i)
- pipe_sampler_view_reference(&sampler_views[i], (*samplers)[i]);
-}
-
-static void
-vl_mpeg12_destroy(struct pipe_video_context *vpipe)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
-
- assert(vpipe);
-
- /* Asserted in softpipe_delete_fs_state() for some reason */
- ctx->pipe->bind_vs_state(ctx->pipe, NULL);
- ctx->pipe->bind_fs_state(ctx->pipe, NULL);
-
- ctx->pipe->delete_blend_state(ctx->pipe, ctx->blend);
- ctx->pipe->delete_rasterizer_state(ctx->pipe, ctx->rast);
- ctx->pipe->delete_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
-
- vl_mpeg12_mc_renderer_cleanup(&ctx->mc);
- vl_idct_cleanup(&ctx->idct_y);
- vl_idct_cleanup(&ctx->idct_c);
- ctx->pipe->delete_vertex_elements_state(ctx->pipe, ctx->ves[0]);
- ctx->pipe->delete_vertex_elements_state(ctx->pipe, ctx->ves[1]);
- ctx->pipe->delete_vertex_elements_state(ctx->pipe, ctx->ves[2]);
- pipe_resource_reference(&ctx->quads.buffer, NULL);
- ctx->pipe->destroy(ctx->pipe);
-
- FREE(ctx);
-}
-
-static int
-vl_mpeg12_get_param(struct pipe_video_context *vpipe, int param)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
-
- assert(vpipe);
-
- if (param == PIPE_CAP_NPOT_TEXTURES)
- return !ctx->pot_buffers;
-
- debug_printf("vl_mpeg12_context: Unknown PIPE_CAP %d\n", param);
- return 0;
-}
-
-static struct pipe_surface *
-vl_mpeg12_create_surface(struct pipe_video_context *vpipe,
- struct pipe_resource *resource,
- const struct pipe_surface *templ)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
-
- assert(ctx);
-
- return ctx->pipe->create_surface(ctx->pipe, resource, templ);
-}
-
-static struct pipe_sampler_view *
-vl_mpeg12_create_sampler_view(struct pipe_video_context *vpipe,
- struct pipe_resource *resource,
- const struct pipe_sampler_view *templ)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
-
- assert(ctx);
-
- return ctx->pipe->create_sampler_view(ctx->pipe, resource, templ);
-}
-
-static struct pipe_video_buffer *
-vl_mpeg12_create_buffer(struct pipe_video_context *vpipe)
-{
- const enum pipe_format idct_source_formats[3] = {
- PIPE_FORMAT_R16G16B16A16_SNORM,
- PIPE_FORMAT_R16G16B16A16_SNORM,
- PIPE_FORMAT_R16G16B16A16_SNORM
- };
-
- const enum pipe_format idct_2_mc_formats[3] = {
- PIPE_FORMAT_R16_SNORM,
- PIPE_FORMAT_R16_SNORM,
- PIPE_FORMAT_R16_SNORM
- };
-
- const enum pipe_format render_result_formats[3] = {
- PIPE_FORMAT_R8_SNORM,
- PIPE_FORMAT_R8_SNORM,
- PIPE_FORMAT_R8_SNORM
- };
-
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
- struct vl_mpeg12_buffer *buffer;
-
- vl_sampler_views *idct_views, *mc_views;
- vl_surfaces *idct_surfaces;
-
- assert(ctx);
-
- buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
- if (buffer == NULL)
- return NULL;
-
- buffer->base.context = vpipe;
- buffer->base.destroy = vl_mpeg12_buffer_destroy;
- buffer->base.map = vl_mpeg12_buffer_map;
- buffer->base.add_macroblocks = vl_mpeg12_buffer_add_macroblocks;
- buffer->base.unmap = vl_mpeg12_buffer_unmap;
- buffer->base.flush = vl_mpeg12_buffer_flush;
- buffer->base.get_sampler_views = vl_mpeg12_buffer_get_sampler_views;
-
- buffer->vertex_bufs.individual.quad.stride = ctx->quads.stride;
- buffer->vertex_bufs.individual.quad.buffer_offset = ctx->quads.buffer_offset;
- pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, ctx->quads.buffer);
-
- buffer->vertex_bufs.individual.stream = vl_vb_init(&buffer->vertex_stream, ctx->pipe,
- ctx->buffer_width / MACROBLOCK_WIDTH *
- ctx->buffer_height / MACROBLOCK_HEIGHT);
- if (!buffer->vertex_bufs.individual.stream.buffer)
- goto error_vertex_stream;
-
- if (!vl_video_buffer_init(&buffer->idct_source, ctx->pipe,
- ctx->buffer_width / 4, ctx->buffer_height, 1,
- ctx->base.chroma_format, 3,
- idct_source_formats,
- PIPE_USAGE_STREAM))
- goto error_idct_source;
-
- if (!vl_video_buffer_init(&buffer->idct_2_mc, ctx->pipe,
- ctx->buffer_width, ctx->buffer_height, 1,
- ctx->base.chroma_format, 3,
- idct_2_mc_formats,
- PIPE_USAGE_STATIC))
- goto error_idct_2_mc;
-
- if (!vl_video_buffer_init(&buffer->render_result, ctx->pipe,
- ctx->buffer_width, ctx->buffer_height, 1,
- ctx->base.chroma_format, 3,
- render_result_formats,
- PIPE_USAGE_STATIC))
- goto error_render_result;
-
- idct_views = vl_video_buffer_sampler_views(&buffer->idct_source);
- if (!idct_views)
- goto error_idct_views;
-
- idct_surfaces = vl_video_buffer_surfaces(&buffer->idct_2_mc);
- if (!idct_surfaces)
- goto error_idct_surfaces;
-
- if (!vl_idct_init_buffer(&ctx->idct_y, &buffer->idct[0],
- (*idct_views)[0], (*idct_surfaces)[0]))
- goto error_idct_y;
-
- if (!vl_idct_init_buffer(&ctx->idct_c, &buffer->idct[1],
- (*idct_views)[1], (*idct_surfaces)[1]))
- goto error_idct_cb;
-
- if (!vl_idct_init_buffer(&ctx->idct_c, &buffer->idct[2],
- (*idct_views)[2], (*idct_surfaces)[2]))
- goto error_idct_cr;
-
- mc_views = vl_video_buffer_sampler_views(&buffer->idct_2_mc);
- if (!mc_views)
- goto error_mc_views;
-
- if(!vl_mpeg12_mc_init_buffer(&ctx->mc, &buffer->mc[0], (*mc_views)[0]))
- goto error_mc_y;
-
- if(!vl_mpeg12_mc_init_buffer(&ctx->mc, &buffer->mc[1], (*mc_views)[1]))
- goto error_mc_cb;
-
- if(!vl_mpeg12_mc_init_buffer(&ctx->mc, &buffer->mc[2], (*mc_views)[2]))
- goto error_mc_cr;
-
- return &buffer->base;
-
-error_mc_cr:
- vl_mpeg12_mc_cleanup_buffer(&buffer->mc[1]);
-
-error_mc_cb:
- vl_mpeg12_mc_cleanup_buffer(&buffer->mc[0]);
-
-error_mc_y:
-error_mc_views:
- vl_idct_cleanup_buffer(&ctx->idct_c, &buffer->idct[2]);
-
-error_idct_cr:
- vl_idct_cleanup_buffer(&ctx->idct_c, &buffer->idct[1]);
-
-error_idct_cb:
- vl_idct_cleanup_buffer(&ctx->idct_y, &buffer->idct[0]);
-
-error_idct_y:
-error_idct_surfaces:
-error_idct_views:
- vl_video_buffer_cleanup(&buffer->render_result);
-
-error_render_result:
- vl_video_buffer_cleanup(&buffer->idct_2_mc);
-
-error_idct_2_mc:
- vl_video_buffer_cleanup(&buffer->idct_source);
-
-error_idct_source:
- vl_vb_cleanup(&buffer->vertex_stream);
-
-error_vertex_stream:
- FREE(buffer);
- return NULL;
-}
-
-static boolean
-vl_mpeg12_is_format_supported(struct pipe_video_context *vpipe,
- enum pipe_format format,
- unsigned usage)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
-
- assert(vpipe);
-
- return ctx->pipe->screen->is_format_supported(ctx->pipe->screen, format,
- PIPE_TEXTURE_2D,
- 0, usage);
-}
-
-static void
-vl_mpeg12_clear_sampler(struct pipe_video_context *vpipe,
- struct pipe_sampler_view *dst,
- const struct pipe_box *dst_box,
- const float *rgba)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
- struct pipe_transfer *transfer;
- union util_color uc;
- void *map;
- unsigned i;
-
- assert(vpipe);
- assert(dst);
- assert(dst_box);
- assert(rgba);
-
- transfer = ctx->pipe->get_transfer(ctx->pipe, dst->texture, 0, PIPE_TRANSFER_WRITE, dst_box);
- if (!transfer)
- return;
-
- map = ctx->pipe->transfer_map(ctx->pipe, transfer);
- if (!transfer)
- goto error_map;
-
- for ( i = 0; i < 4; ++i)
- uc.f[i] = rgba[i];
-
- util_fill_rect(map, dst->texture->format, transfer->stride, 0, 0,
- dst_box->width, dst_box->height, &uc);
-
- ctx->pipe->transfer_unmap(ctx->pipe, transfer);
-
-error_map:
- ctx->pipe->transfer_destroy(ctx->pipe, transfer);
-}
-
-static void
-vl_mpeg12_upload_sampler(struct pipe_video_context *vpipe,
- struct pipe_sampler_view *dst,
- const struct pipe_box *dst_box,
- const void *src, unsigned src_stride,
- unsigned src_x, unsigned src_y)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
- struct pipe_transfer *transfer;
- void *map;
-
- assert(vpipe);
- assert(dst);
- assert(dst_box);
- assert(src);
-
- transfer = ctx->pipe->get_transfer(ctx->pipe, dst->texture, 0, PIPE_TRANSFER_WRITE, dst_box);
- if (!transfer)
- return;
-
- map = ctx->pipe->transfer_map(ctx->pipe, transfer);
- if (!transfer)
- goto error_map;
-
- util_copy_rect(map, dst->texture->format, transfer->stride, 0, 0,
- dst_box->width, dst_box->height,
- src, src_stride, src_x, src_y);
-
- ctx->pipe->transfer_unmap(ctx->pipe, transfer);
-
-error_map:
- ctx->pipe->transfer_destroy(ctx->pipe, transfer);
-}
-
-static struct pipe_video_compositor *
-vl_mpeg12_create_compositor(struct pipe_video_context *vpipe)
-{
- struct vl_mpeg12_context *ctx = (struct vl_mpeg12_context*)vpipe;
-
- assert(vpipe);
-
- return vl_compositor_init(vpipe, ctx->pipe);
-}
-
-static bool
-init_pipe_state(struct vl_mpeg12_context *ctx)
-{
- struct pipe_rasterizer_state rast;
- struct pipe_blend_state blend;
- struct pipe_depth_stencil_alpha_state dsa;
- unsigned i;
-
- assert(ctx);
-
- memset(&rast, 0, sizeof rast);
- rast.flatshade = 1;
- rast.flatshade_first = 0;
- rast.light_twoside = 0;
- rast.front_ccw = 1;
- rast.cull_face = PIPE_FACE_NONE;
- rast.fill_back = PIPE_POLYGON_MODE_FILL;
- rast.fill_front = PIPE_POLYGON_MODE_FILL;
- rast.offset_point = 0;
- rast.offset_line = 0;
- rast.scissor = 0;
- rast.poly_smooth = 0;
- rast.poly_stipple_enable = 0;
- rast.sprite_coord_enable = 0;
- rast.point_size_per_vertex = 0;
- rast.multisample = 0;
- rast.line_smooth = 0;
- rast.line_stipple_enable = 0;
- rast.line_stipple_factor = 0;
- rast.line_stipple_pattern = 0;
- rast.line_last_pixel = 0;
- rast.line_width = 1;
- rast.point_smooth = 0;
- rast.point_quad_rasterization = 0;
- rast.point_size_per_vertex = 1;
- rast.offset_units = 1;
- rast.offset_scale = 1;
- rast.gl_rasterization_rules = 1;
-
- ctx->rast = ctx->pipe->create_rasterizer_state(ctx->pipe, &rast);
- ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rast);
-
- memset(&blend, 0, sizeof blend);
-
- blend.independent_blend_enable = 0;
- blend.rt[0].blend_enable = 0;
- blend.rt[0].rgb_func = PIPE_BLEND_ADD;
- blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
- blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
- blend.rt[0].alpha_func = PIPE_BLEND_ADD;
- blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
- blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
- blend.logicop_enable = 0;
- blend.logicop_func = PIPE_LOGICOP_CLEAR;
- /* Needed to allow color writes to FB, even if blending disabled */
- blend.rt[0].colormask = PIPE_MASK_RGBA;
- blend.dither = 0;
- ctx->blend = ctx->pipe->create_blend_state(ctx->pipe, &blend);
-
- memset(&dsa, 0, sizeof dsa);
- dsa.depth.enabled = 0;
- dsa.depth.writemask = 0;
- dsa.depth.func = PIPE_FUNC_ALWAYS;
- for (i = 0; i < 2; ++i) {
- dsa.stencil[i].enabled = 0;
- dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
- dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
- dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
- dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
- dsa.stencil[i].valuemask = 0;
- dsa.stencil[i].writemask = 0;
- }
- dsa.alpha.enabled = 0;
- dsa.alpha.func = PIPE_FUNC_ALWAYS;
- dsa.alpha.ref_value = 0;
- ctx->dsa = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, &dsa);
- ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, ctx->dsa);
-
- return true;
-}
-
-static bool
-init_idct(struct vl_mpeg12_context *ctx, unsigned buffer_width, unsigned buffer_height)
-{
- unsigned chroma_width, chroma_height, chroma_blocks_x, chroma_blocks_y;
- struct pipe_sampler_view *idct_matrix;
-
- /* TODO: Implement 422, 444 */
- assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
- ctx->empty_block_mask = &const_empty_block_mask_420;
-
- if (!(idct_matrix = vl_idct_upload_matrix(ctx->pipe)))
- goto error_idct_matrix;
-
- if (!vl_idct_init(&ctx->idct_y, ctx->pipe, buffer_width, buffer_height,
- 2, 2, idct_matrix))
- goto error_idct_y;
-
- if (ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
- chroma_width = buffer_width / 2;
- chroma_height = buffer_height / 2;
- chroma_blocks_x = 1;
- chroma_blocks_y = 1;
- } else if (ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
- chroma_width = buffer_width;
- chroma_height = buffer_height / 2;
- chroma_blocks_x = 2;
- chroma_blocks_y = 1;
- } else {
- chroma_width = buffer_width;
- chroma_height = buffer_height;
- chroma_blocks_x = 2;
- chroma_blocks_y = 2;
- }
-
- if(!vl_idct_init(&ctx->idct_c, ctx->pipe, chroma_width, chroma_height,
- chroma_blocks_x, chroma_blocks_y, idct_matrix))
- goto error_idct_c;
-
- pipe_sampler_view_reference(&idct_matrix, NULL);
- return true;
-
-error_idct_c:
- vl_idct_cleanup(&ctx->idct_y);
-
-error_idct_y:
- pipe_sampler_view_reference(&idct_matrix, NULL);
-
-error_idct_matrix:
- return false;
-}
-
-struct pipe_video_context *
-vl_create_mpeg12_context(struct pipe_context *pipe,
- enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height,
- bool pot_buffers)
-{
- struct vl_mpeg12_context *ctx;
-
- assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
-
- ctx = CALLOC_STRUCT(vl_mpeg12_context);
-
- if (!ctx)
- return NULL;
-
- ctx->base.profile = profile;
- ctx->base.chroma_format = chroma_format;
- ctx->base.width = width;
- ctx->base.height = height;
-
- ctx->base.screen = pipe->screen;
-
- ctx->base.destroy = vl_mpeg12_destroy;
- ctx->base.get_param = vl_mpeg12_get_param;
- ctx->base.is_format_supported = vl_mpeg12_is_format_supported;
- ctx->base.create_surface = vl_mpeg12_create_surface;
- ctx->base.create_sampler_view = vl_mpeg12_create_sampler_view;
- ctx->base.create_buffer = vl_mpeg12_create_buffer;
- ctx->base.clear_sampler = vl_mpeg12_clear_sampler;
- ctx->base.upload_sampler = vl_mpeg12_upload_sampler;
- ctx->base.create_compositor = vl_mpeg12_create_compositor;
-
- ctx->pipe = pipe;
- ctx->pot_buffers = pot_buffers;
-
- ctx->quads = vl_vb_upload_quads(ctx->pipe, 2, 2);
- ctx->ves[0] = vl_vb_get_elems_state(ctx->pipe, TGSI_SWIZZLE_X);
- ctx->ves[1] = vl_vb_get_elems_state(ctx->pipe, TGSI_SWIZZLE_Y);
- ctx->ves[2] = vl_vb_get_elems_state(ctx->pipe, TGSI_SWIZZLE_Z);
-
- ctx->buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
- ctx->buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
-
- if (!init_idct(ctx, ctx->buffer_width, ctx->buffer_height))
- goto error_idct;
-
- if (!vl_mpeg12_mc_renderer_init(&ctx->mc, ctx->pipe, ctx->buffer_width, ctx->buffer_height))
- goto error_mc;
-
- if (!init_pipe_state(ctx))
- goto error_pipe_state;
-
- return &ctx->base;
-
-error_pipe_state:
- vl_mpeg12_mc_renderer_cleanup(&ctx->mc);
-
-error_mc:
- vl_idct_cleanup(&ctx->idct_y);
- vl_idct_cleanup(&ctx->idct_c);
-
-error_idct:
- ctx->pipe->destroy(ctx->pipe);
- FREE(ctx);
- return NULL;
-}
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
new file mode 100644
index 00000000000..c07b1bb369e
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
@@ -0,0 +1,561 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+//#include <pipe/p_shader_tokens.h>
+
+//#include "util/u_inlines.h"
+
+//#include <util/u_inlines.h>
+//#include <util/u_memory.h>
+//#include <util/u_keymap.h>
+
+//#include <util/u_video.h>
+//#include <util/u_surface.h>
+//#include <util/u_sampler.h>
+
+#include <util/u_memory.h>
+#include <util/u_rect.h>
+#include <util/u_video.h>
+
+#include "vl_mpeg12_decoder.h"
+#include "vl_defines.h"
+
+static const unsigned const_empty_block_mask_420[3][2][2] = {
+ { { 0x20, 0x10 }, { 0x08, 0x04 } },
+ { { 0x02, 0x02 }, { 0x02, 0x02 } },
+ { { 0x01, 0x01 }, { 0x01, 0x01 } }
+};
+
+static void
+upload_buffer(struct vl_mpeg12_decoder *ctx,
+ struct vl_mpeg12_buffer *buffer,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ short *blocks;
+   unsigned tb = 0, x, y;
+
+ assert(ctx);
+ assert(buffer);
+ assert(mb);
+
+ blocks = mb->blocks;
+
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x, ++tb) {
+ if (mb->cbp & (*ctx->empty_block_mask)[0][y][x]) {
+ vl_idct_add_block(&buffer->idct[0], mb->mbx * 2 + x, mb->mby * 2 + y, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+ for (tb = 1; tb < 3; ++tb) {
+ if (mb->cbp & (*ctx->empty_block_mask)[tb][0][0]) {
+ vl_idct_add_block(&buffer->idct[tb], mb->mbx, mb->mby, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+}
+
+static void
+vl_mpeg12_buffer_destroy(struct pipe_video_decode_buffer *buffer)
+{
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
+ struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)buf->base.decoder;
+ assert(buf && dec);
+
+ buf->idct_source->destroy(buf->idct_source);
+ buf->idct_2_mc->destroy(buf->idct_2_mc);
+ vl_vb_cleanup(&buf->vertex_stream);
+ vl_idct_cleanup_buffer(&dec->idct_y, &buf->idct[0]);
+ vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[1]);
+ vl_idct_cleanup_buffer(&dec->idct_c, &buf->idct[2]);
+ vl_mpeg12_mc_cleanup_buffer(&buf->mc[0]);
+ vl_mpeg12_mc_cleanup_buffer(&buf->mc[1]);
+ vl_mpeg12_mc_cleanup_buffer(&buf->mc[2]);
+
+ FREE(buf);
+}
+
+static void
+vl_mpeg12_buffer_map(struct pipe_video_decode_buffer *buffer)
+{
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
+ struct vl_mpeg12_decoder *dec;
+ assert(buf);
+
+ dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
+ assert(dec);
+
+ vl_vb_map(&buf->vertex_stream, dec->pipe);
+ vl_idct_map_buffers(&dec->idct_y, &buf->idct[0]);
+ vl_idct_map_buffers(&dec->idct_c, &buf->idct[1]);
+ vl_idct_map_buffers(&dec->idct_c, &buf->idct[2]);
+}
+
+static void
+vl_mpeg12_buffer_add_macroblocks(struct pipe_video_decode_buffer *buffer,
+ unsigned num_macroblocks,
+ struct pipe_macroblock *macroblocks)
+{
+ struct pipe_mpeg12_macroblock *mb = (struct pipe_mpeg12_macroblock*)macroblocks;
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
+ struct vl_mpeg12_decoder *dec;
+ unsigned i;
+
+ assert(buf);
+
+ dec = (struct vl_mpeg12_decoder*)buf->base.decoder;
+ assert(dec);
+
+ assert(num_macroblocks);
+ assert(macroblocks);
+ assert(macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
+
+   for (i = 0; i < num_macroblocks; ++i) {
+ vl_vb_add_block(&buf->vertex_stream, &mb[i], dec->empty_block_mask);
+ upload_buffer(dec, buf, &mb[i]);
+ }
+}
+
+static void
+vl_mpeg12_buffer_unmap(struct pipe_video_decode_buffer *buffer)
+{
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer*)buffer;
+ struct vl_mpeg12_decoder *dec;
+ assert(buf);
+
+ dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
+ assert(dec);
+
+ vl_vb_unmap(&buf->vertex_stream, dec->pipe);
+ vl_idct_unmap_buffers(&dec->idct_y, &buf->idct[0]);
+ vl_idct_unmap_buffers(&dec->idct_c, &buf->idct[1]);
+ vl_idct_unmap_buffers(&dec->idct_c, &buf->idct[2]);
+}
+
+static void
+vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
+{
+ struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
+
+ assert(decoder);
+
+ /* Asserted in softpipe_delete_fs_state() for some reason */
+ dec->pipe->bind_vs_state(dec->pipe, NULL);
+ dec->pipe->bind_fs_state(dec->pipe, NULL);
+
+ dec->pipe->delete_blend_state(dec->pipe, dec->blend);
+ dec->pipe->delete_rasterizer_state(dec->pipe, dec->rast);
+ dec->pipe->delete_depth_stencil_alpha_state(dec->pipe, dec->dsa);
+
+ vl_mpeg12_mc_renderer_cleanup(&dec->mc);
+ vl_idct_cleanup(&dec->idct_y);
+ vl_idct_cleanup(&dec->idct_c);
+ dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[0]);
+ dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[1]);
+ dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves[2]);
+ pipe_resource_reference(&dec->quads.buffer, NULL);
+
+ FREE(dec);
+}
+
+static struct pipe_video_decode_buffer *
+vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
+{
+ const enum pipe_format idct_source_formats[3] = {
+ PIPE_FORMAT_R16G16B16A16_SNORM,
+ PIPE_FORMAT_R16G16B16A16_SNORM,
+ PIPE_FORMAT_R16G16B16A16_SNORM
+ };
+
+ const enum pipe_format idct_2_mc_formats[3] = {
+ PIPE_FORMAT_R16_SNORM,
+ PIPE_FORMAT_R16_SNORM,
+ PIPE_FORMAT_R16_SNORM
+ };
+
+ struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
+ struct vl_mpeg12_buffer *buffer;
+
+ struct pipe_sampler_view **idct_views, **mc_views;
+ struct pipe_surface **idct_surfaces;
+
+ assert(dec);
+
+ buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
+ if (buffer == NULL)
+ return NULL;
+
+ buffer->base.decoder = decoder;
+ buffer->base.destroy = vl_mpeg12_buffer_destroy;
+ buffer->base.map = vl_mpeg12_buffer_map;
+ buffer->base.add_macroblocks = vl_mpeg12_buffer_add_macroblocks;
+ buffer->base.unmap = vl_mpeg12_buffer_unmap;
+
+ buffer->vertex_bufs.individual.quad.stride = dec->quads.stride;
+ buffer->vertex_bufs.individual.quad.buffer_offset = dec->quads.buffer_offset;
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, dec->quads.buffer);
+
+ buffer->vertex_bufs.individual.stream = vl_vb_init(&buffer->vertex_stream, dec->pipe,
+ dec->base.width / MACROBLOCK_WIDTH *
+ dec->base.height / MACROBLOCK_HEIGHT);
+ if (!buffer->vertex_bufs.individual.stream.buffer)
+ goto error_vertex_stream;
+
+ buffer->idct_source = vl_video_buffer_init(dec->base.context, dec->pipe,
+ dec->base.width / 4, dec->base.height, 1,
+ dec->base.chroma_format, 3,
+ idct_source_formats,
+ PIPE_USAGE_STREAM);
+ if (!buffer->idct_source)
+ goto error_idct_source;
+
+ buffer->idct_2_mc = vl_video_buffer_init(dec->base.context, dec->pipe,
+ dec->base.width, dec->base.height, 1,
+ dec->base.chroma_format, 3,
+ idct_2_mc_formats,
+ PIPE_USAGE_STATIC);
+ if (!buffer->idct_2_mc)
+ goto error_idct_2_mc;
+
+ idct_views = buffer->idct_source->get_sampler_views(buffer->idct_source);
+ if (!idct_views)
+ goto error_idct_views;
+
+ idct_surfaces = buffer->idct_2_mc->get_surfaces(buffer->idct_2_mc);
+ if (!idct_surfaces)
+ goto error_idct_surfaces;
+
+ if (!vl_idct_init_buffer(&dec->idct_y, &buffer->idct[0],
+ idct_views[0], idct_surfaces[0]))
+ goto error_idct_y;
+
+ if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[1],
+ idct_views[1], idct_surfaces[1]))
+ goto error_idct_cb;
+
+ if (!vl_idct_init_buffer(&dec->idct_c, &buffer->idct[2],
+ idct_views[2], idct_surfaces[2]))
+ goto error_idct_cr;
+
+ mc_views = buffer->idct_2_mc->get_sampler_views(buffer->idct_2_mc);
+ if (!mc_views)
+ goto error_mc_views;
+
+   if (!vl_mpeg12_mc_init_buffer(&dec->mc, &buffer->mc[0], mc_views[0]))
+ goto error_mc_y;
+
+   if (!vl_mpeg12_mc_init_buffer(&dec->mc, &buffer->mc[1], mc_views[1]))
+ goto error_mc_cb;
+
+   if (!vl_mpeg12_mc_init_buffer(&dec->mc, &buffer->mc[2], mc_views[2]))
+ goto error_mc_cr;
+
+ return &buffer->base;
+
+error_mc_cr:
+ vl_mpeg12_mc_cleanup_buffer(&buffer->mc[1]);
+
+error_mc_cb:
+ vl_mpeg12_mc_cleanup_buffer(&buffer->mc[0]);
+
+error_mc_y:
+error_mc_views:
+ vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[2]);
+
+error_idct_cr:
+ vl_idct_cleanup_buffer(&dec->idct_c, &buffer->idct[1]);
+
+error_idct_cb:
+ vl_idct_cleanup_buffer(&dec->idct_y, &buffer->idct[0]);
+
+error_idct_y:
+error_idct_surfaces:
+error_idct_views:
+ buffer->idct_2_mc->destroy(buffer->idct_2_mc);
+
+error_idct_2_mc:
+ buffer->idct_source->destroy(buffer->idct_source);
+
+error_idct_source:
+ vl_vb_cleanup(&buffer->vertex_stream);
+
+error_vertex_stream:
+ FREE(buffer);
+ return NULL;
+}
+
+static void
+vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
+ struct pipe_video_buffer *refs[2],
+ struct pipe_video_buffer *dst,
+ struct pipe_fence_handle **fence)
+{
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer *)buffer;
+ struct vl_mpeg12_decoder *dec;
+
+ struct pipe_sampler_view **sv_past;
+ struct pipe_sampler_view **sv_future;
+ struct pipe_surface **surfaces;
+
+ struct pipe_sampler_view *sv_refs[2];
+ unsigned ne_start, ne_num, e_start, e_num;
+ unsigned i;
+
+ assert(buf);
+
+ dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
+ assert(dec);
+
+ sv_past = refs[0] ? refs[0]->get_sampler_views(refs[0]) : NULL;
+ sv_future = refs[1] ? refs[1]->get_sampler_views(refs[1]) : NULL;
+
+ surfaces = dst->get_surfaces(dst);
+
+ vl_vb_restart(&buf->vertex_stream, &ne_start, &ne_num, &e_start, &e_num);
+
+ dec->pipe->set_vertex_buffers(dec->pipe, 2, buf->vertex_bufs.all);
+ dec->pipe->bind_blend_state(dec->pipe, dec->blend);
+
+ for (i = 0; i < VL_MAX_PLANES; ++i) {
+ dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves[i]);
+ vl_idct_flush(i == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[i], ne_num);
+
+ sv_refs[0] = sv_past ? sv_past[i] : NULL;
+ sv_refs[1] = sv_future ? sv_future[i] : NULL;
+
+ vl_mpeg12_mc_renderer_flush(&dec->mc, &buf->mc[i], surfaces[i], sv_refs,
+ ne_start, ne_num, e_start, e_num, fence);
+ }
+}
+
+static void
+vl_mpeg12_decoder_clear_buffer(struct pipe_video_decode_buffer *buffer)
+{
+ struct vl_mpeg12_buffer *buf = (struct vl_mpeg12_buffer *)buffer;
+ unsigned ne_start, ne_num, e_start, e_num;
+
+ assert(buf);
+
+ vl_vb_restart(&buf->vertex_stream, &ne_start, &ne_num, &e_start, &e_num);
+}
+
+static bool
+init_pipe_state(struct vl_mpeg12_decoder *dec)
+{
+ struct pipe_rasterizer_state rast;
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state dsa;
+ unsigned i;
+
+ assert(dec);
+
+ memset(&rast, 0, sizeof rast);
+ rast.flatshade = 1;
+ rast.flatshade_first = 0;
+ rast.light_twoside = 0;
+ rast.front_ccw = 1;
+ rast.cull_face = PIPE_FACE_NONE;
+ rast.fill_back = PIPE_POLYGON_MODE_FILL;
+ rast.fill_front = PIPE_POLYGON_MODE_FILL;
+ rast.offset_point = 0;
+ rast.offset_line = 0;
+ rast.scissor = 0;
+ rast.poly_smooth = 0;
+ rast.poly_stipple_enable = 0;
+ rast.sprite_coord_enable = 0;
+ rast.point_size_per_vertex = 0;
+ rast.multisample = 0;
+ rast.line_smooth = 0;
+ rast.line_stipple_enable = 0;
+ rast.line_stipple_factor = 0;
+ rast.line_stipple_pattern = 0;
+ rast.line_last_pixel = 0;
+ rast.line_width = 1;
+ rast.point_smooth = 0;
+ rast.point_quad_rasterization = 0;
+ rast.point_size_per_vertex = 1;
+ rast.offset_units = 1;
+ rast.offset_scale = 1;
+ rast.gl_rasterization_rules = 1;
+
+ dec->rast = dec->pipe->create_rasterizer_state(dec->pipe, &rast);
+ dec->pipe->bind_rasterizer_state(dec->pipe, dec->rast);
+
+ memset(&blend, 0, sizeof blend);
+
+ blend.independent_blend_enable = 0;
+ blend.rt[0].blend_enable = 0;
+ blend.rt[0].rgb_func = PIPE_BLEND_ADD;
+ blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rt[0].alpha_func = PIPE_BLEND_ADD;
+ blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.logicop_enable = 0;
+ blend.logicop_func = PIPE_LOGICOP_CLEAR;
+ /* Needed to allow color writes to FB, even if blending disabled */
+ blend.rt[0].colormask = PIPE_MASK_RGBA;
+ blend.dither = 0;
+ dec->blend = dec->pipe->create_blend_state(dec->pipe, &blend);
+
+ memset(&dsa, 0, sizeof dsa);
+ dsa.depth.enabled = 0;
+ dsa.depth.writemask = 0;
+ dsa.depth.func = PIPE_FUNC_ALWAYS;
+ for (i = 0; i < 2; ++i) {
+ dsa.stencil[i].enabled = 0;
+ dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].valuemask = 0;
+ dsa.stencil[i].writemask = 0;
+ }
+ dsa.alpha.enabled = 0;
+ dsa.alpha.func = PIPE_FUNC_ALWAYS;
+ dsa.alpha.ref_value = 0;
+ dec->dsa = dec->pipe->create_depth_stencil_alpha_state(dec->pipe, &dsa);
+ dec->pipe->bind_depth_stencil_alpha_state(dec->pipe, dec->dsa);
+
+ return true;
+}
+
+static bool
+init_idct(struct vl_mpeg12_decoder *dec, unsigned buffer_width, unsigned buffer_height)
+{
+ unsigned chroma_width, chroma_height, chroma_blocks_x, chroma_blocks_y;
+ struct pipe_sampler_view *idct_matrix;
+
+ /* TODO: Implement 422, 444 */
+ assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+ dec->empty_block_mask = &const_empty_block_mask_420;
+
+ if (!(idct_matrix = vl_idct_upload_matrix(dec->pipe)))
+ goto error_idct_matrix;
+
+ if (!vl_idct_init(&dec->idct_y, dec->pipe, buffer_width, buffer_height,
+ 2, 2, idct_matrix))
+ goto error_idct_y;
+
+ if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ chroma_width = buffer_width / 2;
+ chroma_height = buffer_height / 2;
+ chroma_blocks_x = 1;
+ chroma_blocks_y = 1;
+ } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
+ chroma_width = buffer_width;
+ chroma_height = buffer_height / 2;
+ chroma_blocks_x = 2;
+ chroma_blocks_y = 1;
+ } else {
+ chroma_width = buffer_width;
+ chroma_height = buffer_height;
+ chroma_blocks_x = 2;
+ chroma_blocks_y = 2;
+ }
+
+   if (!vl_idct_init(&dec->idct_c, dec->pipe, chroma_width, chroma_height,
+ chroma_blocks_x, chroma_blocks_y, idct_matrix))
+ goto error_idct_c;
+
+ pipe_sampler_view_reference(&idct_matrix, NULL);
+ return true;
+
+error_idct_c:
+ vl_idct_cleanup(&dec->idct_y);
+
+error_idct_y:
+ pipe_sampler_view_reference(&idct_matrix, NULL);
+
+error_idct_matrix:
+ return false;
+}
+
+struct pipe_video_decoder *
+vl_create_mpeg12_decoder(struct pipe_video_context *context,
+ struct pipe_context *pipe,
+ enum pipe_video_profile profile,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height)
+{
+ struct vl_mpeg12_decoder *dec;
+ unsigned i;
+
+ assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
+
+ dec = CALLOC_STRUCT(vl_mpeg12_decoder);
+
+ if (!dec)
+ return NULL;
+
+ dec->base.context = context;
+ dec->base.profile = profile;
+ dec->base.chroma_format = chroma_format;
+ dec->base.width = width;
+ dec->base.height = height;
+
+ dec->base.destroy = vl_mpeg12_destroy;
+ dec->base.create_buffer = vl_mpeg12_create_buffer;
+ dec->base.flush_buffer = vl_mpeg12_decoder_flush_buffer;
+ dec->base.clear_buffer = vl_mpeg12_decoder_clear_buffer;
+
+ dec->pipe = pipe;
+
+ dec->quads = vl_vb_upload_quads(dec->pipe, 2, 2);
+ for (i = 0; i < VL_MAX_PLANES; ++i)
+ dec->ves[i] = vl_vb_get_elems_state(dec->pipe, i);
+
+ dec->base.width = align(width, MACROBLOCK_WIDTH);
+ dec->base.height = align(height, MACROBLOCK_HEIGHT);
+
+ if (!init_idct(dec, dec->base.width, dec->base.height))
+ goto error_idct;
+
+ if (!vl_mpeg12_mc_renderer_init(&dec->mc, dec->pipe, dec->base.width, dec->base.height))
+ goto error_mc;
+
+ if (!init_pipe_state(dec))
+ goto error_pipe_state;
+
+ return &dec->base;
+
+error_pipe_state:
+ vl_mpeg12_mc_renderer_cleanup(&dec->mc);
+
+error_mc:
+ vl_idct_cleanup(&dec->idct_y);
+ vl_idct_cleanup(&dec->idct_c);
+
+error_idct:
+ FREE(dec);
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_context.h b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
index 94a5dad5571..f7dc2d5799a 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_context.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
@@ -25,25 +25,24 @@
*
**************************************************************************/
-#ifndef VL_MPEG12_CONTEXT_H
-#define VL_MPEG12_CONTEXT_H
+#ifndef VL_MPEG12_DECODER_H
+#define VL_MPEG12_DECODER_H
#include <pipe/p_video_context.h>
+
#include "vl_idct.h"
#include "vl_mpeg12_mc_renderer.h"
-#include "vl_compositor.h"
-#include "vl_video_buffer.h"
+
#include "vl_vertex_buffers.h"
+#include "vl_video_buffer.h"
struct pipe_screen;
struct pipe_context;
-struct vl_mpeg12_context
+struct vl_mpeg12_decoder
{
- struct pipe_video_context base;
+ struct pipe_video_decoder base;
struct pipe_context *pipe;
- bool pot_buffers;
- unsigned buffer_width, buffer_height;
const unsigned (*empty_block_mask)[3][2][2];
@@ -60,14 +59,13 @@ struct vl_mpeg12_context
struct vl_mpeg12_buffer
{
- struct pipe_video_buffer base;
-
- struct vl_video_buffer idct_source;
- struct vl_video_buffer idct_2_mc;
- struct vl_video_buffer render_result;
+ struct pipe_video_decode_buffer base;
struct vl_vertex_buffer vertex_stream;
+ struct pipe_video_buffer *idct_source;
+ struct pipe_video_buffer *idct_2_mc;
+
union
{
struct pipe_vertex_buffer all[2];
@@ -82,11 +80,11 @@ struct vl_mpeg12_buffer
/* drivers can call this function in their pipe_video_context constructors and pass it
an accelerated pipe_context along with suitable buffering modes, etc */
-struct pipe_video_context *
-vl_create_mpeg12_context(struct pipe_context *pipe,
+struct pipe_video_decoder *
+vl_create_mpeg12_decoder(struct pipe_video_context *context,
+ struct pipe_context *pipe,
enum pipe_video_profile profile,
enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height,
- bool pot_buffers);
+ unsigned width, unsigned height);
-#endif /* VL_MPEG12_CONTEXT_H */
+#endif /* VL_MPEG12_DECODER_H */
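
Note: the decoder declared here is driven through the decode buffer it creates.
Below is a rough sketch of the expected call sequence in a state tracker; the
helper name, and the assumption that dec, refs and target were obtained earlier
(from create_decoder and create_buffer on the video context), are mine and not
part of the patch. Error handling is omitted.

/* Sketch only: one possible way to drive the new pipe_video_decoder /
 * pipe_video_decode_buffer interface.  The caller is expected to have
 * filled in the macroblocks (codec == PIPE_VIDEO_CODEC_MPEG12) and to
 * own refs[] and target. */
#include <pipe/p_video_context.h>

static void
decode_one_picture(struct pipe_video_decoder *dec,
                   struct pipe_mpeg12_macroblock *mbs, unsigned num_mbs,
                   struct pipe_video_buffer *refs[2],
                   struct pipe_video_buffer *target)
{
   struct pipe_fence_handle *fence = NULL;
   struct pipe_video_decode_buffer *buf = dec->create_buffer(dec);

   buf->map(buf);                                       /* map vertex/IDCT streams  */
   buf->add_macroblocks(buf, num_mbs,
                        (struct pipe_macroblock *)mbs); /* queue parsed macroblocks */
   buf->unmap(buf);

   /* refs[0] = past, refs[1] = future reference frame; either may be NULL */
   dec->flush_buffer(buf, refs, target, &fence);
   buf->destroy(buf);
}
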
diff --git a/src/gallium/auxiliary/vl/vl_video_buffer.c b/src/gallium/auxiliary/vl/vl_video_buffer.c
index f0b3d192eb5..5ea0dfa3736 100644
--- a/src/gallium/auxiliary/vl/vl_video_buffer.c
+++ b/src/gallium/auxiliary/vl/vl_video_buffer.c
@@ -25,85 +25,18 @@
*
**************************************************************************/
-#include "vl_video_buffer.h"
-#include <util/u_format.h>
-#include <util/u_inlines.h>
-#include <util/u_sampler.h>
-#include <pipe/p_screen.h>
-#include <pipe/p_context.h>
#include <assert.h>
-bool vl_video_buffer_init(struct vl_video_buffer *buffer,
- struct pipe_context *pipe,
- unsigned width, unsigned height, unsigned depth,
- enum pipe_video_chroma_format chroma_format,
- unsigned num_planes,
- const enum pipe_format resource_format[VL_MAX_PLANES],
- unsigned usage)
-{
- struct pipe_resource templ;
- unsigned i;
-
- assert(buffer && pipe);
- assert(num_planes > 0 && num_planes <= VL_MAX_PLANES);
-
- memset(buffer, 0, sizeof(struct vl_video_buffer));
- buffer->pipe = pipe;
- buffer->num_planes = num_planes;
-
- memset(&templ, 0, sizeof(templ));
- templ.target = PIPE_TEXTURE_2D;
- templ.format = resource_format[0];
- templ.width0 = width;
- templ.height0 = height;
- templ.depth0 = depth;
- templ.array_size = 1;
- templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
- templ.usage = usage;
-
- buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
- if (!buffer->resources[0])
- goto error;
-
- if (num_planes == 1) {
- assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444);
- return true;
- }
-
- templ.format = resource_format[1];
- if (chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
- if (depth > 1)
- templ.depth0 /= 2;
- else
- templ.width0 /= 2;
- templ.height0 /= 2;
- } else if (chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
- if (depth > 1)
- templ.depth0 /= 2;
- else
- templ.height0 /= 2;
- }
-
- buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
- if (!buffer->resources[1])
- goto error;
-
- if (num_planes == 2)
- return true;
-
- templ.format = resource_format[2];
- buffer->resources[2] = pipe->screen->resource_create(pipe->screen, &templ);
- if (!buffer->resources[2])
- goto error;
-
- return true;
+#include <pipe/p_screen.h>
+#include <pipe/p_context.h>
+#include <pipe/p_state.h>
-error:
- for (i = 0; i < VL_MAX_PLANES; ++i)
- pipe_resource_reference(&buffer->resources[i], NULL);
+#include <util/u_format.h>
+#include <util/u_inlines.h>
+#include <util/u_sampler.h>
+#include <util/u_memory.h>
- return false;
-}
+#include "vl_video_buffer.h"
static inline void
adjust_swizzle(struct pipe_sampler_view *sv_templ)
@@ -116,75 +49,160 @@ adjust_swizzle(struct pipe_sampler_view *sv_templ)
}
}
-vl_sampler_views *vl_video_buffer_sampler_views(struct vl_video_buffer *buffer)
+static void
+vl_video_buffer_destroy(struct pipe_video_buffer *buffer)
+{
+ struct vl_video_buffer *buf = (struct vl_video_buffer *)buffer;
+ unsigned i;
+
+ assert(buf);
+
+ for (i = 0; i < VL_MAX_PLANES; ++i) {
+ pipe_surface_reference(&buf->surfaces[i], NULL);
+ pipe_sampler_view_reference(&buf->sampler_views[i], NULL);
+ pipe_resource_reference(&buf->resources[i], NULL);
+ }
+}
+
+static struct pipe_sampler_view **
+vl_video_buffer_sampler_views(struct pipe_video_buffer *buffer)
{
+ struct vl_video_buffer *buf = (struct vl_video_buffer *)buffer;
struct pipe_sampler_view sv_templ;
struct pipe_context *pipe;
unsigned i;
- assert(buffer);
+ assert(buf);
- pipe = buffer->pipe;
+ pipe = buf->pipe;
- for (i = 0; i < buffer->num_planes; ++i ) {
- if (!buffer->sampler_views[i]) {
+ for (i = 0; i < buf->num_planes; ++i ) {
+ if (!buf->sampler_views[i]) {
memset(&sv_templ, 0, sizeof(sv_templ));
- u_sampler_view_default_template(&sv_templ, buffer->resources[i], buffer->resources[i]->format);
+ u_sampler_view_default_template(&sv_templ, buf->resources[i], buf->resources[i]->format);
adjust_swizzle(&sv_templ);
- buffer->sampler_views[i] = pipe->create_sampler_view(pipe, buffer->resources[i], &sv_templ);
- if (!buffer->sampler_views[i])
+ buf->sampler_views[i] = pipe->create_sampler_view(pipe, buf->resources[i], &sv_templ);
+ if (!buf->sampler_views[i])
goto error;
}
}
- return &buffer->sampler_views;
+ return buf->sampler_views;
error:
- for (i = 0; i < buffer->num_planes; ++i )
- pipe_sampler_view_reference(&buffer->sampler_views[i], NULL);
+ for (i = 0; i < buf->num_planes; ++i )
+ pipe_sampler_view_reference(&buf->sampler_views[i], NULL);
return NULL;
}
-vl_surfaces *vl_video_buffer_surfaces(struct vl_video_buffer *buffer)
+static struct pipe_surface **
+vl_video_buffer_surfaces(struct pipe_video_buffer *buffer)
{
+ struct vl_video_buffer *buf = (struct vl_video_buffer *)buffer;
struct pipe_surface surf_templ;
struct pipe_context *pipe;
unsigned i;
- assert(buffer);
+ assert(buf);
- pipe = buffer->pipe;
+ pipe = buf->pipe;
- for (i = 0; i < buffer->num_planes; ++i ) {
- if (!buffer->surfaces[i]) {
+ for (i = 0; i < buf->num_planes; ++i ) {
+ if (!buf->surfaces[i]) {
memset(&surf_templ, 0, sizeof(surf_templ));
- surf_templ.format = buffer->resources[i]->format;
+ surf_templ.format = buf->resources[i]->format;
surf_templ.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
- buffer->surfaces[i] = pipe->create_surface(pipe, buffer->resources[i], &surf_templ);
- if (!buffer->surfaces[i])
+ buf->surfaces[i] = pipe->create_surface(pipe, buf->resources[i], &surf_templ);
+ if (!buf->surfaces[i])
goto error;
}
}
- return &buffer->surfaces;
+ return buf->surfaces;
error:
- for (i = 0; i < buffer->num_planes; ++i )
- pipe_surface_reference(&buffer->surfaces[i], NULL);
+ for (i = 0; i < buf->num_planes; ++i )
+ pipe_surface_reference(&buf->surfaces[i], NULL);
return NULL;
}
-void vl_video_buffer_cleanup(struct vl_video_buffer *buffer)
+struct pipe_video_buffer *
+vl_video_buffer_init(struct pipe_video_context *context,
+ struct pipe_context *pipe,
+ unsigned width, unsigned height, unsigned depth,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned num_planes,
+ const enum pipe_format resource_formats[VL_MAX_PLANES],
+ unsigned usage)
{
+ struct vl_video_buffer *buffer;
+ struct pipe_resource templ;
unsigned i;
- assert(buffer);
+ assert(context && pipe);
+ assert(num_planes > 0 && num_planes <= VL_MAX_PLANES);
+
+ buffer = CALLOC_STRUCT(vl_video_buffer);
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- pipe_surface_reference(&buffer->surfaces[i], NULL);
- pipe_sampler_view_reference(&buffer->sampler_views[i], NULL);
- pipe_resource_reference(&buffer->resources[i], NULL);
+ buffer->base.destroy = vl_video_buffer_destroy;
+ buffer->base.get_sampler_views = vl_video_buffer_sampler_views;
+ buffer->base.get_surfaces = vl_video_buffer_surfaces;
+ buffer->pipe = pipe;
+ buffer->num_planes = num_planes;
+
+ memset(&templ, 0, sizeof(templ));
+ templ.target = PIPE_TEXTURE_2D;
+ templ.format = resource_formats[0];
+ templ.width0 = width;
+ templ.height0 = height;
+ templ.depth0 = depth;
+ templ.array_size = 1;
+ templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ templ.usage = usage;
+
+ buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
+ if (!buffer->resources[0])
+ goto error;
+
+ if (num_planes == 1) {
+ assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444);
+ return &buffer->base;
+ }
+
+ templ.format = resource_formats[1];
+ if (chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ if (depth > 1)
+ templ.depth0 /= 2;
+ else
+ templ.width0 /= 2;
+ templ.height0 /= 2;
+ } else if (chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
+ if (depth > 1)
+ templ.depth0 /= 2;
+ else
+ templ.height0 /= 2;
}
+
+ buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
+ if (!buffer->resources[1])
+ goto error;
+
+ if (num_planes == 2)
+ return &buffer->base;
+
+ templ.format = resource_formats[2];
+ buffer->resources[2] = pipe->screen->resource_create(pipe->screen, &templ);
+ if (!buffer->resources[2])
+ goto error;
+
+ return &buffer->base;
+
+error:
+ for (i = 0; i < VL_MAX_PLANES; ++i)
+ pipe_resource_reference(&buffer->resources[i], NULL);
+ FREE(buffer);
+
+ return NULL;
}
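
For context, a minimal sketch of how the reworked constructor could back a context's create_buffer hook (the hook itself is introduced in p_video_context.h further down in this patch). The three R8 plane formats, the PIPE_USAGE_STATIC flag and the layout of the vl_context wrapper (its pipe member) are assumptions made for illustration only, not part of this patch:

   /* illustrative only -- plane formats, usage flag and vl_context layout are assumed */
   static struct pipe_video_buffer *
   example_create_buffer(struct pipe_video_context *context,
                         enum pipe_format buffer_format,
                         enum pipe_video_chroma_format chroma_format,
                         unsigned width, unsigned height)
   {
      struct vl_context *ctx = (struct vl_context *)context;   /* assumed wrapper */
      const enum pipe_format formats[VL_MAX_PLANES] = {
         PIPE_FORMAT_R8_UNORM, PIPE_FORMAT_R8_UNORM, PIPE_FORMAT_R8_UNORM
      };

      /* buffer_format is ignored here; a planar YV12-style layout is assumed */
      (void)buffer_format;

      /* one 2D resource per plane, chroma planes sized according to chroma_format */
      return vl_video_buffer_init(context, ctx->pipe,
                                  width, height, 1,
                                  chroma_format, 3,
                                  formats, PIPE_USAGE_STATIC);
   }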
diff --git a/src/gallium/auxiliary/vl/vl_video_buffer.h b/src/gallium/auxiliary/vl/vl_video_buffer.h
index adba6c56e88..1acc9f49622 100644
--- a/src/gallium/auxiliary/vl/vl_video_buffer.h
+++ b/src/gallium/auxiliary/vl/vl_video_buffer.h
@@ -28,7 +28,8 @@
#ifndef vl_ycbcr_buffer_h
#define vl_ycbcr_buffer_h
-#include <pipe/p_state.h>
+#include <pipe/p_context.h>
+#include <pipe/p_video_context.h>
#define VL_MAX_PLANES 3
@@ -36,49 +37,26 @@
* implementation of a planar ycbcr buffer
*/
-/* resources of a buffer */
-typedef struct pipe_resource *vl_resources[VL_MAX_PLANES];
-
-/* sampler views of a buffer */
-typedef struct pipe_sampler_view *vl_sampler_views[VL_MAX_PLANES];
-
-/* surfaces of a buffer */
-typedef struct pipe_surface *vl_surfaces[VL_MAX_PLANES];
-
/* planar buffer for vl data upload and manipulation */
struct vl_video_buffer
{
- struct pipe_context *pipe;
- unsigned num_planes;
- vl_resources resources;
- vl_sampler_views sampler_views;
- vl_surfaces surfaces;
+ struct pipe_video_buffer base;
+ struct pipe_context *pipe;
+ unsigned num_planes;
+ struct pipe_resource *resources[VL_MAX_PLANES];
+ struct pipe_sampler_view *sampler_views[VL_MAX_PLANES];
+ struct pipe_surface *surfaces[VL_MAX_PLANES];
};
/**
* initialize a buffer, creating its resources
*/
-bool vl_video_buffer_init(struct vl_video_buffer *buffer,
- struct pipe_context *pipe,
- unsigned width, unsigned height, unsigned depth,
- enum pipe_video_chroma_format chroma_format,
- unsigned num_planes,
- const enum pipe_format resource_formats[VL_MAX_PLANES],
- unsigned usage);
-
-/**
- * create default sampler views for the buffer on demand
- */
-vl_sampler_views *vl_video_buffer_sampler_views(struct vl_video_buffer *buffer);
-
-/**
- * create default surfaces for the buffer on demand
- */
-vl_surfaces *vl_video_buffer_surfaces(struct vl_video_buffer *buffer);
-
-/**
- * cleanup the buffer destroying all its resources
- */
-void vl_video_buffer_cleanup(struct vl_video_buffer *buffer);
-
+struct pipe_video_buffer *
+vl_video_buffer_init(struct pipe_video_context *context,
+ struct pipe_context *pipe,
+ unsigned width, unsigned height, unsigned depth,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned num_planes,
+ const enum pipe_format resource_formats[VL_MAX_PLANES],
+ unsigned usage);
#endif
diff --git a/src/gallium/drivers/nv40/nv40_video_context.c b/src/gallium/drivers/nv40/nv40_video_context.c
index 34bb7cdbdda..cd231e434a5 100644
--- a/src/gallium/drivers/nv40/nv40_video_context.c
+++ b/src/gallium/drivers/nv40/nv40_video_context.c
@@ -27,12 +27,10 @@
#include "nv40_video_context.h"
#include "util/u_video.h"
-#include <vl/vl_mpeg12_context.h>
+#include <vl/vl_context.h>
struct pipe_video_context *
-nv40_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv)
+nv40_video_create(struct pipe_screen *screen, void *priv)
{
struct pipe_context *pipe;
@@ -42,13 +40,5 @@ nv40_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
if (!pipe)
return NULL;
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return vl_create_mpeg12_context(pipe, profile,
- chroma_format,
- width, height,
- true);
- default:
- return NULL;
- }
+ return vl_create_context(pipe, true);
}
diff --git a/src/gallium/drivers/nv40/nv40_video_context.h b/src/gallium/drivers/nv40/nv40_video_context.h
index 64196caca72..d34ab7ab130 100644
--- a/src/gallium/drivers/nv40/nv40_video_context.h
+++ b/src/gallium/drivers/nv40/nv40_video_context.h
@@ -31,8 +31,6 @@
#include <pipe/p_video_context.h>
struct pipe_video_context *
-nv40_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv);
+nv40_video_create(struct pipe_screen *screen, void *priv);
#endif
diff --git a/src/gallium/drivers/nvfx/nvfx_video_context.c b/src/gallium/drivers/nvfx/nvfx_video_context.c
index 01a84f2ebc6..9c9ccfe317d 100644
--- a/src/gallium/drivers/nvfx/nvfx_video_context.c
+++ b/src/gallium/drivers/nvfx/nvfx_video_context.c
@@ -27,12 +27,10 @@
#include "nvfx_video_context.h"
#include "util/u_video.h"
-#include <vl/vl_mpeg12_context.h>
+#include <vl/vl_context.h>
struct pipe_video_context *
-nvfx_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv)
+nvfx_video_create(struct pipe_screen *screen, void *priv)
{
struct pipe_context *pipe;
@@ -42,13 +40,5 @@ nvfx_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
if (!pipe)
return NULL;
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return vl_create_mpeg12_context(pipe, profile,
- chroma_format,
- width, height,
- true);
- default:
- return NULL;
- }
+ return vl_create_context(pipe, true);
}
diff --git a/src/gallium/drivers/nvfx/nvfx_video_context.h b/src/gallium/drivers/nvfx/nvfx_video_context.h
index 6619427cc29..b220b9f82dc 100644
--- a/src/gallium/drivers/nvfx/nvfx_video_context.h
+++ b/src/gallium/drivers/nvfx/nvfx_video_context.h
@@ -31,8 +31,6 @@
#include <pipe/p_video_context.h>
struct pipe_video_context *
-nvfx_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv);
+nvfx_video_create(struct pipe_screen *screen, void *priv);
#endif
diff --git a/src/gallium/drivers/r600/r600_video_context.c b/src/gallium/drivers/r600/r600_video_context.c
index c1b0c098245..a0ab3475fde 100644
--- a/src/gallium/drivers/r600/r600_video_context.c
+++ b/src/gallium/drivers/r600/r600_video_context.c
@@ -27,12 +27,10 @@
#include "r600_video_context.h"
#include "util/u_video.h"
-#include <vl/vl_mpeg12_context.h>
+#include <vl/vl_context.h>
struct pipe_video_context *
-r600_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv)
+r600_video_create(struct pipe_screen *screen, void *priv)
{
struct pipe_context *pipe;
@@ -42,13 +40,5 @@ r600_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
if (!pipe)
return NULL;
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return vl_create_mpeg12_context(pipe, profile,
- chroma_format,
- width, height,
- false);
- default:
- return NULL;
- }
+ return vl_create_context(pipe, false);
}
diff --git a/src/gallium/drivers/r600/r600_video_context.h b/src/gallium/drivers/r600/r600_video_context.h
index bda33a00d44..f579980bd36 100644
--- a/src/gallium/drivers/r600/r600_video_context.h
+++ b/src/gallium/drivers/r600/r600_video_context.h
@@ -4,8 +4,6 @@
#include <pipe/p_video_context.h>
struct pipe_video_context *
-r600_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv);
+r600_video_create(struct pipe_screen *screen, void *priv);
#endif
diff --git a/src/gallium/drivers/softpipe/sp_screen.c b/src/gallium/drivers/softpipe/sp_screen.c
index d5936641ba5..bb72cf63bc5 100644
--- a/src/gallium/drivers/softpipe/sp_screen.c
+++ b/src/gallium/drivers/softpipe/sp_screen.c
@@ -33,7 +33,7 @@
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "draw/draw_context.h"
-#include "vl/vl_mpeg12_context.h"
+#include "vl/vl_context.h"
#include "state_tracker/sw_winsys.h"
#include "tgsi/tgsi_exec.h"
@@ -288,29 +288,18 @@ softpipe_flush_frontbuffer(struct pipe_screen *_screen,
}
static struct pipe_video_context *
-sp_video_create(struct pipe_screen *screen, enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv)
+sp_video_create(struct pipe_screen *screen, void *priv)
{
struct pipe_context *pipe;
assert(screen);
- assert(width && height);
pipe = screen->context_create(screen, NULL);
if (!pipe)
return NULL;
/* TODO: Use slice buffering for softpipe when implemented, no advantage to buffering an entire picture with softpipe */
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return vl_create_mpeg12_context(pipe, profile,
- chroma_format,
- width, height,
- true);
- default:
- return NULL;
- }
+ return vl_create_context(pipe, true);
}
/**
diff --git a/src/gallium/include/pipe/p_screen.h b/src/gallium/include/pipe/p_screen.h
index 0d1b1094550..4f95fa1b2e1 100644
--- a/src/gallium/include/pipe/p_screen.h
+++ b/src/gallium/include/pipe/p_screen.h
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,11 +10,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -22,12 +22,12 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
/**
* @file
- *
+ *
* Screen, Adapter or GPU
*
* These are driver functions/facilities that are context independent.
@@ -94,11 +94,7 @@ struct pipe_screen {
struct pipe_context * (*context_create)( struct pipe_screen *, void *priv );
- struct pipe_video_context * (*video_context_create)( struct pipe_screen *screen,
- enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height, void *priv );
-
+ struct pipe_video_context * (*video_context_create)( struct pipe_screen *screen, void *priv );
/**
* Check if the given pipe_format is supported as a texture or
diff --git a/src/gallium/include/pipe/p_video_context.h b/src/gallium/include/pipe/p_video_context.h
index dec524d5290..33c64baf7c4 100644
--- a/src/gallium/include/pipe/p_video_context.h
+++ b/src/gallium/include/pipe/p_video_context.h
@@ -35,12 +35,10 @@ extern "C" {
#include <pipe/p_video_state.h>
struct pipe_screen;
-struct pipe_buffer;
struct pipe_surface;
struct pipe_macroblock;
struct pipe_picture_desc;
struct pipe_fence_handle;
-struct pipe_video_buffer;
/**
* Gallium video rendering context
@@ -48,36 +46,33 @@ struct pipe_video_buffer;
struct pipe_video_context
{
struct pipe_screen *screen;
- enum pipe_video_profile profile;
- enum pipe_video_chroma_format chroma_format;
- unsigned width;
- unsigned height;
void *priv; /**< context private data (for DRI for example) */
/**
+ * destroy the context; all objects created from it
+ * (buffers, decoders, compositors, etc.) must be freed before calling this
+ */
+ void (*destroy)(struct pipe_video_context *context);
+
+ /**
* Query an integer-valued capability/parameter/limit
* \param param one of PIPE_CAP_x
*/
- int (*get_param)(struct pipe_video_context *vpipe, int param);
+ int (*get_param)(struct pipe_video_context *context, int param);
/**
* Check if the given pipe_format is supported as a texture or
* drawing surface.
*/
- boolean (*is_format_supported)(struct pipe_video_context *vpipe,
+ boolean (*is_format_supported)(struct pipe_video_context *context,
enum pipe_format format,
unsigned usage);
/**
- * destroy context, all buffers must be freed before calling this
- */
- void (*destroy)(struct pipe_video_context *vpipe);
-
- /**
* create a surface of a texture
*/
- struct pipe_surface *(*create_surface)(struct pipe_video_context *vpipe,
+ struct pipe_surface *(*create_surface)(struct pipe_video_context *context,
struct pipe_resource *resource,
const struct pipe_surface *templ);
@@ -89,14 +84,14 @@ struct pipe_video_context
/**
* create a sampler view of a texture, for subpictures for example
*/
- struct pipe_sampler_view *(*create_sampler_view)(struct pipe_video_context *vpipe,
+ struct pipe_sampler_view *(*create_sampler_view)(struct pipe_video_context *context,
struct pipe_resource *resource,
const struct pipe_sampler_view *templ);
/**
* upload image data to a sampler
*/
- void (*upload_sampler)(struct pipe_video_context *vpipe,
+ void (*upload_sampler)(struct pipe_video_context *context,
struct pipe_sampler_view *dst,
const struct pipe_box *dst_box,
const void *src, unsigned src_stride,
@@ -105,74 +100,145 @@ struct pipe_video_context
/**
* clear a sampler with a specific rgba color
*/
- void (*clear_sampler)(struct pipe_video_context *vpipe,
+ void (*clear_sampler)(struct pipe_video_context *context,
struct pipe_sampler_view *dst,
const struct pipe_box *dst_box,
const float *rgba);
+ /*@}*/
+
+ /**
+ * create a decoder for a specific video profile
+ */
+ struct pipe_video_decoder *(*create_decoder)(struct pipe_video_context *context,
+ enum pipe_video_profile profile,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height);
+
/**
* Creates a buffer as decoding target
*/
- struct pipe_video_buffer *(*create_buffer)(struct pipe_video_context *vpipe);
+ struct pipe_video_buffer *(*create_buffer)(struct pipe_video_context *context,
+ enum pipe_format buffer_format,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height);
/**
* Creates a video compositor
*/
- struct pipe_video_compositor *(*create_compositor)(struct pipe_video_context *vpipe);
+ struct pipe_video_compositor *(*create_compositor)(struct pipe_video_context *context);
+};
+
+/**
+ * decoder for a specific video codec
+ */
+struct pipe_video_decoder
+{
+ struct pipe_video_context *context;
+
+ enum pipe_video_profile profile;
+ enum pipe_video_chroma_format chroma_format;
+ unsigned width;
+ unsigned height;
/**
- * Picture decoding and displaying
+ * destroy this video decoder
*/
+ void (*destroy)(struct pipe_video_decoder *decoder);
-#if 0
- void (*decode_bitstream)(struct pipe_video_context *vpipe,
- unsigned num_bufs,
- struct pipe_buffer **bitstream_buf);
-#endif
+ /**
+ * Creates a buffer as decoding input
+ */
+ struct pipe_video_decode_buffer *(*create_buffer)(struct pipe_video_decoder *decoder);
+
+ /**
+ * flush decoder buffer to video hardware
+ */
+ void (*flush_buffer)(struct pipe_video_decode_buffer *decbuf,
+ struct pipe_video_buffer *ref_frames[2],
+ struct pipe_video_buffer *dst,
+ struct pipe_fence_handle **fence);
+
+ /**
+ * clear the decoder buffer's todo list
+ */
+ void (*clear_buffer)(struct pipe_video_decode_buffer *decbuf);
};
-struct pipe_video_buffer
+/**
+ * input buffer for a decoder
+ */
+struct pipe_video_decode_buffer
{
- struct pipe_video_context* context;
+ struct pipe_video_decoder *decoder;
/**
- * destroy this video buffer
+ * destroy this decode buffer
*/
- void (*destroy)(struct pipe_video_buffer *buffer);
+ void (*destroy)(struct pipe_video_decode_buffer *decbuf);
/**
- * map the buffer into memory before calling add_macroblocks
+ * map the input buffer into memory before starting decoding
*/
- void (*map)(struct pipe_video_buffer *buffer);
+ void (*map)(struct pipe_video_decode_buffer *decbuf);
+#if 0
/**
- * add macroblocks to buffer for decoding
+ * decode a bitstream
*/
- void (*add_macroblocks)(struct pipe_video_buffer *buffer,
+ void (*decode_bitstream)(struct pipe_video_decode_buffer *decbuf,
+ unsigned num_bufs,
+ struct pipe_buffer **bitstream_buf);
+#endif
+
+ /**
+ * add macroblocks to decoder buffer
+ */
+ void (*add_macroblocks)(struct pipe_video_decode_buffer *decbuf,
unsigned num_macroblocks,
struct pipe_macroblock *macroblocks);
/**
- * unmap buffer before flushing
+ * unmap decoder buffer before flushing
*/
- void (*unmap)(struct pipe_video_buffer *buffer);
+ void (*unmap)(struct pipe_video_decode_buffer *decbuf);
+};
+
+/**
+ * output for decoding / input for displaying
+ */
+struct pipe_video_buffer
+{
+ struct pipe_video_context *context;
+
+ enum pipe_format buffer_format;
+ enum pipe_video_chroma_format chroma_format;
+ unsigned width;
+ unsigned height;
/**
- * flush buffer to video hardware
+ * destroy this video buffer
*/
- void (*flush)(struct pipe_video_buffer *buffer,
- struct pipe_video_buffer *ref_frames[2],
- struct pipe_fence_handle **fence);
+ void (*destroy)(struct pipe_video_buffer *buffer);
+ /**
+ * get an individual sampler view for each plane
+ */
+ struct pipe_sampler_view **(*get_sampler_views)(struct pipe_video_buffer *buffer);
- void (*get_sampler_views)(struct pipe_video_buffer *buffer,
- struct pipe_sampler_view *sampler_views[3]);
+ /**
+ * get an individual surface for each plane
+ */
+ struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
};
+/**
+ * composing and displaying image data
+ */
struct pipe_video_compositor
{
- struct pipe_video_context* context;
+ struct pipe_video_context *context;
/**
* destroy this compositor
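
To make the decoder/decode-buffer split concrete, here is a minimal sketch of the call sequence a state tracker would issue against the interfaces above; the macroblock array, reference frames, destination buffer and fence are placeholders supplied by the caller, and error handling is reduced to a single NULL check:

   /* sketch: decode one picture with the split decoder/decode-buffer interface */
   static void
   example_decode_picture(struct pipe_video_decoder *decoder,
                          struct pipe_video_buffer *ref_frames[2],
                          struct pipe_video_buffer *dst,
                          unsigned num_macroblocks,
                          struct pipe_macroblock *macroblocks)
   {
      struct pipe_fence_handle *fence = NULL;
      struct pipe_video_decode_buffer *decbuf;

      decbuf = decoder->create_buffer(decoder);
      if (!decbuf)
         return;

      decbuf->map(decbuf);
      decbuf->add_macroblocks(decbuf, num_macroblocks, macroblocks);
      decbuf->unmap(decbuf);

      /* flushing now lives on the decoder and names the render target explicitly */
      decoder->flush_buffer(decbuf, ref_frames, dst, &fence);

      decbuf->destroy(decbuf);
   }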
diff --git a/src/gallium/state_trackers/xorg/xvmc/context.c b/src/gallium/state_trackers/xorg/xvmc/context.c
index bdcba72d7cf..2690f8046a8 100644
--- a/src/gallium/state_trackers/xorg/xvmc/context.c
+++ b/src/gallium/state_trackers/xorg/xvmc/context.c
@@ -231,9 +231,7 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
return BadAlloc;
}
- vctx = vl_video_create(vscreen, ProfileToPipe(mc_type),
- FormatToPipe(chroma_format), width, height);
-
+ vctx = vl_video_create(vscreen);
if (!vctx) {
XVMC_MSG(XVMC_ERR, "[XvMC] Could not create VL context.\n");
vl_screen_destroy(vscreen);
@@ -241,9 +239,23 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
return BadAlloc;
}
+ context_priv->decoder = vctx->vpipe->create_decoder(vctx->vpipe,
+ ProfileToPipe(mc_type),
+ FormatToPipe(chroma_format),
+ width, height);
+
+ if (!context_priv->decoder) {
+ XVMC_MSG(XVMC_ERR, "[XvMC] Could not create VL decoder.\n");
+ vl_video_destroy(vctx);
+ vl_screen_destroy(vscreen);
+ FREE(context_priv);
+ return BadAlloc;
+ }
+
context_priv->compositor = vctx->vpipe->create_compositor(vctx->vpipe);
if (!context_priv->compositor) {
XVMC_MSG(XVMC_ERR, "[XvMC] Could not create VL compositor.\n");
+ context_priv->decoder->destroy(context_priv->decoder);
vl_video_destroy(vctx);
vl_screen_destroy(vscreen);
FREE(context_priv);
@@ -295,6 +307,7 @@ Status XvMCDestroyContext(Display *dpy, XvMCContext *context)
context_priv = context->privData;
vctx = context_priv->vctx;
vscreen = vctx->vscreen;
+ context_priv->decoder->destroy(context_priv->decoder);
context_priv->compositor->destroy(context_priv->compositor);
vl_video_destroy(vctx);
vl_screen_destroy(vscreen);
diff --git a/src/gallium/state_trackers/xorg/xvmc/surface.c b/src/gallium/state_trackers/xorg/xvmc/surface.c
index efbebaa4ccf..ba55d9ddb22 100644
--- a/src/gallium/state_trackers/xorg/xvmc/surface.c
+++ b/src/gallium/state_trackers/xorg/xvmc/surface.c
@@ -151,10 +151,13 @@ static void
unmap_and_flush_surface(XvMCSurfacePrivate *surface)
{
struct pipe_video_buffer *ref_frames[2];
+ XvMCContextPrivate *context_priv;
unsigned i;
assert(surface);
+ context_priv = surface->context->privData;
+
for ( i = 0; i < 2; ++i ) {
if (surface->ref_surfaces[i]) {
XvMCSurfacePrivate *ref = surface->ref_surfaces[i]->privData;
@@ -163,17 +166,18 @@ unmap_and_flush_surface(XvMCSurfacePrivate *surface)
unmap_and_flush_surface(ref);
surface->ref_surfaces[i] = NULL;
- ref_frames[i] = ref->pipe_buffer;
+ ref_frames[i] = ref->video_buffer;
} else {
ref_frames[i] = NULL;
}
}
if (surface->mapped) {
- surface->pipe_buffer->unmap(surface->pipe_buffer);
- surface->pipe_buffer->flush(surface->pipe_buffer,
- ref_frames,
- &surface->flush_fence);
+ surface->decode_buffer->unmap(surface->decode_buffer);
+ context_priv->decoder->flush_buffer(surface->decode_buffer,
+ ref_frames,
+ surface->video_buffer,
+ &surface->flush_fence);
surface->mapped = 0;
}
}
@@ -201,7 +205,11 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
if (!surface_priv)
return BadAlloc;
- surface_priv->pipe_buffer = vpipe->create_buffer(vpipe);
+ surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder);
+ surface_priv->video_buffer = vpipe->create_buffer(vpipe, PIPE_FORMAT_YV12, //TODO
+ context_priv->decoder->chroma_format,
+ context_priv->decoder->width,
+ context_priv->decoder->height);
surface_priv->context = context;
surface->surface_id = XAllocID(dpy);
@@ -226,7 +234,7 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
)
{
struct pipe_video_context *vpipe;
- struct pipe_video_buffer *t_buffer;
+ struct pipe_video_decode_buffer *t_buffer;
XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
XvMCSurfacePrivate *past_surface_priv;
@@ -274,7 +282,7 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
context_priv = context->privData;
vpipe = context_priv->vctx->vpipe;
- t_buffer = target_surface_priv->pipe_buffer;
+ t_buffer = target_surface_priv->decode_buffer;
// ensure that all reference frames are flushed
// not really necessary, but speeds up rendering
@@ -395,7 +403,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
unmap_and_flush_surface(surface_priv);
compositor->clear_layers(compositor);
- compositor->set_buffer_layer(compositor, 0, surface_priv->pipe_buffer, &src_rect, NULL);
+ compositor->set_buffer_layer(compositor, 0, surface_priv->video_buffer, &src_rect, NULL);
if (subpicture_priv) {
struct pipe_video_rect src_rect = {surface_priv->subx, surface_priv->suby, surface_priv->subw, surface_priv->subh};
@@ -471,7 +479,8 @@ Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
return XvMCBadSurface;
surface_priv = surface->privData;
- surface_priv->pipe_buffer->destroy(surface_priv->pipe_buffer);
+ surface_priv->decode_buffer->destroy(surface_priv->decode_buffer);
+ surface_priv->video_buffer->destroy(surface_priv->video_buffer);
FREE(surface_priv);
surface->privData = NULL;
diff --git a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
index 29518b36dbf..9a5338c2923 100644
--- a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
+++ b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
@@ -42,6 +42,7 @@ struct pipe_fence_handle;
typedef struct
{
struct vl_context *vctx;
+ struct pipe_video_decoder *decoder;
struct pipe_video_compositor *compositor;
unsigned short subpicture_max_width;
@@ -50,7 +51,9 @@ typedef struct
typedef struct
{
- struct pipe_video_buffer *pipe_buffer;
+ struct pipe_video_decode_buffer *decode_buffer;
+ struct pipe_video_buffer *video_buffer;
+
bool mapped; // are we still mapped to memory?
XvMCSurface *ref_surfaces[2];
diff --git a/src/gallium/winsys/g3dvl/dri/dri_winsys.c b/src/gallium/winsys/g3dvl/dri/dri_winsys.c
index 8588ddd17cb..ffb94de4a7b 100644
--- a/src/gallium/winsys/g3dvl/dri/dri_winsys.c
+++ b/src/gallium/winsys/g3dvl/dri/dri_winsys.c
@@ -232,10 +232,7 @@ void vl_screen_destroy(struct vl_screen *vscreen)
}
struct vl_context*
-vl_video_create(struct vl_screen *vscreen,
- enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height)
+vl_video_create(struct vl_screen *vscreen)
{
struct vl_dri_screen *vl_dri_scrn = (struct vl_dri_screen*)vscreen;
struct vl_dri_context *vl_dri_ctx;
@@ -251,10 +248,7 @@ vl_video_create(struct vl_screen *vscreen,
goto no_vpipe;
}
- vl_dri_ctx->base.vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen,
- profile, chroma_format,
- width, height,
- vl_dri_ctx);
+ vl_dri_ctx->base.vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, vl_dri_ctx);
if (!vl_dri_ctx->base.vpipe)
goto no_vpipe;
diff --git a/src/gallium/winsys/g3dvl/vl_winsys.h b/src/gallium/winsys/g3dvl/vl_winsys.h
index 58f548849f6..152a4a62292 100644
--- a/src/gallium/winsys/g3dvl/vl_winsys.h
+++ b/src/gallium/winsys/g3dvl/vl_winsys.h
@@ -53,10 +53,7 @@ vl_screen_create(Display *display, int screen);
void vl_screen_destroy(struct vl_screen *vscreen);
struct vl_context*
-vl_video_create(struct vl_screen *vscreen,
- enum pipe_video_profile profile,
- enum pipe_video_chroma_format chroma_format,
- unsigned width, unsigned height);
+vl_video_create(struct vl_screen *vscreen);
void vl_video_destroy(struct vl_context *vctx);
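
With profile, chroma format and dimensions dropped from the winsys entry point, a client now creates the video context first and asks it for a decoder afterwards, roughly along the lines of the following sketch (the profile, chroma_format, width and height variables are placeholders supplied by the caller):

   /* sketch: context creation no longer needs codec parameters */
   struct vl_context *vctx = vl_video_create(vscreen);
   struct pipe_video_decoder *decoder =
      vctx->vpipe->create_decoder(vctx->vpipe, profile, chroma_format, width, height);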