summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Anholt <eric@anholt.net>2013-05-16 09:27:11 -0700
committerEric Anholt <eric@anholt.net>2013-05-16 14:24:07 -0700
commit4fce57359f5ca202b0addb1a1c80c15273bfc20e (patch)
tree9a66bdf7ac328fa6778fd988f779e82e8188664b
parent76396d296a6483113d701efaee00959ff005c0c3 (diff)
intel: Add batchbuffer setup support.batch
-rw-r--r--intel/Makefile.am1
-rw-r--r--intel/intel_batchbuffer.c251
-rw-r--r--intel/intel_bufmgr.h22
3 files changed, 274 insertions, 0 deletions
diff --git a/intel/Makefile.am b/intel/Makefile.am
index f49b0998..695fd5c7 100644
--- a/intel/Makefile.am
+++ b/intel/Makefile.am
@@ -40,6 +40,7 @@ libdrm_intel_la_LIBADD = ../libdrm.la \
@CLOCK_LIB@
libdrm_intel_la_SOURCES = \
+ intel_batchbuffer.c \
intel_bufmgr.c \
intel_bufmgr_priv.h \
intel_bufmgr_fake.c \
diff --git a/intel/intel_batchbuffer.c b/intel/intel_batchbuffer.c
new file mode 100644
index 00000000..893a7875
--- /dev/null
+++ b/intel/intel_batchbuffer.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <pthread.h>
+#include "i915_drm.h"
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+
+/* GEM-side batchbuffer state, wrapping the public struct drm_intel_batch
+ * (declared in intel_bufmgr.h) with bookkeeping private to this file.
+ */
+struct drm_intel_gem_batch {
+ struct drm_intel_batch base;
+ int size; /* allocation size of the batch BO, in bytes */
+ drm_intel_context *hw_ctx; /* HW context to exec under, or NULL */
+
+ /* Rollback state captured by drm_intel_batch_set_checkpoint(). */
+ int saved_used;
+ int saved_reloc_count;
+
+ bool dump; /* if set, decode/print the batch after each exec */
+};
+
+/* Allocates a fresh batchbuffer BO and resets the write pointer.
+ *
+ * On LLC systems the new BO is CPU-mapped and base.map points into it;
+ * on non-LLC systems base.map keeps pointing at the malloc()ed staging
+ * buffer set up by drm_intel_batch_init(), uploaded at exec time.
+ *
+ * NOTE(review): neither drm_intel_bo_alloc() nor drm_intel_bo_map()
+ * failure is checked here — a NULL bo/virtual would be dereferenced by
+ * later batch emission; confirm intended error policy.
+ */
+static void
+drm_intel_batch_next(struct drm_intel_gem_batch *batch)
+{
+ drm_intel_bufmgr *bufmgr = batch->base.bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ batch->base.bo = drm_intel_bo_alloc(bufmgr, "batchbuffer",
+ batch->size, 4096);
+
+ if (bufmgr_gem->has_llc) {
+ drm_intel_bo_map(batch->base.bo, true);
+ batch->base.map = batch->base.bo->virtual;
+ }
+
+ batch->base.used = 0;
+}
+
+/**
+ * Creates a batchbuffer object for emitting commands to the GPU.
+ *
+ * Returns NULL on allocation failure.
+ *
+ * NOTE(review): the hw_ctx argument is currently unused — batch->hw_ctx
+ * (a drm_intel_context *) is never initialized from it, so the
+ * context-exec path in drm_intel_batch_exec() is never taken; confirm
+ * the intended API before relying on per-context execution.
+ */
+struct drm_intel_batch *
+drm_intel_batch_init(drm_intel_bufmgr *bufmgr, uint32_t hw_ctx)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+	struct drm_intel_gem_batch *batch;
+
+	batch = calloc(1, sizeof(*batch));
+	if (!batch)
+		return NULL;
+
+	batch->base.bufmgr = bufmgr;
+	batch->base.type = I915_EXEC_RENDER;
+
+	/* Pre-gen4 hardware limits batchbuffers to a single page. */
+	if (bufmgr_gem->gen < 4)
+		batch->size = 4096;
+	else
+		batch->size = 32768;
+
+	/* If we don't have cache coherency for batchbuffers and have
+	 * to clflush between a CPU map and GPU use, then just
+	 * allocate a local buffer and upload it through pwrite at
+	 * batch exec time.  We can't use a direct GTT mapping because
+	 * batchbuffer writes are too spread out over time and the CPU
+	 * write combining buffers don't successfully merge the
+	 * writes.
+	 */
+	if (!bufmgr_gem->has_llc) {
+		batch->base.map = malloc(batch->size);
+		if (!batch->base.map) {
+			free(batch);
+			return NULL;
+		}
+	}
+
+	drm_intel_batch_next(batch);
+
+	return &batch->base;
+}
+
+/* Records the current batch write position and relocation count so that
+ * drm_intel_batch_reset_to_checkpoint() can later roll back to this
+ * point (e.g. when a partially-emitted state packet must be abandoned).
+ */
+void
+drm_intel_batch_set_checkpoint(struct drm_intel_batch *base_batch)
+{
+	struct drm_intel_gem_batch *batch =
+		(struct drm_intel_gem_batch *)base_batch;
+
+	batch->saved_used = base_batch->used;
+	batch->saved_reloc_count =
+		drm_intel_gem_bo_get_reloc_count(base_batch->bo);
+}
+
+/* Rolls the batch back to the state captured by the most recent
+ * drm_intel_batch_set_checkpoint(): discards dwords emitted since then
+ * and drops the relocations they added.
+ */
+void
+drm_intel_batch_reset_to_checkpoint(struct drm_intel_batch *base_batch)
+{
+	struct drm_intel_gem_batch *batch =
+		(struct drm_intel_gem_batch *)base_batch;
+
+	base_batch->used = batch->saved_used;
+	drm_intel_gem_bo_clear_relocs(base_batch->bo,
+				      batch->saved_reloc_count);
+}
+
+/**
+ * Frees a batchbuffer created by drm_intel_batch_init().
+ *
+ * The staging map is malloc()ed only on non-LLC systems (see
+ * drm_intel_batch_init()); on LLC systems base->map aliases the BO's
+ * CPU mapping and must not be passed to free().  The original condition
+ * here was inverted, corrupting the heap on LLC and leaking otherwise.
+ */
+void
+drm_intel_batch_free(struct drm_intel_batch *base_batch)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem =
+		(drm_intel_bufmgr_gem *)base_batch->bufmgr;
+
+	if (!bufmgr_gem->has_llc)
+		free(base_batch->map);
+	drm_intel_bo_unreference(base_batch->bo);
+	free(base_batch);
+}
+
+/**
+ * Decodes and prints the current batchbuffer contents for debugging.
+ *
+ * Prefers mapping the BO itself (the authoritative, relocated copy).
+ * If that fails, only non-LLC systems have a fallback: base.map is the
+ * malloc()ed staging copy that was uploaded.  On LLC systems base.map
+ * aliases the (now unmapped) BO mapping and is stale, so the original
+ * has_llc fallback condition was inverted; it is fixed here, and
+ * decoding is skipped entirely when no batch pointer could be set.
+ */
+static void
+drm_intel_batch_dump(struct drm_intel_batch *base_batch)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem =
+		(drm_intel_bufmgr_gem *)base_batch->bufmgr;
+	struct drm_intel_gem_batch *batch =
+		(struct drm_intel_gem_batch *)base_batch;
+	struct drm_intel_decode *decode;
+	int ret;
+
+	decode = drm_intel_decode_context_alloc(bufmgr_gem->pci_device);
+	if (!decode)
+		return;
+
+	ret = drm_intel_bo_map(batch->base.bo, false);
+	if (ret == 0) {
+		drm_intel_decode_set_batch_pointer(decode,
+						   batch->base.bo->virtual,
+						   batch->base.bo->offset,
+						   batch->base.used);
+	} else if (!bufmgr_gem->has_llc) {
+		fprintf(stderr,
+			"WARNING: failed to map batchbuffer (%s), "
+			"dumping uploaded data instead.\n", strerror(ret));
+
+		drm_intel_decode_set_batch_pointer(decode,
+						   batch->base.map,
+						   batch->base.bo->offset,
+						   batch->base.used);
+	} else {
+		fprintf(stderr,
+			"WARNING: failed to map batchbuffer (%s)\n",
+			strerror(ret));
+		drm_intel_decode_context_free(decode);
+		return;
+	}
+
+	drm_intel_decode(decode);
+
+	drm_intel_decode_context_free(decode);
+
+	if (ret == 0)
+		drm_intel_bo_unmap(batch->base.bo);
+}
+
+/**
+ * Submits the accumulated batchbuffer to the GPU and starts a new one.
+ *
+ * On LLC systems the batch was written directly into the (mapped) BO,
+ * so we just unmap it; otherwise the local staging copy is uploaded
+ * with pwrite first.  Returns 0 on success or a negative errno.
+ *
+ * Fixes over the original: `bo` was an undefined identifier (should be
+ * batch->base.bo), `batch->used` read a nonexistent field (it lives in
+ * base), and the tail call named a nonexistent function/variable
+ * (drm_intel_batchbuffer_next(intel)).
+ */
+int
+drm_intel_batch_exec(struct drm_intel_batch *base_batch)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem =
+		(drm_intel_bufmgr_gem *)base_batch->bufmgr;
+	struct drm_intel_gem_batch *batch =
+		(struct drm_intel_gem_batch *)base_batch;
+	int ret = 0;
+
+	if (batch->base.used == 0)
+		return 0;
+
+	if (bufmgr_gem->has_llc) {
+		drm_intel_bo_unmap(batch->base.bo);
+	} else {
+		/* used counts dwords; subdata takes bytes. */
+		ret = drm_intel_bo_subdata(batch->base.bo,
+					   0,
+					   batch->base.used * 4,
+					   batch->base.map);
+	}
+
+	if (ret == 0) {
+		if (batch->hw_ctx == NULL ||
+		    batch->base.type != I915_EXEC_RENDER) {
+			ret = drm_intel_bo_mrb_exec(batch->base.bo,
+						    batch->base.used * 4,
+						    NULL, 0, 0,
+						    batch->base.type);
+		} else {
+			ret = drm_intel_gem_bo_context_exec(batch->base.bo,
+							    batch->hw_ctx,
+							    batch->base.used * 4,
+							    batch->base.type);
+		}
+	}
+
+	if (ret != 0) {
+		fprintf(stderr, "intel_batch_exec failed: %s\n",
+			strerror(-ret));
+	}
+
+	if (batch->dump)
+		drm_intel_batch_dump(base_batch);
+
+	/* Start a fresh batchbuffer for subsequent emission. */
+	drm_intel_batch_next(batch);
+
+	return ret;
+}
+
+/**
+ * Emits a relocation from reloc_bo to target_bo and writes the
+ * predicted offset into reloc_bo's contents, so the kernel hopefully
+ * won't need to patch it at exec time.
+ *
+ * Fixes over the original: `batch` is a struct drm_intel_batch *, whose
+ * bo/map members are direct (there is no `base` field); the parameters
+ * were declared `struct drm_intel_bo *`, an unrelated incomplete type —
+ * the typedef'd tag is _drm_intel_bo — mismatching the header
+ * prototype; and pointer arithmetic on void * / uint32_t * bases is now
+ * done via char * casts instead of the GNU void-arithmetic extension.
+ */
+void
+drm_intel_reloc_with_batch(struct drm_intel_batch *batch,
+			   drm_intel_bo *reloc_bo,
+			   uint64_t reloc_offset,
+			   drm_intel_bo *target_bo,
+			   uint64_t target_offset,
+			   uint32_t read_domain,
+			   uint32_t write_domain)
+{
+	uint32_t predicted_offset;
+
+	predicted_offset = drm_intel_gem_bo_emit_reloc(reloc_bo,
+						       reloc_offset,
+						       target_bo,
+						       target_offset,
+						       read_domain,
+						       write_domain);
+
+	if (reloc_bo == batch->bo) {
+		/* Update our possibly local copy of the batch
+		 * contents on non-LLC systems.
+		 */
+		*(uint32_t *)((char *)batch->map + reloc_offset) =
+			predicted_offset;
+	} else if (reloc_bo->virtual) {
+		*(uint32_t *)((char *)reloc_bo->virtual + reloc_offset) =
+			predicted_offset;
+	} else {
+		drm_intel_bo_subdata(reloc_bo, reloc_offset, 4,
+				     &predicted_offset);
+	}
+}
diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index 771b27d0..8cc56b2f 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -82,6 +82,14 @@ struct _drm_intel_bo {
int handle;
};
+/* Public batchbuffer handle returned by drm_intel_batch_init(). */
+struct drm_intel_batch {
+ drm_intel_bufmgr *bufmgr;
+ drm_intel_bo *bo; /* BO the batch is executed from */
+ uint32_t *map; /* CPU-writable view of the batch contents */
+ uint32_t type; /* execbuf flags/ring, e.g. I915_EXEC_RENDER */
+ int used; /* dwords emitted so far */
+};
+
enum aub_dump_bmp_format {
AUB_DUMP_BMP_FORMAT_8BIT = 1,
AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
@@ -241,6 +249,20 @@ int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result);
+struct drm_intel_batch *drm_intel_batch_init(drm_intel_bufmgr *bufmgr,
+ uint32_t hw_ctx);
+void drm_intel_batch_free(struct drm_intel_batch *batch);
+int drm_intel_batch_exec(struct drm_intel_batch *base_batch);
+void drm_intel_batch_set_checkpoint(struct drm_intel_batch *base_batch);
+void drm_intel_batch_reset_to_checkpoint(struct drm_intel_batch *base_batch);
+void drm_intel_reloc_with_batch(struct drm_intel_batch *batch,
+ drm_intel_bo *reloc_bo,
+ uint64_t reloc_offset,
+ drm_intel_bo *target_bo,
+ uint64_t target_offset,
+ uint32_t read_domain,
+ uint32_t write_domain);
+
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
*/