author     Chris Wilson <chris@chris-wilson.co.uk>   2011-11-04 11:57:09 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>   2011-11-04 13:11:14 +0000
commit     34758895cdd93bd7671a78464e79b3891bca113d
tree       b09712856d2cdc3a3c131bdc59d396a6fa8d2a20
parent     5525691eb024f2a04b486652c24e0e34176fdd5e
sna: Ensure operations on a ShmPixmap are synchronous with clients
If we are rendering to or from a ShmPixmap, we need to be sure that the
operation is complete prior to sending an XSync response to the client, in
order to preserve mixed-rendering coherency.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--  src/sna/kgem.c       | 38
-rw-r--r--  src/sna/kgem.h       |  3
-rw-r--r--  src/sna/sna.h        |  7
-rw-r--r--  src/sna/sna_accel.c  | 21
-rw-r--r--  src/sna/sna_blt.c    |  9
5 files changed, 60 insertions, 18 deletions
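The wait that makes this work is DRM_IOCTL_I915_GEM_SET_DOMAIN: the kernel does
not return from the ioctl until all outstanding GPU rendering on the object has
completed, which is what the new kgem_sync() below relies on. As a standalone
sketch of that technique (not code from this patch; the helper name, the
file-descriptor argument, and the include paths are assumptions and depend on
the libdrm installation), the wait boils down to:

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>      /* drmIoctl() */
#include <i915_drm.h>     /* struct drm_i915_gem_set_domain */

/* Illustrative helper (not part of the patch): stall until the GPU has
 * finished with the GEM object 'handle' by moving it to the GTT domain.
 * The kernel waits for any outstanding rendering on the object before
 * returning, so the ioctl doubles as a CPU/GPU synchronisation point. */
static void wait_for_gem_handle(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain set_domain;

	memset(&set_domain, 0, sizeof(set_domain));
	set_domain.handle = handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;

	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
}

In the patch itself this wait is driven from sna_accel_flush_callback():
whenever kgem.sync has been set by a ShmPixmap operation, kgem_sync() is called
before the deferred ShmPixmap buffers are destroyed, so the reply never reaches
the client ahead of the rendering it requested.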
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e6df36f5..310f8b59 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -509,8 +509,12 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
bo->exec = kgem_add_handle(kgem, bo);
bo->rq = kgem->next_request;
bo->gpu = true;
+
list_move(&bo->request, &kgem->next_request->buffers);
+
+ /* XXX is it worth working around gcc here? */
kgem->flush |= bo->flush;
+ kgem->sync |= bo->sync;
}
static uint32_t kgem_end_batch(struct kgem *kgem)
@@ -766,7 +770,7 @@ static void kgem_finish_partials(struct kgem *kgem)
}
/* transfer the handle to a minimum bo */
- if (bo->base.refcnt == 1 && !bo->base.sync) {
+ if (bo->base.refcnt == 1 && !bo->base.vmap) {
struct kgem_bo *base = malloc(sizeof(*base));
if (base) {
memcpy(base, &bo->base, sizeof (*base));
@@ -1845,13 +1849,35 @@ void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write)
drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
bo->needs_flush = false;
- if (bo->gpu)
+ if (bo->gpu) {
+ kgem->sync = false;
kgem_retire(kgem);
+ }
bo->cpu_read = true;
if (for_write)
bo->cpu_write = true;
}
+void kgem_sync(struct kgem *kgem)
+{
+ if (!list_is_empty(&kgem->requests)) {
+ struct drm_i915_gem_set_domain set_domain;
+ struct kgem_request *rq;
+
+ rq = list_first_entry(&kgem->requests,
+ struct kgem_request,
+ list);
+ set_domain.handle = rq->bo->handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+
+ drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ kgem_retire(kgem);
+ }
+
+ kgem->sync = false;
+}
+
void kgem_clear_dirty(struct kgem *kgem)
{
struct kgem_request *rq = kgem->next_request;
@@ -2023,7 +2049,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
bo->need_io = true;
} else {
__kgem_bo_init(&bo->base, handle, alloc);
- bo->base.sync = true;
+ bo->base.vmap = true;
bo->need_io = 0;
}
bo->base.reusable = false;
@@ -2161,10 +2187,10 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
bo = (struct kgem_partial_bo *)_bo;
- DBG(("%s(offset=%d, length=%d, sync=%d)\n", __FUNCTION__,
- offset, length, bo->base.sync));
+ DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
+ offset, length, bo->base.vmap));
- if (!bo->base.sync) {
+ if (!bo->base.vmap) {
gem_read(kgem->fd, bo->base.handle,
(char *)(bo+1)+offset, length);
bo->base.needs_flush = false;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0453ac51..70a58106 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -72,6 +72,7 @@ struct kgem_bo {
uint32_t needs_flush : 1;
uint32_t cpu_read : 1;
uint32_t cpu_write : 1;
+ uint32_t vmap : 1;
uint32_t flush : 1;
uint32_t sync : 1;
uint32_t deleted : 1;
@@ -110,6 +111,7 @@ struct kgem {
uint16_t nfence;
uint32_t flush:1;
+ uint32_t sync:1;
uint32_t need_expire:1;
uint32_t need_purge:1;
uint32_t need_retire:1;
@@ -334,6 +336,7 @@ static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
}
void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write);
+void kgem_sync(struct kgem *kgem);
#define KGEM_BUFFER_WRITE 0x1
#define KGEM_BUFFER_LAST 0x2
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 1097d74e..7a7db453 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -520,7 +520,7 @@ static inline struct kgem_bo *pixmap_vmap(struct kgem *kgem, PixmapPtr pixmap)
{
struct sna_pixmap *priv;
- if (kgem->wedged)
+ if (unlikely(kgem->wedged))
return NULL;
priv = sna_pixmap_attach(pixmap);
@@ -532,8 +532,11 @@ static inline struct kgem_bo *pixmap_vmap(struct kgem *kgem, PixmapPtr pixmap)
pixmap->devPrivate.ptr,
pixmap_size(pixmap),
0);
- if (priv->cpu_bo)
+ if (priv->cpu_bo) {
priv->cpu_bo->pitch = pixmap->devKind;
+ if (pixmap->usage_hint == CREATE_PIXMAP_USAGE_SCRATCH_HEADER)
+ priv->cpu_bo->sync = true;
+ }
}
return priv->cpu_bo;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9e1f75fe..48d3d6a4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -188,8 +188,7 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
if (priv->cpu_bo) {
- if (pixmap->usage_hint != CREATE_PIXMAP_USAGE_SCRATCH_HEADER &&
- kgem_bo_is_busy(priv->cpu_bo)) {
+ if (kgem_bo_is_busy(priv->cpu_bo)) {
list_add_tail(&priv->list, &sna->deferred_free);
return false;
}
@@ -7965,7 +7964,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
{
struct sna *sna = user_data;
- if (sna->kgem.flush == 0 && list_is_empty(&sna->dirty_pixmaps))
+ if ((sna->kgem.sync|sna->kgem.flush) == 0 &&
+ list_is_empty(&sna->dirty_pixmaps))
return;
DBG(("%s\n", __FUNCTION__));
@@ -7979,6 +7979,21 @@ sna_accel_flush_callback(CallbackListPtr *list,
}
kgem_submit(&sna->kgem);
+
+ if (sna->kgem.sync) {
+ kgem_sync(&sna->kgem);
+
+ while (!list_is_empty(&sna->deferred_free)) {
+ struct sna_pixmap *priv =
+ list_first_entry(&sna->deferred_free,
+ struct sna_pixmap,
+ list);
+ list_del(&priv->list);
+ kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
+ fbDestroyPixmap(priv->pixmap);
+ free(priv);
+ }
+ }
}
static void sna_deferred_free(struct sna *sna)
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 5420712f..9ea3efb9 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1120,13 +1120,8 @@ prepare_blt_put(struct sna *sna,
if (priv) {
if (!priv->gpu_only) {
src_bo = priv->cpu_bo;
- if (!src_bo) {
- src_bo = kgem_create_map(&sna->kgem,
- src->devPrivate.ptr,
- pixmap_size(src),
- 1);
- priv->cpu_bo = src_bo;
- }
+ if (!src_bo)
+ src_bo = pixmap_vmap(&sna->kgem, src);
}
} else {
src_bo = kgem_create_map(&sna->kgem,