author     Chris Wilson <chris@chris-wilson.co.uk>  2012-01-15 15:35:57 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>  2012-01-15 15:35:57 +0000
commit     37ced44a53008debaf869ec9ef4ba2e5d6982e76 (patch)
tree       41eb0a19712d2b5f48f3fd44265f1812859eb7b3
parent     e3732a6f7f61a959521be9a668bba045591e633c (diff)
sna: Be a little more lenient wrt damage migration if we have CPU bo
The idea being that CPU bos facilitate copying to and from the CPU, but we also want to avoid stalling on any pixels held by the CPU bo.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--  src/sna/sna_accel.c  42
1 file changed, 34 insertions, 8 deletions
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 932eef06..b9e8caf1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1505,15 +1505,32 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable,
return FALSE;
}
+ if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+ *damage = NULL;
+ return TRUE;
+ }
+
if (DAMAGE_IS_ALL(priv->cpu_damage))
return FALSE;
- if (priv->gpu_bo == NULL &&
- (sna_pixmap_choose_tiling(pixmap) == I915_TILING_NONE ||
- (priv->cpu_damage && !box_inplace(pixmap, box)) ||
- !sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ))) {
- DBG(("%s: no GPU bo allocated\n", __FUNCTION__));
- return FALSE;
+ if (priv->gpu_bo == NULL) {
+ if (sna_pixmap_choose_tiling(pixmap) == I915_TILING_NONE) {
+ DBG(("%s: untiled, will not force allocation\n",
+ __FUNCTION__));
+ return FALSE;
+ }
+
+ if (priv->cpu_damage && !box_inplace(pixmap, box)) {
+ DBG(("%s: damaged with a small operation, will not force allocation\n",
+ __FUNCTION__));
+ return FALSE;
+ }
+
+ if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ))
+ return FALSE;
+
+ DBG(("%s: allocated GPU bo for operation\n", __FUNCTION__));
+ goto done;
}
get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -1536,13 +1553,20 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable,
return TRUE;
}
- if (ret != PIXMAN_REGION_OUT && kgem_bo_is_busy(priv->gpu_bo)) {
+ if (ret != PIXMAN_REGION_OUT &&
+ (priv->cpu_bo || kgem_bo_is_busy(priv->gpu_bo))) {
DBG(("%s: region partially contained within busy GPU damage\n",
__FUNCTION__));
goto move_to_gpu;
}
}
+ if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
+ DBG(("%s: busy CPU bo, prefer to use GPU\n",
+ __FUNCTION__));
+ goto move_to_gpu;
+ }
+
if (priv->cpu_damage) {
int ret = sna_damage_contains_box(priv->cpu_damage, &extents);
if (ret == PIXMAN_REGION_IN) {
@@ -1556,7 +1580,8 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable,
goto move_to_gpu;
}
- if (ret != PIXMAN_REGION_OUT && !kgem_bo_is_busy(priv->gpu_bo)) {
+ if (ret != PIXMAN_REGION_OUT &&
+ (priv->cpu_bo || !kgem_bo_is_busy(priv->gpu_bo))) {
DBG(("%s: region partially contained within idle CPU damage\n",
__FUNCTION__));
return FALSE;
@@ -1569,6 +1594,7 @@ move_to_gpu:
return FALSE;
}
+done:
*damage = DAMAGE_IS_ALL(priv->gpu_damage) ? NULL : &priv->gpu_damage;
return TRUE;
}
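
For orientation, the decision logic that _sna_drawable_use_gpu_bo() grows in this patch can be distilled roughly as below. This is a minimal, hypothetical sketch under simplified state flags; choose_target(), struct pixmap_state and its fields are illustrative names, not the driver's actual data structures or API.

/*
 * Hypothetical sketch of the migration heuristic in the patch above.
 * All names here are illustrative stand-ins for the sna internals.
 */
#include <stdbool.h>
#include <stdio.h>

enum target { USE_CPU, USE_GPU };

struct bo_state {
	bool exists;
	bool busy;                  /* still referenced by in-flight GPU work */
};

struct pixmap_state {
	struct bo_state gpu_bo;
	struct bo_state cpu_bo;
	bool overlaps_gpu_damage;   /* target box partly covered by GPU-dirty pixels */
	bool overlaps_cpu_damage;   /* target box partly covered by CPU-dirty pixels */
};

static enum target choose_target(const struct pixmap_state *s)
{
	/* Partly inside GPU damage: migrating is now also worthwhile when a
	 * CPU bo exists, since copies to and from it are cheap. */
	if (s->overlaps_gpu_damage && (s->cpu_bo.exists || s->gpu_bo.busy))
		return USE_GPU;

	/* A busy CPU bo would stall any CPU access, so render on the GPU. */
	if (s->cpu_bo.exists && s->cpu_bo.busy)
		return USE_GPU;

	/* Partly inside CPU damage: with a CPU bo (or an idle GPU bo) it is
	 * cheaper to keep the operation on the CPU. */
	if (s->overlaps_cpu_damage && (s->cpu_bo.exists || !s->gpu_bo.busy))
		return USE_CPU;

	return USE_GPU;
}

int main(void)
{
	/* Example: a busy CPU bo pushes the operation onto the GPU. */
	struct pixmap_state s = {
		.cpu_bo = { .exists = true, .busy = true },
	};
	printf("render on GPU: %s\n",
	       choose_target(&s) == USE_GPU ? "yes" : "no");
	return 0;
}

The key change relative to the pre-patch behaviour is that the mere presence of a CPU bo, not only a busy GPU bo, now tips partially GPU-damaged regions toward migration, and a busy CPU bo is never touched directly from the CPU.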