author    | Chris Wilson <chris@chris-wilson.co.uk> | 2012-01-11 16:39:45 +0000
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2012-01-12 02:16:49 +0000
commit    | a3c42565a8f557b2e7f7ff7bfa45b13b606f2968 (patch)
tree      | 13330caa23d7a116afcb43492930c593f397b03a
parent    | c64a9d0683e047a7eb041df78db746f6dd387b5e (diff)
sna: Store damage-all in the low bit of the damage pointer
Avoid the function call overhead by inspecting the low bit of the pointer
to see whether the damage is already in the all-damaged state.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
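
For context, the DAMAGE_IS_ALL/DAMAGE_MARK_ALL/DAMAGE_PTR macros added to sna_damage.h below rely on a heap-allocated struct sna_damage being at least word-aligned, so the low bit of its pointer is normally clear and can be borrowed as an "all-damaged" flag. The following standalone sketch illustrates that pointer-tagging idiom; the simplified struct and the main() driver are illustrative only and are not part of the patch.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct sna_damage; the real struct also carries
 * a pixman region, extents and an embedded box list. */
struct sna_damage {
	int mode;
};

/* The macros as added to sna_damage.h: malloc() returns word-aligned
 * memory, so the low bit of the pointer is free to act as a flag. */
#define DAMAGE_IS_ALL(ptr)   (((uintptr_t)(ptr)) & 1)
#define DAMAGE_MARK_ALL(ptr) ((struct sna_damage *)(((uintptr_t)(ptr)) | 1))
#define DAMAGE_PTR(ptr)      ((struct sna_damage *)(((uintptr_t)(ptr)) & ~1))

int main(void)
{
	struct sna_damage *damage = malloc(sizeof(*damage));

	assert(damage && !DAMAGE_IS_ALL(damage));

	/* Once the damage grows to cover the whole pixmap, tag the pointer. */
	damage = DAMAGE_MARK_ALL(damage);

	/* The hot "is everything damaged?" query becomes a single bit test
	 * instead of a call into sna_damage.c. */
	if (DAMAGE_IS_ALL(damage))
		printf("all damaged\n");

	/* Anything that needs the struct itself must strip the tag first. */
	free(DAMAGE_PTR(damage));
	return 0;
}
```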
-rw-r--r-- | src/sna/gen3_render.c       |  12
-rw-r--r-- | src/sna/kgem.c              |   7
-rw-r--r-- | src/sna/kgem.h              |  11
-rw-r--r-- | src/sna/kgem_debug_gen6.c   |   6
-rw-r--r-- | src/sna/kgem_debug_gen7.c   |   6
-rw-r--r-- | src/sna/sna_accel.c         | 532
-rw-r--r-- | src/sna/sna_composite.c     |  20
-rw-r--r-- | src/sna/sna_damage.c        |  68
-rw-r--r-- | src/sna/sna_damage.h        | 139
-rw-r--r-- | src/sna/sna_display.c       |   3
-rw-r--r-- | src/sna/sna_dri.c           |   3
-rw-r--r-- | src/sna/sna_io.c            |  17
-rw-r--r-- | src/sna/sna_render.c        |   6
-rw-r--r-- | src/sna/sna_render_inline.h |   9
-rw-r--r-- | src/sna/sna_video.c         |   2

15 files changed, 528 insertions, 313 deletions
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c index f0255153..f5946c12 100644 --- a/src/sna/gen3_render.c +++ b/src/sna/gen3_render.c @@ -2261,12 +2261,13 @@ gen3_composite_set_target(struct sna *sna, get_drawable_deltas(dst->pDrawable, op->dst.pixmap, &op->dst.x, &op->dst.y); - DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d)\n", + DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n", __FUNCTION__, op->dst.pixmap, (int)op->dst.format, op->dst.width, op->dst.height, op->dst.bo->pitch, - op->dst.x, op->dst.y)); + op->dst.x, op->dst.y, + op->damage ? *op->damage : (void *)-1)); return TRUE; } @@ -3054,8 +3055,11 @@ gen3_render_composite_spans(struct sna *sna, if (gen3_composite_fallback(sna, op, src, NULL, dst)) return FALSE; - if (need_tiling(sna, width, height)) + if (need_tiling(sna, width, height)) { + DBG(("%s: fallback, operation (%dx%d) too wide for pipeline\n", + __FUNCTION__, width, height)); return FALSE; + } if (!gen3_composite_set_target(sna, &tmp->base, dst)) { DBG(("%s: unable to set render target\n", @@ -3913,7 +3917,7 @@ gen3_render_fill_boxes_try_blt(struct sna *sna, color->blue, color->alpha, format)) { - DBG(("%s: unknown format %x\n", __FUNCTION__, format)); + DBG(("%s: unknown format %lx\n", __FUNCTION__, format)); return FALSE; } } else { diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 7636387f..f5c3cfba 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -2568,13 +2568,13 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket) } } -void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot) +void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo) { void *ptr; assert(bo->refcnt); - assert(bo->exec == NULL); assert(!bo->purged); + assert(bo->exec == NULL); assert(list_is_empty(&bo->list)); if (IS_CPU_MAP(bo->map)) @@ -3083,6 +3083,9 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem, stride = ALIGN(width, 2) * bpp >> 3; stride = ALIGN(stride, kgem->min_alignment); + DBG(("%s: %dx%d, %d bpp, stride=%d\n", + __FUNCTION__, width, height, bpp, stride)); + bo = kgem_create_buffer(kgem, stride * ALIGN(height, 2), flags, ret); if (bo == NULL) return NULL; diff --git a/src/sna/kgem.h b/src/sna/kgem.h index 12e5c2fa..3186a991 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -344,7 +344,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem, uint32_t read_write_domains, uint32_t delta); -void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot); +void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo); void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo); void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo); void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo); @@ -358,12 +358,15 @@ int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo); static inline bool kgem_bo_is_mappable(struct kgem *kgem, struct kgem_bo *bo) { - DBG_HDR(("%s: offset: %d size: %d\n", - __FUNCTION__, bo->presumed_offset, bo->size)); + DBG_HDR(("%s: domain=%d, offset: %d size: %d\n", + __FUNCTION__, bo->domain, bo->presumed_offset, bo->size)); + + if (bo->domain == DOMAIN_GTT) + return true; if (kgem->gen < 40 && bo->tiling && bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1)) - return false; + return false; return bo->presumed_offset + bo->size <= kgem->aperture_mappable; } diff --git a/src/sna/kgem_debug_gen6.c b/src/sna/kgem_debug_gen6.c index d23e2d93..f748da6c 100644 --- a/src/sna/kgem_debug_gen6.c +++ b/src/sna/kgem_debug_gen6.c @@ -83,7 +83,7 @@ static 
void gen6_update_vertex_buffer(struct kgem *kgem, const uint32_t *data) if (bo->handle == reloc) break; assert(&bo->request != &kgem->next_request->buffers); - base = kgem_bo_map(kgem, bo, PROT_READ); + base = kgem_bo_map__debug(kgem, bo); } ptr = (char *)base + kgem->reloc[i].delta; @@ -118,7 +118,7 @@ static void gen6_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset) if (bo->handle == reloc) break; assert(&bo->request != &kgem->next_request->buffers); - base = kgem_bo_map(kgem, bo, PROT_READ); + base = kgem_bo_map__debug(kgem, bo); } ptr = (char *)base + (kgem->reloc[i].delta & ~1); } else { @@ -450,7 +450,7 @@ get_reloc(struct kgem *kgem, if (bo->handle == handle) break; assert(&bo->request != &kgem->next_request->buffers); - base = kgem_bo_map(kgem, bo, PROT_READ); + base = kgem_bo_map__debug(kgem, bo); r->bo = bo; r->base = base; } diff --git a/src/sna/kgem_debug_gen7.c b/src/sna/kgem_debug_gen7.c index c13e96f2..a7dbbf21 100644 --- a/src/sna/kgem_debug_gen7.c +++ b/src/sna/kgem_debug_gen7.c @@ -83,7 +83,7 @@ static void gen7_update_vertex_buffer(struct kgem *kgem, const uint32_t *data) if (bo->handle == reloc) break; assert(&bo->request != &kgem->next_request->buffers); - base = kgem_bo_map(kgem, bo, PROT_READ); + base = kgem_bo_map__debug(kgem, bo); } ptr = (char *)base + kgem->reloc[i].delta; @@ -118,7 +118,7 @@ static void gen7_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset) if (bo->handle == reloc) break; assert(&bo->request != &kgem->next_request->buffers); - base = kgem_bo_map(kgem, bo, PROT_READ); + base = kgem_bo_map__debug(kgem, bo); } ptr = (char *)base + (kgem->reloc[i].delta & ~1); } else { @@ -450,7 +450,7 @@ get_reloc(struct kgem *kgem, if (bo->handle == handle) break; assert(&bo->request != &kgem->next_request->buffers); - base = kgem_bo_map(kgem, bo, PROT_READ); + base = kgem_bo_map__debug(kgem, bo); r->bo = bo; r->base = base; } diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c index ef0c5773..bc51112c 100644 --- a/src/sna/sna_accel.c +++ b/src/sna/sna_accel.c @@ -304,8 +304,8 @@ static inline uint32_t default_tiling(PixmapPtr pixmap) } if (sna_damage_is_all(&priv->cpu_damage, - pixmap->drawable.width, - pixmap->drawable.height)) { + pixmap->drawable.width, + pixmap->drawable.height)) { sna_damage_destroy(&priv->gpu_damage); return I915_TILING_Y; } @@ -539,10 +539,10 @@ sna_pixmap_create_scratch(ScreenPtr screen, if (tiling == I915_TILING_Y && !sna->have_render) tiling = I915_TILING_X; - if (tiling == I915_TILING_Y && - (width > sna->render.max_3d_size || - height > sna->render.max_3d_size)) - tiling = I915_TILING_X; + if (tiling == I915_TILING_Y && + (width > sna->render.max_3d_size || + height > sna->render.max_3d_size)) + tiling = I915_TILING_X; bpp = BitsPerPixel(depth); tiling = kgem_choose_tiling(&sna->kgem, tiling, width, height, bpp); @@ -636,9 +636,9 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen, width, height, depth, I915_TILING_X); #else - return create_pixmap(sna, screen, - width, height, depth, - usage); + return create_pixmap(sna, screen, + width, height, depth, + usage); #endif if (usage == SNA_CREATE_SCRATCH) @@ -746,9 +746,6 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags) priv->gpu_bo ? 
priv->gpu_bo->handle : 0, priv->gpu_damage)); - if (priv->cpu_damage && priv->cpu_damage->mode == DAMAGE_ALL) - goto done; - if ((flags & MOVE_READ) == 0) { assert(flags == MOVE_WRITE); @@ -768,8 +765,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags) } pixmap->devPrivate.ptr = - kgem_bo_map(&sna->kgem, priv->gpu_bo, - PROT_WRITE); + kgem_bo_map(&sna->kgem, priv->gpu_bo); if (pixmap->devPrivate.ptr == NULL) goto skip_inplace_map; @@ -797,7 +793,7 @@ skip_inplace_map: } } - if (priv->cpu_damage && priv->cpu_damage->mode == DAMAGE_ALL) { + if (DAMAGE_IS_ALL(priv->cpu_damage)) { DBG(("%s: CPU all-damaged\n", __FUNCTION__)); goto done; } @@ -837,7 +833,7 @@ skip_inplace_map: box, n); } - __sna_damage_destroy(priv->gpu_damage); + __sna_damage_destroy(DAMAGE_PTR(priv->gpu_damage)); priv->gpu_damage = NULL; } @@ -878,22 +874,50 @@ region_subsumes_drawable(RegionPtr region, DrawablePtr drawable) } static bool -region_subsumes_gpu_damage(const RegionRec *region, struct sna_pixmap *priv) +region_subsumes_damage(const RegionRec *region, struct sna_damage *damage) { - if (region->data) + const BoxRec *re, *de; + + DBG(("%s?\n", __FUNCTION__)); + assert(damage); + + re = ®ion->extents; + de = &DAMAGE_PTR(damage)->extents; + DBG(("%s: region (%d, %d), (%d, %d), extents (%d, %d), (%d, %d)\n", + __FUNCTION__, + re->x1, re->y1, re->x2, re->y2, + de->x1, de->y1, de->x2, de->y2)); + + if (re->x2 < de->x2 || re->x1 > de->x1 || + re->y2 < de->y2 || re->y1 > de->y1) { + DBG(("%s: no overlap\n", __FUNCTION__)); return false; + } - if (priv->gpu_damage) { - const BoxRec *extents = ®ion->extents; - const BoxRec *damage = &priv->gpu_damage->extents; - if (extents->x2 < damage->x2 || extents->x1 > damage->x1 || - extents->y2 < damage->y2 || extents->y1 > damage->y1) - return false; + if (region->data == NULL) { + DBG(("%s: singular region contains damage\n", __FUNCTION__)); + return true; } - return true; + return pixman_region_contains_rectangle((RegionPtr)region, + (BoxPtr)de) == PIXMAN_REGION_IN; } +#ifndef NDEBUG +static bool +pixmap_contains_damage(PixmapPtr pixmap, struct sna_damage *damage) +{ + if (damage == NULL) + return true; + + damage = DAMAGE_PTR(damage); + return damage->extents.x2 <= pixmap->drawable.width && + damage->extents.y2 <= pixmap->drawable.height && + damage->extents.x1 >= 0 && + damage->extents.y1 >= 0; +} +#endif + static bool sync_will_stall(struct kgem_bo *bo) { return kgem_bo_is_busy(bo); @@ -910,19 +934,20 @@ static inline bool region_inplace(struct sna *sna, if (wedged(sna)) return false; + if (priv->flush) { + DBG(("%s: exported via dri, will flush\n", __FUNCTION__)); + return true; + } + if (priv->mapped) { DBG(("%s: already mapped\n", __FUNCTION__)); return true; } - if (priv->cpu_damage) { - const BoxRec *extents = ®ion->extents; - const BoxRec *damage = &priv->cpu_damage->extents; - if (extents->x2 < damage->x2 || extents->x1 > damage->x1 || - extents->y2 < damage->y2 || extents->y1 > damage->y1) { - DBG(("%s: uncovered CPU damage pending\n", __FUNCTION__)); - return false; - } + if (priv->cpu_damage && + region_subsumes_damage(region, priv->cpu_damage)) { + DBG(("%s: uncovered CPU damage pending\n", __FUNCTION__)); + return false; } DBG(("%s: (%dx%d), inplace? 
%d\n", @@ -970,8 +995,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, struct sna_pixmap *priv; int16_t dx, dy; - DBG(("%s(pixmap=%p (%dx%d), [(%d, %d), (%d, %d)], flags=%d)\n", - __FUNCTION__, pixmap, + DBG(("%s(pixmap=%ld (%dx%d), [(%d, %d), (%d, %d)], flags=%d)\n", + __FUNCTION__, pixmap->drawable.serialNumber, pixmap->drawable.width, pixmap->drawable.height, RegionExtents(region)->x1, RegionExtents(region)->y1, RegionExtents(region)->x2, RegionExtents(region)->y2, @@ -998,7 +1023,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, return _sna_pixmap_move_to_cpu(pixmap, flags); } - if (priv->cpu_damage && priv->cpu_damage->mode == DAMAGE_ALL) + if (DAMAGE_IS_ALL(priv->cpu_damage)) goto done; if ((flags & MOVE_READ) == 0) { @@ -1012,8 +1037,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) { pixmap->devPrivate.ptr = - kgem_bo_map(&sna->kgem, priv->gpu_bo, - PROT_WRITE); + kgem_bo_map(&sna->kgem, priv->gpu_bo); if (pixmap->devPrivate.ptr == NULL) return false; @@ -1050,8 +1074,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, region_inplace(sna, pixmap, region, priv) && sna_pixmap_create_mappable_gpu(pixmap)) { pixmap->devPrivate.ptr = - kgem_bo_map(&sna->kgem, priv->gpu_bo, - PROT_WRITE); + kgem_bo_map(&sna->kgem, priv->gpu_bo); if (pixmap->devPrivate.ptr == NULL) return false; @@ -1104,11 +1127,14 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, pixmap, 0, 0, ®ion->extents, 1); } else { - RegionRec want, need, *r; + RegionRec want, *r = region; - r = region; - /* expand the region to move 32x32 pixel blocks at a time */ - if (priv->cpu_damage == NULL) { + /* Expand the region to move 32x32 pixel blocks at a + * time, as we assume that we will continue writing + * afterwards and so aim to coallesce subsequent + * reads. 
+ */ + if (flags & MOVE_WRITE) { int n = REGION_NUM_RECTS(region), i; BoxPtr boxes = REGION_RECTS(region); BoxPtr blocks = malloc(sizeof(BoxRec) * REGION_NUM_RECTS(region)); @@ -1136,20 +1162,36 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, } } - pixman_region_init(&need); - if (sna_damage_intersect(priv->gpu_damage, r, &need)) { - BoxPtr box = REGION_RECTS(&need); - int n = REGION_NUM_RECTS(&need); - struct kgem_bo *dst_bo; - Bool ok = FALSE; - - dst_bo = NULL; - if (sna->kgem.gen >= 30) - dst_bo = priv->cpu_bo; - if (dst_bo) + if (region_subsumes_damage(r, priv->gpu_damage)) { + BoxPtr box = REGION_RECTS(&DAMAGE_PTR(priv->gpu_damage)->region); + int n = REGION_NUM_RECTS(&DAMAGE_PTR(priv->gpu_damage)->region); + Bool ok; + + ok = FALSE; + if (priv->cpu_bo && sna->kgem.gen >= 30) + ok = sna->render.copy_boxes(sna, GXcopy, + pixmap, priv->gpu_bo, 0, 0, + pixmap, priv->cpu_bo, 0, 0, + box, n); + if (!ok) + sna_read_boxes(sna, + priv->gpu_bo, 0, 0, + pixmap, 0, 0, + box, n); + + sna_damage_destroy(&priv->gpu_damage); + } else if (DAMAGE_IS_ALL(priv->gpu_damage) || + sna_damage_contains_box__no_reduce(priv->gpu_damage, + &r->extents)) { + BoxPtr box = REGION_RECTS(r); + int n = REGION_NUM_RECTS(r); + Bool ok; + + ok = FALSE; + if (priv->cpu_bo && sna->kgem.gen >= 30) ok = sna->render.copy_boxes(sna, GXcopy, pixmap, priv->gpu_bo, 0, 0, - pixmap, dst_bo, 0, 0, + pixmap, priv->cpu_bo, 0, 0, box, n); if (!ok) sna_read_boxes(sna, @@ -1158,7 +1200,30 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, box, n); sna_damage_subtract(&priv->gpu_damage, r); - RegionUninit(&need); + } else { + RegionRec need; + + pixman_region_init(&need); + if (sna_damage_intersect(priv->gpu_damage, r, &need)) { + BoxPtr box = REGION_RECTS(&need); + int n = REGION_NUM_RECTS(&need); + Bool ok = FALSE; + + ok = FALSE; + if (priv->cpu_bo && sna->kgem.gen >= 30) + ok = sna->render.copy_boxes(sna, GXcopy, + pixmap, priv->gpu_bo, 0, 0, + pixmap, priv->cpu_bo, 0, 0, + box, n); + if (!ok) + sna_read_boxes(sna, + priv->gpu_bo, 0, 0, + pixmap, 0, 0, + box, n); + + sna_damage_subtract(&priv->gpu_damage, r); + RegionUninit(&need); + } } if (r == &want) pixman_region_fini(&want); @@ -1171,7 +1236,7 @@ done: kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo); } - if (flags & MOVE_WRITE) { + if (flags & MOVE_WRITE && !DAMAGE_IS_ALL(priv->cpu_damage)) { DBG(("%s: applying cpu damage\n", __FUNCTION__)); assert_pixmap_contains_box(pixmap, RegionExtents(region)); sna_damage_add(&priv->cpu_damage, region); @@ -1202,11 +1267,10 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box) DBG(("%s()\n", __FUNCTION__)); - if (priv->gpu_damage && priv->gpu_damage->mode == DAMAGE_ALL) + if (DAMAGE_IS_ALL(priv->gpu_damage)) goto done; if (priv->gpu_bo == NULL) { - struct sna *sna = to_sna_from_pixmap(pixmap); unsigned flags; flags = 0; @@ -1234,16 +1298,15 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box) goto done; region_set(&r, box); - if (sna_damage_intersect(priv->cpu_damage, &r, &i)) { - int n = REGION_NUM_RECTS(&i); - struct kgem_bo *src_bo; - Bool ok = FALSE; + if (region_subsumes_damage(&r, priv->cpu_damage)) { + int n = REGION_NUM_RECTS(&DAMAGE_PTR(priv->cpu_damage)->region); + Bool ok; - box = REGION_RECTS(&i); - src_bo = priv->cpu_bo; - if (src_bo) + box = REGION_RECTS(&DAMAGE_PTR(priv->cpu_damage)->region); + ok = FALSE; + if (priv->cpu_bo) ok = sna->render.copy_boxes(sna, GXcopy, - pixmap, src_bo, 0, 0, + pixmap, priv->cpu_bo, 0, 0, pixmap, priv->gpu_bo, 0, 0, box, n); if (!ok) { @@ -1267,6 +1330,46 @@ 
sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box) } } + sna_damage_destroy(&priv->cpu_damage); + list_del(&priv->list); + } else if (DAMAGE_IS_ALL(priv->gpu_damage) || + sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) { + Bool ok = FALSE; + if (priv->cpu_bo) + ok = sna->render.copy_boxes(sna, GXcopy, + pixmap, priv->cpu_bo, 0, 0, + pixmap, priv->gpu_bo, 0, 0, + box, 1); + if (!ok) + sna_write_boxes(sna, pixmap, + priv->gpu_bo, 0, 0, + pixmap->devPrivate.ptr, + pixmap->devKind, + 0, 0, + box, 1); + + sna_damage_subtract(&priv->cpu_damage, &r); + if (priv->cpu_damage == NULL) + list_del(&priv->list); + } else if (sna_damage_intersect(priv->cpu_damage, &r, &i)) { + int n = REGION_NUM_RECTS(&i); + Bool ok; + + box = REGION_RECTS(&i); + ok = FALSE; + if (priv->cpu_bo) + ok = sna->render.copy_boxes(sna, GXcopy, + pixmap, priv->cpu_bo, 0, 0, + pixmap, priv->gpu_bo, 0, 0, + box, n); + if (!ok) + sna_write_boxes(sna, pixmap, + priv->gpu_bo, 0, 0, + pixmap->devPrivate.ptr, + pixmap->devKind, + 0, 0, + box, n); + sna_damage_subtract(&priv->cpu_damage, &r); RegionUninit(&i); @@ -1302,6 +1405,9 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable, return FALSE; } + if (DAMAGE_IS_ALL(priv->cpu_damage)) + return FALSE; + if (priv->gpu_bo == NULL && (sna_pixmap_choose_tiling(pixmap) == I915_TILING_NONE || (priv->cpu_damage && !box_inplace(pixmap, box)) || @@ -1363,7 +1469,7 @@ move_to_gpu: return FALSE; } - *damage = &priv->gpu_damage; + *damage = DAMAGE_IS_ALL(priv->gpu_damage) ? NULL : &priv->gpu_damage; return TRUE; } @@ -1395,11 +1501,14 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable, BoxRec extents; int16_t dx, dy; - if (priv == NULL) - return FALSE; - if (priv->cpu_bo == NULL) + if (priv == NULL || priv->cpu_bo == NULL) return FALSE; + if (DAMAGE_IS_ALL(priv->cpu_damage)) { + *damage = NULL; + return TRUE; + } + get_drawable_deltas(drawable, pixmap, &dx, &dy); extents = *box; @@ -1408,18 +1517,9 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable, extents.y1 += dy; extents.y2 += dy; - if (priv->gpu_damage == NULL) - goto done; - - if (sna_damage_contains_box(priv->gpu_damage, - &extents) != PIXMAN_REGION_OUT) - return FALSE; - -done: *damage = &priv->cpu_damage; if (priv->cpu_damage && - (priv->cpu_damage->mode == DAMAGE_ALL || - sna_damage_contains_box__no_reduce(priv->cpu_damage, &extents))) + sna_damage_contains_box__no_reduce(priv->cpu_damage, &extents)) *damage = NULL; return TRUE; @@ -1528,20 +1628,20 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags) /* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */ if (priv->gpu_bo == NULL) { struct sna *sna = to_sna_from_pixmap(pixmap); - unsigned flags; + unsigned mode; - flags = 0; + mode = 0; if (priv->cpu_damage) - flags |= CREATE_INACTIVE; + mode |= CREATE_INACTIVE; if (pixmap->usage_hint == SNA_CREATE_FB) - flags |= CREATE_EXACT | CREATE_SCANOUT; + mode |= CREATE_EXACT | CREATE_SCANOUT; priv->gpu_bo = kgem_create_2d(&sna->kgem, pixmap->drawable.width, pixmap->drawable.height, pixmap->drawable.bitsPerPixel, sna_pixmap_choose_tiling(pixmap), - flags); + mode); if (priv->gpu_bo == NULL) return NULL; @@ -1585,12 +1685,14 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) if (priv == NULL) return NULL; - if (priv->gpu_damage && priv->gpu_damage->mode == DAMAGE_ALL) + if (DAMAGE_IS_ALL(priv->gpu_damage)) goto done; + if ((flags & MOVE_READ) == 0) + sna_damage_destroy(&priv->cpu_damage); + sna_damage_reduce(&priv->cpu_damage); DBG(("%s: CPU damage? 
%d\n", __FUNCTION__, priv->cpu_damage != NULL)); - if (priv->gpu_bo == NULL) { if (!wedged(sna)) priv->gpu_bo = @@ -1620,9 +1722,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) } } - if ((flags & MOVE_READ) == 0) - sna_damage_destroy(&priv->cpu_damage); - if (priv->cpu_damage == NULL) goto done; @@ -1630,13 +1729,14 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) n = sna_damage_get_boxes(priv->cpu_damage, &box); if (n) { - struct kgem_bo *src_bo; - Bool ok = FALSE; + Bool ok; - src_bo = priv->cpu_bo; - if (src_bo) + assert(pixmap_contains_damage(pixmap, priv->cpu_damage)); + + ok = FALSE; + if (priv->cpu_bo) ok = sna->render.copy_boxes(sna, GXcopy, - pixmap, src_bo, 0, 0, + pixmap, priv->cpu_bo, 0, 0, pixmap, priv->gpu_bo, 0, 0, box, n); if (!ok) { @@ -1660,7 +1760,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) } } - __sna_damage_destroy(priv->cpu_damage); + __sna_damage_destroy(DAMAGE_PTR(priv->cpu_damage)); priv->cpu_damage = NULL; list_del(&priv->list); @@ -1927,6 +2027,26 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, return ok; } +static bool upload_inplace(struct sna *sna, + PixmapPtr pixmap, + struct sna_pixmap *priv, + RegionRec *region) +{ + if (!region_inplace(sna, pixmap, region, priv)) + return false; + + if (priv->gpu_bo) { + if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) + return true; + + if (!priv->pinned && + region_subsumes_drawable(region, &pixmap->drawable)) + return true; + } + + return priv->gpu_bo == NULL && priv->cpu_bo == NULL; +} + static Bool sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, int x, int y, int w, int h, char *bits, int stride) @@ -1952,36 +2072,32 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, goto blt; } - if (!priv->pinned && priv->gpu_bo && - region_subsumes_gpu_damage(region, priv) && - kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) - sna_pixmap_free_gpu(sna, priv); - /* XXX performing the upload inplace is currently about 20x slower * for putimage10 on gen6 -- mostly due to slow page faulting in kernel. * So we try again with vma caching and only for pixmaps who will be * immediately flushed... 
*/ - if ((priv->flush || - (region_inplace(sna, pixmap, region, priv) && - ((priv->gpu_bo == NULL && priv->cpu_bo == NULL) || - (priv->gpu_bo != NULL && !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo))))) && + if (upload_inplace(sna, pixmap, priv, region) && sna_put_image_upload_blt(drawable, gc, region, x, y, w, h, bits, stride)) { - if (region_subsumes_drawable(region, &pixmap->drawable)) { - sna_damage_destroy(&priv->cpu_damage); - sna_damage_all(&priv->gpu_damage, - pixmap->drawable.width, - pixmap->drawable.height); - } else { - sna_damage_subtract(&priv->cpu_damage, region); - sna_damage_add(&priv->gpu_damage, region); + if (!DAMAGE_IS_ALL(priv->gpu_damage)) { + if (region_subsumes_drawable(region, &pixmap->drawable)) + sna_damage_destroy(&priv->cpu_damage); + else + sna_damage_subtract(&priv->cpu_damage, region); + if (priv->cpu_damage == NULL) + sna_damage_all(&priv->gpu_damage, + pixmap->drawable.width, + pixmap->drawable.height); + else + sna_damage_add(&priv->gpu_damage, region); } /* And mark as having a valid GTT mapping for future uploads */ - if (priv->stride) { + if (priv->stride && + !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) { pixmap->devPrivate.ptr = - kgem_bo_map(&sna->kgem, priv->gpu_bo, PROT_WRITE); + kgem_bo_map(&sna->kgem, priv->gpu_bo); if (pixmap->devPrivate.ptr) { priv->mapped = 1; pixmap->devKind = priv->gpu_bo->pitch; @@ -2005,14 +2121,17 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, if (priv->cpu_bo->vmap) { if (sna_put_image_upload_blt(drawable, gc, region, x, y, w, h, bits, stride)) { - if (region_subsumes_drawable(region, &pixmap->drawable)) { - sna_damage_destroy(&priv->cpu_damage); - sna_damage_all(&priv->gpu_damage, - pixmap->drawable.width, - pixmap->drawable.height); - } else { - sna_damage_subtract(&priv->cpu_damage, region); - sna_damage_add(&priv->gpu_damage, region); + if (!DAMAGE_IS_ALL(priv->gpu_damage)) { + if (region_subsumes_drawable(region, &pixmap->drawable)) + sna_damage_destroy(&priv->cpu_damage); + else + sna_damage_subtract(&priv->cpu_damage, region); + if (priv->cpu_damage == NULL) + sna_damage_all(&priv->gpu_damage, + pixmap->drawable.width, + pixmap->drawable.height); + else + sna_damage_add(&priv->gpu_damage, region); } return true; @@ -2043,26 +2162,27 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, !sna_pixmap_alloc_cpu(sna, pixmap, priv, false)) return true; - if (region_subsumes_drawable(region, &pixmap->drawable)) { - DBG(("%s: replacing entire pixmap\n", __FUNCTION__)); - sna_damage_all(&priv->cpu_damage, - pixmap->drawable.width, - pixmap->drawable.height); - sna_pixmap_free_gpu(sna, priv); - } else { - sna_damage_subtract(&priv->gpu_damage, region); - sna_damage_add(&priv->cpu_damage, region); - if (priv->gpu_bo && - sna_damage_is_all(&priv->cpu_damage, - pixmap->drawable.width, - pixmap->drawable.height)) { - DBG(("%s: replaced entire pixmap\n", __FUNCTION__)); + if (!DAMAGE_IS_ALL(priv->cpu_damage)) { + if (region_subsumes_drawable(region, &pixmap->drawable)) { + DBG(("%s: replacing entire pixmap\n", __FUNCTION__)); + sna_damage_all(&priv->cpu_damage, + pixmap->drawable.width, + pixmap->drawable.height); sna_pixmap_free_gpu(sna, priv); + } else { + sna_damage_subtract(&priv->gpu_damage, region); + sna_damage_add(&priv->cpu_damage, region); + if (priv->gpu_bo && + sna_damage_is_all(&priv->cpu_damage, + pixmap->drawable.width, + pixmap->drawable.height)) { + DBG(("%s: replaced entire pixmap\n", __FUNCTION__)); + sna_pixmap_free_gpu(sna, priv); + } } + if 
(priv->flush) + list_move(&priv->list, &sna->dirty_pixmaps); } - if (priv->flush) - list_move(&priv->list, &sna->dirty_pixmaps); - priv->source_count = SOURCE_BIAS; blt: get_drawable_deltas(drawable, pixmap, &dx, &dy); @@ -2117,7 +2237,7 @@ static inline uint8_t blt_depth(int depth) static Bool sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, - int x, int y, int w, int h, char *bits) + int x, int y, int w, int h, char *bits) { PixmapPtr pixmap = get_drawable_pixmap(drawable); struct sna *sna = to_sna_from_pixmap(pixmap); @@ -2236,7 +2356,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, static Bool sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, - int x, int y, int w, int h, int left,char *bits) + int x, int y, int w, int h, int left,char *bits) { PixmapPtr pixmap = get_drawable_pixmap(drawable); struct sna *sna = to_sna_from_pixmap(pixmap); @@ -2535,7 +2655,8 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, goto fallback; } - sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty); + if (!DAMAGE_IS_ALL(priv->gpu_damage)) + sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty); } else { FbBits *dst_bits, *src_bits; int stride, bpp; @@ -2593,14 +2714,11 @@ static bool copy_use_gpu_bo(struct sna *sna, struct sna_pixmap *priv, RegionPtr region) { - if (priv->flush) - return true; - if (region_inplace(sna, priv->pixmap, region, priv)) return true; if (!priv->cpu_bo) - return false; + return false; if (kgem_bo_is_busy(priv->cpu_bo)) { if (priv->cpu_bo->exec) @@ -2726,20 +2844,22 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, goto fallback; } - if (replaces) { - sna_damage_destroy(&dst_priv->cpu_damage); - sna_damage_all(&dst_priv->gpu_damage, - dst_pixmap->drawable.width, - dst_pixmap->drawable.height); - } else { - RegionTranslate(®ion, dst_dx, dst_dy); - assert_pixmap_contains_box(dst_pixmap, - RegionExtents(®ion)); - sna_damage_add(&dst_priv->gpu_damage, ®ion); - if (alu == GXcopy) - sna_damage_subtract(&dst_priv->cpu_damage, - ®ion); - RegionTranslate(®ion, -dst_dx, -dst_dy); + if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) { + if (replaces) { + sna_damage_destroy(&dst_priv->cpu_damage); + sna_damage_all(&dst_priv->gpu_damage, + dst_pixmap->drawable.width, + dst_pixmap->drawable.height); + } else { + RegionTranslate(®ion, dst_dx, dst_dy); + assert_pixmap_contains_box(dst_pixmap, + RegionExtents(®ion)); + sna_damage_add(&dst_priv->gpu_damage, ®ion); + if (alu == GXcopy) + sna_damage_subtract(&dst_priv->cpu_damage, + ®ion); + RegionTranslate(®ion, -dst_dx, -dst_dy); + } } } else if ((src_priv || (src_priv = _sna_pixmap_attach(src_pixmap))) && @@ -2753,20 +2873,22 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, goto fallback; } - if (replaces) { - sna_damage_destroy(&dst_priv->cpu_damage); - sna_damage_all(&dst_priv->gpu_damage, - dst_pixmap->drawable.width, - dst_pixmap->drawable.height); - } else { - RegionTranslate(®ion, dst_dx, dst_dy); - assert_pixmap_contains_box(dst_pixmap, - RegionExtents(®ion)); - sna_damage_add(&dst_priv->gpu_damage, ®ion); - if (alu == GXcopy) - sna_damage_subtract(&dst_priv->cpu_damage, - ®ion); - RegionTranslate(®ion, -dst_dx, -dst_dy); + if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) { + if (replaces) { + sna_damage_destroy(&dst_priv->cpu_damage); + sna_damage_all(&dst_priv->gpu_damage, + dst_pixmap->drawable.width, + dst_pixmap->drawable.height); + } else { + RegionTranslate(®ion, dst_dx, dst_dy); + assert_pixmap_contains_box(dst_pixmap, + 
RegionExtents(®ion)); + sna_damage_add(&dst_priv->gpu_damage, ®ion); + if (alu == GXcopy) + sna_damage_subtract(&dst_priv->cpu_damage, + ®ion); + RegionTranslate(®ion, -dst_dx, -dst_dy); + } } } else if (alu != GXcopy) { PixmapPtr tmp; @@ -2819,11 +2941,13 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, } tmp->drawable.pScreen->DestroyPixmap(tmp); - RegionTranslate(®ion, dst_dx, dst_dy); - assert_pixmap_contains_box(dst_pixmap, - RegionExtents(®ion)); - sna_damage_add(&dst_priv->gpu_damage, ®ion); - RegionTranslate(®ion, -dst_dx, -dst_dy); + if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) { + RegionTranslate(®ion, dst_dx, dst_dy); + assert_pixmap_contains_box(dst_pixmap, + RegionExtents(®ion)); + sna_damage_add(&dst_priv->gpu_damage, ®ion); + RegionTranslate(®ion, -dst_dx, -dst_dy); + } } else { if (src_priv) { RegionTranslate(®ion, src_dx, src_dy); @@ -2846,10 +2970,12 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, dst_priv->gpu_bo, bits, stride); - sna_damage_destroy(&dst_priv->cpu_damage); - sna_damage_all(&dst_priv->gpu_damage, - dst_pixmap->drawable.width, - dst_pixmap->drawable.height); + if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) { + sna_damage_destroy(&dst_priv->cpu_damage); + sna_damage_all(&dst_priv->gpu_damage, + dst_pixmap->drawable.width, + dst_pixmap->drawable.height); + } } else { DBG(("%s: dst is on the GPU, src is on the CPU, uploading\n", __FUNCTION__)); @@ -2860,14 +2986,16 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, src_dx, src_dy, box, n); - RegionTranslate(®ion, dst_dx, dst_dy); - assert_pixmap_contains_box(dst_pixmap, - RegionExtents(®ion)); - sna_damage_add(&dst_priv->gpu_damage, - ®ion); - sna_damage_subtract(&dst_priv->cpu_damage, - ®ion); - RegionTranslate(®ion, -dst_dx, -dst_dy); + if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) { + RegionTranslate(®ion, dst_dx, dst_dy); + assert_pixmap_contains_box(dst_pixmap, + RegionExtents(®ion)); + sna_damage_add(&dst_priv->gpu_damage, + ®ion); + sna_damage_subtract(&dst_priv->cpu_damage, + ®ion); + RegionTranslate(®ion, -dst_dx, -dst_dy); + } } } } else { @@ -6412,14 +6540,14 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r) sna_poly_rectangle_blt(drawable, priv->cpu_bo, damage, gc, n, r, ®ion.extents, flags&2)) return; - } - - /* Not a trivial outline, but we still maybe able to break it - * down into simpler operations that we can accelerate. - */ - if (sna_drawable_use_gpu_bo(drawable, ®ion.extents, &damage)) { - miPolyRectangle(drawable, gc, n, r); - return; + } else { + /* Not a trivial outline, but we still maybe able to break it + * down into simpler operations that we can accelerate. + */ + if (sna_drawable_use_gpu_bo(drawable, ®ion.extents, &damage)) { + miPolyRectangle(drawable, gc, n, r); + return; + } } fallback: diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c index b1bee98b..f8f61f5a 100644 --- a/src/sna/sna_composite.c +++ b/src/sna/sna_composite.c @@ -763,18 +763,20 @@ sna_composite_rectangles(CARD8 op, goto fallback; } - assert_pixmap_contains_box(pixmap, RegionExtents(®ion)); - /* Clearing a pixmap after creation is a common operation, so take * advantage and reduce further damage operations. 
*/ - if (region.data == NULL && - region.extents.x2 - region.extents.x1 == pixmap->drawable.width && - region.extents.y2 - region.extents.y1 == pixmap->drawable.height) - sna_damage_all(&priv->gpu_damage, - pixmap->drawable.width, pixmap->drawable.height); - else - sna_damage_add(&priv->gpu_damage, ®ion); + if (!DAMAGE_IS_ALL(priv->gpu_damage)) { + assert_pixmap_contains_box(pixmap, RegionExtents(®ion)); + + if (region.data == NULL && + region.extents.x2 - region.extents.x1 == pixmap->drawable.width && + region.extents.y2 - region.extents.y1 == pixmap->drawable.height) + sna_damage_all(&priv->gpu_damage, + pixmap->drawable.width, pixmap->drawable.height); + else + sna_damage_add(&priv->gpu_damage, ®ion); + } goto done; diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c index 18ca10dc..ea981573 100644 --- a/src/sna/sna_damage.c +++ b/src/sna/sna_damage.c @@ -172,7 +172,7 @@ static struct sna_damage *_sna_damage_create(void) if (__freed_damage) { damage = __freed_damage; - __freed_damage = NULL; + __freed_damage = *(void **)__freed_damage; } else { damage = malloc(sizeof(*damage)); if (damage == NULL) @@ -929,8 +929,8 @@ fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage, } #endif -struct sna_damage *_sna_damage_all(struct sna_damage *damage, - int width, int height) +struct sna_damage *__sna_damage_all(struct sna_damage *damage, + int width, int height) { DBG(("%s(%d, %d)\n", __FUNCTION__, width, height)); @@ -1157,8 +1157,8 @@ fastcall struct sna_damage *_sna_damage_subtract_box(struct sna_damage *damage, } #endif -static int _sna_damage_contains_box(struct sna_damage *damage, - const BoxRec *box) +static int __sna_damage_contains_box(struct sna_damage *damage, + const BoxRec *box) { int ret; @@ -1183,8 +1183,8 @@ static int _sna_damage_contains_box(struct sna_damage *damage, } #if DEBUG_DAMAGE -int sna_damage_contains_box(struct sna_damage *damage, - const BoxRec *box) +int _sna_damage_contains_box(struct sna_damage *damage, + const BoxRec *box) { char damage_buf[1000]; int ret; @@ -1193,7 +1193,7 @@ int sna_damage_contains_box(struct sna_damage *damage, _debug_describe_damage(damage_buf, sizeof(damage_buf), damage), box->x1, box->y1, box->x2, box->y2)); - ret = _sna_damage_contains_box(damage, box); + ret = __sna_damage_contains_box(damage, box); ErrorF(" = %d", ret); if (ret) ErrorF(" [(%d, %d), (%d, %d)...]", @@ -1203,15 +1203,15 @@ int sna_damage_contains_box(struct sna_damage *damage, return ret; } #else -int sna_damage_contains_box(struct sna_damage *damage, - const BoxRec *box) +int _sna_damage_contains_box(struct sna_damage *damage, + const BoxRec *box) { - return _sna_damage_contains_box(damage, box); + return __sna_damage_contains_box(damage, box); } #endif -bool sna_damage_contains_box__no_reduce(const struct sna_damage *damage, - const BoxRec *box) +bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage, + const BoxRec *box) { int ret; @@ -1225,17 +1225,10 @@ bool sna_damage_contains_box__no_reduce(const struct sna_damage *damage, ret == PIXMAN_REGION_IN; } -static Bool _sna_damage_intersect(struct sna_damage *damage, - RegionPtr region, RegionPtr result) +static Bool __sna_damage_intersect(struct sna_damage *damage, + RegionPtr region, RegionPtr result) { - if (!damage) - return FALSE; - - if (damage->mode == DAMAGE_ALL) { - RegionCopy(result, region); - return TRUE; - } - + assert(damage && damage->mode != DAMAGE_ALL); if (region->extents.x2 <= damage->extents.x1 || region->extents.x1 >= damage->extents.x2) return FALSE; @@ 
-1257,8 +1250,8 @@ static Bool _sna_damage_intersect(struct sna_damage *damage, } #if DEBUG_DAMAGE -Bool sna_damage_intersect(struct sna_damage *damage, - RegionPtr region, RegionPtr result) +Bool _sna_damage_intersect(struct sna_damage *damage, + RegionPtr region, RegionPtr result) { char damage_buf[1000]; char region_buf[120]; @@ -1268,7 +1261,7 @@ Bool sna_damage_intersect(struct sna_damage *damage, _debug_describe_damage(damage_buf, sizeof(damage_buf), damage), _debug_describe_region(region_buf, sizeof(region_buf), region)); - ret = _sna_damage_intersect(damage, region, result); + ret = __sna_damage_intersect(damage, region, result); if (ret) ErrorF(" = %s\n", _debug_describe_region(region_buf, sizeof(region_buf), result)); @@ -1278,17 +1271,16 @@ Bool sna_damage_intersect(struct sna_damage *damage, return ret; } #else -Bool sna_damage_intersect(struct sna_damage *damage, +Bool _sna_damage_intersect(struct sna_damage *damage, RegionPtr region, RegionPtr result) { - return _sna_damage_intersect(damage, region, result); + return __sna_damage_intersect(damage, region, result); } #endif -static int _sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) +static int __sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) { - if (!damage) - return 0; + assert(damage && damage->mode != DAMAGE_ALL); if (damage->dirty) __sna_damage_reduce(damage); @@ -1311,7 +1303,7 @@ struct sna_damage *_sna_damage_reduce(struct sna_damage *damage) } #if DEBUG_DAMAGE -int sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) +int _sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) { char damage_buf[1000]; int count; @@ -1319,15 +1311,15 @@ int sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) ErrorF("%s(%s)...\n", __FUNCTION__, _debug_describe_damage(damage_buf, sizeof(damage_buf), damage)); - count = _sna_damage_get_boxes(damage, boxes); + count = __sna_damage_get_boxes(damage, boxes); ErrorF(" = %d\n", count); return count; } #else -int sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) +int _sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) { - return _sna_damage_get_boxes(damage, boxes); + return __sna_damage_get_boxes(damage, boxes); } #endif @@ -1336,10 +1328,8 @@ void __sna_damage_destroy(struct sna_damage *damage) free_list(&damage->embedded_box.list); pixman_region_fini(&damage->region); - if (__freed_damage == NULL) - __freed_damage = damage; - else - free(damage); + *(void **)damage = __freed_damage; + __freed_damage = damage; } #if DEBUG_DAMAGE && TEST_DAMAGE diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h index abe222fe..5c003ca9 100644 --- a/src/sna/sna_damage.h +++ b/src/sna/sna_damage.h @@ -23,11 +23,16 @@ struct sna_damage { } embedded_box; }; +#define DAMAGE_IS_ALL(ptr) (((uintptr_t)(ptr))&1) +#define DAMAGE_MARK_ALL(ptr) ((struct sna_damage *)(((uintptr_t)(ptr))|1)) +#define DAMAGE_PTR(ptr) ((struct sna_damage *)(((uintptr_t)(ptr))&~1)) + fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage, RegionPtr region); static inline void sna_damage_add(struct sna_damage **damage, RegionPtr region) { + assert(!DAMAGE_IS_ALL(*damage)); *damage = _sna_damage_add(*damage, region); } @@ -36,6 +41,7 @@ fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage, static inline void sna_damage_add_box(struct sna_damage **damage, const BoxRec *box) { + assert(!DAMAGE_IS_ALL(*damage)); *damage = _sna_damage_add_box(*damage, box); } @@ -46,6 +52,7 @@ static inline void sna_damage_add_boxes(struct 
sna_damage **damage, const BoxRec *box, int n, int16_t dx, int16_t dy) { + assert(!DAMAGE_IS_ALL(*damage)); *damage = _sna_damage_add_boxes(*damage, box, n, dx, dy); } @@ -56,8 +63,10 @@ static inline void sna_damage_add_rectangles(struct sna_damage **damage, const xRectangle *r, int n, int16_t dx, int16_t dy) { - if (damage) + if (damage) { + assert(!DAMAGE_IS_ALL(*damage)); *damage = _sna_damage_add_rectangles(*damage, r, n, dx, dy); + } } struct sna_damage *_sna_damage_add_points(struct sna_damage *damage, @@ -67,40 +76,62 @@ static inline void sna_damage_add_points(struct sna_damage **damage, const DDXPointRec *p, int n, int16_t dx, int16_t dy) { - if (damage) + if (damage) { + assert(!DAMAGE_IS_ALL(*damage)); *damage = _sna_damage_add_points(*damage, p, n, dx, dy); + } } struct sna_damage *_sna_damage_is_all(struct sna_damage *damage, int width, int height); -static inline bool sna_damage_is_all(struct sna_damage **damage, +static inline bool sna_damage_is_all(struct sna_damage **_damage, int width, int height) { - if (*damage == NULL) + struct sna_damage *damage = *_damage; + + if (damage == NULL) return false; + if (DAMAGE_IS_ALL(damage)) + return true; - switch ((*damage)->mode) { + switch (damage->mode) { case DAMAGE_ALL: + assert(0); return true; case DAMAGE_SUBTRACT: return false; default: case DAMAGE_ADD: - if ((*damage)->extents.x2 < width || (*damage)->extents.x1 > 0) + if (damage->extents.x2 < width || damage->extents.x1 > 0) return false; - if ((*damage)->extents.y2 < height || (*damage)->extents.y1 > 0) + if (damage->extents.y2 < height || damage->extents.y1 > 0) return false; - *damage = _sna_damage_is_all(*damage, width, height); - return (*damage)->mode == DAMAGE_ALL; + damage = _sna_damage_is_all(damage, width, height); + if (damage->mode == DAMAGE_ALL) { + *_damage = DAMAGE_MARK_ALL(damage); + return true; + } else { + *_damage = damage; + return false; + } } } -struct sna_damage *_sna_damage_all(struct sna_damage *damage, - int width, int height); +struct sna_damage *__sna_damage_all(struct sna_damage *damage, + int width, int height); +static inline struct sna_damage * +_sna_damage_all(struct sna_damage *damage, + int width, int height) +{ + damage = __sna_damage_all(damage, width, height); + return DAMAGE_MARK_ALL(damage); +} + static inline void sna_damage_all(struct sna_damage **damage, int width, int height) { - *damage = _sna_damage_all(*damage, width, height); + if (!DAMAGE_IS_ALL(*damage)) + *damage = _sna_damage_all(*damage, width, height); } fastcall struct sna_damage *_sna_damage_subtract(struct sna_damage *damage, @@ -108,7 +139,7 @@ fastcall struct sna_damage *_sna_damage_subtract(struct sna_damage *damage, static inline void sna_damage_subtract(struct sna_damage **damage, RegionPtr region) { - *damage = _sna_damage_subtract(*damage, region); + *damage = _sna_damage_subtract(DAMAGE_PTR(*damage), region); } fastcall struct sna_damage *_sna_damage_subtract_box(struct sna_damage *damage, @@ -116,18 +147,54 @@ fastcall struct sna_damage *_sna_damage_subtract_box(struct sna_damage *damage, static inline void sna_damage_subtract_box(struct sna_damage **damage, const BoxRec *box) { - *damage = _sna_damage_subtract_box(*damage, box); + *damage = _sna_damage_subtract_box(DAMAGE_PTR(*damage), box); } -Bool sna_damage_intersect(struct sna_damage *damage, +Bool _sna_damage_intersect(struct sna_damage *damage, RegionPtr region, RegionPtr result); -int sna_damage_contains_box(struct sna_damage *damage, - const BoxRec *box); -bool 
sna_damage_contains_box__no_reduce(const struct sna_damage *damage, +static inline Bool sna_damage_intersect(struct sna_damage *damage, + RegionPtr region, RegionPtr result) +{ + assert(damage); + assert(RegionNotEmpty(region)); + assert(!DAMAGE_IS_ALL(damage)); + + return _sna_damage_intersect(damage, region, result); +} + +int _sna_damage_contains_box(struct sna_damage *damage, + const BoxRec *box); +static inline int sna_damage_contains_box(struct sna_damage *damage, + const BoxRec *box) +{ + if (DAMAGE_IS_ALL(damage)) + return PIXMAN_REGION_IN; + + return _sna_damage_contains_box(damage, box); +} +bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage, const BoxRec *box); +static inline bool +sna_damage_contains_box__no_reduce(const struct sna_damage *damage, + const BoxRec *box) +{ + assert(!DAMAGE_IS_ALL(damage)); + return _sna_damage_contains_box__no_reduce(damage, box); +} + +int _sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes); +static inline int +sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes) +{ + assert(damage); -int sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes); + if (DAMAGE_IS_ALL(damage)) { + *boxes = &DAMAGE_PTR(damage)->extents; + return 1; + } else + return _sna_damage_get_boxes(damage, boxes); +} struct sna_damage *_sna_damage_reduce(struct sna_damage *damage); static inline void sna_damage_reduce(struct sna_damage **damage) @@ -135,29 +202,33 @@ static inline void sna_damage_reduce(struct sna_damage **damage) if (*damage == NULL) return; - if ((*damage)->dirty) + if (!DAMAGE_IS_ALL(*damage) && (*damage)->dirty) *damage = _sna_damage_reduce(*damage); } -static inline void sna_damage_reduce_all(struct sna_damage **damage, +static inline void sna_damage_reduce_all(struct sna_damage **_damage, int width, int height) { + struct sna_damage *damage = *_damage; + DBG(("%s(width=%d, height=%d)\n", __FUNCTION__, width, height)); - if (*damage == NULL) + if (damage == NULL || DAMAGE_IS_ALL(damage)) return; - if ((*damage)->mode == DAMAGE_ADD && - (*damage)->extents.x1 <= 0 && - (*damage)->extents.y1 <= 0 && - (*damage)->extents.x2 >= width && - (*damage)->extents.y2 >= height) { - if ((*damage)->dirty && - (*damage = _sna_damage_reduce(*damage)) == NULL) - return; - - if ((*damage)->region.data == NULL) - *damage = _sna_damage_all(*damage, width, height); + if (damage->mode == DAMAGE_ADD && + damage->extents.x1 <= 0 && + damage->extents.y1 <= 0 && + damage->extents.x2 >= width && + damage->extents.y2 >= height) { + if (damage->dirty) { + damage = *_damage = _sna_damage_reduce(damage); + if (damage == NULL) + return; + } + + if (damage->region.data == NULL) + *_damage = _sna_damage_all(damage, width, height); } } @@ -167,7 +238,7 @@ static inline void sna_damage_destroy(struct sna_damage **damage) if (*damage == NULL) return; - __sna_damage_destroy(*damage); + __sna_damage_destroy(DAMAGE_PTR(*damage)); *damage = NULL; } diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c index 2b0dbc6b..b2779aa1 100644 --- a/src/sna/sna_display.c +++ b/src/sna/sna_display.c @@ -578,7 +578,8 @@ void sna_copy_fbcon(struct sna *sna) scratch, bo, sx, sy, sna->front, priv->gpu_bo, dx, dy, &box, 1); - sna_damage_add_box(&priv->gpu_damage, &box); + if (!DAMAGE_IS_ALL(priv->gpu_damage)) + sna_damage_add_box(&priv->gpu_damage, &box); kgem_bo_destroy(&sna->kgem, bo); diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c index 9a703c20..3beaee19 100644 --- a/src/sna/sna_dri.c +++ b/src/sna/sna_dri.c @@ -317,6 +317,9 @@ static void 
damage(PixmapPtr pixmap, RegionPtr region) struct sna_pixmap *priv; priv = sna_pixmap(pixmap); + if (DAMAGE_IS_ALL(priv->gpu_damage)) + return; + if (region == NULL) { damage_all: sna_damage_all(&priv->gpu_damage, diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c index a7d4da5f..7670211c 100644 --- a/src/sna/sna_io.c +++ b/src/sna/sna_io.c @@ -58,7 +58,7 @@ static void read_boxes_inplace(struct kgem *kgem, kgem_bo_submit(kgem, bo); - src = kgem_bo_map(kgem, bo, PROT_READ); + src = kgem_bo_map(kgem, bo); if (src == NULL) return; @@ -285,7 +285,7 @@ static void write_boxes_inplace(struct kgem *kgem, kgem_bo_submit(kgem, bo); - dst = kgem_bo_map(kgem, bo, PROT_READ | PROT_WRITE); + dst = kgem_bo_map(kgem, bo); if (dst == NULL) return; @@ -555,7 +555,7 @@ write_boxes_inplace__xor(struct kgem *kgem, kgem_bo_submit(kgem, bo); - dst = kgem_bo_map(kgem, bo, PROT_READ | PROT_WRITE); + dst = kgem_bo_map(kgem, bo); if (dst == NULL) return; @@ -788,7 +788,12 @@ indirect_replace(struct sna *sna, void *ptr; bool ret; - if (pixmap->devKind * pixmap->drawable.height >> 12 > kgem->half_cpu_cache_pages) + DBG(("%s: size=%d vs %d\n", + __FUNCTION__, + (int)pixmap->devKind * pixmap->drawable.height >> 12, + kgem->half_cpu_cache_pages)); + + if ((int)pixmap->devKind * pixmap->drawable.height >> 12 > kgem->half_cpu_cache_pages) return false; if (bo->tiling == I915_TILING_Y || kgem->ring == KGEM_RENDER) { @@ -924,7 +929,7 @@ struct kgem_bo *sna_replace(struct sna *sna, kgem_bo_write(kgem, bo, src, (pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8); } else { - dst = kgem_bo_map(kgem, bo, PROT_READ | PROT_WRITE); + dst = kgem_bo_map(kgem, bo); if (dst) { memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel, stride, bo->pitch, @@ -969,7 +974,7 @@ struct kgem_bo *sna_replace__xor(struct sna *sna, } } - dst = kgem_bo_map(kgem, bo, PROT_READ | PROT_WRITE); + dst = kgem_bo_map(kgem, bo); if (dst) { memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel, stride, bo->pitch, diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c index d39aa16e..9052d687 100644 --- a/src/sna/sna_render.c +++ b/src/sna/sna_render.c @@ -394,7 +394,7 @@ _texture_is_cpu(PixmapPtr pixmap, const BoxRec *box) if (!priv->cpu_damage) return FALSE; - if (priv->cpu_damage->mode == DAMAGE_ALL) + if (DAMAGE_IS_ALL(priv->cpu_damage)) return TRUE; if (sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) @@ -483,7 +483,7 @@ sna_render_pixmap_bo(struct sna *sna, } if (priv->cpu_bo && - priv->cpu_damage && priv->cpu_damage->mode == DAMAGE_ALL && + DAMAGE_IS_ALL(priv->cpu_damage) && priv->cpu_bo->pitch < 4096) { channel->bo = kgem_bo_reference(priv->cpu_bo); return 1; @@ -1448,7 +1448,7 @@ sna_render_composite_redirect(struct sna *sna, op->dst.y = -y; op->dst.width = width; op->dst.height = height; - op->damage = &priv->gpu_damage; + op->damage = NULL; return TRUE; } diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h index 33fb166e..ca8e8391 100644 --- a/src/sna/sna_render_inline.h +++ b/src/sna/sna_render_inline.h @@ -134,7 +134,9 @@ sna_render_reduce_damage(struct sna_composite_op *op, if (op->damage == NULL || *op->damage == NULL) return; - if ((*op->damage)->mode == DAMAGE_ALL) { + if (DAMAGE_IS_ALL(*op->damage)) { + DBG(("%s: damage-all, dicarding damage\n", + __FUNCTION__)); op->damage = NULL; return; } @@ -148,8 +150,11 @@ sna_render_reduce_damage(struct sna_composite_op *op, r.y1 = dst_y + op->dst.y; r.y2 = r.y1 + height; - if (sna_damage_contains_box__no_reduce(*op->damage, &r)) + if 
(sna_damage_contains_box__no_reduce(*op->damage, &r)) { + DBG(("%s: damage contains render extents, dicarding damage\n", + __FUNCTION__)); op->damage = NULL; + } } #endif /* SNA_RENDER_INLINE_H */ diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c index d6d56f40..7b759a7e 100644 --- a/src/sna/sna_video.c +++ b/src/sna/sna_video.c @@ -472,7 +472,7 @@ sna_video_copy_data(struct sna *sna, } /* copy data */ - dst = kgem_bo_map(&sna->kgem, frame->bo, PROT_READ | PROT_WRITE); + dst = kgem_bo_map(&sna->kgem, frame->bo); if (dst == NULL) return FALSE; |
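
A smaller change in the sna_damage.c hunk above turns the single-slot __freed_damage cache into a free list threaded through the first word of each freed struct (`*(void **)damage = __freed_damage; __freed_damage = damage;`). Below is a minimal sketch of that recycling idiom, assuming a hypothetical `node` type in place of struct sna_damage; the function names are illustrative only.

```c
#include <stdlib.h>

/* Hypothetical stand-in for struct sna_damage. */
struct node {
	char payload[64];	/* must be at least sizeof(void *) */
};

/* Freed nodes are chained through their own first word, mirroring the
 * __freed_damage handling in sna_damage.c. */
static void *freed_nodes;

static struct node *node_alloc(void)
{
	struct node *n = freed_nodes;

	if (n)
		freed_nodes = *(void **)n;	/* pop: the next link lives inside the node */
	else
		n = malloc(sizeof(*n));
	return n;
}

static void node_free(struct node *n)
{
	*(void **)n = freed_nodes;	/* push: reuse the first word as the link */
	freed_nodes = n;
}

int main(void)
{
	struct node *a = node_alloc();

	node_free(a);				/* a goes onto the free list... */
	return node_alloc() == a ? 0 : 1;	/* ...and is recycled on the next alloc */
}
```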