diff options
author    Chris Wilson <chris@chris-wilson.co.uk>    2012-01-26 14:34:36 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>    2012-01-26 14:36:19 +0000
commit    d35b6955dbb5d652d8685d2c1ea82c5e08de55ea (patch)
tree      57079ec8b8dcebee263b080883b137227b676882
parent    7c81bcd0c425cc0f7ddf2ad8289bb739c8d44289 (diff)
sna: Prevent mapping through the GTT for large bo
If the bo is larger than a quarter of the aperture, it is unlikely that
we will be able to evict enough contiguous space in the GATT to
accommodate that buffer. So don't attempt to map such buffers; use
the indirect access path instead.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--  src/sna/kgem.h      |  8
-rw-r--r--  src/sna/sna_accel.c |  9
-rw-r--r--  src/sna/sna_io.c    | 69
3 files changed, 58 insertions(+), 28 deletions(-)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h index db4f061b..0cc4fd3b 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -394,13 +394,19 @@ static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo) __FUNCTION__, bo->handle, bo->domain, bo->presumed_offset, bo->size)); + if (!kgem_bo_is_mappable(kgem, bo)) + return true; + + if (kgem->wedged) + return false; + if (kgem_bo_is_busy(bo)) return true; if (bo->presumed_offset == 0) return !list_is_empty(&kgem->requests); - return !kgem_bo_is_mappable(kgem, bo); + return false; } static inline bool kgem_bo_is_dirty(struct kgem_bo *bo) diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c index b3b968c9..7fe06de0 100644 --- a/src/sna/sna_accel.c +++ b/src/sna/sna_accel.c @@ -797,7 +797,7 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap) sna_pixmap_choose_tiling(pixmap), CREATE_GTT_MAP | CREATE_INACTIVE); - return priv->gpu_bo != NULL; + return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo); } bool @@ -835,7 +835,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags) priv->gpu_bo->exec == NULL) kgem_retire(&sna->kgem); - if (kgem_bo_is_busy(priv->gpu_bo)) { + if (kgem_bo_map_will_stall(&sna->kgem, + priv->gpu_bo)) { if (priv->pinned) goto skip_inplace_map; @@ -897,7 +898,7 @@ skip_inplace_map: if (flags & MOVE_INPLACE_HINT && priv->stride && priv->gpu_bo && - !kgem_bo_is_busy(priv->gpu_bo) && + !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) && pixmap_inplace(sna, pixmap, priv) && sna_pixmap_move_to_gpu(pixmap, flags)) { assert(flags & MOVE_WRITE); @@ -1250,7 +1251,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, if (flags & MOVE_INPLACE_HINT && priv->stride && priv->gpu_bo && - !kgem_bo_is_busy(priv->gpu_bo) && + !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) && region_inplace(sna, pixmap, region, priv) && sna_pixmap_move_area_to_gpu(pixmap, ®ion->extents, flags)) { assert(flags & MOVE_WRITE); diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c index 
a2e7a590..f3ca212c 100644 --- a/src/sna/sna_io.c +++ b/src/sna/sna_io.c @@ -122,8 +122,7 @@ void sna_read_boxes(struct sna *sna, * this path. */ - if (DEBUG_NO_IO || kgem->wedged || - !kgem_bo_map_will_stall(kgem, src_bo) || + if (!kgem_bo_map_will_stall(kgem, src_bo) || src_bo->tiling == I915_TILING_NONE) { fallback: read_boxes_inplace(kgem, @@ -386,10 +385,7 @@ static bool upload_inplace(struct kgem *kgem, int n, int bpp) { if (DEBUG_NO_IO) - return true; - - if (unlikely(kgem->wedged)) - return true; + return kgem_bo_is_mappable(kgem, bo); /* If we are writing through the GTT, check first if we might be * able to almagamate a series of small writes into a single @@ -993,14 +989,27 @@ struct kgem_bo *sna_replace(struct sna *sna, kgem_bo_write(kgem, bo, src, (pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8); } else { - dst = kgem_bo_map(kgem, bo); - if (dst) { - memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel, - stride, bo->pitch, - 0, 0, - 0, 0, - pixmap->drawable.width, - pixmap->drawable.height); + if (kgem_bo_is_mappable(kgem, bo)) { + dst = kgem_bo_map(kgem, bo); + if (dst) { + memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel, + stride, bo->pitch, + 0, 0, + 0, 0, + pixmap->drawable.width, + pixmap->drawable.height); + } + } else { + BoxRec box; + + box.x1 = box.y1 = 0; + box.x2 = pixmap->drawable.width; + box.y2 = pixmap->drawable.height; + + sna_write_boxes(sna, pixmap, + bo, 0, 0, + src, stride, 0, 0, + &box, 1); } } @@ -1038,15 +1047,29 @@ struct kgem_bo *sna_replace__xor(struct sna *sna, } } - dst = kgem_bo_map(kgem, bo); - if (dst) { - memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel, - stride, bo->pitch, - 0, 0, - 0, 0, - pixmap->drawable.width, - pixmap->drawable.height, - and, or); + if (kgem_bo_is_mappable(kgem, bo)) { + dst = kgem_bo_map(kgem, bo); + if (dst) { + memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel, + stride, bo->pitch, + 0, 0, + 0, 0, + pixmap->drawable.width, + 
pixmap->drawable.height, + and, or); + } + } else { + BoxRec box; + + box.x1 = box.y1 = 0; + box.x2 = pixmap->drawable.width; + box.y2 = pixmap->drawable.height; + + sna_write_boxes__xor(sna, pixmap, + bo, 0, 0, + src, stride, 0, 0, + &box, 1, + and, or); } return bo; |