diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2012-01-15 10:04:11 +0000 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2012-01-15 10:06:01 +0000 |
commit | 349e9a7b94199e759acaaccac3abf5e28f3c246f (patch) | |
tree | d4d9baa96594418d5741c92de6540384a766925a | |
parent | 09dc8b1b358aa33836d511b75f92e8d096bc7e59 (diff) |
sna: Prefer read-boxes inplace again
Using the gpu to do the detiling just incurs extra latency and an extra
copy, so go back to using a fence and GTT mapping for the common path.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r-- | src/sna/sna_io.c | 17 |
1 file changed, 8 insertions, 9 deletions
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c index d6b988f9..5d3e9e5e 100644 --- a/src/sna/sna_io.c +++ b/src/sna/sna_io.c @@ -115,16 +115,15 @@ void sna_read_boxes(struct sna *sna, } #endif - if (DEBUG_NO_IO || kgem->wedged || src_bo->tiling == I915_TILING_Y) { - read_boxes_inplace(kgem, - src_bo, src_dx, src_dy, - dst, dst_dx, dst_dy, - box, nbox); - return; - } + /* XXX The gpu is faster to perform detiling in bulk, but takes + * longer to setup and retrieve the results, with an additional + * copy. The long term solution is to use snoopable bo and avoid + * this path. + */ - if (src_bo->tiling != I915_TILING_X && - !kgem_bo_map_will_stall(kgem, src_bo)) { + if (DEBUG_NO_IO || kgem->wedged || + !kgem_bo_map_will_stall(kgem, src_bo) || + src_bo->tiling != I915_TILING_X) { read_boxes_inplace(kgem, src_bo, src_dx, src_dy, dst, dst_dx, dst_dy, |