diff options
author:    Chris Wilson <chris@chris-wilson.co.uk>  2011-12-18 22:18:02 +0000
committer: Chris Wilson <chris@chris-wilson.co.uk>  2011-12-18 23:00:46 +0000
commit:    15a769a66fa1afbcffc642ef980387cffefc6bef (patch)
tree:      26733b403293a7e57abcaa7214fbd598d924e5c3
parent:    d0ee695ef091671e2cc69b773f517030ebe961b2 (diff)
sna: Distinguish between GTT and CPU maps when searching for VMA
Similarly try to avoid borrowing any vma when all we intend to do is
pwrite.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
 src/sna/kgem.c      | 99
 src/sna/kgem.h      |  4
 src/sna/sna_accel.c |  4
 src/sna/sna_io.c    |  3
 4 files changed, 89 insertions(+), 21 deletions(-)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 3d5d4b0c..c0034d5f 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -1446,9 +1446,10 @@ void kgem_cleanup_cache(struct kgem *kgem) } static struct kgem_bo * -search_linear_cache(struct kgem *kgem, unsigned int size, bool use_active) +search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags) { - struct kgem_bo *bo, *next; + struct kgem_bo *bo, *next, *first = NULL; + bool use_active = (flags & CREATE_INACTIVE) == 0; struct list *cache; cache = use_active ? active(kgem, size): inactive(kgem, size); @@ -1469,10 +1470,29 @@ search_linear_cache(struct kgem *kgem, unsigned int size, bool use_active) continue; } - if (I915_TILING_NONE != bo->tiling && - gem_set_tiling(kgem->fd, bo->handle, - I915_TILING_NONE, 0) != I915_TILING_NONE) - continue; + if (bo->map) { + if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { + int for_cpu = !!(flags & CREATE_CPU_MAP); + if (IS_CPU_MAP(bo->map) != for_cpu) { + if (first == NULL) + first = bo; + continue; + } + } else { + if (first == NULL) + first = bo; + continue; + } + } + + if (I915_TILING_NONE != bo->tiling) { + if (use_active) + continue; + + if (gem_set_tiling(kgem->fd, bo->handle, + I915_TILING_NONE, 0) != I915_TILING_NONE) + continue; + } list_del(&bo->list); if (bo->rq == &_kgem_static_request) @@ -1494,6 +1514,44 @@ search_linear_cache(struct kgem *kgem, unsigned int size, bool use_active) return bo; } + if (first) { + if (I915_TILING_NONE != first->tiling) { + if (use_active) + return NULL; + + if (gem_set_tiling(kgem->fd, first->handle, + I915_TILING_NONE, 0) != I915_TILING_NONE) + return NULL; + + if (first->map) { + munmap(CPU_MAP(first->map), first->size); + first->map = NULL; + + list_del(&first->vma); + kgem->vma_count--; + } + } + + list_del(&first->list); + if (first->rq == &_kgem_static_request) + list_del(&first->request); + if (first->map) { + assert(!list_is_empty(&first->vma)); + list_move_tail(&first->vma, &kgem->vma_cache); + } + + 
first->tiling = I915_TILING_NONE; + first->pitch = 0; + first->delta = 0; + DBG((" %s: found handle=%d (size=%d) in linear %s cache\n", + __FUNCTION__, first->handle, first->size, + use_active ? "active" : "inactive")); + assert(use_active || first->domain != DOMAIN_GPU); + assert(!first->needs_flush || use_active); + //assert(use_active || !kgem_busy(kgem, first->handle)); + return first; + } + return NULL; } @@ -1528,13 +1586,13 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size) DBG(("%s(%d)\n", __FUNCTION__, size)); size = ALIGN(size, PAGE_SIZE); - bo = search_linear_cache(kgem, size, false); + bo = search_linear_cache(kgem, size, CREATE_INACTIVE); if (bo) return kgem_bo_reference(bo); if (!list_is_empty(&kgem->requests)) { if (kgem_retire(kgem)) { - bo = search_linear_cache(kgem, size, false); + bo = search_linear_cache(kgem, size, CREATE_INACTIVE); if (bo) return kgem_bo_reference(bo); } @@ -1719,9 +1777,12 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem, if (tiling < 0) tiling = -tiling, exact = 1; - DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, scanout?=%d)\n", __FUNCTION__, + DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d)\n", __FUNCTION__, width, height, bpp, tiling, - !!exact, !!(flags & CREATE_INACTIVE), !!(flags & CREATE_SCANOUT))); + !!exact, !!(flags & CREATE_INACTIVE), + !!(flags & CREATE_CPU_MAP), + !!(flags & CREATE_GTT_MAP), + !!(flags & CREATE_SCANOUT))); assert(_kgem_can_create_2d(kgem, width, height, bpp, exact ? -tiling : tiling)); size = kgem_surface_size(kgem, @@ -1730,7 +1791,8 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem, width, height, bpp, tiling, &pitch); assert(size && size <= kgem->max_object_size); - if (flags & CREATE_INACTIVE) { + if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { + int for_cpu = !!(flags & CREATE_CPU_MAP); /* We presume that we will need to upload to this bo, * and so would prefer to have an active VMA. 
*/ @@ -1741,6 +1803,9 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem, assert(bo->rq == NULL); assert(list_is_empty(&bo->request)); + if (IS_CPU_MAP(bo->map) != for_cpu) + continue; + if (size > bo->size || 2*size < bo->size) { DBG(("inactive vma too small/large: %d < %d\n", bo->size, size)); @@ -2580,9 +2645,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, old = NULL; if (!write) - old = search_linear_cache(kgem, alloc, true); + old = search_linear_cache(kgem, alloc, CREATE_CPU_MAP); if (old == NULL) - old = search_linear_cache(kgem, alloc, false); + old = search_linear_cache(kgem, alloc, CREATE_INACTIVE | CREATE_CPU_MAP); if (old) { DBG(("%s: reusing handle=%d for buffer\n", __FUNCTION__, old->handle)); @@ -2621,9 +2686,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, old = NULL; if (!write) - old = search_linear_cache(kgem, alloc, true); + old = search_linear_cache(kgem, alloc, 0); if (old == NULL) - old = search_linear_cache(kgem, alloc, false); + old = search_linear_cache(kgem, alloc, CREATE_INACTIVE); if (old) { alloc = old->size; bo = malloc(sizeof(*bo) + alloc); @@ -2873,9 +2938,9 @@ kgem_replace_bo(struct kgem *kgem, size = height * pitch; - dst = search_linear_cache(kgem, size, true); + dst = search_linear_cache(kgem, size, 0); if (dst == NULL) - dst = search_linear_cache(kgem, size, false); + dst = search_linear_cache(kgem, size, CREATE_INACTIVE); if (dst == NULL) { handle = gem_create(kgem->fd, size); if (handle == 0) diff --git a/src/sna/kgem.h b/src/sna/kgem.h index 97eedb5d..4e30d58b 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -189,7 +189,9 @@ kgem_replace_bo(struct kgem *kgem, enum { CREATE_EXACT = 0x1, CREATE_INACTIVE = 0x2, - CREATE_SCANOUT = 0x4, + CREATE_CPU_MAP = 0x4, + CREATE_GTT_MAP = 0x8, + CREATE_SCANOUT = 0x10, }; struct kgem_bo *kgem_create_2d(struct kgem *kgem, int width, diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c index aef002f3..c44dcc17 100644 --- a/src/sna/sna_accel.c +++ 
b/src/sna/sna_accel.c @@ -202,7 +202,7 @@ sna_pixmap_alloc_cpu(struct sna *sna, pixmap->drawable.height, pixmap->drawable.bitsPerPixel, I915_TILING_NONE, - from_gpu ? 0 : CREATE_INACTIVE); + from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE); DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__, priv->cpu_bo->handle)); @@ -1286,7 +1286,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap) pixmap->drawable.bitsPerPixel, sna_pixmap_choose_tiling(pixmap, default_tiling(pixmap)), - priv->cpu_damage ? CREATE_INACTIVE : 0); + priv->cpu_damage ? CREATE_GTT_MAP | CREATE_INACTIVE : 0); if (priv->gpu_bo == NULL) { assert(list_is_empty(&priv->list)); return NULL; diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c index 767824fa..48418456 100644 --- a/src/sna/sna_io.c +++ b/src/sna/sna_io.c @@ -441,7 +441,8 @@ struct kgem_bo *sna_replace(struct sna *sna, pixmap->drawable.width, pixmap->drawable.height, pixmap->drawable.bitsPerPixel, - bo->tiling, CREATE_INACTIVE); + bo->tiling, + CREATE_GTT_MAP | CREATE_INACTIVE); if (new_bo) { kgem_bo_destroy(kgem, bo); bo = new_bo; |