author		Chris Wilson <chris@chris-wilson.co.uk>	2012-01-26 10:47:01 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2012-01-26 13:01:24 +0000
commit		b76a6da3fa0148ef32600dd9505e22b90de037df (patch)
tree		fff1ca10c4771191eb94c6364a6b2c08086f713e
parent		e2b8b1c145932e2254a705905c60f18c200cf2e8 (diff)
sna: Search the buckets above the desired size in the bo cache
It is preferable to reuse a slightly larger bo than it is to create a fresh one and map it into the aperture. So search the bucket above us as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	src/sna/kgem.c	39
1 file changed, 29 insertions(+), 10 deletions(-)
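
The shape of the change is easier to see outside the diff. Below is a minimal, self-contained C sketch of the same idea: probe the bucket for the requested size and up to two buckets above it before falling back to a fresh allocation. The names here (struct bo, cache, bucket_for_size, cache_search, NUM_BUCKETS) are illustrative stand-ins, not the kgem API.

/* Sketch of the bucket-search strategy this patch introduces: rather
 * than allocating a new buffer when the exact-size bucket is empty,
 * probe up to two larger buckets and reuse a slightly oversized
 * buffer instead.  Hypothetical stand-in types/names, not kgem code.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_BUCKETS 16

struct bo {
	size_t size;
	struct bo *next;	/* singly-linked free list per bucket */
};

static struct bo *cache[NUM_BUCKETS];

/* Map a size to its power-of-two bucket, in the spirit of cache_bucket(). */
static int bucket_for_size(size_t size)
{
	int b = 0;
	while (b < NUM_BUCKETS - 1 && ((size_t)1 << b) < size)
		b++;
	return b;
}

/* Search the desired bucket and, as in the patch, at most two buckets
 * above it (retry is clamped to 3 probes in total) before giving up. */
static struct bo *cache_search(size_t size)
{
	int bucket = bucket_for_size(size);
	int retry = NUM_BUCKETS - bucket;

	if (retry > 3)
		retry = 3;

	do {
		struct bo *bo = cache[bucket];
		if (bo && bo->size >= size) {
			cache[bucket] = bo->next;	/* unlink and reuse */
			return bo;
		}
		bucket++;	/* near miss: try the next larger bucket */
	} while (--retry);

	return NULL;	/* caller falls back to a fresh gem_create() */
}

int main(void)
{
	struct bo *big = malloc(sizeof(*big));
	struct bo *bo;

	/* Seed only a larger bucket, then request a smaller size. */
	big->size = 16384;
	big->next = NULL;
	cache[bucket_for_size(big->size)] = big;

	bo = cache_search(5000);	/* desired bucket empty; finds 16384 */
	printf("reused bo of size %zu\n", bo ? bo->size : (size_t)0);
	free(bo);
	return 0;
}

In the sketch, as in the patch, reusing the slightly oversized buffer trades a little wasted space for skipping a fresh allocation and its mapping into the aperture.
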
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a2fcefc4..6cd86e6f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2189,7 +2189,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
struct kgem_bo *bo, *next;
uint32_t pitch, untiled_pitch, tiled_height, size;
uint32_t handle;
- int i;
+ int i, bucket, retry;
if (tiling < 0)
tiling = -tiling, flags |= CREATE_EXACT;
@@ -2208,6 +2208,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
width, height, bpp, tiling, &pitch);
assert(size && size < kgem->max_cpu_size);
assert(tiling == I915_TILING_NONE || size < kgem->max_gpu_size);
+ bucket = cache_bucket(size);
if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
int for_cpu = !!(flags & CREATE_CPU_MAP);
@@ -2216,10 +2217,10 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
/* We presume that we will need to upload to this bo,
* and so would prefer to have an active VMA.
*/
- cache = &kgem->vma[for_cpu].inactive[cache_bucket(size)];
+ cache = &kgem->vma[for_cpu].inactive[bucket];
do {
list_for_each_entry(bo, cache, vma) {
- assert(bo->bucket == cache_bucket(size));
+ assert(bo->bucket == bucket);
assert(bo->refcnt == 0);
assert(bo->map);
assert(IS_CPU_MAP(bo->map) == for_cpu);
@@ -2263,13 +2264,17 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
goto skip_active_search;
/* Best active match */
- cache = active(kgem, size, tiling);
+ retry = NUM_CACHE_BUCKETS - bucket;
+ if (retry > 3)
+ retry = 3;
+search_again:
+ cache = &kgem->active[bucket][tiling];
if (tiling) {
tiled_height = kgem_aligned_height(kgem, height, tiling);
list_for_each_entry(bo, cache, list) {
- assert(bo->bucket == cache_bucket(size));
assert(!bo->purged);
assert(bo->refcnt == 0);
+ assert(bo->bucket == bucket);
assert(bo->reusable);
assert(bo->tiling == tiling);
@@ -2280,7 +2285,6 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
continue;
}
-
if (bo->pitch * tiled_height > bo->size)
continue;
@@ -2294,7 +2298,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
}
} else {
list_for_each_entry(bo, cache, list) {
- assert(bo->bucket == cache_bucket(size));
+ assert(bo->bucket == bucket);
assert(!bo->purged);
assert(bo->refcnt == 0);
assert(bo->reusable);
@@ -2314,6 +2318,11 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
}
}
+ if (--retry && flags & CREATE_EXACT) {
+ bucket++;
+ goto search_again;
+ }
+
if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
untiled_pitch = kgem_untiled_pitch(kgem,
width, bpp,
@@ -2356,10 +2365,15 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
}
skip_active_search:
+ bucket = cache_bucket(size);
+ retry = NUM_CACHE_BUCKETS - bucket;
+ if (retry > 3)
+ retry = 3;
+search_inactive:
/* Now just look for a close match and prefer any currently active */
- cache = inactive(kgem, size);
+ cache = &kgem->inactive[bucket];
list_for_each_entry_safe(bo, next, cache, list) {
- assert(bo->bucket == cache_bucket(size));
+ assert(bo->bucket == bucket);
if (size > bo->size) {
DBG(("inactive too small: %d < %d\n",
@@ -2409,10 +2423,15 @@ skip_active_search:
if (flags & CREATE_INACTIVE && !list_is_empty(&kgem->requests)) {
if (kgem_retire(kgem)) {
flags &= ~CREATE_INACTIVE;
- goto skip_active_search;
+ goto search_inactive;
}
}
+ if (--retry) {
+ bucket++;
+ goto search_inactive;
+ }
+
handle = gem_create(kgem->fd, size);
if (handle == 0)
return NULL;