summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-02-25 11:07:16 +0000
committerChris Wilson <chris@chris-wilson.co.uk>2012-02-25 11:42:16 +0000
commit8cb773e7c809e1de23cd64d3db862d1f8e7e955a (patch)
tree68d6b9fc333172436781ff6ddac82393bf56b863
parentb1b4db8942e69d47aabfad3751165dc2252fa448 (diff)
sna: Ensure we trigger a retire for search_linear_cache
Bos used for batch buffers are handled differently and are not tracked through the active cache, so we failed to notice when we might be able to run retire and recover a suitable buffer for reuse. So simply always run retire when we might need to create a new linear buffer.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--src/sna/kgem.c20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d73fc304..40518927 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1922,22 +1922,32 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
bool use_active = (flags & CREATE_INACTIVE) == 0;
struct list *cache;
+ DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
+ __FUNCTION__, num_pages, flags, use_active));
+
if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
return NULL;
if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
- if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE)))
- return NULL;
+ DBG(("%s: inactive and cache bucket empty\n",
+ __FUNCTION__));
- if (!kgem_retire(kgem))
+ if (!kgem->need_retire || !kgem_retire(kgem)) {
+ DBG(("%s: nothing retired\n", __FUNCTION__));
return NULL;
+ }
- if (list_is_empty(inactive(kgem, num_pages)))
+ if (list_is_empty(inactive(kgem, num_pages))) {
+ DBG(("%s: active cache bucket still empty after retire\n",
+ __FUNCTION__));
return NULL;
+ }
}
if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
int for_cpu = !!(flags & CREATE_CPU_MAP);
+ DBG(("%s: searching for inactive %s map\n",
+ __FUNCTION__, for_cpu ? "cpu" : "gtt"));
cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
list_for_each_entry(bo, cache, vma) {
assert(IS_CPU_MAP(bo->map) == for_cpu);
@@ -2111,7 +2121,7 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size)
if (handle == 0)
return NULL;
- DBG(("%s: new handle=%d\n", __FUNCTION__, handle));
+ DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
bo = __kgem_bo_alloc(handle, size);
if (bo == NULL) {
gem_close(kgem->fd, handle);