author	Chris Wilson <chris@chris-wilson.co.uk>	2011-09-28 23:04:03 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2011-09-28 23:03:00 +0100
commit	d8fe941bc245e24c83c417ccff5c57e83baac3f7 (patch)
tree	6457f7745a4ddb972a69f5bfc19190564f6338f2
parent	e74a39b45446e2c43df5419efc4a2e0e73275c45 (diff)
sna: Check for request retires after every batch
In the beginning, I did perform a retire after every batch. Then I decided that it was too much CPU overhead for too little gain. On reflection, i.e. after further benchmarking, we do see a performance improvement from recycling active buffers faster.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	src/sna/kgem.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 50374250..4dd876b2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -591,12 +591,13 @@ void kgem_retire(struct kgem *kgem)
 	struct kgem_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
-		if (!kgem_busy(kgem, bo->handle)) {
-			bo->needs_flush = 0;
-			bo->gpu = false;
-			list_move(&bo->list, inactive(kgem, bo->size));
-			list_del(&bo->request);
-		}
+		if (kgem_busy(kgem, bo->handle))
+			break;
+
+		bo->needs_flush = 0;
+		bo->gpu = false;
+		list_move(&bo->list, inactive(kgem, bo->size));
+		list_del(&bo->request);
 	}
 
 	while (!list_is_empty(&kgem->requests)) {
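The early break above only ever retires from the front of the flushing list, which is safe provided entries are kept in submission order: the first buffer still busy on the GPU implies everything queued after it is busy too. A minimal standalone sketch of that idiom follows; the names (fake_bo, retire_idle) are hypothetical stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a kgem buffer object. */
struct fake_bo {
	int handle;
	bool busy;              /* stand-in for kgem_busy(kgem, bo->handle) */
	struct fake_bo *next;   /* list is kept oldest-first */
};

/* Retire idle entries from the head of an oldest-first list, stopping
 * at the first busy one: everything after it was submitted later, so
 * it cannot have completed earlier. */
static struct fake_bo *retire_idle(struct fake_bo *head)
{
	while (head != NULL && !head->busy) {
		printf("recycling handle %d\n", head->handle);
		head = head->next;
	}
	return head;
}

int main(void)
{
	struct fake_bo newest = { 3, true,  NULL };
	struct fake_bo middle = { 2, false, &newest };
	struct fake_bo oldest = { 1, false, &middle };

	retire_idle(&oldest);   /* recycles 1 and 2, stops at busy 3 */
	return 0;
}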
@@ -1019,6 +1020,7 @@ void _kgem_submit(struct kgem *kgem)
 		}
 	}
 
+	kgem_retire(kgem);
 	kgem_commit(kgem);
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
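The second hunk hangs kgem_retire() off the tail of every submission, so buffers whose requests have already completed are back in the inactive cache before the next batch starts allocating. A compilable toy sketch of that call ordering, with hypothetical names standing in for the kgem functions:

#include <stdbool.h>
#include <stdio.h>

struct fake_kgem { bool wedged; };

/* Stubs standing in for the kernel flush, kgem_retire, kgem_commit and
 * kgem_cleanup; only the call order is the point here. */
static void flush_batch(struct fake_kgem *k) { (void)k; puts("flush");   }
static void retire(struct fake_kgem *k)      { (void)k; puts("retire");  }
static void commit(struct fake_kgem *k)      { (void)k; puts("commit");  }
static void cleanup(struct fake_kgem *k)     { (void)k; puts("cleanup"); }

static void submit(struct fake_kgem *k)
{
	flush_batch(k);   /* hand the finished batch to the kernel */
	retire(k);        /* eagerly recycle completed requests' buffers */
	commit(k);        /* fold the batch's pending state into current */
	if (k->wedged)
		cleanup(k);   /* GPU hung: drop the outstanding work */
}

int main(void)
{
	struct fake_kgem k = { .wedged = false };
	submit(&k);
	return 0;
}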