diff options
-rw-r--r--   .pick_status.json                       |  2 +-
-rw-r--r--   src/gallium/drivers/iris/iris_fence.c   | 20 +++++++++++---------
2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/.pick_status.json b/.pick_status.json
index c482ee2d014..d12869e05d2 100644
--- a/.pick_status.json
+++ b/.pick_status.json
@@ -589,7 +589,7 @@
         "description": "iris: Reorder the loops in iris_fence_await() for clarity.",
         "nominated": true,
         "nomination_type": 1,
-        "resolution": 0,
+        "resolution": 1,
         "master_sha": null,
         "because_sha": "f459c56be6bf33439cccc11e932b2b5b52ba7ad8"
     },
diff --git a/src/gallium/drivers/iris/iris_fence.c b/src/gallium/drivers/iris/iris_fence.c
index 4600c1c4238..22acc317c0c 100644
--- a/src/gallium/drivers/iris/iris_fence.c
+++ b/src/gallium/drivers/iris/iris_fence.c
@@ -256,19 +256,21 @@ iris_fence_await(struct pipe_context *ctx,
                  "is unlikely to work without kernel 5.8+\n");
    }
 
-   /* Flush any current work in our context as it doesn't need to wait
-    * for this fence. Any future work in our context must wait.
-    */
-   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
-      struct iris_batch *batch = &ice->batches[b];
+   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+      struct iris_fine_fence *fine = fence->fine[i];
 
-      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
-         struct iris_fine_fence *fine = fence->fine[i];
+      if (iris_fine_fence_signaled(fine))
+         continue;
 
-         if (iris_fine_fence_signaled(fine))
-            continue;
+      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
+         struct iris_batch *batch = &ice->batches[b];
 
+         /* We're going to make any future work in this batch wait for our
+          * fence to have gone by. But any currently queued work doesn't
+          * need to wait. Flush the batch now, so it can happen sooner.
+          */
          iris_batch_flush(batch);
+         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
       }
    }