author     Jason Ekstrand <jason.ekstrand@intel.com>    2015-09-15 17:43:47 -0700
committer  Jason Ekstrand <jason.ekstrand@intel.com>    2015-09-17 17:44:20 -0700
commit     55daed947d3a0a7802733443a5f922dcc28a5770 (patch)
tree       52b50c102b97bbb665e3c3bf2a3c1a62ae4ae12d
parent     c55fa89251a1188b312aa09ba260cba7a411a282 (diff)

vk/allocator: Split block_pool_alloc into two functions
-rw-r--r--  src/vulkan/anv_allocator.c | 53
1 file changed, 31 insertions(+), 22 deletions(-)
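The patch pulls the lock-free bump-allocation loop out of anv_block_pool_alloc into a helper, anv_block_pool_alloc_new, that operates on whatever anv_block_state the caller passes in instead of directly on pool->state. The trick the helper preserves is that the next-free offset and the current end of the pool live in one 64-bit word, so a single atomic fetch-and-add both claims a block and tells the thread whether it raced past the end. A minimal stand-alone sketch of that idea follows; the struct layout and the names block_state and block_state_try_alloc are assumptions for illustration, not the definitions from anv_private.h:

#include <stdint.h>

/* Hypothetical stand-in for struct anv_block_state: the next free offset and
 * the end of the mapped pool share one 64-bit word.  On a little-endian
 * machine, atomically adding block_size to u64 advances 'next' while leaving
 * 'end' untouched, so one fetch-and-add both claims a block and reveals
 * whether the pool must grow. */
struct block_state {
   union {
      struct {
         uint32_t next; /* offset of the next unallocated block */
         uint32_t end;  /* first offset past the usable pool    */
      };
      uint64_t u64;
   };
};

/* Fast path only: returns the claimed offset, or UINT32_MAX when the caller
 * must take the grow/wait slow path that the diff keeps in
 * anv_block_pool_alloc_new. */
static uint32_t
block_state_try_alloc(struct block_state *state, uint32_t block_size)
{
   struct block_state old;

   old.u64 = __sync_fetch_and_add(&state->u64, block_size);
   if (old.next < old.end)
      return old.next;
   return UINT32_MAX;
}

Taking the state as a parameter rather than hard-coding pool->state is the point of the split: the same claim-grow-wake loop can then drive any anv_block_state a caller hands it.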
diff --git a/src/vulkan/anv_allocator.c b/src/vulkan/anv_allocator.c
index 6c7c85d5e74..6393233f0c4 100644
--- a/src/vulkan/anv_allocator.c
+++ b/src/vulkan/anv_allocator.c
@@ -359,11 +359,40 @@ fail:
    return 0;
 }
+static uint32_t
+anv_block_pool_alloc_new(struct anv_block_pool *pool,
+                         struct anv_block_state *pool_state)
+{
+   struct anv_block_state state, old, new;
+
+   while (1) {
+      state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size);
+      if (state.next < state.end) {
+         assert(pool->map);
+         return state.next;
+      } else if (state.next == state.end) {
+         /* We allocated the first block outside the pool, so we have to grow
+          * it. pool->next_block acts as a mutex: threads that try to allocate
+          * now will get block indexes above the current limit and hit
+          * futex_wait below. */
+         new.next = state.next + pool->block_size;
+         new.end = anv_block_pool_grow(pool, state.end);
+         assert(new.end > 0);
+         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
+         if (old.next != state.next)
+            futex_wake(&pool_state->end, INT_MAX);
+         return state.next;
+      } else {
+         futex_wait(&pool_state->end, state.end);
+         continue;
+      }
+   }
+}
+
 uint32_t
 anv_block_pool_alloc(struct anv_block_pool *pool)
 {
    int32_t offset;
-   struct anv_block_state state, old, new;
    /* Try free list first. */
    if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
@@ -372,27 +401,7 @@ anv_block_pool_alloc(struct anv_block_pool *pool)
       return offset;
    }
- restart:
-   state.u64 = __sync_fetch_and_add(&pool->state.u64, pool->block_size);
-   if (state.next < state.end) {
-      assert(pool->map);
-      return state.next;
-   } else if (state.next == state.end) {
-      /* We allocated the first block outside the pool, so we have to grow
-       * it. pool->next_block acts as a mutex: threads that try to allocate
-       * now will get block indexes above the current limit and hit
-       * futex_wait below. */
-      new.next = state.next + pool->block_size;
-      new.end = anv_block_pool_grow(pool, state.end);
-      assert(new.end > 0);
-      old.u64 = __sync_lock_test_and_set(&pool->state.u64, new.u64);
-      if (old.next != state.next)
-         futex_wake(&pool->state.end, INT_MAX);
-      return state.next;
-   } else {
-      futex_wait(&pool->state.end, state.end);
-      goto restart;
-   }
+   return anv_block_pool_alloc_new(pool, &pool->state);
 }
 void
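The slow path in the new helper is a small wait/wake protocol: the one thread whose fetch-and-add lands exactly on state.end grows the pool, publishes the enlarged {next, end} pair with an atomic swap, and wakes everyone who overshot and parked on the futex; the overshooting threads simply wait for 'end' to change and retry the loop. The futex_wait/futex_wake calls are helpers defined earlier in anv_allocator.c; a hedged sketch of what such Linux wrappers typically look like (the exact signatures in the file may differ):

#include <stdint.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wake up to 'count' threads sleeping on *addr.  The grower calls the wake
 * side with INT_MAX after swapping in the new {next, end} state. */
static inline long
futex_wake(uint32_t *addr, int count)
{
   return syscall(SYS_futex, addr, FUTEX_WAKE, count, NULL, NULL, 0);
}

/* Sleep until *addr stops being equal to 'current'.  Threads that fetched an
 * offset past the end of the pool wait here for the grower to publish a
 * larger 'end', then loop and try the fetch-and-add again. */
static inline long
futex_wait(uint32_t *addr, uint32_t current)
{
   return syscall(SYS_futex, addr, FUTEX_WAIT, current, NULL, NULL, 0);
}

If *addr already differs from 'current' by the time futex_wait runs, the kernel returns immediately, which is why the retry loop in anv_block_pool_alloc_new is safe even when the wake arrives before the wait.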