author    Rafael Antognolli <rafael.antognolli@intel.com>  2018-11-29 14:31:15 -0800
committer Rafael Antognolli <rafael.antognolli@intel.com>  2019-01-17 15:07:52 -0800
commit    e2179aceaf628b7d14a3e78791f1e181ac766157 (patch)
tree      9a9c868fc5d96ad360bdd285c665ece40b260ef1
parent    d18267fb48106872dbd08acac33c16dd3dd910c0 (diff)
anv/allocator: Use anv_state_table on back_alloc too.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
-rw-r--r--  src/intel/vulkan/anv_allocator.c  | 35
-rw-r--r--  src/intel/vulkan/anv_private.h    |  2
2 files changed, 22 insertions(+), 15 deletions(-)
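With this change the back allocations are tracked the same way the bucket allocations already are: states live in the pool's anv_state_table, and the free list (union anv_free_list2) chains table indices instead of raw offsets resolved through pool->block_pool.map. The standalone sketch below illustrates that pattern only; the demo_* names, the fixed-capacity table, and the single-threaded free list are assumptions made for brevity, not anv APIs.

/*
 * Hypothetical sketch (demo_* names, not anv code): a small state table plus
 * an index-based free list for "back" allocations, which hand out negative
 * offsets growing downwards from offset 0.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_NO_INDEX   UINT32_MAX   /* empty free list                      */
#define DEMO_TABLE_CAP  64           /* the real table grows; this one not   */
#define DEMO_BLOCK_SIZE 4096

struct demo_state {
   int32_t  offset;       /* back allocations always have offset < 0         */
   uint32_t alloc_size;
   uint32_t next;         /* free-list link: index of the next free state    */
};

struct demo_pool {
   struct demo_state table[DEMO_TABLE_CAP];
   uint32_t count;             /* table entries handed out so far            */
   uint32_t back_free_head;    /* index-based free list for back allocs      */
   int32_t  back_end;          /* lowest offset allocated so far             */
};

static void
demo_pool_init(struct demo_pool *pool)
{
   pool->count = 0;
   pool->back_free_head = DEMO_NO_INDEX;
   pool->back_end = 0;
}

/* Rough analogue of anv_state_pool_alloc_back() after this patch: pop a
 * recycled index off the free list, otherwise reserve a new table slot and a
 * fresh negative offset. */
static struct demo_state *
demo_pool_alloc_back(struct demo_pool *pool)
{
   if (pool->back_free_head != DEMO_NO_INDEX) {
      struct demo_state *state = &pool->table[pool->back_free_head];
      pool->back_free_head = state->next;
      assert(state->offset < 0);
      return state;
   }

   assert(pool->count < DEMO_TABLE_CAP);
   struct demo_state *state = &pool->table[pool->count++];
   pool->back_end -= DEMO_BLOCK_SIZE;
   state->offset = pool->back_end;
   state->alloc_size = DEMO_BLOCK_SIZE;
   return state;
}

/* Rough analogue of the free path: push the state's table index, not its
 * offset, onto the free list. */
static void
demo_pool_free_back(struct demo_pool *pool, struct demo_state *state)
{
   assert(state->offset < 0);
   state->next = pool->back_free_head;
   pool->back_free_head = (uint32_t)(state - pool->table);
}

int
main(void)
{
   struct demo_pool pool;
   demo_pool_init(&pool);

   struct demo_state *a = demo_pool_alloc_back(&pool);   /* offset -4096     */
   demo_pool_free_back(&pool, a);
   struct demo_state *b = demo_pool_alloc_back(&pool);   /* recycles a       */

   printf("a->offset=%d, recycled=%d\n", (int)a->offset, a == b);
   return 0;
}

The real allocator differs in the details (the table is shared with the forward buckets, slots are reserved through anv_state_table_add, and the free lists must cope with concurrent use), but the switch from offsets to table indices above is the gist of the patch.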
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 38705cf65f7..ff880751fe5 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -885,7 +885,7 @@ anv_state_pool_init(struct anv_state_pool *pool,
 
    assert(util_is_power_of_two_or_zero(block_size));
    pool->block_size = block_size;
-   pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
+   pool->back_alloc_free_list = ANV_FREE_LIST2_EMPTY;
    for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
       pool->buckets[i].free_list = ANV_FREE_LIST2_EMPTY;
       pool->buckets[i].block.next = 0;
@@ -1098,22 +1098,30 @@ anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
 struct anv_state
 anv_state_pool_alloc_back(struct anv_state_pool *pool)
 {
-   struct anv_state state;
-   state.alloc_size = pool->block_size;
+   struct anv_state *state;
+   uint32_t alloc_size = pool->block_size;
 
-   if (anv_free_list_pop(&pool->back_alloc_free_list,
-                         &pool->block_pool.map, &state.offset)) {
-      assert(state.offset < 0);
+   state = anv_free_list_pop2(&pool->back_alloc_free_list, &pool->table);
+   if (state) {
+      assert(state->offset < 0);
       goto done;
    }
 
-   state.offset = anv_block_pool_alloc_back(&pool->block_pool,
-                                            pool->block_size);
+   int32_t offset;
+   offset = anv_block_pool_alloc_back(&pool->block_pool,
+                                      pool->block_size);
+   uint32_t idx;
+   VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+   assert(result == VK_SUCCESS);
+
+   state = anv_state_table_get(&pool->table, idx);
+   state->offset = offset;
+   state->alloc_size = alloc_size;
+   state->map = pool->block_pool.map + state->offset;
 
 done:
-   state.map = pool->block_pool.map + state.offset;
-   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
-   return state;
+   VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
+   return *state;
 }
 
 static void
@@ -1124,9 +1132,8 @@ anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
 
    if (state.offset < 0) {
       assert(state.alloc_size == pool->block_size);
-      anv_free_list_push(&pool->back_alloc_free_list,
-                         pool->block_pool.map, state.offset,
-                         state.alloc_size, 1);
+      anv_free_list_push2(&pool->back_alloc_free_list,
+                          &pool->table, state.idx, 1);
    } else {
       anv_free_list_push2(&pool->buckets[bucket].free_list,
                           &pool->table, state.idx, 1);
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 5ee087fb5c9..497386c0cba 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -737,7 +737,7 @@ struct anv_state_pool {
    uint32_t block_size;
 
    /** Free list for "back" allocations */
-   union anv_free_list back_alloc_free_list;
+   union anv_free_list2 back_alloc_free_list;
 
    struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
 };