summaryrefslogtreecommitdiff
path: root/src/gallium/drivers/zink/zink_bo.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/gallium/drivers/zink/zink_bo.h')
-rw-r--r-- | src/gallium/drivers/zink/zink_bo.h | 199
1 files changed, 84 insertions, 115 deletions
diff --git a/src/gallium/drivers/zink/zink_bo.h b/src/gallium/drivers/zink/zink_bo.h
index 0fd74cb7923..cd7338aff5a 100644
--- a/src/gallium/drivers/zink/zink_bo.h
+++ b/src/gallium/drivers/zink/zink_bo.h
@@ -26,116 +26,48 @@
#ifndef ZINK_BO_H
#define ZINK_BO_H
-#include <vulkan/vulkan.h>
-#include "pipebuffer/pb_cache.h"
-#include "pipebuffer/pb_slab.h"
+#include "zink_types.h"
#include "zink_batch.h"
#define VK_VIS_VRAM (VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
-enum zink_resource_access {
- ZINK_RESOURCE_ACCESS_READ = 1,
- ZINK_RESOURCE_ACCESS_WRITE = 32,
- ZINK_RESOURCE_ACCESS_RW = ZINK_RESOURCE_ACCESS_READ | ZINK_RESOURCE_ACCESS_WRITE,
-};
-
-
-enum zink_heap {
- ZINK_HEAP_DEVICE_LOCAL,
- ZINK_HEAP_DEVICE_LOCAL_SPARSE,
- ZINK_HEAP_DEVICE_LOCAL_VISIBLE,
- ZINK_HEAP_HOST_VISIBLE_COHERENT,
- ZINK_HEAP_HOST_VISIBLE_CACHED,
- ZINK_HEAP_MAX,
-};
-
-enum zink_alloc_flag {
- ZINK_ALLOC_SPARSE = 1<<0,
- ZINK_ALLOC_NO_SUBALLOC = 1<<1,
-};
-
-
-struct zink_bo {
- struct pb_buffer base;
-
- union {
- struct {
- void *cpu_ptr; /* for user_ptr and permanent maps */
- int map_count;
-
- bool is_user_ptr;
- bool use_reusable_pool;
-
- /* Whether buffer_get_handle or buffer_from_handle has been called,
- * it can only transition from false to true. Protected by lock.
- */
- bool is_shared;
- } real;
- struct {
- struct pb_slab_entry entry;
- struct zink_bo *real;
- } slab;
- struct {
- uint32_t num_va_pages;
- uint32_t num_backing_pages;
-
- struct list_head backing;
-
- /* Commitment information for each page of the virtual memory area. */
- struct zink_sparse_commitment *commitments;
- } sparse;
- } u;
-
- VkDeviceMemory mem;
- uint64_t offset;
-
- uint32_t unique_id;
-
- simple_mtx_t lock;
-
- struct zink_batch_usage *reads;
- struct zink_batch_usage *writes;
-
- struct pb_cache_entry cache_entry[];
-};
-
-static inline struct zink_bo *
-zink_bo(struct pb_buffer *pbuf)
-{
- return (struct zink_bo*)pbuf;
-}
+#define VK_STAGING_RAM (VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
+#define VK_LAZY_VRAM (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+
-static inline enum zink_alloc_flag
+static ALWAYS_INLINE enum zink_alloc_flag
zink_alloc_flags_from_heap(enum zink_heap heap)
{
- enum zink_alloc_flag flags = 0;
switch (heap) {
case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
- flags |= ZINK_ALLOC_SPARSE;
+ return ZINK_ALLOC_SPARSE;
break;
default:
break;
}
- return flags;
+ return (enum zink_alloc_flag)0;
}
-static inline VkMemoryPropertyFlags
+static ALWAYS_INLINE VkMemoryPropertyFlags
vk_domain_from_heap(enum zink_heap heap)
{
- VkMemoryPropertyFlags domains = 0;
+ VkMemoryPropertyFlags domains = (VkMemoryPropertyFlags)0;
switch (heap) {
case ZINK_HEAP_DEVICE_LOCAL:
case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
domains = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
break;
+ case ZINK_HEAP_DEVICE_LOCAL_LAZY:
+ domains = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ break;
case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
break;
case ZINK_HEAP_HOST_VISIBLE_COHERENT:
domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
break;
- case ZINK_HEAP_HOST_VISIBLE_CACHED:
- domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ case ZINK_HEAP_HOST_VISIBLE_COHERENT_CACHED:
+ domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
break;
default:
break;
@@ -143,7 +75,7 @@ vk_domain_from_heap(enum zink_heap heap)
return domains;
}
-static inline enum zink_heap
+static ALWAYS_INLINE enum zink_heap
zink_heap_from_domain_flags(VkMemoryPropertyFlags domains, enum zink_alloc_flag flags)
{
if (flags & ZINK_ALLOC_SPARSE)
@@ -156,11 +88,22 @@ zink_heap_from_domain_flags(VkMemoryPropertyFlags domains, enum zink_alloc_flag
return ZINK_HEAP_DEVICE_LOCAL;
if (domains & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
- return ZINK_HEAP_HOST_VISIBLE_CACHED;
+ return ZINK_HEAP_HOST_VISIBLE_COHERENT_CACHED;
return ZINK_HEAP_HOST_VISIBLE_COHERENT;
}
+static ALWAYS_INLINE unsigned
+zink_mem_type_idx_from_types(struct zink_screen *screen, enum zink_heap heap, uint32_t types)
+{
+ for (unsigned i = 0; i < screen->heap_count[heap]; i++) {
+ if (types & BITFIELD_BIT(screen->heap_map[heap][i])) {
+ return screen->heap_map[heap][i];
+ }
+ }
+ return UINT32_MAX;
+}
+
bool
zink_bo_init(struct zink_screen *screen);
@@ -168,24 +111,27 @@ void
zink_bo_deinit(struct zink_screen *screen);
struct pb_buffer *
-zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext);
+zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, unsigned mem_type_idx, const void *pNext);
-static inline uint64_t
+bool
+zink_bo_get_kms_handle(struct zink_screen *screen, struct zink_bo *bo, int fd, uint32_t *handle);
+
+static ALWAYS_INLINE uint64_t
zink_bo_get_offset(const struct zink_bo *bo)
{
return bo->offset;
}
-static inline VkDeviceMemory
+static ALWAYS_INLINE VkDeviceMemory
zink_bo_get_mem(const struct zink_bo *bo)
{
return bo->mem ? bo->mem : bo->u.slab.real->mem;
}
-static inline VkDeviceSize
+static ALWAYS_INLINE VkDeviceSize
zink_bo_get_size(const struct zink_bo *bo)
{
- return bo->mem ? bo->base.size : bo->u.slab.real->base.size;
+ return bo->mem ? bo->base.base.size : bo->u.slab.real->base.base.size;
}
void *
@@ -194,67 +140,90 @@ void
zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo);
bool
-zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit);
+zink_bo_commit(struct zink_context *ctx, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem);
-static inline bool
+static ALWAYS_INLINE bool
zink_bo_has_unflushed_usage(const struct zink_bo *bo)
{
- return zink_batch_usage_is_unflushed(bo->reads) ||
- zink_batch_usage_is_unflushed(bo->writes);
+ return zink_batch_usage_is_unflushed(bo->reads.u) ||
+ zink_batch_usage_is_unflushed(bo->writes.u);
}
-static inline bool
+static ALWAYS_INLINE bool
zink_bo_has_usage(const struct zink_bo *bo)
{
- return zink_batch_usage_exists(bo->reads) ||
- zink_batch_usage_exists(bo->writes);
+ return zink_bo_has_unflushed_usage(bo) ||
+ (zink_batch_usage_exists(bo->reads.u) && bo->reads.submit_count == bo->reads.u->submit_count) ||
+ (zink_batch_usage_exists(bo->writes.u) && bo->writes.submit_count == bo->writes.u->submit_count);
}
-static inline bool
+static ALWAYS_INLINE bool
zink_bo_usage_matches(const struct zink_bo *bo, const struct zink_batch_state *bs)
{
- return zink_batch_usage_matches(bo->reads, bs) ||
- zink_batch_usage_matches(bo->writes, bs);
+ return (zink_batch_usage_matches(bo->reads.u, bs) && bo->reads.submit_count == bo->reads.u->submit_count) ||
+ (zink_batch_usage_matches(bo->writes.u, bs) && bo->writes.submit_count == bo->writes.u->submit_count);
}
-static inline bool
+static ALWAYS_INLINE bool
zink_bo_usage_check_completion(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
{
- if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads))
+ if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads.u))
+ return false;
+ if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes.u))
+ return false;
+ return true;
+}
+
+static ALWAYS_INLINE bool
+zink_bo_usage_check_completion_fast(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
+{
+ if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion_fast(screen, bo->reads.u))
return false;
- if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes))
+ if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion_fast(screen, bo->writes.u))
return false;
return true;
}
-static inline void
+static ALWAYS_INLINE void
zink_bo_usage_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
{
if (access & ZINK_RESOURCE_ACCESS_READ)
- zink_batch_usage_wait(ctx, bo->reads);
+ zink_batch_usage_wait(ctx, bo->reads.u);
if (access & ZINK_RESOURCE_ACCESS_WRITE)
- zink_batch_usage_wait(ctx, bo->writes);
+ zink_batch_usage_wait(ctx, bo->writes.u);
}
-static inline void
+static ALWAYS_INLINE void
+zink_bo_usage_try_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
+{
+ if (access & ZINK_RESOURCE_ACCESS_READ)
+ zink_batch_usage_try_wait(ctx, bo->reads.u);
+ if (access & ZINK_RESOURCE_ACCESS_WRITE)
+ zink_batch_usage_try_wait(ctx, bo->writes.u);
+}
+
+static ALWAYS_INLINE void
zink_bo_usage_set(struct zink_bo *bo, struct zink_batch_state *bs, bool write)
{
- if (write)
- zink_batch_usage_set(&bo->writes, bs);
- else
- zink_batch_usage_set(&bo->reads, bs);
+ if (write) {
+ zink_batch_usage_set(&bo->writes.u, bs);
+ bo->writes.submit_count = bs->usage.submit_count;
+ } else {
+ zink_batch_usage_set(&bo->reads.u, bs);
+ bo->reads.submit_count = bs->usage.submit_count;
+ }
}
-static inline bool
+static ALWAYS_INLINE bool
zink_bo_usage_unset(struct zink_bo *bo, struct zink_batch_state *bs)
{
- zink_batch_usage_unset(&bo->reads, bs);
- zink_batch_usage_unset(&bo->writes, bs);
- return bo->reads || bo->writes;
+ zink_batch_usage_unset(&bo->reads.u, bs);
+ zink_batch_usage_unset(&bo->writes.u, bs);
+ return bo->reads.u || bo->writes.u;
}
-static inline void
+static ALWAYS_INLINE void
zink_bo_unref(struct zink_screen *screen, struct zink_bo *bo)
{
struct pb_buffer *pbuf = &bo->base;