author     Flora Cui <Flora.Cui@amd.com>   2016-02-04 09:42:45 +0800
committer  Qiang Yu <Qiang.Yu@amd.com>     2017-05-17 10:24:20 +0800
commit     29dc49fbf19e4fca31adcace3e3e2e20faaecab9 (patch)
tree       9455e85d1eb5c3707663a01acc074337e58437ac
parent     8ff2c9c5df265745518a43ffc46850ba7afe5b09 (diff)
amdgpu: support alloc va from range v2
v2:
  7f0478c3a7aea7a1a85ade12b2091d41ce5fcf15 [Junwei Zhang] va allocation may fall to the range outside of requested [min,max]
  8a0a75fbbe641bb3c7b4564004ed2cbb10a68ffd [Ken Wang] fix a bug in va range allocation

Change-Id: I05f24e44863aeffa7bcd735bf787a5328d587044
Signed-off-by: Flora Cui <Flora.Cui@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Signed-off-by: David Mao <david.mao@amd.com>
-rw-r--r--  amdgpu/amdgpu.h         51
-rw-r--r--  amdgpu/amdgpu_vamgr.c  179
2 files changed, 230 insertions(+), 0 deletions(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index b125d06e..94e0f11f 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -1251,6 +1251,57 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
uint64_t flags);
/**
+ * Allocate virtual address range in client defined range
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param va_range_type - \c [in] Type of MC va range from which to allocate
+ * \param size - \c [in] Size of range. The size must be correctly aligned;
+ * it is the client's responsibility to align the size according to the
+ * future usage of the allocated range.
+ * \param va_base_alignment - \c [in] Overrides the base address alignment
+ * requirement for the GPU VM MC virtual address assignment.
+ * Must be a multiple of the size alignments received via
+ * 'amdgpu_buffer_size_alignments'.
+ * If 0, the default alignment is used.
+ * \param va_base_required - \c [in] Required VA base address.
+ * If 0, the library chooses an available address inside
+ * [va_range_min, va_range_max].
+ * If a non-zero value is passed and that address is already in use, the
+ * corresponding error status is returned.
+ * \param va_range_min - \c [in] Minimum address of the requested VA range.
+ * Only used if va_base_required is 0.
+ * \param va_range_max - \c [in] Maximum address of the requested VA range.
+ * Only used if va_base_required is 0.
+ * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
+ * by client.
+ * \param va_range_handle - \c [out] On return: Handle assigned to allocation
+ * \param flags - \c [in] flags for special VA range
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+ * \notes \n
+ * It is the client's responsibility to correctly handle VA assignments and
+ * usage. Neither the kernel driver nor libdrm_amdgpu is able to prevent or
+ * detect a wrong VA assignment.
+ *
+ * It is the client's responsibility to correctly handle multi-GPU cases and
+ * to pass the corresponding arrays of all device handles where the
+ * corresponding VA will be used.
+ *
+*/
+int amdgpu_va_range_alloc_in_range(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t va_range_min,
+ uint64_t va_range_max,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags);
+
+/**
* Free previously allocated virtual address range
*
*
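For reference, a minimal usage sketch of the new entry point declared above (not part of this commit): the device is assumed to have been set up with amdgpu_device_initialize(), and the 1 MiB size, default alignment, and the [4 GiB, 8 GiB) window are illustrative values only.

#include <stdio.h>
#include "amdgpu.h"

/* Hypothetical helper: allocate 1 MiB of GPU VA inside a caller-chosen
 * window, letting the library pick the exact base address. */
static int alloc_va_in_window(amdgpu_device_handle dev,
			      uint64_t *va, amdgpu_va_handle *handle)
{
	const uint64_t size      = 1ull << 20;	/* 1 MiB            */
	const uint64_t alignment = 0;		/* use the default  */
	const uint64_t range_min = 1ull << 32;	/* example window   */
	const uint64_t range_max = 1ull << 33;
	int r;

	/* va_base_required == 0: pick any address in [range_min, range_max] */
	r = amdgpu_va_range_alloc_in_range(dev, amdgpu_gpu_va_range_general,
					   size, alignment,
					   0 /* va_base_required */,
					   range_min, range_max,
					   va, handle, 0 /* flags */);
	if (r)
		fprintf(stderr, "VA allocation failed: %d\n", r);
	return r;
}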
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 54711ee5..63124a4e 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -169,6 +169,104 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
return offset;
}
+static uint64_t amdgpu_vamgr_find_va_in_range(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+ uint64_t alignment, uint64_t range_min, uint64_t range_max)
+{
+ struct amdgpu_bo_va_hole *hole, *n;
+ uint64_t offset = 0, waste = 0;
+
+ if (mgr->va_min >= range_max ||
+ mgr->va_max <= range_min)
+ return AMDGPU_INVALID_VA_ADDRESS;
+
+ alignment = MAX2(alignment, mgr->va_alignment);
+ size = ALIGN(size, mgr->va_alignment);
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ /* TODO: use a more appropriate way to track the holes */
+ /* first look for a hole */
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ if (hole->offset > range_max ||
+ hole->offset + hole->size < range_min ||
+ (hole->offset > range_min && hole->offset + size > range_max) ||
+ (hole->offset < range_min && range_min + size > hole->offset + hole->size) ||
+ hole->size < size)
+ continue;
+ /*
+ * it is possible that the hole covers more than one range,
+ * thus we need to respect the range_min
+ */
+ offset = MAX2(hole->offset, range_min);
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ offset += waste;
+ /* the gap between range_min and hole->offset needs to be covered as well */
+ waste = offset - hole->offset;
+ if (offset >= (hole->offset + hole->size)) {
+ continue;
+ }
+
+ if (offset + size > range_max) {
+ continue;
+ }
+
+ if (!waste && hole->size == size) {
+ offset = hole->offset;
+ list_del(&hole->list);
+ free(hole);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ if ((hole->size - waste) > size) {
+ if (waste) {
+ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ n->size = waste;
+ n->offset = hole->offset;
+ list_add(&n->list, &hole->list);
+ }
+ hole->size -= (size + waste);
+ hole->offset += size + waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ if ((hole->size - waste) == size) {
+ hole->size = waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ }
+
+ if (mgr->va_offset > range_max) {
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return AMDGPU_INVALID_VA_ADDRESS;
+ } else if (mgr->va_offset > range_min) {
+ offset = mgr->va_offset;
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ if (offset + waste + size > range_max) {
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return AMDGPU_INVALID_VA_ADDRESS;
+ }
+ } else {
+ offset = mgr->va_offset;
+ waste = range_min % alignment;
+ waste = waste ? alignment - waste : 0;
+ waste += range_min - offset;
+ }
+
+ if (waste) {
+ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ n->size = waste;
+ n->offset = offset;
+ list_add(&n->list, &mgr->va_holes);
+ }
+
+ offset += waste;
+ mgr->va_offset = size + offset;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+}
+
drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
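As a side note, the offset/waste computation in amdgpu_vamgr_find_va_in_range() above can be illustrated with a small stand-alone sketch (the values are made up and this is not part of the commit): the candidate offset is clamped to range_min, rounded up to the alignment, and whatever is skipped at the front of the hole is accounted as "waste" so it can be returned to the hole list.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t hole_offset = 0x10000;	/* hypothetical hole start  */
	uint64_t range_min   = 0x12345;	/* hypothetical range floor */
	uint64_t alignment   = 0x1000;	/* 4 KiB alignment          */

	/* clamp to the requested range, then round up to the alignment */
	uint64_t offset = hole_offset > range_min ? hole_offset : range_min;
	uint64_t waste  = offset % alignment;
	waste = waste ? alignment - waste : 0;
	offset += waste;

	/* the whole gap from the start of the hole counts as waste */
	waste = offset - hole_offset;

	/* prints: offset = 0x13000, waste = 0x3000 */
	printf("offset = 0x%" PRIx64 ", waste = 0x%" PRIx64 "\n", offset, waste);
	return 0;
}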
@@ -294,6 +392,87 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
return 0;
}
+static int _amdgpu_va_range_alloc_in_range(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_range_min,
+ uint64_t va_range_max,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags)
+{
+ struct amdgpu_bo_va_mgr *vamgr;
+
+ if (amdgpu_gpu_va_range_svm == va_range_type) {
+ vamgr = &vamgr_svm;
+ if (!vamgr->valid)
+ return -EINVAL;
+ } else if (flags & AMDGPU_VA_RANGE_32_BIT)
+ vamgr = &dev->vamgr_32;
+ else
+ vamgr = &dev->vamgr;
+
+ va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+ size = ALIGN(size, vamgr->va_alignment);
+
+ *va_base_allocated = amdgpu_vamgr_find_va_in_range(vamgr, size,
+ va_base_alignment, va_range_min, va_range_max);
+
+ if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
+ (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+ /* fallback to 32bit address */
+ vamgr = &dev->vamgr_32;
+ *va_base_allocated = amdgpu_vamgr_find_va_in_range(vamgr, size,
+ va_base_alignment, va_range_min, va_range_max);
+ }
+
+ if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
+ struct amdgpu_va *va;
+ va = calloc(1, sizeof(struct amdgpu_va));
+ if (!va) {
+ amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
+ return -ENOMEM;
+ }
+ va->dev = dev;
+ va->address = *va_base_allocated;
+ va->size = size;
+ va->range = va_range_type;
+ va->vamgr = vamgr;
+ *va_range_handle = va;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_va_range_alloc_in_range(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t va_range_min,
+ uint64_t va_range_max,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags)
+{
+ if (va_base_required)
+ return amdgpu_va_range_alloc(dev, va_range_type,
+ size, va_base_alignment,
+ va_base_required, va_base_allocated,
+ va_range_handle, flags);
+ else
+ return _amdgpu_va_range_alloc_in_range(dev,
+ va_range_type, size,
+ va_base_alignment,
+ va_range_min, va_range_max,
+ va_base_allocated,
+ va_range_handle, flags);
+}
+
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
if(!va_range_handle || !va_range_handle->address)
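
To round this off, a sketch of the other branch of the wrapper (again, not part of the commit): when va_base_required is non-zero the window arguments are ignored and the request is forwarded to amdgpu_va_range_alloc(). The base address below is an arbitrary example and 'dev' is assumed to be initialized.

#include "amdgpu.h"

static int alloc_fixed_va(amdgpu_device_handle dev)
{
	uint64_t base = 0;
	amdgpu_va_handle handle = NULL;
	const uint64_t wanted = 0x100000000ull;	/* example fixed base */
	int r;

	r = amdgpu_va_range_alloc_in_range(dev, amdgpu_gpu_va_range_general,
					   1ull << 20,	/* size          */
					   0,		/* alignment     */
					   wanted,	/* required base */
					   0, 0,	/* window unused */
					   &base, &handle, 0);
	if (r)
		return r;	/* e.g. the address is already in use */

	/* ... use [base, base + 1 MiB) ... */

	return amdgpu_va_range_free(handle);
}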