author     monk.liu <monk.liu@amd.com>              2015-05-13 13:58:43 +0800
committer  Alex Deucher <alexander.deucher@amd.com>  2015-07-30 00:11:10 -0400
commit     27078f02dfb2d608f8ee8598c4deeeaa1cf9c7e9 (patch)
tree       ca2fc68880204bfaff5d4dd1f8574f2ec62e87f2
parent     9b6b2e580eeb88dba3d98f8b80fb11364c0fff8c (diff)
amdgpu: fix code alignment
Signed-off-by: monk.liu <monk.liu@amd.com>
-rw-r--r--  amdgpu/amdgpu_vamgr.c | 236
1 file changed, 118 insertions(+), 118 deletions(-)
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 070ecc47..877e0baa 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -43,134 +43,134 @@ void amdgpu_vamgr_init(struct amdgpu_device *dev)
uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
uint64_t size, uint64_t alignment)
{
- struct amdgpu_bo_va_hole *hole, *n;
- uint64_t offset = 0, waste = 0;
-
- alignment = MAX2(alignment, mgr->va_alignment);
- size = ALIGN(size, mgr->va_alignment);
-
- pthread_mutex_lock(&mgr->bo_va_mutex);
- /* TODO: using more appropriate way to track the holes */
- /* first look for a hole */
- LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
- offset = hole->offset;
- waste = offset % alignment;
- waste = waste ? alignment - waste : 0;
- offset += waste;
- if (offset >= (hole->offset + hole->size)) {
- continue;
- }
- if (!waste && hole->size == size) {
- offset = hole->offset;
- list_del(&hole->list);
- free(hole);
- pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
- }
- if ((hole->size - waste) > size) {
- if (waste) {
- n = calloc(1,
- sizeof(struct amdgpu_bo_va_hole));
- n->size = waste;
- n->offset = hole->offset;
- list_add(&n->list, &hole->list);
- }
- hole->size -= (size + waste);
- hole->offset += size + waste;
- pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
- }
- if ((hole->size - waste) == size) {
- hole->size = waste;
- pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
- }
- }
-
- offset = mgr->va_offset;
- waste = offset % alignment;
- waste = waste ? alignment - waste : 0;
+ struct amdgpu_bo_va_hole *hole, *n;
+ uint64_t offset = 0, waste = 0;
+
+ alignment = MAX2(alignment, mgr->va_alignment);
+ size = ALIGN(size, mgr->va_alignment);
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ /* TODO: using more appropriate way to track the holes */
+ /* first look for a hole */
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ offset = hole->offset;
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ offset += waste;
+ if (offset >= (hole->offset + hole->size)) {
+ continue;
+ }
+ if (!waste && hole->size == size) {
+ offset = hole->offset;
+ list_del(&hole->list);
+ free(hole);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ if ((hole->size - waste) > size) {
+ if (waste) {
+ n = calloc(1,
+ sizeof(struct amdgpu_bo_va_hole));
+ n->size = waste;
+ n->offset = hole->offset;
+ list_add(&n->list, &hole->list);
+ }
+ hole->size -= (size + waste);
+ hole->offset += size + waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ if ((hole->size - waste) == size) {
+ hole->size = waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
+ }
+ }
+
+ offset = mgr->va_offset;
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
if (offset + waste + size > mgr->va_max) {
pthread_mutex_unlock(&mgr->bo_va_mutex);
return AMDGPU_INVALID_VA_ADDRESS;
}
- if (waste) {
- n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
- n->size = waste;
- n->offset = offset;
- list_add(&n->list, &mgr->va_holes);
- }
- offset += waste;
- mgr->va_offset += size + waste;
- pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
+ if (waste) {
+ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ n->size = waste;
+ n->offset = offset;
+ list_add(&n->list, &mgr->va_holes);
+ }
+ offset += waste;
+ mgr->va_offset += size + waste;
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return offset;
}
void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
uint64_t size)
{
- struct amdgpu_bo_va_hole *hole;
-
- size = ALIGN(size, mgr->va_alignment);
-
- pthread_mutex_lock(&mgr->bo_va_mutex);
- if ((va + size) == mgr->va_offset) {
- mgr->va_offset = va;
- /* Delete uppermost hole if it reaches the new top */
- if (!LIST_IS_EMPTY(&mgr->va_holes)) {
- hole = container_of(mgr->va_holes.next, hole, list);
- if ((hole->offset + hole->size) == va) {
- mgr->va_offset = hole->offset;
- list_del(&hole->list);
- free(hole);
- }
- }
- } else {
- struct amdgpu_bo_va_hole *next;
-
- hole = container_of(&mgr->va_holes, hole, list);
- LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
- if (next->offset < va)
- break;
- hole = next;
- }
-
- if (&hole->list != &mgr->va_holes) {
- /* Grow upper hole if it's adjacent */
- if (hole->offset == (va + size)) {
- hole->offset = va;
- hole->size += size;
- /* Merge lower hole if it's adjacent */
- if (next != hole
- && &next->list != &mgr->va_holes
- && (next->offset + next->size) == va) {
- next->size += hole->size;
- list_del(&hole->list);
- free(hole);
- }
- goto out;
- }
- }
-
- /* Grow lower hole if it's adjacent */
- if (next != hole && &next->list != &mgr->va_holes &&
- (next->offset + next->size) == va) {
- next->size += size;
- goto out;
- }
-
- /* FIXME on allocation failure we just lose virtual address space
- * maybe print a warning
- */
- next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
- if (next) {
- next->size = size;
- next->offset = va;
- list_add(&next->list, &hole->list);
- }
- }
+ struct amdgpu_bo_va_hole *hole;
+
+ size = ALIGN(size, mgr->va_alignment);
+
+ pthread_mutex_lock(&mgr->bo_va_mutex);
+ if ((va + size) == mgr->va_offset) {
+ mgr->va_offset = va;
+ /* Delete uppermost hole if it reaches the new top */
+ if (!LIST_IS_EMPTY(&mgr->va_holes)) {
+ hole = container_of(mgr->va_holes.next, hole, list);
+ if ((hole->offset + hole->size) == va) {
+ mgr->va_offset = hole->offset;
+ list_del(&hole->list);
+ free(hole);
+ }
+ }
+ } else {
+ struct amdgpu_bo_va_hole *next;
+
+ hole = container_of(&mgr->va_holes, hole, list);
+ LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+ if (next->offset < va)
+ break;
+ hole = next;
+ }
+
+ if (&hole->list != &mgr->va_holes) {
+ /* Grow upper hole if it's adjacent */
+ if (hole->offset == (va + size)) {
+ hole->offset = va;
+ hole->size += size;
+ /* Merge lower hole if it's adjacent */
+ if (next != hole
+ && &next->list != &mgr->va_holes
+ && (next->offset + next->size) == va) {
+ next->size += hole->size;
+ list_del(&hole->list);
+ free(hole);
+ }
+ goto out;
+ }
+ }
+
+ /* Grow lower hole if it's adjacent */
+ if (next != hole && &next->list != &mgr->va_holes &&
+ (next->offset + next->size) == va) {
+ next->size += size;
+ goto out;
+ }
+
+ /* FIXME on allocation failure we just lose virtual address space
+ * maybe print a warning
+ */
+ next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ if (next) {
+ next->size = size;
+ next->offset = va;
+ list_add(&next->list, &hole->list);
+ }
+ }
out:
- pthread_mutex_unlock(&mgr->bo_va_mutex);
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
}