author     Jerome Glisse <glisse@freedesktop.org>  2009-02-11 17:04:19 +0100
committer  Jerome Glisse <glisse@freedesktop.org>  2009-02-11 17:04:19 +0100
commit     5c7574555db60ee8ff4af39d9e5dbb8da41e8a3d (patch)
tree       b04b2ee13fee87ebc322cdf908bd1b86b6f26ebf
parent     815856d5de70e941f54b865faa92bd38916880c3 (diff)
ttm: indentation fixes
-rw-r--r--  linux-core/ttm/ttm_bo_util.c  329
1 file changed, 184 insertions, 145 deletions
diff --git a/linux-core/ttm/ttm_bo_util.c b/linux-core/ttm/ttm_bo_util.c
index 903399c6..fdfbdfea 100644
--- a/linux-core/ttm/ttm_bo_util.c
+++ b/linux-core/ttm/ttm_bo_util.c
@@ -321,170 +321,209 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
- if (!(caching_flags & TTM_PL_FLAG_CACHED)
- tmp = pgprot_noncached(tmp);
+ if (!(caching_flags & TTM_PL_FLAG_CACHED))
+ tmp = pgprot_noncached(tmp);
#endif
- return tmp;}
-
- static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
- unsigned long bus_base,
- unsigned long bus_offset,
- unsigned long bus_size,
- struct ttm_bo_kmap_obj *map) {
- struct ttm_bo_device * bdev = bo->bdev;
- struct ttm_mem_reg * mem = &bo->mem;
- struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
- map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);} else {
- map->bo_kmap_type = ttm_bo_map_iomap;
+ return tmp;
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+ unsigned long bus_base,
+ unsigned long bus_offset,
+ unsigned long bus_size,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_bo_device * bdev = bo->bdev;
+ struct ttm_mem_reg * mem = &bo->mem;
+ struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ map->bo_kmap_type = ttm_bo_map_premapped;
+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ } else {
+ map->bo_kmap_type = ttm_bo_map_iomap;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
- if (mem->flags & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
- else
- map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+ if (mem->flags & TTM_PL_FLAG_WC)
+ map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
+ else
+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
#else
- map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
#endif
- }
- return (!map->virtual) ? -ENOMEM : 0;}
-
- static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
- unsigned long start_page,
- unsigned long num_pages,
- struct ttm_bo_kmap_obj *map) {
- struct ttm_mem_reg * mem = &bo->mem; pgprot_t prot;
- struct ttm_tt * ttm = bo->ttm; struct page * d; int i; BUG_ON(!ttm);
- if (num_pages == 1 && (mem->flags & TTM_PL_FLAG_CACHED)) {
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ unsigned long start_page,
+ unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_reg * mem = &bo->mem;
+ pgprot_t prot;
+ struct ttm_tt * ttm = bo->ttm;
+ struct page * d;
+ int i;
+ BUG_ON(!ttm);
+ if (num_pages == 1 && (mem->flags & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
*/
-
- map->bo_kmap_type = ttm_bo_map_kmap;
- map->page = ttm_tt_get_page(ttm, start_page);
- map->virtual = kmap(map->page);}
- else {
+ map->bo_kmap_type = ttm_bo_map_kmap;
+ map->page = ttm_tt_get_page(ttm, start_page);
+ map->virtual = kmap(map->page);
+ } else {
/*
* Populate the part we're mapping;
*/
+ for (i = start_page; i < start_page + num_pages; ++i) {
+ d = ttm_tt_get_page(ttm, i);
+ if (!d)
+ return -ENOMEM;
+ }
+
+ /*
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
+ */
+ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
+ PAGE_KERNEL :
+ ttm_io_prot(mem->flags, PAGE_KERNEL);
+ map->bo_kmap_type = ttm_bo_map_vmap;
+ map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
- for (i = start_page; i < start_page + num_pages; ++i) {
- d = ttm_tt_get_page(ttm, i); if (!d)
- return -ENOMEM;}
-
- /*
- * We need to use vmap to get the desired page protection
- * or to make the buffer object look contigous.
- */
-
- prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL :
- ttm_io_prot(mem->flags, PAGE_KERNEL);
- map->bo_kmap_type = ttm_bo_map_vmap;
- map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);}
- return (!map->virtual) ? -ENOMEM : 0;}
-
- int ttm_bo_kmap(struct ttm_buffer_object *bo,
- unsigned long start_page, unsigned long num_pages,
- struct ttm_bo_kmap_obj *map) {
- int ret; unsigned long bus_base; unsigned long bus_offset;
- unsigned long bus_size; BUG_ON(!list_empty(&bo->swap));
- map->virtual = NULL; if (num_pages > bo->num_pages)
- return -EINVAL; if (start_page > bo->num_pages)
- return -EINVAL;
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+ unsigned long start_page, unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ int ret;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ BUG_ON(!list_empty(&bo->swap));
+ map->virtual = NULL;
+ if (num_pages > bo->num_pages)
+ return -EINVAL;
+ if (start_page > bo->num_pages)
+ return -EINVAL;
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size); if (ret)
- return ret; if (bus_size == 0) {
- return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);}
- else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);}
+ &bus_offset, &bus_size);
+ if (ret)
+ return ret;
+ if (bus_size == 0) {
+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+ } else {
+ bus_offset += start_page << PAGE_SHIFT;
+ bus_size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
}
+}
+
- void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) {
- if (!map->virtual)
- return; switch (map->bo_kmap_type) {
-case ttm_bo_map_iomap:
-iounmap(map->virtual); break; case ttm_bo_map_vmap:
-vunmap(map->virtual); break; case ttm_bo_map_kmap:
-kunmap(map->page); break; case ttm_bo_map_premapped:
-break; default:
- BUG();}
- map->virtual = NULL; map->page = NULL;}
-
- int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t * prot) {
- struct ttm_mem_reg * mem = &bo->mem;
- struct ttm_bo_device * bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size); if (ret)
- return -EINVAL; if (bus_size != 0)
- * pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn =
- page_to_pfn(ttm_tt_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
- *prot =
- (mem->flags & TTM_PL_FLAG_CACHED) ? PAGE_KERNEL : ttm_io_prot(mem->
- flags,
- PAGE_KERNEL);
- return 0;}
-
- int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- void *sync_obj,
- void *sync_obj_arg,
- int evict, int no_wait,
- struct ttm_mem_reg *new_mem) {
- struct ttm_bo_device * bdev = bo->bdev;
- struct ttm_bo_driver * driver = bdev->driver;
- struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
- struct ttm_mem_reg * old_mem = &bo->mem;
- int ret;
- uint32_t save_flags = old_mem->flags;
- uint32_t save_proposed_flags = old_mem->proposed_flags;
- struct ttm_buffer_object * old_obj; if (bo->sync_obj)
- driver->sync_obj_unref(&bo->sync_obj);
- bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = sync_obj_arg; if (evict) {
- ret = ttm_bo_wait(bo, 0, 0, 0); if (ret)
- return ret;
- ttm_bo_free_old_node(bo);
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
- ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL;}
- }
- else {
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+ if (!map->virtual)
+ return;
+ switch (map->bo_kmap_type) {
+ case ttm_bo_map_iomap:
+ iounmap(map->virtual);
+ break;
+ case ttm_bo_map_vmap:
+ vunmap(map->virtual);
+ break;
+ case ttm_bo_map_kmap:
+ kunmap(map->page);
+ break;
+ case ttm_bo_map_premapped:
+ break;
+ default:
+ BUG();
+ }
+ map->virtual = NULL;
+ map->page = NULL;
+}
+
- /* This should help pipeline ordinary buffer moves.
- *
- * Hang old buffer memory on a new buffer object,
- * and leave it to be released when the GPU
- * operation has completed.
- */
+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
+ unsigned long dst_offset,
+ unsigned long *pfn, pgprot_t * prot)
+{
+ struct ttm_mem_reg * mem = &bo->mem;
+ struct ttm_bo_device * bdev = bo->bdev;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ unsigned long bus_base;
+ int ret;
+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
+ &bus_size);
+ if (ret)
+ return -EINVAL;
+ if (bus_size != 0)
+ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
+ else if (!bo->ttm)
+ return -EINVAL;
+ else
+ *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
+ *prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
+ PAGE_KERNEL : ttm_io_prot(mem->flags, PAGE_KERNEL);
+ return 0;
+}
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ void *sync_obj,
+ void *sync_obj_arg,
+ int evict, int no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device * bdev = bo->bdev;
+ struct ttm_bo_driver * driver = bdev->driver;
+ struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
+ struct ttm_mem_reg * old_mem = &bo->mem;
+ int ret;
+ uint32_t save_flags = old_mem->flags;
+ uint32_t save_proposed_flags = old_mem->proposed_flags;
+ struct ttm_buffer_object * old_obj;
+ if (bo->sync_obj)
+ driver->sync_obj_unref(&bo->sync_obj);
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = sync_obj_arg;
+ if (evict) {
+ ret = ttm_bo_wait(bo, 0, 0, 0);
+ if (ret)
+ return ret;
+ ttm_bo_free_old_node(bo);
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ } else {
+
+ /* This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+ ret = ttm_buffer_object_transfer(bo, &old_obj);
+ if (ret)
+ return ret;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ old_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
+ ttm_bo_unreserve(old_obj);
+ }
- ret = ttm_buffer_object_transfer(bo, &old_obj); if (ret)
- return ret; if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
- old_obj->ttm = NULL;
- else
- bo->ttm = NULL;
- bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
- ttm_bo_unreserve(old_obj);}
-
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
- old_mem->proposed_flags = save_proposed_flags;
- ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
- return 0;}
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ old_mem->proposed_flags = save_proposed_flags;
+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
+ return 0;
+}
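
For context on the reformatted code above: the functions touched here are TTM's CPU-mapping helpers. ttm_bo_kmap() fills a struct ttm_bo_kmap_obj with a CPU-visible mapping of part of a buffer object, choosing kmap, vmap, ioremap, or a premapped aperture internally, and ttm_bo_kunmap() undoes whichever mapping type was chosen. Below is a minimal driver-side sketch of that pairing, assuming a valid, reserved struct ttm_buffer_object *bo and the API exactly as it appears in this diff; the helper name example_clear_first_page is hypothetical, and real callers also need reservation and fence handling that is not shown.

/* Hypothetical usage sketch: map one page of a buffer object,
 * zero it from the CPU, then release the mapping. */
static int example_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	int ret;

	/* Map a single page starting at page 0. ttm_bo_kmap() picks
	 * kmap/vmap/ioremap/premapped based on where the bo lives and
	 * records the choice in map.bo_kmap_type. */
	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	memset(map.virtual, 0, PAGE_SIZE);

	/* ttm_bo_kunmap() dispatches on bo_kmap_type to undo the
	 * mapping (iounmap, vunmap, kunmap, or nothing). */
	ttm_bo_kunmap(&map);
	return 0;
}

Note that when the buffer lives in an I/O aperture, map.virtual comes from ioremap(), so a real driver would reach for memset_io() there; the plain memset() above is only safe for the system-memory (kmap/vmap) cases.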