Diffstat (limited to 'drivers/gpu/drm/drm_buddy.c')
-rw-r--r--	drivers/gpu/drm/drm_buddy.c	420
1 file changed, 335 insertions(+), 85 deletions(-)
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index d60878bc9c20..72f52f293249 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -211,7 +211,7 @@ static int split_block(struct drm_buddy *mm,
}
static struct drm_buddy_block *
-get_buddy(struct drm_buddy_block *block)
+__get_buddy(struct drm_buddy_block *block)
{
struct drm_buddy_block *parent;
@@ -225,6 +225,23 @@ get_buddy(struct drm_buddy_block *block)
return parent->left;
}
+/**
+ * drm_get_buddy - get the buddy block
+ *
+ * @block: DRM buddy block
+ *
+ * Returns the corresponding buddy block for @block, or NULL
+ * if this is a root block and can't be merged further.
+ * The caller must provide its own locking to protect against
+ * concurrent allocate and free operations.
+ */
+struct drm_buddy_block *
+drm_get_buddy(struct drm_buddy_block *block)
+{
+ return __get_buddy(block);
+}
+EXPORT_SYMBOL(drm_get_buddy);
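
Illustrative sketch (not part of the patch): in a binary buddy tree the buddy of a left child is its parent's right child and vice versa, which is exactly what __get_buddy() looks up via the parent pointer. The classic pointer-free identity below gives the same answer from address arithmetic alone, assuming power-of-two block sizes.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone demo: the buddy of a block sits at
 * offset XOR size, since splitting only ever toggles that bit. */
static uint64_t buddy_offset(uint64_t offset, uint64_t block_size)
{
	return offset ^ block_size;
}

int main(void)
{
	/* A 4 KiB block at 0x3000 has its buddy at 0x2000. */
	printf("buddy at 0x%llx\n",
	       (unsigned long long)buddy_offset(0x3000, 0x1000));
	return 0;
}
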
+
static void __drm_buddy_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
@@ -233,7 +250,7 @@ static void __drm_buddy_free(struct drm_buddy *mm,
while ((parent = block->parent)) {
struct drm_buddy_block *buddy;
- buddy = get_buddy(block);
+ buddy = __get_buddy(block);
if (!drm_buddy_block_is_free(buddy))
break;
@@ -282,34 +299,134 @@ void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
}
EXPORT_SYMBOL(drm_buddy_free_list);
-/**
- * drm_buddy_alloc_blocks - allocate power-of-two blocks
- *
- * @mm: DRM buddy manager to allocate from
- * @order: size of the allocation
- *
- * The order value here translates to:
- *
- * 0 = 2^0 * mm->chunk_size
- * 1 = 2^1 * mm->chunk_size
- * 2 = 2^2 * mm->chunk_size
- *
- * Returns:
- * allocated ptr to the &drm_buddy_block on success
- */
-struct drm_buddy_block *
-drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
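
Both helpers take *inclusive* end offsets, which is why alloc_range_bias() below converts the caller's exclusive end with end = end - 1 before walking the tree. A minimal userspace sketch of the semantics, with hypothetical offsets:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool overlaps(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
	return s1 <= e2 && e1 >= s2;
}

static bool contains(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
	return s1 <= s2 && e1 >= e2;
}

int main(void)
{
	/* [0x0000, 0x0fff] and [0x0fff, 0x1ffe] share exactly one byte. */
	assert(overlaps(0x0000, 0x0fff, 0x0fff, 0x1ffe));
	/* [0x0000, 0x1fff] fully contains [0x1000, 0x17ff]. */
	assert(contains(0x0000, 0x1fff, 0x1000, 0x17ff));
	return 0;
}
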
+
+static struct drm_buddy_block *
+alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *buddy;
+ LIST_HEAD(dfs);
+ int err;
+ int i;
+
+ end = end - 1;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ do {
+ u64 block_start;
+ u64 block_end;
+
+ block = list_first_entry_or_null(&dfs,
+ struct drm_buddy_block,
+ tmp_link);
+ if (!block)
+ break;
+
+ list_del(&block->tmp_link);
+
+ if (drm_buddy_block_order(block) < order)
+ continue;
+
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+ if (!overlaps(start, end, block_start, block_end))
+ continue;
+
+ if (drm_buddy_block_is_allocated(block))
+ continue;
+
+ if (contains(start, end, block_start, block_end) &&
+ order == drm_buddy_block_order(block)) {
+ /*
+ * Find the free block within the range.
+ */
+ if (drm_buddy_block_is_free(block))
+ return block;
+
+ continue;
+ }
+
+ if (!drm_buddy_block_is_split(block)) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+ }
+
+ list_add(&block->right->tmp_link, &dfs);
+ list_add(&block->left->tmp_link, &dfs);
+ } while (1);
+
+ return ERR_PTR(-ENOSPC);
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = __get_buddy(block);
+ if (buddy &&
+ (drm_buddy_block_is_free(block) &&
+ drm_buddy_block_is_free(buddy)))
+ __drm_buddy_free(mm, block);
+ return ERR_PTR(err);
+}
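
Illustrative note (not part of the patch): list_add() pushes at the head of the list and list_first_entry_or_null() pops from the head, so pushing block->right before block->left makes the walk visit left children first, i.e. the range search proceeds from the lowest offsets upward. A minimal userspace sketch of that stack discipline, using a hypothetical three-node tree:

#include <stdio.h>

struct blk {
	const char *name;
	struct blk *left, *right;
};

int main(void)
{
	struct blk l = { "left", NULL, NULL }, r = { "right", NULL, NULL };
	struct blk root = { "root", &l, &r };
	struct blk *stack[8];
	int top = 0;

	stack[top++] = &root;
	while (top) {
		struct blk *b = stack[--top];

		printf("visit %s\n", b->name);
		if (b->right)
			stack[top++] = b->right;	/* pushed first */
		if (b->left)
			stack[top++] = b->left;		/* popped first */
	}
	return 0;
}
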
+
+static struct drm_buddy_block *
+get_maxblock(struct list_head *head)
+{
+ struct drm_buddy_block *max_block = NULL, *node;
+
+ max_block = list_first_entry_or_null(head,
+ struct drm_buddy_block,
+ link);
+ if (!max_block)
+ return NULL;
+
+ list_for_each_entry(node, head, link) {
+ if (drm_buddy_block_offset(node) >
+ drm_buddy_block_offset(max_block))
+ max_block = node;
+ }
+
+ return max_block;
+}
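
get_maxblock() linearly scans one free list for the block with the highest offset, which is what serves DRM_BUDDY_TOPDOWN_ALLOCATION. A hedged usage sketch, assuming a configured struct drm_buddy *mm and the drm_buddy_alloc_blocks() signature introduced later in this patch (kernel context, illustrative only):

LIST_HEAD(blocks);
int err;

/* Place an 8 MiB allocation as high in the address space as
 * possible, e.g. to keep low, CPU-visible addresses free. */
err = drm_buddy_alloc_blocks(mm, 0, mm->size, 8 << 20, mm->chunk_size,
			     &blocks, DRM_BUDDY_TOPDOWN_ALLOCATION);
if (err)
	return err;
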
+
+static struct drm_buddy_block *
+alloc_from_freelist(struct drm_buddy *mm,
+ unsigned int order,
+ unsigned long flags)
{
struct drm_buddy_block *block = NULL;
unsigned int i;
int err;
for (i = order; i <= mm->max_order; ++i) {
- block = list_first_entry_or_null(&mm->free_list[i],
- struct drm_buddy_block,
- link);
- if (block)
- break;
+ if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+ block = get_maxblock(&mm->free_list[i]);
+ if (block)
+ break;
+ } else {
+ block = list_first_entry_or_null(&mm->free_list[i],
+ struct drm_buddy_block,
+ link);
+ if (block)
+ break;
+ }
}
if (!block)
@@ -320,78 +437,29 @@ drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
while (i != order) {
err = split_block(mm, block);
if (unlikely(err))
- goto out_free;
+ goto err_undo;
- /* Go low */
- block = block->left;
+ block = block->right;
i--;
}
-
- mark_allocated(block);
- mm->avail -= drm_buddy_block_size(mm, block);
- kmemleak_update_trace(block);
return block;
-out_free:
+err_undo:
if (i != order)
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
-EXPORT_SYMBOL(drm_buddy_alloc_blocks);
-
-static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= e2 && e1 >= s2;
-}
-static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= s2 && e1 >= e2;
-}
-
-/**
- * drm_buddy_alloc_range - allocate range
- *
- * @mm: DRM buddy manager to allocate from
- * @blocks: output list head to add allocated blocks
- * @start: start of the allowed range for this block
- * @size: size of the allocation
- *
- * Intended for pre-allocating portions of the address space, for example to
- * reserve a block for the initial framebuffer or similar, hence the expectation
- * here is that drm_buddy_alloc_blocks() is still the main vehicle for
- * allocations, so if that's not the case then the drm_mm range allocator is
- * probably a much better fit, and so you should probably go use that instead.
- *
- * Note that it's safe to chain together multiple alloc_ranges
- * with the same blocks list
- *
- * Returns:
- * 0 on success, error code on failure.
- */
-int drm_buddy_alloc_range(struct drm_buddy *mm,
- struct list_head *blocks,
- u64 start, u64 size)
+static int __alloc_range(struct drm_buddy *mm,
+ struct list_head *dfs,
+ u64 start, u64 size,
+ struct list_head *blocks)
{
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(allocated);
- LIST_HEAD(dfs);
u64 end;
int err;
- int i;
-
- if (size < mm->chunk_size)
- return -EINVAL;
-
- if (!IS_ALIGNED(size | start, mm->chunk_size))
- return -EINVAL;
-
- if (range_overflows(start, size, mm->size))
- return -EINVAL;
-
- for (i = 0; i < mm->n_roots; ++i)
- list_add_tail(&mm->roots[i]->tmp_link, &dfs);
end = start + size - 1;
@@ -399,7 +467,7 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
u64 block_start;
u64 block_end;
- block = list_first_entry_or_null(&dfs,
+ block = list_first_entry_or_null(dfs,
struct drm_buddy_block,
tmp_link);
if (!block)
@@ -436,8 +504,8 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
goto err_undo;
}
- list_add(&block->right->tmp_link, &dfs);
- list_add(&block->left->tmp_link, &dfs);
+ list_add(&block->right->tmp_link, dfs);
+ list_add(&block->left->tmp_link, dfs);
} while (1);
list_splice_tail(&allocated, blocks);
@@ -449,7 +517,7 @@ err_undo:
* bigger is better, so make sure we merge everything back before we
* free the allocated blocks.
*/
- buddy = get_buddy(block);
+ buddy = __get_buddy(block);
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
@@ -459,7 +527,189 @@ err_free:
drm_buddy_free_list(mm, &allocated);
return err;
}
-EXPORT_SYMBOL(drm_buddy_alloc_range);
+
+static int __drm_buddy_alloc_range(struct drm_buddy *mm,
+ u64 start,
+ u64 size,
+ struct list_head *blocks)
+{
+ LIST_HEAD(dfs);
+ int i;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ return __alloc_range(mm, &dfs, start, size, blocks);
+}
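
__drm_buddy_alloc_range() preserves the old drm_buddy_alloc_range() behaviour by seeding the DFS with every root; drm_buddy_alloc_blocks() (below) takes this path whenever start + size == end, i.e. the caller wants that exact range. A hedged reservation sketch, kernel context and a configured mm assumed (illustrative only):

LIST_HEAD(reserved);
u64 start = 0;		/* hypothetical region to carve out */
u64 size = 16 << 20;
int err;

/* start + size == end, so this requests exactly [start, start + size). */
err = drm_buddy_alloc_blocks(mm, start, start + size, size,
			     mm->chunk_size, &reserved,
			     DRM_BUDDY_RANGE_ALLOCATION);
if (err)
	return err;
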
+
+/**
+ * drm_buddy_block_trim - free unused pages
+ *
+ * @mm: DRM buddy manager
+ * @new_size: original size requested
+ * @blocks: Input and output list of allocated blocks.
+ * MUST contain a single block as input to be trimmed.
+ * On success will contain the newly allocated blocks
+ * making up the @new_size. Blocks always appear in
+ * ascending order.
+ *
+ * For contiguous allocations, the size is rounded up to the nearest
+ * power of two; drivers consume only the *actual* size, so the
+ * remaining portion is unused and can optionally be freed with this
+ * function.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 new_size,
+ struct list_head *blocks)
+{
+ struct drm_buddy_block *parent;
+ struct drm_buddy_block *block;
+ LIST_HEAD(dfs);
+ u64 new_start;
+ int err;
+
+ if (!list_is_singular(blocks))
+ return -EINVAL;
+
+ block = list_first_entry(blocks,
+ struct drm_buddy_block,
+ link);
+
+ if (WARN_ON(!drm_buddy_block_is_allocated(block)))
+ return -EINVAL;
+
+ if (new_size > drm_buddy_block_size(mm, block))
+ return -EINVAL;
+
+ if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
+ return -EINVAL;
+
+ if (new_size == drm_buddy_block_size(mm, block))
+ return 0;
+
+ list_del(&block->link);
+ mark_free(mm, block);
+ mm->avail += drm_buddy_block_size(mm, block);
+
+ /* Prevent recursively freeing this node */
+ parent = block->parent;
+ block->parent = NULL;
+
+ new_start = drm_buddy_block_offset(block);
+ list_add(&block->tmp_link, &dfs);
+ err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
+ if (err) {
+ mark_allocated(block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ list_add(&block->link, blocks);
+ }
+
+ block->parent = parent;
+ return err;
+}
+EXPORT_SYMBOL(drm_buddy_block_trim);
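
A hedged usage sketch of the trim path, kernel context and a configured mm assumed (illustrative only): a contiguous request is rounded up to a power of two, then the unused tail is handed back to the allocator.

LIST_HEAD(blocks);
u64 asked = 9 << 20;				/* caller needs 9 MiB */
u64 rounded = roundup_pow_of_two(asked);	/* one 16 MiB block */
int err;

err = drm_buddy_alloc_blocks(mm, 0, mm->size, rounded,
			     mm->chunk_size, &blocks, 0);
if (!err)
	/* Return the unused 7 MiB tail to the allocator. */
	err = drm_buddy_block_trim(mm, asked, &blocks);
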
+
+/**
+ * drm_buddy_alloc_blocks - allocate power-of-two blocks
+ *
+ * @mm: DRM buddy manager to allocate from
+ * @start: start of the allowed range for this block
+ * @end: end of the allowed range for this block
+ * @size: size of the allocation
+ * @min_page_size: alignment of the allocation
+ * @blocks: output list head to add allocated blocks
+ * @flags: DRM_BUDDY_*_ALLOCATION flags
+ *
+ * alloc_range_bias() is called when range restrictions apply;
+ * it traverses the tree and returns the desired block.
+ *
+ * alloc_from_freelist() is called when *no* range restrictions
+ * are enforced; it picks a block straight from the freelist.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags)
+{
+ struct drm_buddy_block *block = NULL;
+ unsigned int min_order, order;
+ unsigned long pages;
+ LIST_HEAD(allocated);
+ int err;
+
+ if (size < mm->chunk_size)
+ return -EINVAL;
+
+ if (min_page_size < mm->chunk_size)
+ return -EINVAL;
+
+ if (!is_power_of_2(min_page_size))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(start | end | size, mm->chunk_size))
+ return -EINVAL;
+
+ if (end > mm->size)
+ return -EINVAL;
+
+ if (range_overflows(start, size, mm->size))
+ return -EINVAL;
+
+ /* Actual range allocation */
+ if (start + size == end)
+ return __drm_buddy_alloc_range(mm, start, size, blocks);
+
+ pages = size >> ilog2(mm->chunk_size);
+ order = fls(pages) - 1;
+ min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+
+ do {
+ order = min(order, (unsigned int)fls(pages) - 1);
+ BUG_ON(order > mm->max_order);
+ BUG_ON(order < min_order);
+
+ do {
+ if (flags & DRM_BUDDY_RANGE_ALLOCATION)
+ /* Allocate traversing within the range */
+ block = alloc_range_bias(mm, start, end, order);
+ else
+ /* Allocate from freelist */
+ block = alloc_from_freelist(mm, order, flags);
+
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+ } while (1);
+
+ mark_allocated(block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ kmemleak_update_trace(block);
+ list_add_tail(&block->link, &allocated);
+
+ pages -= BIT(order);
+
+ if (!pages)
+ break;
+ } while (1);
+
+ list_splice_tail(&allocated, blocks);
+ return 0;
+
+err_free:
+ drm_buddy_free_list(mm, &allocated);
+ return err;
+}
+EXPORT_SYMBOL(drm_buddy_alloc_blocks);
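
To make the order-stepping loop concrete, consider a 10 MiB request with a 4 KiB chunk_size: pages = 2560, so fls(pages) - 1 = 11 yields an 8 MiB block first, and the remaining 512 pages are satisfied with one order-9 (2 MiB) block. A standalone userspace sketch of just that arithmetic (min_order clamping and the order-decrementing failure fallback are omitted; fls_demo() is a hypothetical stand-in for the kernel's fls()):

#include <stdio.h>

static int fls_demo(unsigned long x)	/* stand-in for fls() */
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long pages = (10UL << 20) >> 12;	/* 10 MiB / 4 KiB */

	while (pages) {
		int order = fls_demo(pages) - 1;

		printf("allocate order %d (%lu KiB)\n",
		       order, (1UL << order) * 4);
		pages -= 1UL << order;
	}
	return 0;
}
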
/**
* drm_buddy_block_print - print block information