summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJérôme Glisse <jglisse@redhat.com>2017-08-04 19:27:11 -0400
committerJérôme Glisse <jglisse@redhat.com>2017-08-23 16:41:39 -0400
commit18b15be647a05056e4c5f239e67ec00a208afe42 (patch)
treeba730dff5e62c0771b4fa91355eda8503d5ad814
parent79e32d5b7ed6c61fd8c1dbbba3791f23c8f2466b (diff)
drm/nouveau/core/mm: convert to u64 to support bigger address space
CPU process address space on 64-bit architectures is 47 bits or bigger, hence we need to convert nvkm_mm to use 64-bit integers to support a bigger address space. Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/mm.c34
2 files changed, 25 insertions, 25 deletions
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 7bd4897a8a2a..a1f57ffb4163 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -12,15 +12,15 @@ struct nvkm_mm_node {
#define NVKM_MM_TYPE_NONE 0x00
#define NVKM_MM_TYPE_HOLE 0xff
u8 type;
- u32 offset;
- u32 length;
+ u64 offset;
+ u64 length;
};
struct nvkm_mm {
struct list_head nodes;
struct list_head free;
- u32 block_size;
+ u64 block_size;
int heap_nodes;
};
@@ -30,12 +30,12 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
return mm->heap_nodes;
}
-int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_init(struct nvkm_mm *, u64 offset, u64 length, u64 block);
int nvkm_mm_fini(struct nvkm_mm *);
-int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
- u32 size_min, u32 align, struct nvkm_mm_node **);
-int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
- u32 size_min, u32 align, struct nvkm_mm_node **);
+int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u64 size_max,
+ u64 size_min, u64 align, struct nvkm_mm_node **);
+int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u64 size_max,
+ u64 size_min, u64 align, struct nvkm_mm_node **);
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
void nvkm_mm_dump(struct nvkm_mm *, const char *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 5c7891234eea..02c6cff8f665 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -34,12 +34,12 @@ nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
pr_err("nvkm: %s\n", header);
pr_err("nvkm: node list:\n");
list_for_each_entry(node, &mm->nodes, nl_entry) {
- pr_err("nvkm: \t%08x %08x %d\n",
+ pr_err("nvkm: \t%016llx %016llx %d\n",
node->offset, node->length, node->type);
}
pr_err("nvkm: free list:\n");
list_for_each_entry(node, &mm->free, fl_entry) {
- pr_err("nvkm: \t%08x %08x %d\n",
+ pr_err("nvkm: \t%016llx %016llx %d\n",
node->offset, node->length, node->type);
}
}
@@ -83,7 +83,7 @@ nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
}
static struct nvkm_mm_node *
-region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
+region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u64 size)
{
struct nvkm_mm_node *b;
@@ -108,13 +108,13 @@ region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
}
int
-nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
- u32 align, struct nvkm_mm_node **pnode)
+nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u64 size_max, u64 size_min,
+ u64 align, struct nvkm_mm_node **pnode)
{
struct nvkm_mm_node *prev, *this, *next;
- u32 mask = align - 1;
- u32 splitoff;
- u32 s, e;
+ u64 mask = align - 1;
+ u64 splitoff;
+ u64 s, e;
BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
@@ -158,7 +158,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
}
static struct nvkm_mm_node *
-region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
+region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u64 size)
{
struct nvkm_mm_node *b;
@@ -183,18 +183,18 @@ region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
}
int
-nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
- u32 align, struct nvkm_mm_node **pnode)
+nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u64 size_max, u64 size_min,
+ u64 align, struct nvkm_mm_node **pnode)
{
struct nvkm_mm_node *prev, *this, *next;
- u32 mask = align - 1;
+ u64 mask = align - 1;
BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
list_for_each_entry_reverse(this, &mm->free, fl_entry) {
- u32 e = this->offset + this->length;
- u32 s = this->offset;
- u32 c = 0, a;
+ u64 e = this->offset + this->length;
+ u64 s = this->offset;
+ u64 c = 0, a;
if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
if (this->heap != heap)
continue;
@@ -237,10 +237,10 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
}
int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u64 offset, u64 length, u64 block)
{
struct nvkm_mm_node *node, *prev;
- u32 next;
+ u64 next;
if (nvkm_mm_initialised(mm)) {
prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);