author    Keith Whitwell <keith@tungstengraphics.com>    2006-01-26 14:50:02 +0000
committer Keith Whitwell <keith@tungstengraphics.com>    2006-01-26 14:50:02 +0000
commit    33ca04f3797ec93ba6d4e437d64a15475aaa96c1 (patch)
tree      b867203eaaa0b26a71f2204794c6cf8ee91b74f6
parent    398cb30c72cbc44723801d3e87b6b8571dfdebac (diff)
Expand the buffer manager to include a notion of multiple pools within
a memory space.  Allow some pools to be excluded from the upload/evict
processing, meaning that any buffers within those pools are
effectively fixed.  Add a mechanism to create buffers in those pools.

This allows the legacy fixed front/depth/back buffers to be
represented in this scheme and will allow other future pinned buffers
to be allocated from fixed pools in such a way that they cannot
fragment the rest of the texture memory.
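
A minimal sketch of the intended driver-side usage of the new API.  The
offsets, sizes and map pointer below are illustrative, not taken from
this patch; compare i915CreateContext() and intel_region_create_static()
in the hunks that follow.

#include "bufmgr.h"

/* Sketch only: create one managed texture pool and one fixed pool
 * covering a legacy static buffer, then bind a buffer to the whole
 * fixed pool with bmBufferStatic().
 */
static void example_pool_setup(struct bufmgr *bm, void *agp_map)
{
   int tex_pool, front_pool;
   unsigned front_buf;

   /* Managed pool: buffers here take part in upload/evict processing. */
   tex_pool = bmInitPool(bm, 0x0, agp_map, 16 * 1024 * 1024, BM_MEM_AGP);

   /* Fixed pool: flags are inherited by every buffer allocated from
    * it, so nothing in it can be uploaded, evicted or moved.
    */
   front_pool = bmInitPool(bm, 0x1000000, agp_map + 0x1000000,
                           2 * 1024 * 1024,
                           BM_MEM_AGP | BM_NO_UPLOAD |
                           BM_NO_EVICT | BM_NO_MOVE);
   if (tex_pool < 0 || front_pool < 0)
      return;   /* out of pool slots (BM_POOL_MAX) */

   /* Bind a buffer to all of the fixed pool's memory. */
   bmGenBuffers(bm, 1, &front_buf);
   bmBufferStatic(bm, front_buf, 2 * 1024 * 1024, front_pool);
}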
-rw-r--r--  src/mesa/drivers/dri/i915/bufmgr.h              |  68
-rw-r--r--  src/mesa/drivers/dri/i915/bufmgr_fake.c         | 349
-rw-r--r--  src/mesa/drivers/dri/i915/i915_context.c        |  52
-rw-r--r--  src/mesa/drivers/dri/i915/i915_context.h        |   1
-rw-r--r--  src/mesa/drivers/dri/i915/intel_context.h       |   9
-rw-r--r--  src/mesa/drivers/dri/i915/intel_regions.c       |  50
-rw-r--r--  src/mesa/drivers/dri/i915/intel_regions.h       |   8
-rw-r--r--  src/mesa/drivers/dri/i915/intel_screen.h        |   3
-rw-r--r--  src/mesa/drivers/dri/i915/intel_tex_validate.c  |   8
9 files changed, 378 insertions(+), 170 deletions(-)
diff --git a/src/mesa/drivers/dri/i915/bufmgr.h b/src/mesa/drivers/dri/i915/bufmgr.h
index c999a4ca4c6..1a52fdcfc19 100644
--- a/src/mesa/drivers/dri/i915/bufmgr.h
+++ b/src/mesa/drivers/dri/i915/bufmgr.h
@@ -14,29 +14,54 @@ struct bm_buffer_list;
struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel );
-
/* struct bufmgr *bmCreate( ... ); */
/* struct bufmgr *bmAttach( ... ); */
-void bmInitPool( struct bufmgr *,
- unsigned pool,
- unsigned long low_offset,
- unsigned long high_offset,
- void *virtual_base );
+/* Define an address space. Doesn't really do anything, but the
+ * information could be used to validate the bmInitPool() requests.
+ */
+void bmInitMemType( struct bufmgr *,
+ unsigned mem_type,
+ unsigned long size );
+
+
+/* Create a pool of a given memory type, from a certain offset and a
+ * certain size.
+ *
+ * Also passed in is a virtual pointer to the start of the pool. This
+ * is useful in the faked-out version in i915 so that MapBuffer can
+ * return a pointer to a buffer residing in AGP space.
+ *
+ * Flags passed into a pool are inherited by all buffers allocated in
+ * that pool. So pools representing the static front,back,depth
+ * buffer allocations should have MEM_AGP|NO_UPLOAD|NO_EVICT|NO_MOVE to match
+ * the behaviour of the legacy allocations.
+ *
+ * Returns -1 for failure, pool number for success.
+ */
+int bmInitPool( struct bufmgr *,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ unsigned flags);
+
+/* Flags for validate and other calls. If both NO_UPLOAD and NO_EVICT
+ * are specified, ValidateBuffers is essentially a query.
+ */
#define BM_MEM_LOCAL 0x1
#define BM_MEM_AGP 0x2
-#define BM_MEM_VRAM 0x4 /* not used */
+#define BM_MEM_VRAM 0x4 /* not yet used */
+#define BM_WRITE 0x8 /* not yet used */
+#define BM_READ 0x10 /* not yet used */
+#define BM_NO_UPLOAD 0x20
+#define BM_NO_EVICT 0x40
+#define BM_NO_MOVE 0x80 /* not yet used */
+#define BM_NO_ALLOC 0x100 /* legacy "fixed" buffers only */
-#define BM_WRITE 0x100 /* not used */
-#define BM_READ 0x200 /* not used */
+#define BM_MEM_MASK (BM_MEM_LOCAL|BM_MEM_AGP|BM_MEM_VRAM)
-/* Flags for validate. If both NO_UPLOAD and NO_EVICT are specified,
- * ValidateBuffers is essentially a query.
- */
-#define BM_NO_UPLOAD 0x1
-#define BM_NO_EVICT 0x2
/* Stick closely to ARB_vbo semantics - they're well defined and
@@ -46,6 +71,18 @@ void bmInitPool( struct bufmgr *,
void bmGenBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
void bmDeleteBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
+
+/* Hook to inform faked buffer manager about fixed-position
+ * front,depth,back buffers. These may move to a fully memory-managed
+ * scheme, or they may continue to be managed as is.
+ */
+unsigned bmBufferStatic(struct bufmgr *,
+ unsigned buffer,
+ unsigned size,
+ unsigned pool);
+
+
+
/* The driver has more intimate knowledge of the hardware than a GL
 * client would, so the flags here are more proscriptive than the usage
* values in the ARB_vbo interface:
@@ -90,8 +127,7 @@ void bmUnmapBuffer( struct bufmgr *,
*/
struct bm_buffer_list *bmNewBufferList( void );
-void bmAddBuffer( struct bufmgr *,
- struct bm_buffer_list *list,
+void bmAddBuffer( struct bm_buffer_list *list,
unsigned buffer,
unsigned flags,
unsigned *pool_return,
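
For reference, the validate/fence round trip this header implies, as a
hedged sketch; emit_state_using() is a hypothetical stand-in for
whatever commands the driver emits between the two calls.

#include "bufmgr.h"

extern void emit_state_using(unsigned offset);   /* hypothetical */

static int example_validate(struct bufmgr *bm, unsigned tex_buffer)
{
   struct bm_buffer_list *list = bmNewBufferList();
   unsigned offset, memtype;

   /* With the new signature, a per-buffer memory type is returned
    * instead of a pool number.
    */
   bmAddBuffer(list, tex_buffer, BM_READ, &memtype, &offset);

   /* BM_MEM_AGP may upload/evict to satisfy the request; adding
    * BM_NO_UPLOAD|BM_NO_EVICT would make this a pure query.
    */
   if (!bmValidateBufferList(bm, list, BM_MEM_AGP)) {
      bmFreeBufferList(list);
      return 0;
   }

   emit_state_using(offset);
   bmFenceBufferList(bm, list);   /* fence the blocks, update pool LRUs */
   bmFreeBufferList(list);
   return 1;
}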
diff --git a/src/mesa/drivers/dri/i915/bufmgr_fake.c b/src/mesa/drivers/dri/i915/bufmgr_fake.c
index 077f3a444c4..76c84649191 100644
--- a/src/mesa/drivers/dri/i915/bufmgr_fake.c
+++ b/src/mesa/drivers/dri/i915/bufmgr_fake.c
@@ -22,6 +22,7 @@ struct _mesa_HashTable;
/* Maximum number of buffers to pass to bmValidateBufferList:
*/
#define BM_LIST_MAX 32
+#define BM_POOL_MAX 8
/* Wrapper around mm.c's mem_block, which understands that you must
@@ -32,7 +33,8 @@ struct _mesa_HashTable;
*/
struct block {
struct block *next, *prev;
- int memType;
+ int mem_type;
+ struct pool *pool; /* BM_MEM_AGP */
struct mem_block *mem; /* BM_MEM_AGP */
unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */
void *virtual;
@@ -45,50 +47,56 @@ struct buffer {
unsigned size;
unsigned alignment;
unsigned mapped;
+ unsigned flags;
struct block *block;
};
struct pool {
- unsigned size;
+ unsigned flags;
struct mem_block *heap;
void *virtual;
struct block lru;
struct block freed;
};
-/* List of buffers to validate:
- */
-struct bm_buffer_list {
- struct buffer *buffer[BM_LIST_MAX];
- unsigned *offset_return[BM_LIST_MAX];
- unsigned nr;
- unsigned need_fence;
-};
-
-
struct bufmgr {
struct intel_context *intel;
- struct buffer buffer_list;
- struct pool pool;
+ struct pool pool[BM_POOL_MAX];
+ unsigned nr_pools;
+
struct _mesa_HashTable *hash;
unsigned buf_nr; /* for generating ids */
+};
+
- unsigned last_fence;
+/* List of buffers to validate:
+ */
+struct bm_buffer_list {
+ struct {
+ unsigned buffer;
+ unsigned *offset_return;
+ unsigned *memtype_return;
+ } elem[BM_LIST_MAX];
+
+ unsigned nr;
};
-static struct block *alloc_agp( struct bufmgr *bm,
- unsigned size,
- unsigned align )
+
+static struct block *alloc_from_pool( struct bufmgr *bm,
+ unsigned pool_nr,
+ unsigned size,
+ unsigned align )
{
+ struct pool *pool = &bm->pool[pool_nr];
struct block *block = (struct block *)calloc(sizeof *block, 1);
if (!block)
return NULL;
- _mesa_printf("alloc_agp 0x%x\n", size);
+ _mesa_printf("alloc_from_pool %d sz 0x%x\n", pool_nr, size);
- block->mem = mmAllocMem(bm->pool.heap, size, align, 0);
+ block->mem = mmAllocMem(pool->heap, size, align, 0);
if (!block->mem) {
_mesa_printf("\t- failed\n");
free(block);
@@ -96,8 +104,9 @@ static struct block *alloc_agp( struct bufmgr *bm,
}
make_empty_list(block);
- block->memType = BM_MEM_AGP;
- block->virtual = bm->pool.virtual + block->mem->ofs;
+ block->pool = pool;
+ block->mem_type = pool->flags & BM_MEM_MASK;
+ block->virtual = pool->virtual + block->mem->ofs;
_mesa_printf("\t- offset 0x%x\n", block->mem->ofs);
return block;
@@ -112,7 +121,7 @@ static struct block *alloc_local( unsigned size )
_mesa_printf("alloc_local 0x%x\n", size);
- block->memType = BM_MEM_LOCAL;
+ block->mem_type = BM_MEM_LOCAL;
block->virtual = malloc(size);
if (!block->virtual) {
free(block);
@@ -123,30 +132,35 @@ static struct block *alloc_local( unsigned size )
}
+
+
static struct block *alloc_block( struct bufmgr *bm,
unsigned size,
unsigned align,
- int memType )
+ int flags )
{
- switch (memType) {
- case BM_MEM_AGP:
- return alloc_agp(bm, size, align);
- case BM_MEM_LOCAL:
- return alloc_local(size);
- default:
- return NULL;
+ GLuint i;
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (bm->pool[i].flags & BM_NO_ALLOC)
+ continue;
+
+ if ((bm->pool[i].flags & flags & BM_MEM_MASK) == 0)
+ continue;
+
+ return alloc_from_pool(bm, i, size, align);
}
+
+ if (flags & BM_MEM_LOCAL)
+ return alloc_local(size);
+
+ return NULL;
}
static int bmAllocMem( struct bufmgr *bm,
- struct buffer *buf,
- unsigned flags ) /* unused */
+ struct buffer *buf )
{
- if (buf->block == NULL)
- buf->block = alloc_block(bm, buf->size, 4, BM_MEM_AGP);
-
- if (buf->block == NULL)
- buf->block = alloc_block(bm, buf->size, 4, BM_MEM_LOCAL);
+ buf->block = alloc_block(bm, buf->size, 4, buf->flags);
if (buf->block)
buf->block->buf = buf;
@@ -162,15 +176,16 @@ static void free_block( struct bufmgr *bm, struct block *block )
if (!block)
return;
- switch (block->memType) {
+ switch (block->mem_type) {
case BM_MEM_AGP:
+ case BM_MEM_VRAM:
if (bmTestFence(bm, block->fence)) {
mmFreeMem(block->mem);
free(block);
}
else {
block->buf = NULL;
- move_to_tail(&bm->pool.freed, block);
+ move_to_tail(&block->pool->freed, block);
}
break;
@@ -189,13 +204,16 @@ static int delayed_free( struct bufmgr *bm )
{
struct block *block, *tmp;
int ret = 0;
-
- foreach_s(block, tmp, &bm->pool.freed ) {
- if (bmTestFence(bm, block->fence)) {
- ret += block->mem->size;
- remove_from_list(block);
- mmFreeMem(block->mem);
- free(block);
+ int i;
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ foreach_s(block, tmp, &bm->pool[i].freed ) {
+ if (bmTestFence(bm, block->fence)) {
+ ret += block->mem->size;
+ remove_from_list(block);
+ mmFreeMem(block->mem);
+ free(block);
+ }
}
}
@@ -206,7 +224,6 @@ static int delayed_free( struct bufmgr *bm )
static int move_buffers( struct bufmgr *bm,
struct buffer *buffers[],
int nr,
- int newMemType,
int flags )
{
struct block *newMem[BM_LIST_MAX];
@@ -219,22 +236,20 @@ static int move_buffers( struct bufmgr *bm,
/* First do all the allocations (or fail):
*/
for (i = 0; i < nr; i++) {
- if (buffers[i]->block->memType != newMemType) {
+ if (!(buffers[i]->block->mem_type & flags)) {
if (flags & BM_NO_UPLOAD)
goto cleanup;
- _mesa_printf("try to move buffer size 0x%x to pool %d\n",
- buffers[i]->size, newMemType);
+ _mesa_printf("try to move buffer %d size 0x%x to pools 0x%x\n",
+ buffers[i]->id, buffers[i]->size, flags & BM_MEM_MASK);
newMem[i] = alloc_block(bm,
buffers[i]->size,
buffers[i]->alignment,
- newMemType);
+ flags & BM_MEM_MASK);
if (!newMem[i])
goto cleanup;
-
- newMem[i]->buf = buffers[i];
}
}
@@ -243,12 +258,15 @@ static int move_buffers( struct bufmgr *bm,
*/
for (i = 0; i < nr; i++) {
if (newMem[i]) {
+ /* XXX: To be replaced with DMA, GTT bind, and other
+ * mechanisms in final version. Memcpy (or sse_memcpy) is
+ * probably pretty good for local->agp uploads.
+ */
memcpy(newMem[i]->virtual,
buffers[i]->block->virtual,
buffers[i]->size);
free_block(bm, buffers[i]->block);
-
buffers[i]->block = newMem[i];
buffers[i]->block->buf = buffers[i];
}
@@ -256,7 +274,7 @@ static int move_buffers( struct bufmgr *bm,
/* Tell hardware that its texture and other caches may be invalid:
*/
- if (nr)
+ if (nr && (flags & (BM_MEM_AGP|BM_MEM_VRAM)))
bmFlushReadCaches(bm);
_mesa_printf("%s - success\n", __FUNCTION__);
@@ -275,26 +293,36 @@ static int move_buffers( struct bufmgr *bm,
}
-static unsigned evict_lru( struct bufmgr *bm )
+static unsigned evict_lru( struct bufmgr *bm,
+ unsigned flags)
{
- int ret;
+ int i;
_mesa_printf("%s\n", __FUNCTION__);
- ret = delayed_free(bm);
- if (ret)
- return ret;
- else {
- struct block *block = bm->pool.lru.next;
- unsigned size = block->buf->size;
+ if (flags & BM_NO_EVICT)
+ return 0;
- if (block == &bm->pool.lru ||
- !bmTestFence(bm, block->fence))
- return 0;
+ /* XXX: this is broken with >1 active pool - all the first pool
+ * will be evicted before starting on the second. Actually, maybe
+ * you want that in some situations...
+ */
+ for (i = 0; i < bm->nr_pools; i++) {
+ if ((bm->pool[i].flags & flags & BM_MEM_MASK) &&
+ !(bm->pool[i].flags & BM_NO_EVICT)) {
+ struct block *block = bm->pool[i].lru.next;
+ unsigned size = block->buf->size;
+
+ if (block == &bm->pool[i].lru ||
+ !bmTestFence(bm, block->fence))
+ return 0;
- move_buffers(bm, &block->buf, 1, BM_MEM_LOCAL, 0);
- return size;
+ move_buffers(bm, &block->buf, 1, BM_MEM_LOCAL);
+ return size;
+ }
}
+
+ return 0;
}
#if 0
@@ -359,27 +387,43 @@ struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
}
+void bmInitMemType( struct bufmgr *bm,
+ unsigned mem_type,
+ unsigned long size )
+{
+ /* Nothing really to do. Could store and use to validate
+ * bmInitPool requests.
+ */
+}
+
+
/* The virtual pointer would go away in a true implementation.
*/
-void bmInitPool( struct bufmgr *bm,
- unsigned pool,
- unsigned long low_offset,
- unsigned long high_offset,
- void *virtual_base )
+int bmInitPool( struct bufmgr *bm,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ unsigned flags)
{
- if (pool > 0 || low_offset >= high_offset)
- return;
+ GLuint i;
+
+ if (bm->nr_pools >= BM_POOL_MAX)
+ return -1;
- _mesa_printf("bmInitPool %d %x..%x\n",
- pool, low_offset, high_offset);
+ i = bm->nr_pools++;
- bm->pool.size = high_offset - low_offset;
- bm->pool.heap = mmInit( low_offset, bm->pool.size );
- bm->pool.virtual = virtual_base;
+ _mesa_printf("bmInitPool %d low_offset %x sz %x\n",
+ i, low_offset, size);
- make_empty_list(&bm->pool.lru);
- make_empty_list(&bm->pool.freed);
+ bm->pool[i].heap = mmInit( low_offset, size );
+ bm->pool[i].virtual = low_virtual - low_offset;
+ bm->pool[i].flags = flags;
+
+ make_empty_list(&bm->pool[i].lru);
+ make_empty_list(&bm->pool[i].freed);
+
+ return i;
}
@@ -390,7 +434,10 @@ void bmGenBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
for (i = 0; i < n; i++) {
struct buffer *buf = calloc(sizeof(*buf), 1);
- buffers[i] = buf->id = ++bm->buf_nr;
+ buf->id = ++bm->buf_nr;
+ buf->alignment = 12; /* page-alignment to fit in with AGP swapping */
+ buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;
+ buffers[i] = buf->id;
_mesa_HashInsert(bm->hash, buffers[i], buf);
}
}
@@ -410,6 +457,54 @@ void bmDeleteBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
}
}
+
+
+
+/* Hook to inform faked buffer manager about fixed-position
+ * front,depth,back buffers. These may move to a fully memory-managed
+ * scheme, or they may continue to be managed as is. It will probably
+ * be useful to pass a fixed offset here one day.
+ */
+unsigned bmBufferStatic(struct bufmgr *bm,
+ unsigned buffer,
+ unsigned size,
+ unsigned pool )
+{
+ struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );
+
+ assert(!buf->block);
+ assert(bm->pool[pool].flags & BM_NO_EVICT);
+ assert(bm->pool[pool].flags & BM_NO_MOVE);
+
+ buf->size = size;
+ buf->flags = bm->pool[pool].flags;
+ buf->alignment = 0;
+ buf->block = alloc_from_pool(bm, pool, buf->size, buf->alignment);
+ if (!buf->block)
+ return 0;
+
+ buf->block->buf = buf;
+ return buf->block->mem->ofs;
+}
+
+
+#if 0
+/* How wise/useful is this?
+ */
+void bmBufferSetParams( struct bufmgr *bm,
+ unsigned buffer,
+ unsigned flags,
+ unsigned alignment )
+{
+ struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );
+ assert(!buf->block);
+ buf->flags = flags;
+ buf->alignment = alignment;
+}
+#endif
+
+
+
/* If buffer size changes, create new buffer in local memory.
* Otherwise update in place.
*/
@@ -424,7 +519,7 @@ void bmBufferData(struct bufmgr *bm,
_mesa_printf("bmBufferData %d sz 0x%x data: %p\n", buffer, size, data);
if (buf->block) {
- if ((buf->block->memType == BM_MEM_AGP && !bmTestFence(bm, buf->block->fence)) ||
+ if ((buf->block->mem_type != BM_MEM_LOCAL && !bmTestFence(bm, buf->block->fence)) ||
(buf->size && buf->size != size) ||
(data == NULL)) {
free_block(bm, buf->block);
@@ -435,7 +530,7 @@ void bmBufferData(struct bufmgr *bm,
buf->size = size;
if (data != NULL) {
- bmAllocMem(bm, buf, flags);
+ bmAllocMem(bm, buf);
memcpy(buf->block->virtual, data, size);
}
}
@@ -453,12 +548,13 @@ void bmBufferSubData(struct bufmgr *bm,
_mesa_printf("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buffer, offset, size);
if (buf->block == 0)
- bmAllocMem(bm, buf, 0);
+ bmAllocMem(bm, buf);
- if (buf->block->memType == BM_MEM_AGP)
+ if (buf->block->mem_type != BM_MEM_LOCAL)
bmFinishFence(bm, buf->block->fence);
- memcpy(buf->block->virtual + offset, data, size);
+ if (size)
+ memcpy(buf->block->virtual + offset, data, size);
}
@@ -478,11 +574,11 @@ void *bmMapBuffer( struct bufmgr *bm,
buf->mapped = 1;
if (buf->block == 0)
- bmAllocMem(bm, buf, 0);
+ bmAllocMem(bm, buf);
/* Finish any outstanding operations to/from this memory:
*/
- if (buf->block->memType == BM_MEM_AGP)
+ if (buf->block->mem_type != BM_MEM_LOCAL)
bmFinishFence(bm, buf->block->fence);
return buf->block->virtual;
@@ -525,32 +621,27 @@ struct bm_buffer_list *bmNewBufferList( void )
return list;
}
-void bmAddBuffer( struct bufmgr *bm,
- struct bm_buffer_list *list,
+void bmAddBuffer( struct bm_buffer_list *list,
unsigned buffer,
unsigned flags,
- unsigned *pool_return,
+ unsigned *memtype_return,
unsigned *offset_return )
{
assert(list->nr < BM_LIST_MAX);
- list->buffer[list->nr] = _mesa_HashLookup(bm->hash, buffer);
- list->offset_return[list->nr] = offset_return;
+ list->elem[list->nr].buffer = buffer;
+ list->elem[list->nr].memtype_return = memtype_return;
+ list->elem[list->nr].offset_return = offset_return;
- _mesa_printf("bmAddBuffer nr %d buf %d (%p/%d)\n", list->nr, buffer,
- list->buffer[list->nr], list->buffer[list->nr]->id);
+ _mesa_printf("bmAddBuffer nr %d buf %d\n",
+ list->nr, buffer);
list->nr++;
-
- if (pool_return)
- *pool_return = 0;
-
}
void bmFreeBufferList( struct bm_buffer_list *list )
{
- assert(!list->need_fence);
free(list);
}
@@ -568,45 +659,39 @@ int bmValidateBufferList( struct bufmgr *bm,
struct bm_buffer_list *list,
unsigned flags )
{
+ struct buffer *bufs[BM_LIST_MAX];
unsigned i;
- unsigned total = 0;
_mesa_printf("%s\n", __FUNCTION__);
if (list->nr > BM_LIST_MAX)
return 0;
- for (i = 0; i < list->nr; i++) {
- assert(!list->buffer[i]->mapped);
- assert(list->buffer[i]->block);
- total += list->buffer[i]->size;
- }
+ for (i = 0; i < list->nr; i++)
+ bufs[i] = _mesa_HashLookup(bm->hash, list->elem[i].buffer);
- /* Don't need to try allocation in this case:
- */
- if (total > bm->pool.size)
- return 0;
/* The old story: evict one texture after another until allocation
* succeeds. This is a pretty poor strategy but really hard to do
 * better without more infrastructure... Which is coming - hooray!
*/
- while (!move_buffers(bm, list->buffer, list->nr, BM_MEM_AGP, flags)) {
- if ((flags & BM_NO_EVICT) ||
- !evict_lru(bm))
+ while (!move_buffers(bm, bufs, list->nr, flags)) {
+ if (!delayed_free(bm) &&
+ !evict_lru(bm, flags))
return 0;
}
for (i = 0; i < list->nr; i++) {
_mesa_printf("%d: buf %d ofs 0x%x\n",
- i, list->buffer[i]->id,
- list->buffer[i]->block->mem->ofs);
+ i, bufs[i]->id, bufs[i]->block->mem->ofs);
- list->offset_return[i][0] = list->buffer[i]->block->mem->ofs;
+ list->elem[i].offset_return[0] = bufs[i]->block->mem->ofs;
+
+ if (list->elem[i].memtype_return)
+ list->elem[i].memtype_return[0] = bufs[i]->block->mem_type;
}
- list->need_fence = 1;
return 1;
}
@@ -621,26 +706,22 @@ int bmValidateBufferList( struct bufmgr *bm,
*/
void bmFenceBufferList( struct bufmgr *bm, struct bm_buffer_list *list )
{
- unsigned i;
_mesa_printf("%s (%d bufs)\n", __FUNCTION__, list->nr);
- assert(list->need_fence);
- list->need_fence = 0;
-
- if (!list->nr)
- return;
+ if (list->nr) {
+ unsigned i;
+ unsigned fence = bmSetFence( bm );
- bm->last_fence = bmSetFence( bm );
+ /* Move all buffers to head of resident list and set their fences
+ */
+ for (i = 0; i < list->nr; i++) {
+ struct buffer *buf = _mesa_HashLookup(bm->hash, list->elem[i].buffer);
- /* Move all buffers to head of resident list and set their fences
- */
- for (i = 0; i < list->nr; i++) {
- assert(list->buffer[i]->block->buf == list->buffer[i]);
- move_to_head(&bm->pool.lru, list->buffer[i]->block);
- list->buffer[i]->block->fence = bm->last_fence;
+ move_to_head(&buf->block->pool->lru, buf->block);
+ buf->block->fence = fence;
+ }
}
-
}
diff --git a/src/mesa/drivers/dri/i915/i915_context.c b/src/mesa/drivers/dri/i915/i915_context.c
index 3928fe1d60a..2add866429d 100644
--- a/src/mesa/drivers/dri/i915/i915_context.c
+++ b/src/mesa/drivers/dri/i915/i915_context.c
@@ -42,6 +42,7 @@
#include "i915_reg.h"
#include "bufmgr.h"
+#include "intel_regions.h"
/***************************************
* Mesa's Driver Functions
@@ -103,6 +104,7 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
struct dd_function_table functions;
i915ContextPtr i915 = (i915ContextPtr) CALLOC_STRUCT(i915_context);
intelContextPtr intel = &i915->intel;
+ intelScreenPrivate *intelScreen;
GLcontext *ctx = &intel->ctx;
if (!i915) return GL_FALSE;
@@ -126,12 +128,49 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
intel->bm = bm_fake_intel_Attach( intel );
bmInitPool(intel->bm,
- 0,
- 0, /* low offset */
- intel->intelScreen->tex.size, /* high offset */
- intel->intelScreen->tex.map); /* virtual base */
+ intel->intelScreen->tex.offset, /* low offset */
+ intel->intelScreen->tex.map, /* low virtual */
+ intel->intelScreen->tex.size,
+ BM_MEM_AGP);
+ intelScreen = intel->intelScreen;
+ /* These are still static, but create regions for them.
+ */
+ intel->front_region =
+ intel_region_create_static(intel,
+ BM_MEM_AGP,
+ intelScreen->front.offset,
+ intelScreen->front.map,
+ intelScreen->cpp,
+ intelScreen->front.pitch / intelScreen->cpp,
+ intelScreen->front.size / intelScreen->front.pitch);
+
+
+ intel->back_region =
+ intel_region_create_static(intel,
+ BM_MEM_AGP,
+ intelScreen->back.offset,
+ intelScreen->back.map,
+ intelScreen->cpp,
+ intelScreen->back.pitch / intelScreen->cpp,
+ intelScreen->back.size / intelScreen->back.pitch);
+
+ /* Still assuming front.cpp == depth.cpp
+ */
+ intel->depth_region =
+ intel_region_create_static(intel,
+ BM_MEM_AGP,
+ intelScreen->depth.offset,
+ intelScreen->depth.map,
+ intelScreen->cpp,
+ intelScreen->depth.pitch / intelScreen->cpp,
+ intelScreen->depth.size / intelScreen->depth.pitch);
+
+
+ /* Advertise the full hardware capabilities. The new memory
+ * manager should cope much better with overload situations:
+ */
ctx->Const.MaxTextureLevels = 11;
ctx->Const.Max3DTextureLevels = 8;
ctx->Const.MaxCubeTextureLevels = 11;
@@ -142,15 +181,14 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
* validates programs against these, and in any case one ARB
* instruction can translate to more than one HW instruction, so
* we'll still have to check and fallback each time.
- */
-
+ */
ctx->Const.FragmentProgram.MaxNativeTemps = I915_MAX_TEMPORARY;
ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
ctx->Const.FragmentProgram.MaxNativeParameters = I915_MAX_CONSTANT;
ctx->Const.FragmentProgram.MaxNativeAluInstructions = I915_MAX_ALU_INSN;
ctx->Const.FragmentProgram.MaxNativeTexInstructions = I915_MAX_TEX_INSN;
ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN +
- I915_MAX_TEX_INSN);
+ I915_MAX_TEX_INSN);
ctx->Const.FragmentProgram.MaxNativeTexIndirections = I915_MAX_TEX_INDIRECT;
ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0; /* I don't think we have one */
diff --git a/src/mesa/drivers/dri/i915/i915_context.h b/src/mesa/drivers/dri/i915/i915_context.h
index fa63d794f90..6d9c4ea6ce8 100644
--- a/src/mesa/drivers/dri/i915/i915_context.h
+++ b/src/mesa/drivers/dri/i915/i915_context.h
@@ -306,7 +306,6 @@ extern void i915_update_fog( GLcontext *ctx );
*/
extern void i915UpdateTextureState( intelContextPtr intel );
extern void i915InitTextureFuncs( struct dd_function_table *functions );
-extern intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj );
/*======================================================================
* i915_metaops.c
diff --git a/src/mesa/drivers/dri/i915/intel_context.h b/src/mesa/drivers/dri/i915/intel_context.h
index 638ada4d22e..63749461727 100644
--- a/src/mesa/drivers/dri/i915/intel_context.h
+++ b/src/mesa/drivers/dri/i915/intel_context.h
@@ -47,9 +47,9 @@
#define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8)
-typedef struct intel_context intelContext;
+struct intel_region;
+
typedef struct intel_context *intelContextPtr;
-typedef struct intel_texture_object *intelTextureObjectPtr;
typedef void (*intel_tri_func)(intelContextPtr, intelVertex *, intelVertex *,
intelVertex *);
@@ -214,6 +214,11 @@ struct intel_context
char *verts; /* points to tnl->clipspace.vertex_buf */
+ struct intel_region *front_region;
+ struct intel_region *back_region;
+ struct intel_region *depth_region;
+
+
/* Fallback rasterization functions
*/
intel_point_func draw_point;
diff --git a/src/mesa/drivers/dri/i915/intel_regions.c b/src/mesa/drivers/dri/i915/intel_regions.c
index 2709c068d0f..ff9dda6d936 100644
--- a/src/mesa/drivers/dri/i915/intel_regions.c
+++ b/src/mesa/drivers/dri/i915/intel_regions.c
@@ -102,6 +102,47 @@ void intel_region_release( struct intel_context *intel,
}
+struct intel_region *intel_region_create_static( struct intel_context *intel,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp,
+ GLuint pitch,
+ GLuint height )
+{
+ struct intel_region *region = calloc(sizeof(*region), 1);
+ GLuint size = cpp * pitch * height;
+ GLint pool;
+
+ _mesa_printf("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+ region->refcount = 1;
+
+ /* Recipe for creating a static buffer - create a static pool with
+ * the right offset and size, generate a buffer and use a special
+ * call to bind it to all of the memory in that pool.
+ */
+ pool = bmInitPool(intel->bm, offset, virtual, size,
+ (BM_MEM_AGP |
+ BM_NO_UPLOAD |
+ BM_NO_EVICT |
+ BM_NO_MOVE));
+ if (pool < 0) {
+ _mesa_printf("bmInitPool failed for static region\n");
+ exit(1);
+ }
+
+ bmGenBuffers(intel->bm, 1, &region->buffer);
+ bmBufferStatic(intel->bm, region->buffer, size, pool);
+
+ return region;
+}
+
+
+
static void _mesa_copy_rect( GLubyte *dst,
GLuint cpp,
GLuint dst_pitch,
@@ -243,11 +284,10 @@ void intel_region_copy( struct intel_context *intel,
assert(src->cpp == dst->cpp);
LOCK_HARDWARE(intel);
- bmAddBuffer(intel->bm, list, dst->buffer, BM_WRITE, NULL, &dst_offset);
- bmAddBuffer(intel->bm, list, src->buffer, BM_READ, NULL, &src_offset);
+ bmAddBuffer(list, dst->buffer, BM_WRITE, NULL, &dst_offset);
+ bmAddBuffer(list, src->buffer, BM_READ, NULL, &src_offset);
- /* What I really want to do is query if both buffers are already
- * uploaded:
+ /* Query if both buffers are already uploaded:
*/
if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT|BM_NO_UPLOAD)) {
intelEmitCopyBlitLocked(intel,
@@ -293,7 +333,7 @@ void intel_region_fill( struct intel_context *intel,
_mesa_printf("%s\n", __FUNCTION__);
LOCK_HARDWARE(intel);
- bmAddBuffer(intel->bm, list, dst->buffer, BM_WRITE, NULL, &dst_offset);
+ bmAddBuffer(list, dst->buffer, BM_WRITE, NULL, &dst_offset);
if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT)) {
intelEmitFillBlitLocked(intel,
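
The copy/fill paths above use validation as a residency test.
Condensed, the pattern looks like this (a sketch only; the blit and
software fallback are left as comments):

#include "bufmgr.h"
#include "intel_regions.h"

static void example_copy_or_fallback(struct intel_context *intel,
                                     struct intel_region *dst,
                                     struct intel_region *src)
{
   struct bm_buffer_list *list = bmNewBufferList();
   unsigned dst_offset, src_offset;

   bmAddBuffer(list, dst->buffer, BM_WRITE, NULL, &dst_offset);
   bmAddBuffer(list, src->buffer, BM_READ, NULL, &src_offset);

   /* With BM_NO_EVICT|BM_NO_UPLOAD nothing is moved: validation
    * succeeds only if both buffers are already resident, so the
    * hardware blit path is taken only in that case.
    */
   if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT | BM_NO_UPLOAD)) {
      /* blit using dst_offset/src_offset, then bmFenceBufferList() */
   }
   else {
      /* software fallback, e.g. via intel_region_map() */
   }

   bmFreeBufferList(list);
}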
diff --git a/src/mesa/drivers/dri/i915/intel_regions.h b/src/mesa/drivers/dri/i915/intel_regions.h
index 557facb9ab4..85fc238bace 100644
--- a/src/mesa/drivers/dri/i915/intel_regions.h
+++ b/src/mesa/drivers/dri/i915/intel_regions.h
@@ -61,6 +61,14 @@ void intel_region_release(struct intel_context *intel,
struct intel_region *ib );
+struct intel_region *intel_region_create_static( struct intel_context *intel,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp,
+ GLuint pitch,
+ GLuint height );
+
/* Map/unmap regions. This is refcounted also:
*/
GLubyte *intel_region_map(struct intel_context *intel,
diff --git a/src/mesa/drivers/dri/i915/intel_screen.h b/src/mesa/drivers/dri/i915/intel_screen.h
index ebecffa4cb1..75e147100d9 100644
--- a/src/mesa/drivers/dri/i915/intel_screen.h
+++ b/src/mesa/drivers/dri/i915/intel_screen.h
@@ -32,6 +32,9 @@
#include "dri_util.h"
#include "xmlconfig.h"
+/* XXX: change name or eliminate to avoid conflict with "struct
+ * intel_region"!!!
+ */
typedef struct {
drm_handle_t handle;
drmSize size; /* region size in bytes */
diff --git a/src/mesa/drivers/dri/i915/intel_tex_validate.c b/src/mesa/drivers/dri/i915/intel_tex_validate.c
index 6c13e277204..ff2670f66f5 100644
--- a/src/mesa/drivers/dri/i915/intel_tex_validate.c
+++ b/src/mesa/drivers/dri/i915/intel_tex_validate.c
@@ -208,8 +208,7 @@ void intel_add_texoffset_fixup( struct intel_context *intel,
f->delta = (intel->intelScreen->tex.offset +
intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel));
#else
- *ptr = (intelObj->textureOffset +
- intel->intelScreen->tex.offset +
+ *ptr = (intelObj->textureOffset +
intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel));
#endif
}
@@ -270,8 +269,7 @@ GLboolean intel_validate_buffers( struct intel_context *intel )
ok = intel_finalize_mipmap_tree( intel, i );
if (ok) {
- bmAddBuffer(intel->bm,
- intel->buffer_list,
+ bmAddBuffer(intel->buffer_list,
intelObj->mt->region->buffer,
BM_READ,
NULL,
@@ -280,7 +278,7 @@ GLboolean intel_validate_buffers( struct intel_context *intel )
}
}
- ok = bmValidateBufferList(intel->bm, intel->buffer_list, 0);
+ ok = bmValidateBufferList(intel->bm, intel->buffer_list, BM_MEM_AGP);
assert(ok);
return ok;
}