diff options
author:    Chris Wilson <chris@chris-wilson.co.uk>  2012-01-04 12:18:20 +0000
committer: Chris Wilson <chris@chris-wilson.co.uk>  2012-01-04 12:51:00 +0000
commit:    0ed758cd2176ee4f34e03d05d05130d52d75e577 (patch)
tree:      66d42195c5f1de0c449e0817d518588e16a5a5e3
parent:    3449f1cbe184a618e662d2a662167f05362b82e5 (diff)
sna: Limit batch to a single page on 865g
Verified on real hw, this undocumented (at least in the bspec before me)
bug truly exists.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
 src/sna/kgem.c              | 15
 src/sna/kgem.h              |  3
 src/sna/sna_render_inline.h |  2
 3 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0e83dfb6..6db2e928 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -517,6 +517,11 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->wedged = drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE) == -EIO;
 	kgem->wedged |= DBG_NO_HW;
 
+	kgem->max_batch_size = ARRAY_SIZE(kgem->batch);
+	if (gen == 22)
+		/* 865g cannot handle a batch spanning multiple pages */
+		kgem->max_batch_size = 4096;
+
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
 
 	list_init(&kgem->partial);
@@ -1121,7 +1126,7 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
 	assert(!kgem_busy(kgem, handle));
 
 	/* If there is no surface data, just upload the batch */
-	if (kgem->surface == ARRAY_SIZE(kgem->batch))
+	if (kgem->surface == kgem->max_batch_size)
 		return gem_write(kgem->fd, handle,
 				 0, sizeof(uint32_t)*kgem->nbatch,
 				 kgem->batch);
@@ -1184,7 +1189,7 @@ void kgem_reset(struct kgem *kgem)
 	kgem->aperture = 0;
 	kgem->aperture_fenced = 0;
 	kgem->nbatch = 0;
-	kgem->surface = ARRAY_SIZE(kgem->batch);
+	kgem->surface = kgem->max_batch_size;
 	kgem->mode = KGEM_NONE;
 	kgem->flush = 0;
 
@@ -1198,7 +1203,7 @@ static int compact_batch_surface(struct kgem *kgem)
 	int size, shrink, n;
 
 	/* See if we can pack the contents into one or two pages */
-	size = ARRAY_SIZE(kgem->batch) - kgem->surface + kgem->nbatch;
+	size = kgem->max_batch_size - kgem->surface + kgem->nbatch;
 	if (size > 2048)
 		return sizeof(kgem->batch);
 	else if (size > 1024)
@@ -1237,7 +1242,7 @@ void _kgem_submit(struct kgem *kgem)
 	     kgem->mode, kgem->ring, batch_end,
 	     kgem->nbatch, kgem->surface,
 	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
-	assert(kgem->nbatch <= ARRAY_SIZE(kgem->batch));
+	assert(kgem->nbatch <= kgem->max_batch_size);
 	assert(kgem->nbatch <= kgem->surface);
 	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
@@ -1247,7 +1252,7 @@ void _kgem_submit(struct kgem *kgem)
 #endif
 
 	rq = kgem->next_request;
-	if (kgem->surface != ARRAY_SIZE(kgem->batch))
+	if (kgem->surface != kgem->max_batch_size)
 		size = compact_batch_surface(kgem);
 	else
 		size = kgem->nbatch * sizeof(kgem->batch[0]);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 7f2ebaf1..6aeb1fe2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -116,6 +116,7 @@ struct kgem {
 	uint16_t nexec;
 	uint16_t nreloc;
 	uint16_t nfence;
+	uint16_t max_batch_size;
 	uint16_t vma_count;
 
 	uint32_t flush:1;
@@ -146,7 +147,7 @@ struct kgem {
 #define KGEM_EXEC_RESERVED 1
 
 #define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
-#define KGEM_BATCH_SIZE(K) (ARRAY_SIZE((K)->batch)-KGEM_BATCH_RESERVED)
+#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
 #define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
 #define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 03691237..33fb166e 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -47,7 +47,7 @@ static inline float pack_2s(int16_t x, int16_t y)
 
 static inline int batch_space(struct sna *sna)
 {
-	return KGEM_BATCH_SIZE(&sna->kgem) - sna->kgem.nbatch;
+	return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
 }
 
 static inline void batch_emit(struct sna *sna, uint32_t dword)